problem_id (stringlengths 18-22) | source (stringclasses, 1 value) | task_type (stringclasses, 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-10.2k) | golden_diff (stringlengths 151-4.94k) | verification_info (stringlengths 582-21k) | num_tokens (int64, 271-2.05k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---|
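
The rows below can also be pulled programmatically instead of being read from this flattened preview. A minimal sketch using the Hugging Face `datasets` library; the repo id is taken from the `source` column above, and the `train` split name is an assumption:

```python
# Sketch: load the dataset behind this preview (split name assumed).
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]
print(row["problem_id"], row["num_tokens"], row["num_tokens_diff"])
print(row["golden_diff"][:200])  # the reference patch as unified-diff text
```
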
gh_patches_debug_9074 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-630 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tests broken on Python < 3.8
```python
_______ ERROR collecting tests_async/transport/test_aiohttp_requests.py ________
ImportError while importing test module '/home/tseaver/projects/agendaless/Google/src/google-auth/tests_async/transport/test_aiohttp_requests.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/compat.py:14: in <module>
from unittest import IsolatedAsyncioTestCase, skipIf
E ImportError: cannot import name 'IsolatedAsyncioTestCase'
During handling of the above exception, another exception occurred:
/opt/Python-3.6.10/lib/python3.6/importlib/__init__.py:126: in import_module
return _bootstrap._gcd_import(name[level:], package, level)
tests_async/transport/test_aiohttp_requests.py:16: in <module>
from aioresponses import aioresponses, core
.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/__init__.py:2: in <module>
from .core import CallbackResult, aioresponses
.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/core.py:23: in <module>
from .compat import (
.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/compat.py:29: in <module>
from asynctest import fail_on, skipIf
E ModuleNotFoundError: No module named 'asynctest'
```
Due to https://github.com/pnuckowski/aioresponses/issues/172
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `noxfile.py`
Content:
```
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import nox
16
17 TEST_DEPENDENCIES = [
18 "flask",
19 "freezegun",
20 "mock",
21 "oauth2client",
22 "pyopenssl",
23 "pytest",
24 "pytest-cov",
25 "pytest-localserver",
26 "requests",
27 "urllib3",
28 "cryptography",
29 "responses",
30 "grpcio",
31 ]
32
33 ASYNC_DEPENDENCIES = ["pytest-asyncio", "aioresponses"]
34
35 BLACK_VERSION = "black==19.3b0"
36 BLACK_PATHS = [
37 "google",
38 "tests",
39 "tests_async",
40 "noxfile.py",
41 "setup.py",
42 "docs/conf.py",
43 ]
44
45
46 @nox.session(python="3.7")
47 def lint(session):
48 session.install("flake8", "flake8-import-order", "docutils", BLACK_VERSION)
49 session.install(".")
50 session.run("black", "--check", *BLACK_PATHS)
51 session.run(
52 "flake8",
53 "--import-order-style=google",
54 "--application-import-names=google,tests,system_tests",
55 "google",
56 "tests",
57 "tests_async",
58 )
59 session.run(
60 "python", "setup.py", "check", "--metadata", "--restructuredtext", "--strict"
61 )
62
63
64 @nox.session(python="3.6")
65 def blacken(session):
66 """Run black.
67
68 Format code to uniform standard.
69
70 This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
71 That run uses an image that doesn't have 3.6 installed. Before updating this
72 check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
73 """
74 session.install(BLACK_VERSION)
75 session.run("black", *BLACK_PATHS)
76
77
78 @nox.session(python=["3.6", "3.7", "3.8"])
79 def unit(session):
80 session.install(*TEST_DEPENDENCIES)
81 session.install(*(ASYNC_DEPENDENCIES))
82 session.install(".")
83 session.run(
84 "pytest",
85 "--cov=google.auth",
86 "--cov=google.oauth2",
87 "--cov=tests",
88 "tests",
89 "tests_async",
90 )
91
92
93 @nox.session(python=["2.7", "3.5"])
94 def unit_prev_versions(session):
95 session.install(*TEST_DEPENDENCIES)
96 session.install(".")
97 session.run(
98 "pytest", "--cov=google.auth", "--cov=google.oauth2", "--cov=tests", "tests"
99 )
100
101
102 @nox.session(python="3.7")
103 def cover(session):
104 session.install(*TEST_DEPENDENCIES)
105 session.install(*(ASYNC_DEPENDENCIES))
106 session.install(".")
107 session.run(
108 "pytest",
109 "--cov=google.auth",
110 "--cov=google.oauth2",
111 "--cov=tests",
112 "--cov=tests_async",
113 "--cov-report=",
114 "tests",
115 "tests_async",
116 )
117 session.run("coverage", "report", "--show-missing", "--fail-under=100")
118
119
120 @nox.session(python="3.7")
121 def docgen(session):
122 session.env["SPHINX_APIDOC_OPTIONS"] = "members,inherited-members,show-inheritance"
123 session.install(*TEST_DEPENDENCIES)
124 session.install("sphinx")
125 session.install(".")
126 session.run("rm", "-r", "docs/reference")
127 session.run(
128 "sphinx-apidoc",
129 "--output-dir",
130 "docs/reference",
131 "--separate",
132 "--module-first",
133 "google",
134 )
135
136
137 @nox.session(python="3.7")
138 def docs(session):
139 session.install("sphinx", "-r", "docs/requirements-docs.txt")
140 session.install(".")
141 session.run("make", "-C", "docs", "html")
142
143
144 @nox.session(python="pypy")
145 def pypy(session):
146 session.install(*TEST_DEPENDENCIES)
147 session.install(".")
148 session.run(
149 "pytest",
150 "--cov=google.auth",
151 "--cov=google.oauth2",
152 "--cov=tests",
153 "tests",
154 "tests_async",
155 )
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -30,7 +30,7 @@
"grpcio",
]
-ASYNC_DEPENDENCIES = ["pytest-asyncio", "aioresponses"]
+ASYNC_DEPENDENCIES = ["pytest-asyncio", "aioresponses", "asynctest"]
BLACK_VERSION = "black==19.3b0"
BLACK_PATHS = [
@@ -144,6 +144,7 @@
@nox.session(python="pypy")
def pypy(session):
session.install(*TEST_DEPENDENCIES)
+ session.install(*ASYNC_DEPENDENCIES)
session.install(".")
session.run(
"pytest",
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -30,7 +30,7 @@\n \"grpcio\",\n ]\n \n-ASYNC_DEPENDENCIES = [\"pytest-asyncio\", \"aioresponses\"]\n+ASYNC_DEPENDENCIES = [\"pytest-asyncio\", \"aioresponses\", \"asynctest\"]\n \n BLACK_VERSION = \"black==19.3b0\"\n BLACK_PATHS = [\n@@ -144,6 +144,7 @@\n @nox.session(python=\"pypy\")\n def pypy(session):\n session.install(*TEST_DEPENDENCIES)\n+ session.install(*ASYNC_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\",\n", "issue": "Tests broken on Python < 3.8\n```python\r\n_______ ERROR collecting tests_async/transport/test_aiohttp_requests.py ________\r\nImportError while importing test module '/home/tseaver/projects/agendaless/Google/src/google-auth/tests_async/transport/test_aiohttp_requests.py'.\r\nHint: make sure your test modules/packages have valid Python names.\r\nTraceback:\r\n.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/compat.py:14: in <module>\r\n from unittest import IsolatedAsyncioTestCase, skipIf\r\nE ImportError: cannot import name 'IsolatedAsyncioTestCase'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n/opt/Python-3.6.10/lib/python3.6/importlib/__init__.py:126: in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\ntests_async/transport/test_aiohttp_requests.py:16: in <module>\r\n from aioresponses import aioresponses, core\r\n.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/__init__.py:2: in <module>\r\n from .core import CallbackResult, aioresponses\r\n.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/core.py:23: in <module>\r\n from .compat import (\r\n.nox/unit-3-6/lib/python3.6/site-packages/aioresponses/compat.py:29: in <module>\r\n from asynctest import fail_on, skipIf\r\nE ModuleNotFoundError: No module named 'asynctest'\r\n```\r\n\r\nDue to https://github.com/pnuckowski/aioresponses/issues/172\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport nox\n\nTEST_DEPENDENCIES = [\n \"flask\",\n \"freezegun\",\n \"mock\",\n \"oauth2client\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-localserver\",\n \"requests\",\n \"urllib3\",\n \"cryptography\",\n \"responses\",\n \"grpcio\",\n]\n\nASYNC_DEPENDENCIES = [\"pytest-asyncio\", \"aioresponses\"]\n\nBLACK_VERSION = \"black==19.3b0\"\nBLACK_PATHS = [\n \"google\",\n \"tests\",\n \"tests_async\",\n \"noxfile.py\",\n \"setup.py\",\n \"docs/conf.py\",\n]\n\n\[email protected](python=\"3.7\")\ndef lint(session):\n session.install(\"flake8\", \"flake8-import-order\", \"docutils\", BLACK_VERSION)\n session.install(\".\")\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n session.run(\n \"flake8\",\n \"--import-order-style=google\",\n \"--application-import-names=google,tests,system_tests\",\n \"google\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\n \"python\", \"setup.py\", \"check\", \"--metadata\", \"--restructuredtext\", \"--strict\"\n 
)\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\"])\ndef unit(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n\n\[email protected](python=[\"2.7\", \"3.5\"])\ndef unit_prev_versions(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\", \"--cov=google.auth\", \"--cov=google.oauth2\", \"--cov=tests\", \"tests\"\n )\n\n\[email protected](python=\"3.7\")\ndef cover(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"--cov=tests_async\",\n \"--cov-report=\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n\n\[email protected](python=\"3.7\")\ndef docgen(session):\n session.env[\"SPHINX_APIDOC_OPTIONS\"] = \"members,inherited-members,show-inheritance\"\n session.install(*TEST_DEPENDENCIES)\n session.install(\"sphinx\")\n session.install(\".\")\n session.run(\"rm\", \"-r\", \"docs/reference\")\n session.run(\n \"sphinx-apidoc\",\n \"--output-dir\",\n \"docs/reference\",\n \"--separate\",\n \"--module-first\",\n \"google\",\n )\n\n\[email protected](python=\"3.7\")\ndef docs(session):\n session.install(\"sphinx\", \"-r\", \"docs/requirements-docs.txt\")\n session.install(\".\")\n session.run(\"make\", \"-C\", \"docs\", \"html\")\n\n\[email protected](python=\"pypy\")\ndef pypy(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n", "path": "noxfile.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport nox\n\nTEST_DEPENDENCIES = [\n \"flask\",\n \"freezegun\",\n \"mock\",\n \"oauth2client\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-localserver\",\n \"requests\",\n \"urllib3\",\n \"cryptography\",\n \"responses\",\n \"grpcio\",\n]\n\nASYNC_DEPENDENCIES = [\"pytest-asyncio\", \"aioresponses\", \"asynctest\"]\n\nBLACK_VERSION = \"black==19.3b0\"\nBLACK_PATHS = [\n \"google\",\n \"tests\",\n \"tests_async\",\n \"noxfile.py\",\n \"setup.py\",\n \"docs/conf.py\",\n]\n\n\[email protected](python=\"3.7\")\ndef lint(session):\n session.install(\"flake8\", 
\"flake8-import-order\", \"docutils\", BLACK_VERSION)\n session.install(\".\")\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n session.run(\n \"flake8\",\n \"--import-order-style=google\",\n \"--application-import-names=google,tests,system_tests\",\n \"google\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\n \"python\", \"setup.py\", \"check\", \"--metadata\", \"--restructuredtext\", \"--strict\"\n )\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\"])\ndef unit(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n\n\[email protected](python=[\"2.7\", \"3.5\"])\ndef unit_prev_versions(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\", \"--cov=google.auth\", \"--cov=google.oauth2\", \"--cov=tests\", \"tests\"\n )\n\n\[email protected](python=\"3.7\")\ndef cover(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"--cov=tests_async\",\n \"--cov-report=\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n\n\[email protected](python=\"3.7\")\ndef docgen(session):\n session.env[\"SPHINX_APIDOC_OPTIONS\"] = \"members,inherited-members,show-inheritance\"\n session.install(*TEST_DEPENDENCIES)\n session.install(\"sphinx\")\n session.install(\".\")\n session.run(\"rm\", \"-r\", \"docs/reference\")\n session.run(\n \"sphinx-apidoc\",\n \"--output-dir\",\n \"docs/reference\",\n \"--separate\",\n \"--module-first\",\n \"google\",\n )\n\n\[email protected](python=\"3.7\")\ndef docs(session):\n session.install(\"sphinx\", \"-r\", \"docs/requirements-docs.txt\")\n session.install(\".\")\n session.run(\"make\", \"-C\", \"docs\", \"html\")\n\n\[email protected](python=\"pypy\")\ndef pypy(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*ASYNC_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n", "path": "noxfile.py"}]} | 2,001 | 164 |
gh_patches_debug_25585 | rasdani/github-patches | git_diff | talonhub__community-758 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve phrase history mechanism
instead of this:
https://github.com/knausj85/knausj_talon/blob/3e57e0165257cf07b0e21880d44a91e79cb3ef16/code/history.py#L19-L29
consider something like this:
```py
def on_phrase(j):
global history
words = j.get('text')
if words:
text = ' '.join(words)
history.append(text)
history = history[-setting_command_history_size.get() :]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `code/history.py`
Content:
```
1 from talon import imgui, Module, speech_system, actions, app
2
3 # We keep command_history_size lines of history, but by default display only
4 # command_history_display of them.
5 mod = Module()
6 setting_command_history_size = mod.setting("command_history_size", int, default=50)
7 setting_command_history_display = mod.setting(
8 "command_history_display", int, default=10
9 )
10
11 hist_more = False
12 history = []
13
14
15 def parse_phrase(word_list):
16 return " ".join(word.split("\\")[0] for word in word_list)
17
18
19 def on_phrase(j):
20 global history
21
22 try:
23 val = parse_phrase(getattr(j["parsed"], "_unmapped", j["phrase"]))
24 except:
25 val = parse_phrase(j["phrase"])
26
27 if val != "":
28 history.append(val)
29 history = history[-setting_command_history_size.get() :]
30
31
32 # todo: dynamic rect?
33 @imgui.open(y=0)
34 def gui(gui: imgui.GUI):
35 global history
36 gui.text("Command History")
37 gui.line()
38 text = (
39 history[:] if hist_more else history[-setting_command_history_display.get() :]
40 )
41 for line in text:
42 gui.text(line)
43
44 gui.spacer()
45 if gui.button("Command history close"):
46 actions.user.history_disable()
47
48
49 speech_system.register("phrase", on_phrase)
50
51
52 @mod.action_class
53 class Actions:
54 def history_toggle():
55 """Toggles viewing the history"""
56 if gui.showing:
57 gui.hide()
58 else:
59 gui.show()
60
61 def history_enable():
62 """Enables the history"""
63 gui.show()
64
65 def history_disable():
66 """Disables the history"""
67 gui.hide()
68
69 def history_clear():
70 """Clear the history"""
71 global history
72 history = []
73
74 def history_more():
75 """Show more history"""
76 global hist_more
77 hist_more = True
78
79 def history_less():
80 """Show less history"""
81 global hist_more
82 hist_more = False
83
84 def history_get(number: int):
85 """returns the history entry at the specified index"""
86 num = (0 - number) - 1
87 return history[num]
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/code/history.py b/code/history.py
--- a/code/history.py
+++ b/code/history.py
@@ -1,3 +1,4 @@
+from typing import Optional
from talon import imgui, Module, speech_system, actions, app
# We keep command_history_size lines of history, but by default display only
@@ -12,20 +13,15 @@
history = []
-def parse_phrase(word_list):
- return " ".join(word.split("\\")[0] for word in word_list)
-
-
def on_phrase(j):
global history
- try:
- val = parse_phrase(getattr(j["parsed"], "_unmapped", j["phrase"]))
- except:
- val = parse_phrase(j["phrase"])
+ words = j.get('text')
+
+ text = actions.user.history_transform_phrase_text(words)
- if val != "":
- history.append(val)
+ if text is not None:
+ history.append(text)
history = history[-setting_command_history_size.get() :]
@@ -85,3 +81,11 @@
"""returns the history entry at the specified index"""
num = (0 - number) - 1
return history[num]
+
+ def history_transform_phrase_text(words: list[str]) -> Optional[str]:
+ """Transforms phrase text for presentation in history. Return `None` to omit from history"""
+
+ if not actions.speech.enabled():
+ return None
+
+ return ' '.join(words) if words else None
\ No newline at end of file
| {"golden_diff": "diff --git a/code/history.py b/code/history.py\n--- a/code/history.py\n+++ b/code/history.py\n@@ -1,3 +1,4 @@\n+from typing import Optional\n from talon import imgui, Module, speech_system, actions, app\n \n # We keep command_history_size lines of history, but by default display only\n@@ -12,20 +13,15 @@\n history = []\n \n \n-def parse_phrase(word_list):\n- return \" \".join(word.split(\"\\\\\")[0] for word in word_list)\n-\n-\n def on_phrase(j):\n global history\n \n- try:\n- val = parse_phrase(getattr(j[\"parsed\"], \"_unmapped\", j[\"phrase\"]))\n- except:\n- val = parse_phrase(j[\"phrase\"])\n+ words = j.get('text')\n+\n+ text = actions.user.history_transform_phrase_text(words)\n \n- if val != \"\":\n- history.append(val)\n+ if text is not None:\n+ history.append(text)\n history = history[-setting_command_history_size.get() :]\n \n \n@@ -85,3 +81,11 @@\n \"\"\"returns the history entry at the specified index\"\"\"\n num = (0 - number) - 1\n return history[num]\n+\n+ def history_transform_phrase_text(words: list[str]) -> Optional[str]:\n+ \"\"\"Transforms phrase text for presentation in history. Return `None` to omit from history\"\"\"\n+\n+ if not actions.speech.enabled():\n+ return None\n+\n+ return ' '.join(words) if words else None\n\\ No newline at end of file\n", "issue": "Improve phrase history mechanism\ninstead of this:\r\n\r\nhttps://github.com/knausj85/knausj_talon/blob/3e57e0165257cf07b0e21880d44a91e79cb3ef16/code/history.py#L19-L29\r\n\r\nconsider something like this:\r\n\r\n```py\r\ndef on_phrase(j):\r\n global history\r\n words = j.get('text')\r\n if words:\r\n text = ' '.join(words)\r\n history.append(text)\r\n history = history[-setting_command_history_size.get() :]\r\n```\n", "before_files": [{"content": "from talon import imgui, Module, speech_system, actions, app\n\n# We keep command_history_size lines of history, but by default display only\n# command_history_display of them.\nmod = Module()\nsetting_command_history_size = mod.setting(\"command_history_size\", int, default=50)\nsetting_command_history_display = mod.setting(\n \"command_history_display\", int, default=10\n)\n\nhist_more = False\nhistory = []\n\n\ndef parse_phrase(word_list):\n return \" \".join(word.split(\"\\\\\")[0] for word in word_list)\n\n\ndef on_phrase(j):\n global history\n\n try:\n val = parse_phrase(getattr(j[\"parsed\"], \"_unmapped\", j[\"phrase\"]))\n except:\n val = parse_phrase(j[\"phrase\"])\n\n if val != \"\":\n history.append(val)\n history = history[-setting_command_history_size.get() :]\n\n\n# todo: dynamic rect?\[email protected](y=0)\ndef gui(gui: imgui.GUI):\n global history\n gui.text(\"Command History\")\n gui.line()\n text = (\n history[:] if hist_more else history[-setting_command_history_display.get() :]\n )\n for line in text:\n gui.text(line)\n\n gui.spacer()\n if gui.button(\"Command history close\"):\n actions.user.history_disable()\n\n\nspeech_system.register(\"phrase\", on_phrase)\n\n\[email protected]_class\nclass Actions:\n def history_toggle():\n \"\"\"Toggles viewing the history\"\"\"\n if gui.showing:\n gui.hide()\n else:\n gui.show()\n\n def history_enable():\n \"\"\"Enables the history\"\"\"\n gui.show()\n\n def history_disable():\n \"\"\"Disables the history\"\"\"\n gui.hide()\n\n def history_clear():\n \"\"\"Clear the history\"\"\"\n global history\n history = []\n\n def history_more():\n \"\"\"Show more history\"\"\"\n global hist_more\n hist_more = True\n\n def history_less():\n \"\"\"Show less history\"\"\"\n global hist_more\n 
hist_more = False\n\n def history_get(number: int):\n \"\"\"returns the history entry at the specified index\"\"\"\n num = (0 - number) - 1\n return history[num]\n", "path": "code/history.py"}], "after_files": [{"content": "from typing import Optional\nfrom talon import imgui, Module, speech_system, actions, app\n\n# We keep command_history_size lines of history, but by default display only\n# command_history_display of them.\nmod = Module()\nsetting_command_history_size = mod.setting(\"command_history_size\", int, default=50)\nsetting_command_history_display = mod.setting(\n \"command_history_display\", int, default=10\n)\n\nhist_more = False\nhistory = []\n\n\ndef on_phrase(j):\n global history\n\n words = j.get('text')\n\n text = actions.user.history_transform_phrase_text(words)\n\n if text is not None:\n history.append(text)\n history = history[-setting_command_history_size.get() :]\n\n\n# todo: dynamic rect?\[email protected](y=0)\ndef gui(gui: imgui.GUI):\n global history\n gui.text(\"Command History\")\n gui.line()\n text = (\n history[:] if hist_more else history[-setting_command_history_display.get() :]\n )\n for line in text:\n gui.text(line)\n\n gui.spacer()\n if gui.button(\"Command history close\"):\n actions.user.history_disable()\n\n\nspeech_system.register(\"phrase\", on_phrase)\n\n\[email protected]_class\nclass Actions:\n def history_toggle():\n \"\"\"Toggles viewing the history\"\"\"\n if gui.showing:\n gui.hide()\n else:\n gui.show()\n\n def history_enable():\n \"\"\"Enables the history\"\"\"\n gui.show()\n\n def history_disable():\n \"\"\"Disables the history\"\"\"\n gui.hide()\n\n def history_clear():\n \"\"\"Clear the history\"\"\"\n global history\n history = []\n\n def history_more():\n \"\"\"Show more history\"\"\"\n global hist_more\n hist_more = True\n\n def history_less():\n \"\"\"Show less history\"\"\"\n global hist_more\n hist_more = False\n\n def history_get(number: int):\n \"\"\"returns the history entry at the specified index\"\"\"\n num = (0 - number) - 1\n return history[num]\n\n def history_transform_phrase_text(words: list[str]) -> Optional[str]:\n \"\"\"Transforms phrase text for presentation in history. Return `None` to omit from history\"\"\"\n\n if not actions.speech.enabled():\n return None\n\n return ' '.join(words) if words else None", "path": "code/history.py"}]} | 1,026 | 341 |
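
Note on the row above: the patch turns history formatting into an overridable Talon action. A hypothetical user override is sketched below; the action name and signature come from the golden diff, the `Context`/`action_class` plumbing follows the usual Talon override pattern, and the lower-casing is purely illustrative:

```python
# Hypothetical override of the action added by the golden diff above.
from typing import Optional

from talon import Context

ctx = Context()

@ctx.action_class("user")
class UserActions:
    def history_transform_phrase_text(words: list[str]) -> Optional[str]:
        if not words:
            return None  # returning None omits the phrase from history
        return " ".join(words).lower()  # illustrative: normalize case
```
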
gh_patches_debug_25169 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-617 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement adding a new empty table
## Problem
<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->
Users may want to create an empty table.
## Proposed solution
<!-- A clear and concise description of your proposed solution or feature. -->
The ["Basic Table Operations" design spec](https://wiki.mathesar.org/design/specs/table-operations) has a solution for this, which we need to implement on the frontend. We will be implementing a different design than the one in the spec, which will need to be improvised.
This issue involves the following portions of the spec:
- _User Experience_:
- User adds a new table
- User edits a new table name
- User adds an empty table
- _Interactions_:
- Sequential Table Names
Please note that we'll need to generate and display a default name of the table that the user can use. Following the logic in #449 is recommended.
## Additional context
<!-- Add any other context or screenshots about the feature request here.-->
- Backend work: #184
- Design issue: #185
- #449 is related.
Logs of conversation on Matrix from @pavish:
> For adding an empty table, instead of showing Empty table option within the tab, I think it might be better to show it on the top.
>
> The Add new table button can be changed to a dropdown with two options. 1. Empty table, 2. Import data.
>
> On clicking empty table, it would just send a create table request and create the table. Then the user can rename it if they want.
>
> This will have clean separation of concerns. Because Import data, involves importing from csv, remote file import and then copy/paste from spreadsheet..
>
> Empty table, would just create a new empty table.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/api/viewsets/tables.py`
Content:
```
1 from django_filters import rest_framework as filters
2 from psycopg2.errors import CheckViolation, DuplicateTable, InvalidTextRepresentation
3 from rest_framework import status, viewsets
4 from rest_framework.decorators import action
5 from rest_framework.exceptions import ValidationError, APIException
6 from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
7 from rest_framework.response import Response
8 from sqlalchemy.exc import ProgrammingError, DataError, IntegrityError
9
10 from db.types.alteration import UnsupportedTypeException
11 from mathesar.api.filters import TableFilter
12 from mathesar.api.pagination import DefaultLimitOffsetPagination
13 from mathesar.api.serializers.tables import TableSerializer, TablePreviewSerializer
14 from mathesar.models import Table
15 from mathesar.utils.tables import (
16 get_table_column_types, create_table_from_datafile, create_empty_table,
17 gen_table_name
18 )
19
20
21 class TableViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):
22 serializer_class = TableSerializer
23 pagination_class = DefaultLimitOffsetPagination
24 filter_backends = (filters.DjangoFilterBackend,)
25 filterset_class = TableFilter
26
27 def get_queryset(self):
28 return Table.objects.all().order_by('-created_at')
29
30 def create(self, request):
31 serializer = TableSerializer(data=request.data, context={'request': request})
32 serializer.is_valid(raise_exception=True)
33
34 if not serializer.validated_data['name']:
35 name = gen_table_name(
36 serializer.validated_data['schema'],
37 serializer.validated_data['data_files'],
38 )
39 else:
40 name = serializer.validated_data['name']
41
42 try:
43 if serializer.validated_data['data_files']:
44 table = create_table_from_datafile(
45 serializer.validated_data['data_files'],
46 name,
47 serializer.validated_data['schema'],
48 )
49 else:
50 table = create_empty_table(
51 name,
52 serializer.validated_data['schema']
53 )
54 except ProgrammingError as e:
55 if type(e.orig) == DuplicateTable:
56 raise ValidationError(
57 f"Relation {request.data['name']} already exists in schema {request.data['schema']}"
58 )
59 else:
60 raise APIException(e)
61
62 serializer = TableSerializer(table, context={'request': request})
63 return Response(serializer.data, status=status.HTTP_201_CREATED)
64
65 def partial_update(self, request, pk=None):
66 serializer = TableSerializer(
67 data=request.data, context={'request': request}, partial=True
68 )
69 serializer.is_valid(raise_exception=True)
70 table = self.get_object()
71
72 # Save the fields that are stored in the model.
73 present_model_fields = []
74 for model_field in table.MODEL_FIELDS:
75 if model_field in serializer.validated_data:
76 setattr(table, model_field, serializer.validated_data[model_field])
77 present_model_fields.append(model_field)
78 table.save(update_fields=present_model_fields)
79 for key in present_model_fields:
80 del serializer.validated_data[key]
81
82 # Save the fields that are stored in the underlying DB.
83 try:
84 table.update_sa_table(serializer.validated_data)
85 except ValueError as e:
86 raise ValidationError(e)
87
88 # Reload the table to avoid cached properties
89 table = self.get_object()
90 serializer = TableSerializer(table, context={'request': request})
91 return Response(serializer.data)
92
93 def destroy(self, request, pk=None):
94 table = self.get_object()
95 table.delete_sa_table()
96 table.delete()
97 return Response(status=status.HTTP_204_NO_CONTENT)
98
99 @action(methods=['get'], detail=True)
100 def type_suggestions(self, request, pk=None):
101 table = self.get_object()
102 col_types = get_table_column_types(table)
103 return Response(col_types)
104
105 @action(methods=['post'], detail=True)
106 def previews(self, request, pk=None):
107 table = self.get_object()
108 serializer = TablePreviewSerializer(data=request.data)
109 serializer.is_valid(raise_exception=True)
110 columns = serializer.data["columns"]
111
112 column_names = [col["name"] for col in columns]
113 if not len(column_names) == len(set(column_names)):
114 raise ValidationError("Column names must be distinct")
115 if not len(columns) == len(table.sa_columns):
116 raise ValidationError("Incorrect number of columns in request.")
117
118 table_data = TableSerializer(table, context={"request": request}).data
119 try:
120 preview_records = table.get_preview(columns)
121 except (DataError, IntegrityError) as e:
122 if type(e.orig) == InvalidTextRepresentation or type(e.orig) == CheckViolation:
123 raise ValidationError("Invalid type cast requested.")
124 else:
125 raise APIException
126 except UnsupportedTypeException as e:
127 raise ValidationError(e)
128 except Exception as e:
129 raise APIException(e)
130
131 table_data.update(
132 {
133 # There's no way to reflect actual column data without
134 # creating a view, so we just use the submission, assuming
135 # no errors means we changed to the desired names and types
136 "columns": columns,
137 "records": preview_records
138 }
139 )
140
141 return Response(table_data)
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mathesar/api/viewsets/tables.py b/mathesar/api/viewsets/tables.py
--- a/mathesar/api/viewsets/tables.py
+++ b/mathesar/api/viewsets/tables.py
@@ -31,26 +31,15 @@
serializer = TableSerializer(data=request.data, context={'request': request})
serializer.is_valid(raise_exception=True)
- if not serializer.validated_data['name']:
- name = gen_table_name(
- serializer.validated_data['schema'],
- serializer.validated_data['data_files'],
- )
- else:
- name = serializer.validated_data['name']
+ schema = serializer.validated_data['schema']
+ data_files = serializer.validated_data.get('data_files')
+ name = serializer.validated_data.get('name') or gen_table_name(schema, data_files)
try:
- if serializer.validated_data['data_files']:
- table = create_table_from_datafile(
- serializer.validated_data['data_files'],
- name,
- serializer.validated_data['schema'],
- )
+ if data_files:
+ table = create_table_from_datafile(data_files, name, schema)
else:
- table = create_empty_table(
- name,
- serializer.validated_data['schema']
- )
+ table = create_empty_table(name, schema)
except ProgrammingError as e:
if type(e.orig) == DuplicateTable:
raise ValidationError(
| {"golden_diff": "diff --git a/mathesar/api/viewsets/tables.py b/mathesar/api/viewsets/tables.py\n--- a/mathesar/api/viewsets/tables.py\n+++ b/mathesar/api/viewsets/tables.py\n@@ -31,26 +31,15 @@\n serializer = TableSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n \n- if not serializer.validated_data['name']:\n- name = gen_table_name(\n- serializer.validated_data['schema'],\n- serializer.validated_data['data_files'],\n- )\n- else:\n- name = serializer.validated_data['name']\n+ schema = serializer.validated_data['schema']\n+ data_files = serializer.validated_data.get('data_files')\n+ name = serializer.validated_data.get('name') or gen_table_name(schema, data_files)\n \n try:\n- if serializer.validated_data['data_files']:\n- table = create_table_from_datafile(\n- serializer.validated_data['data_files'],\n- name,\n- serializer.validated_data['schema'],\n- )\n+ if data_files:\n+ table = create_table_from_datafile(data_files, name, schema)\n else:\n- table = create_empty_table(\n- name,\n- serializer.validated_data['schema']\n- )\n+ table = create_empty_table(name, schema)\n except ProgrammingError as e:\n if type(e.orig) == DuplicateTable:\n raise ValidationError(\n", "issue": "Implement adding a new empty table\n## Problem\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\nUsers may want to create an empty table.\r\n\r\n## Proposed solution\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\nThe [\"Basic Table Operations\" design spec](https://wiki.mathesar.org/design/specs/table-operations) has a solution for this, which we need to implement on the frontend. We will be implementing a different design than the one in the spec, which will need to be improvised.\r\n\r\nThis issue involves the following portions of the spec:\r\n- _User Experience_: \r\n - User adds a new table\r\n - User edits a new table name\r\n - User adds an empty table\r\n- _Interactions_: \r\n - Sequential Table Names\r\n \r\nPlease note that we'll need to generate and display a default name of the table that the user can use. Following the logic in #449 is recommended.\r\n\r\n## Additional context\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\n- Backend work: #184 \r\n- Design issue: #185\r\n- #449 is related.\r\n\r\nLogs of conversation on Matrix from @pavish:\r\n\r\n> For adding an empty table, instead of showing Empty table option within the tab, I think it might be better to show it on the top.\r\n> \r\n> The Add new table button can be changed to a dropdown with two options. 1. Empty table, 2. Import data.\r\n> \r\n> On clicking empty table, it would just send a create table request and create the table. Then the user can rename it if they want.\r\n> \r\n> This will have clean separation of concerns. 
Because Import data, involves importing from csv, remote file import and then copy/paste from spreadsheet..\r\n> \r\n> Empty table, would just create a new empty table.\n", "before_files": [{"content": "from django_filters import rest_framework as filters\nfrom psycopg2.errors import CheckViolation, DuplicateTable, InvalidTextRepresentation\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError, APIException\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin\nfrom rest_framework.response import Response\nfrom sqlalchemy.exc import ProgrammingError, DataError, IntegrityError\n\nfrom db.types.alteration import UnsupportedTypeException\nfrom mathesar.api.filters import TableFilter\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.serializers.tables import TableSerializer, TablePreviewSerializer\nfrom mathesar.models import Table\nfrom mathesar.utils.tables import (\n get_table_column_types, create_table_from_datafile, create_empty_table,\n gen_table_name\n)\n\n\nclass TableViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n serializer_class = TableSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = TableFilter\n\n def get_queryset(self):\n return Table.objects.all().order_by('-created_at')\n\n def create(self, request):\n serializer = TableSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n\n if not serializer.validated_data['name']:\n name = gen_table_name(\n serializer.validated_data['schema'],\n serializer.validated_data['data_files'],\n )\n else:\n name = serializer.validated_data['name']\n\n try:\n if serializer.validated_data['data_files']:\n table = create_table_from_datafile(\n serializer.validated_data['data_files'],\n name,\n serializer.validated_data['schema'],\n )\n else:\n table = create_empty_table(\n name,\n serializer.validated_data['schema']\n )\n except ProgrammingError as e:\n if type(e.orig) == DuplicateTable:\n raise ValidationError(\n f\"Relation {request.data['name']} already exists in schema {request.data['schema']}\"\n )\n else:\n raise APIException(e)\n\n serializer = TableSerializer(table, context={'request': request})\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None):\n serializer = TableSerializer(\n data=request.data, context={'request': request}, partial=True\n )\n serializer.is_valid(raise_exception=True)\n table = self.get_object()\n\n # Save the fields that are stored in the model.\n present_model_fields = []\n for model_field in table.MODEL_FIELDS:\n if model_field in serializer.validated_data:\n setattr(table, model_field, serializer.validated_data[model_field])\n present_model_fields.append(model_field)\n table.save(update_fields=present_model_fields)\n for key in present_model_fields:\n del serializer.validated_data[key]\n\n # Save the fields that are stored in the underlying DB.\n try:\n table.update_sa_table(serializer.validated_data)\n except ValueError as e:\n raise ValidationError(e)\n\n # Reload the table to avoid cached properties\n table = self.get_object()\n serializer = TableSerializer(table, context={'request': request})\n return Response(serializer.data)\n\n def destroy(self, request, pk=None):\n table = self.get_object()\n table.delete_sa_table()\n table.delete()\n return 
Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(methods=['get'], detail=True)\n def type_suggestions(self, request, pk=None):\n table = self.get_object()\n col_types = get_table_column_types(table)\n return Response(col_types)\n\n @action(methods=['post'], detail=True)\n def previews(self, request, pk=None):\n table = self.get_object()\n serializer = TablePreviewSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n columns = serializer.data[\"columns\"]\n\n column_names = [col[\"name\"] for col in columns]\n if not len(column_names) == len(set(column_names)):\n raise ValidationError(\"Column names must be distinct\")\n if not len(columns) == len(table.sa_columns):\n raise ValidationError(\"Incorrect number of columns in request.\")\n\n table_data = TableSerializer(table, context={\"request\": request}).data\n try:\n preview_records = table.get_preview(columns)\n except (DataError, IntegrityError) as e:\n if type(e.orig) == InvalidTextRepresentation or type(e.orig) == CheckViolation:\n raise ValidationError(\"Invalid type cast requested.\")\n else:\n raise APIException\n except UnsupportedTypeException as e:\n raise ValidationError(e)\n except Exception as e:\n raise APIException(e)\n\n table_data.update(\n {\n # There's no way to reflect actual column data without\n # creating a view, so we just use the submission, assuming\n # no errors means we changed to the desired names and types\n \"columns\": columns,\n \"records\": preview_records\n }\n )\n\n return Response(table_data)\n", "path": "mathesar/api/viewsets/tables.py"}], "after_files": [{"content": "from django_filters import rest_framework as filters\nfrom psycopg2.errors import CheckViolation, DuplicateTable, InvalidTextRepresentation\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError, APIException\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin\nfrom rest_framework.response import Response\nfrom sqlalchemy.exc import ProgrammingError, DataError, IntegrityError\n\nfrom db.types.alteration import UnsupportedTypeException\nfrom mathesar.api.filters import TableFilter\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.serializers.tables import TableSerializer, TablePreviewSerializer\nfrom mathesar.models import Table\nfrom mathesar.utils.tables import (\n get_table_column_types, create_table_from_datafile, create_empty_table,\n gen_table_name\n)\n\n\nclass TableViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n serializer_class = TableSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = TableFilter\n\n def get_queryset(self):\n return Table.objects.all().order_by('-created_at')\n\n def create(self, request):\n serializer = TableSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n\n schema = serializer.validated_data['schema']\n data_files = serializer.validated_data.get('data_files')\n name = serializer.validated_data.get('name') or gen_table_name(schema, data_files)\n\n try:\n if data_files:\n table = create_table_from_datafile(data_files, name, schema)\n else:\n table = create_empty_table(name, schema)\n except ProgrammingError as e:\n if type(e.orig) == DuplicateTable:\n raise ValidationError(\n f\"Relation {request.data['name']} already exists in schema {request.data['schema']}\"\n )\n else:\n raise 
APIException(e)\n\n serializer = TableSerializer(table, context={'request': request})\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None):\n serializer = TableSerializer(\n data=request.data, context={'request': request}, partial=True\n )\n serializer.is_valid(raise_exception=True)\n table = self.get_object()\n\n # Save the fields that are stored in the model.\n present_model_fields = []\n for model_field in table.MODEL_FIELDS:\n if model_field in serializer.validated_data:\n setattr(table, model_field, serializer.validated_data[model_field])\n present_model_fields.append(model_field)\n table.save(update_fields=present_model_fields)\n for key in present_model_fields:\n del serializer.validated_data[key]\n\n # Save the fields that are stored in the underlying DB.\n try:\n table.update_sa_table(serializer.validated_data)\n except ValueError as e:\n raise ValidationError(e)\n\n # Reload the table to avoid cached properties\n table = self.get_object()\n serializer = TableSerializer(table, context={'request': request})\n return Response(serializer.data)\n\n def destroy(self, request, pk=None):\n table = self.get_object()\n table.delete_sa_table()\n table.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(methods=['get'], detail=True)\n def type_suggestions(self, request, pk=None):\n table = self.get_object()\n col_types = get_table_column_types(table)\n return Response(col_types)\n\n @action(methods=['post'], detail=True)\n def previews(self, request, pk=None):\n table = self.get_object()\n serializer = TablePreviewSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n columns = serializer.data[\"columns\"]\n\n column_names = [col[\"name\"] for col in columns]\n if not len(column_names) == len(set(column_names)):\n raise ValidationError(\"Column names must be distinct\")\n if not len(columns) == len(table.sa_columns):\n raise ValidationError(\"Incorrect number of columns in request.\")\n\n table_data = TableSerializer(table, context={\"request\": request}).data\n try:\n preview_records = table.get_preview(columns)\n except (DataError, IntegrityError) as e:\n if type(e.orig) == InvalidTextRepresentation or type(e.orig) == CheckViolation:\n raise ValidationError(\"Invalid type cast requested.\")\n else:\n raise APIException\n except UnsupportedTypeException as e:\n raise ValidationError(e)\n except Exception as e:\n raise APIException(e)\n\n table_data.update(\n {\n # There's no way to reflect actual column data without\n # creating a view, so we just use the submission, assuming\n # no errors means we changed to the desired names and types\n \"columns\": columns,\n \"records\": preview_records\n }\n )\n\n return Response(table_data)\n", "path": "mathesar/api/viewsets/tables.py"}]} | 2,039 | 317 |
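
Note on the row above: after the refactor, omitting `data_files` from the create payload exercises the empty-table branch with a generated sequential name. A test-style sketch; the endpoint path and payload shape are assumptions based on the serializer fields referenced in the view, not quoted from Mathesar's routing:

```python
# Hypothetical DRF test for the empty-table branch of create().
def test_create_empty_table(client, schema):
    response = client.post("/api/v0/tables/", {"schema": schema.id})
    assert response.status_code == 201
    # Name falls back to gen_table_name(schema, data_files=None),
    # e.g. a sequential default such as "Table 1".
    assert response.json()["name"]
```
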
gh_patches_debug_54590 | rasdani/github-patches | git_diff | zulip__zulip-20491 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove "Send a reply" new user tip
After implementing #19900, there are two places where new users are told how to reply to a message: in the Welcome Bot text and in the "Send a reply" new user tip immediately below.
To simplify and avoid redundancy, we should remove the "Send a reply" new user tip.
<img width="909" alt="Screen_Shot_2021-12-06_at_10_08_14_AM" src="https://user-images.githubusercontent.com/2090066/144938995-080268ce-510d-4b76-b3c1-b691fbb814f4.png">
[CZO thread](https://chat.zulip.org/#narrow/stream/101-design/topic/.22click.20to.20reply.22.20whale)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/hotspots.py`
Content:
```
1 # See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html
2 # for documentation on this subsystem.
3 from typing import Dict, List
4
5 from django.conf import settings
6 from django.utils.functional import Promise
7 from django.utils.translation import gettext_lazy
8
9 from zerver.models import UserHotspot, UserProfile
10
11 INTRO_HOTSPOTS: Dict[str, Dict[str, Promise]] = {
12 "intro_reply": {
13 "title": gettext_lazy("Reply to a message"),
14 "description": gettext_lazy("Click anywhere on a message to reply."),
15 },
16 "intro_streams": {
17 "title": gettext_lazy("Catch up on a stream"),
18 "description": gettext_lazy(
19 "Messages sent to a stream are seen by everyone subscribed "
20 "to that stream. Try clicking on one of the stream links below."
21 ),
22 },
23 "intro_topics": {
24 "title": gettext_lazy("Topics"),
25 "description": gettext_lazy(
26 "Every message has a topic. Topics keep conversations "
27 "easy to follow, and make it easy to reply to conversations that start "
28 "while you are offline."
29 ),
30 },
31 "intro_gear": {
32 "title": gettext_lazy("Settings"),
33 "description": gettext_lazy(
34 "Go to Settings to configure your notifications and display settings."
35 ),
36 },
37 "intro_compose": {
38 "title": gettext_lazy("Compose"),
39 "description": gettext_lazy(
40 "Click here to start a new conversation. Pick a topic "
41 "(2-3 words is best), and give it a go!"
42 ),
43 },
44 }
45
46 # We would most likely implement new hotspots in the future that aren't
47 # a part of the initial tutorial. To that end, classifying them into
48 # categories which are aggregated in ALL_HOTSPOTS, seems like a good start.
49 ALL_HOTSPOTS: Dict[str, Dict[str, Promise]] = {
50 **INTRO_HOTSPOTS,
51 }
52
53
54 def get_next_hotspots(user: UserProfile) -> List[Dict[str, object]]:
55 # For manual testing, it can be convenient to set
56 # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to
57 # make it easy to click on all of the hotspots. Note that
58 # ALWAYS_SEND_ALL_HOTSPOTS has some bugs; see ReadTheDocs (link
59 # above) for details.
60 #
61 # Since this is just for development purposes, it's convenient for us to send
62 # all the hotspots rather than any specific category.
63 if settings.ALWAYS_SEND_ALL_HOTSPOTS:
64 return [
65 {
66 "name": hotspot,
67 "title": str(ALL_HOTSPOTS[hotspot]["title"]),
68 "description": str(ALL_HOTSPOTS[hotspot]["description"]),
69 "delay": 0,
70 }
71 for hotspot in ALL_HOTSPOTS
72 ]
73
74 # If a Zulip server has disabled the tutorial, never send hotspots.
75 if not settings.TUTORIAL_ENABLED:
76 return []
77
78 if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:
79 return []
80
81 seen_hotspots = frozenset(
82 UserHotspot.objects.filter(user=user).values_list("hotspot", flat=True)
83 )
84 for hotspot in INTRO_HOTSPOTS.keys():
85 if hotspot not in seen_hotspots:
86 return [
87 {
88 "name": hotspot,
89 "title": str(INTRO_HOTSPOTS[hotspot]["title"]),
90 "description": str(INTRO_HOTSPOTS[hotspot]["description"]),
91 "delay": 0.5,
92 }
93 ]
94
95 user.tutorial_status = UserProfile.TUTORIAL_FINISHED
96 user.save(update_fields=["tutorial_status"])
97 return []
98
99
100 def copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:
101 for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):
102 UserHotspot.objects.create(
103 user=target_profile, hotspot=userhotspot.hotspot, timestamp=userhotspot.timestamp
104 )
105
106 target_profile.tutorial_status = source_profile.tutorial_status
107 target_profile.onboarding_steps = source_profile.onboarding_steps
108 target_profile.save(update_fields=["tutorial_status", "onboarding_steps"])
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py
--- a/zerver/lib/hotspots.py
+++ b/zerver/lib/hotspots.py
@@ -9,10 +9,6 @@
from zerver.models import UserHotspot, UserProfile
INTRO_HOTSPOTS: Dict[str, Dict[str, Promise]] = {
- "intro_reply": {
- "title": gettext_lazy("Reply to a message"),
- "description": gettext_lazy("Click anywhere on a message to reply."),
- },
"intro_streams": {
"title": gettext_lazy("Catch up on a stream"),
"description": gettext_lazy(
| {"golden_diff": "diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py\n--- a/zerver/lib/hotspots.py\n+++ b/zerver/lib/hotspots.py\n@@ -9,10 +9,6 @@\n from zerver.models import UserHotspot, UserProfile\n \n INTRO_HOTSPOTS: Dict[str, Dict[str, Promise]] = {\n- \"intro_reply\": {\n- \"title\": gettext_lazy(\"Reply to a message\"),\n- \"description\": gettext_lazy(\"Click anywhere on a message to reply.\"),\n- },\n \"intro_streams\": {\n \"title\": gettext_lazy(\"Catch up on a stream\"),\n \"description\": gettext_lazy(\n", "issue": "Remove \"Send a reply\" new user tip\nAfter implementing #19900, there are two places where new users are told how to reply to a message: in the Welcome Bot text and in the \"Send a reply\" new user tip immediately below.\r\n\r\nTo simplify and avoid redundancy, we should remove the \"Send a reply\" new user tip.\r\n\r\n<img width=\"909\" alt=\"Screen_Shot_2021-12-06_at_10_08_14_AM\" src=\"https://user-images.githubusercontent.com/2090066/144938995-080268ce-510d-4b76-b3c1-b691fbb814f4.png\">\r\n\r\n[CZO thread](https://chat.zulip.org/#narrow/stream/101-design/topic/.22click.20to.20reply.22.20whale)\n", "before_files": [{"content": "# See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html\n# for documentation on this subsystem.\nfrom typing import Dict, List\n\nfrom django.conf import settings\nfrom django.utils.functional import Promise\nfrom django.utils.translation import gettext_lazy\n\nfrom zerver.models import UserHotspot, UserProfile\n\nINTRO_HOTSPOTS: Dict[str, Dict[str, Promise]] = {\n \"intro_reply\": {\n \"title\": gettext_lazy(\"Reply to a message\"),\n \"description\": gettext_lazy(\"Click anywhere on a message to reply.\"),\n },\n \"intro_streams\": {\n \"title\": gettext_lazy(\"Catch up on a stream\"),\n \"description\": gettext_lazy(\n \"Messages sent to a stream are seen by everyone subscribed \"\n \"to that stream. Try clicking on one of the stream links below.\"\n ),\n },\n \"intro_topics\": {\n \"title\": gettext_lazy(\"Topics\"),\n \"description\": gettext_lazy(\n \"Every message has a topic. Topics keep conversations \"\n \"easy to follow, and make it easy to reply to conversations that start \"\n \"while you are offline.\"\n ),\n },\n \"intro_gear\": {\n \"title\": gettext_lazy(\"Settings\"),\n \"description\": gettext_lazy(\n \"Go to Settings to configure your notifications and display settings.\"\n ),\n },\n \"intro_compose\": {\n \"title\": gettext_lazy(\"Compose\"),\n \"description\": gettext_lazy(\n \"Click here to start a new conversation. Pick a topic \"\n \"(2-3 words is best), and give it a go!\"\n ),\n },\n}\n\n# We would most likely implement new hotspots in the future that aren't\n# a part of the initial tutorial. To that end, classifying them into\n# categories which are aggregated in ALL_HOTSPOTS, seems like a good start.\nALL_HOTSPOTS: Dict[str, Dict[str, Promise]] = {\n **INTRO_HOTSPOTS,\n}\n\n\ndef get_next_hotspots(user: UserProfile) -> List[Dict[str, object]]:\n # For manual testing, it can be convenient to set\n # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to\n # make it easy to click on all of the hotspots. 
Note that\n # ALWAYS_SEND_ALL_HOTSPOTS has some bugs; see ReadTheDocs (link\n # above) for details.\n #\n # Since this is just for development purposes, it's convenient for us to send\n # all the hotspots rather than any specific category.\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [\n {\n \"name\": hotspot,\n \"title\": str(ALL_HOTSPOTS[hotspot][\"title\"]),\n \"description\": str(ALL_HOTSPOTS[hotspot][\"description\"]),\n \"delay\": 0,\n }\n for hotspot in ALL_HOTSPOTS\n ]\n\n # If a Zulip server has disabled the tutorial, never send hotspots.\n if not settings.TUTORIAL_ENABLED:\n return []\n\n if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:\n return []\n\n seen_hotspots = frozenset(\n UserHotspot.objects.filter(user=user).values_list(\"hotspot\", flat=True)\n )\n for hotspot in INTRO_HOTSPOTS.keys():\n if hotspot not in seen_hotspots:\n return [\n {\n \"name\": hotspot,\n \"title\": str(INTRO_HOTSPOTS[hotspot][\"title\"]),\n \"description\": str(INTRO_HOTSPOTS[hotspot][\"description\"]),\n \"delay\": 0.5,\n }\n ]\n\n user.tutorial_status = UserProfile.TUTORIAL_FINISHED\n user.save(update_fields=[\"tutorial_status\"])\n return []\n\n\ndef copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:\n for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):\n UserHotspot.objects.create(\n user=target_profile, hotspot=userhotspot.hotspot, timestamp=userhotspot.timestamp\n )\n\n target_profile.tutorial_status = source_profile.tutorial_status\n target_profile.onboarding_steps = source_profile.onboarding_steps\n target_profile.save(update_fields=[\"tutorial_status\", \"onboarding_steps\"])\n", "path": "zerver/lib/hotspots.py"}], "after_files": [{"content": "# See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html\n# for documentation on this subsystem.\nfrom typing import Dict, List\n\nfrom django.conf import settings\nfrom django.utils.functional import Promise\nfrom django.utils.translation import gettext_lazy\n\nfrom zerver.models import UserHotspot, UserProfile\n\nINTRO_HOTSPOTS: Dict[str, Dict[str, Promise]] = {\n \"intro_streams\": {\n \"title\": gettext_lazy(\"Catch up on a stream\"),\n \"description\": gettext_lazy(\n \"Messages sent to a stream are seen by everyone subscribed \"\n \"to that stream. Try clicking on one of the stream links below.\"\n ),\n },\n \"intro_topics\": {\n \"title\": gettext_lazy(\"Topics\"),\n \"description\": gettext_lazy(\n \"Every message has a topic. Topics keep conversations \"\n \"easy to follow, and make it easy to reply to conversations that start \"\n \"while you are offline.\"\n ),\n },\n \"intro_gear\": {\n \"title\": gettext_lazy(\"Settings\"),\n \"description\": gettext_lazy(\n \"Go to Settings to configure your notifications and display settings.\"\n ),\n },\n \"intro_compose\": {\n \"title\": gettext_lazy(\"Compose\"),\n \"description\": gettext_lazy(\n \"Click here to start a new conversation. Pick a topic \"\n \"(2-3 words is best), and give it a go!\"\n ),\n },\n}\n\n# We would most likely implement new hotspots in the future that aren't\n# a part of the initial tutorial. 
To that end, classifying them into\n# categories which are aggregated in ALL_HOTSPOTS, seems like a good start.\nALL_HOTSPOTS: Dict[str, Dict[str, Promise]] = {\n **INTRO_HOTSPOTS,\n}\n\n\ndef get_next_hotspots(user: UserProfile) -> List[Dict[str, object]]:\n # For manual testing, it can be convenient to set\n # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to\n # make it easy to click on all of the hotspots. Note that\n # ALWAYS_SEND_ALL_HOTSPOTS has some bugs; see ReadTheDocs (link\n # above) for details.\n #\n # Since this is just for development purposes, it's convenient for us to send\n # all the hotspots rather than any specific category.\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [\n {\n \"name\": hotspot,\n \"title\": str(ALL_HOTSPOTS[hotspot][\"title\"]),\n \"description\": str(ALL_HOTSPOTS[hotspot][\"description\"]),\n \"delay\": 0,\n }\n for hotspot in ALL_HOTSPOTS\n ]\n\n # If a Zulip server has disabled the tutorial, never send hotspots.\n if not settings.TUTORIAL_ENABLED:\n return []\n\n if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:\n return []\n\n seen_hotspots = frozenset(\n UserHotspot.objects.filter(user=user).values_list(\"hotspot\", flat=True)\n )\n for hotspot in INTRO_HOTSPOTS.keys():\n if hotspot not in seen_hotspots:\n return [\n {\n \"name\": hotspot,\n \"title\": str(INTRO_HOTSPOTS[hotspot][\"title\"]),\n \"description\": str(INTRO_HOTSPOTS[hotspot][\"description\"]),\n \"delay\": 0.5,\n }\n ]\n\n user.tutorial_status = UserProfile.TUTORIAL_FINISHED\n user.save(update_fields=[\"tutorial_status\"])\n return []\n\n\ndef copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:\n for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):\n UserHotspot.objects.create(\n user=target_profile, hotspot=userhotspot.hotspot, timestamp=userhotspot.timestamp\n )\n\n target_profile.tutorial_status = source_profile.tutorial_status\n target_profile.onboarding_steps = source_profile.onboarding_steps\n target_profile.save(update_fields=[\"tutorial_status\", \"onboarding_steps\"])\n", "path": "zerver/lib/hotspots.py"}]} | 1,593 | 143 |
gh_patches_debug_5182 | rasdani/github-patches | git_diff | Gallopsled__pwntools-1852 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`atexit.register` does not work
### What I did
```python3
from pwn import *
atexit.register(print, "hello world")
exit()
```
### What I expected to see
```python3 test.py
hello world
```
### What I saw
Nothing
I noticed this because `asm()`, which adds an `atexit` handler to remove the `/tmp/pwn-asm-XXXXXX` folder, does not in fact remove it, meaning multiple script runs leads to many similar folders.
--- END ISSUE ---
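A quick diagnosis before looking at the files: Python 3 removed the `sys.exitfunc` hook, so any handler assigned to it is silently ignored at interpreter shutdown. A minimal sketch of a version-portable registration (illustrative only; `_run_handlers` here stands in for the library's own handler runner):
```python
import sys
import atexit as std_atexit

def _run_handlers():
    # Stand-in for the library's exit-handler runner.
    print("running exit handlers")

if sys.version_info[0] < 3:
    # Python 2: the interpreter invokes sys.exitfunc at shutdown.
    sys.exitfunc = _run_handlers
else:
    # Python 3: sys.exitfunc is gone, so assigning it does nothing;
    # route through the standard atexit module instead.
    std_atexit.register(_run_handlers)
```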
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/atexit.py`
Content:
```
1 """
2 Replacement for the Python standard library's atexit.py.
3
4 Whereas the standard :mod:`atexit` module only defines :func:`atexit.register`,
5 this replacement module also defines :func:`unregister`.
6
7 This module also fixes a the issue that exceptions raised by an exit handler is
8 printed twice when the standard :mod:`atexit` is used.
9 """
10 from __future__ import absolute_import
11 from __future__ import division
12
13 import sys
14 import threading
15 import traceback
16
17 from pwnlib.context import context
18
19 __all__ = ['register', 'unregister']
20
21 _lock = threading.Lock()
22 _ident = 0
23 _handlers = {}
24
25 def register(func, *args, **kwargs):
26 """register(func, *args, **kwargs)
27
28 Registers a function to be called on program termination. The function will
29 be called with positional arguments `args` and keyword arguments `kwargs`,
30 i.e. ``func(*args, **kwargs)``. The current `context` is recorded and will
31 be the one used when the handler is run.
32
33 E.g. to suppress logging output from an exit-handler one could write::
34
35 with context.local(log_level = 'error'):
36 atexit.register(handler)
37
38 An identifier is returned which can be used to unregister the exit-handler.
39
40 This function can be used as a decorator::
41
42 @atexit.register
43 def handler():
44 ...
45
46 Notice however that this will bind ``handler`` to the identifier and not the
47 actual exit-handler. The exit-handler can then be unregistered with::
48
49 atexit.unregister(handler)
50
51 This function is thread safe.
52
53 """
54 global _ident
55 with _lock:
56 ident = _ident
57 _ident += 1
58 _handlers[ident] = (func, args, kwargs, vars(context))
59 return ident
60
61 def unregister(ident):
62 """unregister(ident)
63
64 Remove the exit-handler identified by `ident` from the list of registered
65 handlers. If `ident` isn't registered this is a no-op.
66 """
67 if ident in _handlers:
68 del _handlers[ident]
69
70 def _run_handlers():
71 """_run_handlers()
72
73 Run registered exit-handlers. They run in the reverse order of which they
74 were registered.
75
76 If a handler raises an exception, it will be printed but nothing else
77 happens, i.e. other handlers will be run and `sys.excepthook` will not be
78 called for that reason.
79 """
80 context.clear()
81 for _ident, (func, args, kwargs, ctx) in \
82 sorted(_handlers.items(), reverse = True):
83 try:
84 with context.local(**ctx):
85 func(*args, **kwargs)
86 except SystemExit:
87 pass
88 except Exception:
89 # extract the current exception and rewind the traceback to where it
90 # originated
91 typ, val, tb = sys.exc_info()
92 traceback.print_exception(typ, val, tb.tb_next)
93
94 # if there's already an exitfunc registered be sure to run that too
95 if hasattr(sys, "exitfunc"):
96 register(sys.exitfunc)
97
98 sys.exitfunc = _run_handlers
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwnlib/atexit.py b/pwnlib/atexit.py
--- a/pwnlib/atexit.py
+++ b/pwnlib/atexit.py
@@ -13,6 +13,7 @@
import sys
import threading
import traceback
+import atexit as std_atexit
from pwnlib.context import context
@@ -95,4 +96,8 @@
if hasattr(sys, "exitfunc"):
register(sys.exitfunc)
-sys.exitfunc = _run_handlers
+if sys.version_info[0] < 3:
+ sys.exitfunc = _run_handlers
+else:
+ std_atexit.register(_run_handlers)
+
| {"golden_diff": "diff --git a/pwnlib/atexit.py b/pwnlib/atexit.py\n--- a/pwnlib/atexit.py\n+++ b/pwnlib/atexit.py\n@@ -13,6 +13,7 @@\n import sys\n import threading\n import traceback\n+import atexit as std_atexit\n \n from pwnlib.context import context\n \n@@ -95,4 +96,8 @@\n if hasattr(sys, \"exitfunc\"):\n register(sys.exitfunc)\n \n-sys.exitfunc = _run_handlers\n+if sys.version_info[0] < 3:\n+ sys.exitfunc = _run_handlers\n+else:\n+ std_atexit.register(_run_handlers)\n+\n", "issue": "`atexit.register` does not work\n### What I did\r\n```python3\r\nfrom pwn import *\r\natexit.register(print, \"hello world\")\r\nexit()\r\n```\r\n### What I expected to see\r\n```python3 test.py\r\nhello world\r\n```\r\n### What I saw\r\nNothing\r\n\r\nI noticed this because `asm()`, which adds an `atexit` handler to remove the `/tmp/pwn-asm-XXXXXX` folder, does not in fact remove it, meaning multiple script runs leads to many similar folders.\n", "before_files": [{"content": "\"\"\"\nReplacement for the Python standard library's atexit.py.\n\nWhereas the standard :mod:`atexit` module only defines :func:`atexit.register`,\nthis replacement module also defines :func:`unregister`.\n\nThis module also fixes a the issue that exceptions raised by an exit handler is\nprinted twice when the standard :mod:`atexit` is used.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport sys\nimport threading\nimport traceback\n\nfrom pwnlib.context import context\n\n__all__ = ['register', 'unregister']\n\n_lock = threading.Lock()\n_ident = 0\n_handlers = {}\n\ndef register(func, *args, **kwargs):\n \"\"\"register(func, *args, **kwargs)\n\n Registers a function to be called on program termination. The function will\n be called with positional arguments `args` and keyword arguments `kwargs`,\n i.e. ``func(*args, **kwargs)``. The current `context` is recorded and will\n be the one used when the handler is run.\n\n E.g. to suppress logging output from an exit-handler one could write::\n\n with context.local(log_level = 'error'):\n atexit.register(handler)\n\n An identifier is returned which can be used to unregister the exit-handler.\n\n This function can be used as a decorator::\n\n @atexit.register\n def handler():\n ...\n\n Notice however that this will bind ``handler`` to the identifier and not the\n actual exit-handler. The exit-handler can then be unregistered with::\n\n atexit.unregister(handler)\n\n This function is thread safe.\n\n \"\"\"\n global _ident\n with _lock:\n ident = _ident\n _ident += 1\n _handlers[ident] = (func, args, kwargs, vars(context))\n return ident\n\ndef unregister(ident):\n \"\"\"unregister(ident)\n\n Remove the exit-handler identified by `ident` from the list of registered\n handlers. If `ident` isn't registered this is a no-op.\n \"\"\"\n if ident in _handlers:\n del _handlers[ident]\n\ndef _run_handlers():\n \"\"\"_run_handlers()\n\n Run registered exit-handlers. They run in the reverse order of which they\n were registered.\n\n If a handler raises an exception, it will be printed but nothing else\n happens, i.e. other handlers will be run and `sys.excepthook` will not be\n called for that reason.\n \"\"\"\n context.clear()\n for _ident, (func, args, kwargs, ctx) in \\\n sorted(_handlers.items(), reverse = True):\n try:\n with context.local(**ctx):\n func(*args, **kwargs)\n except SystemExit:\n pass\n except Exception:\n # extract the current exception and rewind the traceback to where it\n # originated\n typ, val, tb = sys.exc_info()\n traceback.print_exception(typ, val, tb.tb_next)\n\n# if there's already an exitfunc registered be sure to run that too\nif hasattr(sys, \"exitfunc\"):\n register(sys.exitfunc)\n\nsys.exitfunc = _run_handlers\n", "path": "pwnlib/atexit.py"}], "after_files": [{"content": "\"\"\"\nReplacement for the Python standard library's atexit.py.\n\nWhereas the standard :mod:`atexit` module only defines :func:`atexit.register`,\nthis replacement module also defines :func:`unregister`.\n\nThis module also fixes a the issue that exceptions raised by an exit handler is\nprinted twice when the standard :mod:`atexit` is used.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport sys\nimport threading\nimport traceback\nimport atexit as std_atexit\n\nfrom pwnlib.context import context\n\n__all__ = ['register', 'unregister']\n\n_lock = threading.Lock()\n_ident = 0\n_handlers = {}\n\ndef register(func, *args, **kwargs):\n \"\"\"register(func, *args, **kwargs)\n\n Registers a function to be called on program termination. The function will\n be called with positional arguments `args` and keyword arguments `kwargs`,\n i.e. ``func(*args, **kwargs)``. The current `context` is recorded and will\n be the one used when the handler is run.\n\n E.g. to suppress logging output from an exit-handler one could write::\n\n with context.local(log_level = 'error'):\n atexit.register(handler)\n\n An identifier is returned which can be used to unregister the exit-handler.\n\n This function can be used as a decorator::\n\n @atexit.register\n def handler():\n ...\n\n Notice however that this will bind ``handler`` to the identifier and not the\n actual exit-handler. The exit-handler can then be unregistered with::\n\n atexit.unregister(handler)\n\n This function is thread safe.\n\n \"\"\"\n global _ident\n with _lock:\n ident = _ident\n _ident += 1\n _handlers[ident] = (func, args, kwargs, vars(context))\n return ident\n\ndef unregister(ident):\n \"\"\"unregister(ident)\n\n Remove the exit-handler identified by `ident` from the list of registered\n handlers. If `ident` isn't registered this is a no-op.\n \"\"\"\n if ident in _handlers:\n del _handlers[ident]\n\ndef _run_handlers():\n \"\"\"_run_handlers()\n\n Run registered exit-handlers. They run in the reverse order of which they\n were registered.\n\n If a handler raises an exception, it will be printed but nothing else\n happens, i.e. other handlers will be run and `sys.excepthook` will not be\n called for that reason.\n \"\"\"\n context.clear()\n for _ident, (func, args, kwargs, ctx) in \\\n sorted(_handlers.items(), reverse = True):\n try:\n with context.local(**ctx):\n func(*args, **kwargs)\n except SystemExit:\n pass\n except Exception:\n # extract the current exception and rewind the traceback to where it\n # originated\n typ, val, tb = sys.exc_info()\n traceback.print_exception(typ, val, tb.tb_next)\n\n# if there's already an exitfunc registered be sure to run that too\nif hasattr(sys, \"exitfunc\"):\n register(sys.exitfunc)\n\nif sys.version_info[0] < 3:\n sys.exitfunc = _run_handlers\nelse:\n std_atexit.register(_run_handlers)\n\n", "path": "pwnlib/atexit.py"}]} | 1,241 | 146 |
gh_patches_debug_20723 | rasdani/github-patches | git_diff | streamlit__streamlit-1737 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
st.write throws value is null when string is too big
# Summary
calling `st.write` with a string that is too big results in an error message on the front end that reads "value is null"
<img width="551" alt="Screen Shot 2019-10-16 at 3 57 57 PM" src="https://user-images.githubusercontent.com/934511/66950384-a17d0e00-f02e-11e9-87bb-1df7158794b5.png">
# Steps to reproduce
1. create a script with a variable that holds a string that weighs more than 50 MB
2. streamlit run yourscript.py
3. message will appear on the front end
## Expected behavior:
The error message should be clearer.
## Actual behavior:
The error message does not point to the string length constraint of `st.write`.
--- END ISSUE ---
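For reference, a minimal script that trips the limit looks like the sketch below; the 50 MB threshold comes from `MESSAGE_SIZE_LIMIT` in the file that follows, and the exact string size here is illustrative:
```python
# app.py -- run with: streamlit run app.py
import streamlit as st

# About 60 MB of text, comfortably past the 50 MB WebSocket message limit.
st.write("x" * 60_000_000)
```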
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/streamlit/server/server_util.py`
Content:
```
1 # Copyright 2018-2020 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Server related utility functions"""
16
17 from typing import Callable, List, Optional, Union
18
19 from streamlit import config
20 from streamlit import net_util
21 from streamlit import type_util
22 from streamlit import url_util
23 from streamlit.ForwardMsgCache import populate_hash_if_needed
24
25 # Largest message that can be sent via the WebSocket connection.
26 # (Limit was picked arbitrarily)
27 # TODO: Break message in several chunks if too large.
28 MESSAGE_SIZE_LIMIT = 50 * 1e6 # 50MB
29
30
31 def is_cacheable_msg(msg):
32 """True if the given message qualifies for caching.
33
34 Parameters
35 ----------
36 msg : ForwardMsg
37
38 Returns
39 -------
40 bool
41 True if we should cache the message.
42
43 """
44 if msg.WhichOneof("type") in {"ref_hash", "initialize"}:
45 # Some message types never get cached
46 return False
47 return msg.ByteSize() >= config.get_option("global.minCachedMessageSize")
48
49
50 def serialize_forward_msg(msg):
51 """Serialize a ForwardMsg to send to a client.
52
53 If the message is too large, it will be converted to an exception message
54 instead.
55
56 Parameters
57 ----------
58 msg : ForwardMsg
59 The message to serialize
60
61 Returns
62 -------
63 str
64 The serialized byte string to send
65
66 """
67 populate_hash_if_needed(msg)
68 msg_str = msg.SerializeToString()
69
70 if len(msg_str) > MESSAGE_SIZE_LIMIT:
71 _convert_msg_to_exception_msg(msg, RuntimeError("Data too large"))
72 msg_str = msg.SerializeToString()
73
74 return msg_str
75
76
77 def _convert_msg_to_exception_msg(msg, e):
78 import streamlit.elements.exception_proto as exception_proto
79
80 delta_id = msg.metadata.delta_id
81 msg.Clear()
82 msg.metadata.delta_id = delta_id
83
84 exception_proto.marshall(msg.delta.new_element.exception, e)
85
86
87 def is_url_from_allowed_origins(url):
88 """Return True if URL is from allowed origins (for CORS purpose).
89
90 Allowed origins:
91 1. localhost
92 2. The internal and external IP addresses of the machine where this
93 function was called from.
94 3. The cloud storage domain configured in `s3.bucket`.
95
96 If `server.enableCORS` is False, this allows all origins.
97
98 Parameters
99 ----------
100 url : str
101 The URL to check
102
103 Returns
104 -------
105 bool
106 True if URL is accepted. False otherwise.
107
108 """
109 if not config.get_option("server.enableCORS"):
110 # Allow everything when CORS is disabled.
111 return True
112
113 hostname = url_util.get_hostname(url)
114
115 allowed_domains = [ # List[Union[str, Callable[[], Optional[str]]]]
116 # Check localhost first.
117 "localhost",
118 "0.0.0.0",
119 "127.0.0.1",
120 # Try to avoid making unecessary HTTP requests by checking if the user
121 # manually specified a server address.
122 _get_server_address_if_manually_set,
123 _get_s3_url_host_if_manually_set,
124 # Then try the options that depend on HTTP requests or opening sockets.
125 net_util.get_internal_ip,
126 net_util.get_external_ip,
127 lambda: config.get_option("s3.bucket"),
128 ]
129
130 for allowed_domain in allowed_domains:
131 if callable(allowed_domain):
132 allowed_domain = allowed_domain()
133
134 if allowed_domain is None:
135 continue
136
137 if hostname == allowed_domain:
138 return True
139
140 return False
141
142
143 def _get_server_address_if_manually_set() -> Optional[str]:
144 if config.is_manually_set("browser.serverAddress"):
145 return url_util.get_hostname(config.get_option("browser.serverAddress"))
146 return None
147
148
149 def _get_s3_url_host_if_manually_set() -> Optional[str]:
150 if config.is_manually_set("s3.url"):
151 return url_util.get_hostname(config.get_option("s3.url"))
152 return None
153
154
155 def make_url_path_regex(*path, **kwargs):
156 """Get a regex of the form ^/foo/bar/baz/?$ for a path (foo, bar, baz)."""
157 path = [x.strip("/") for x in path if x] # Filter out falsy components.
158 path_format = r"^/%s/?$" if kwargs.get("trailing_slash", True) else r"^/%s$"
159 return path_format % "/".join(path)
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/streamlit/server/server_util.py b/lib/streamlit/server/server_util.py
--- a/lib/streamlit/server/server_util.py
+++ b/lib/streamlit/server/server_util.py
@@ -68,22 +68,19 @@
msg_str = msg.SerializeToString()
if len(msg_str) > MESSAGE_SIZE_LIMIT:
- _convert_msg_to_exception_msg(msg, RuntimeError("Data too large"))
+ import streamlit.elements.exception_proto as exception_proto
+
+ error = RuntimeError(
+ f"Data of size {len(msg_str)/1e6:.1f}MB exceeds write limit of {MESSAGE_SIZE_LIMIT/1e6}MB"
+ )
+ # Overwrite the offending ForwardMsg.delta with an error to display.
+ # This assumes that the size limit wasn't exceeded due to metadata.
+ exception_proto.marshall(msg.delta.new_element.exception, error)
msg_str = msg.SerializeToString()
return msg_str
-def _convert_msg_to_exception_msg(msg, e):
- import streamlit.elements.exception_proto as exception_proto
-
- delta_id = msg.metadata.delta_id
- msg.Clear()
- msg.metadata.delta_id = delta_id
-
- exception_proto.marshall(msg.delta.new_element.exception, e)
-
-
def is_url_from_allowed_origins(url):
"""Return True if URL is from allowed origins (for CORS purpose).
| {"golden_diff": "diff --git a/lib/streamlit/server/server_util.py b/lib/streamlit/server/server_util.py\n--- a/lib/streamlit/server/server_util.py\n+++ b/lib/streamlit/server/server_util.py\n@@ -68,22 +68,19 @@\n msg_str = msg.SerializeToString()\n \n if len(msg_str) > MESSAGE_SIZE_LIMIT:\n- _convert_msg_to_exception_msg(msg, RuntimeError(\"Data too large\"))\n+ import streamlit.elements.exception_proto as exception_proto\n+\n+ error = RuntimeError(\n+ f\"Data of size {len(msg_str)/1e6:.1f}MB exceeds write limit of {MESSAGE_SIZE_LIMIT/1e6}MB\"\n+ )\n+ # Overwrite the offending ForwardMsg.delta with an error to display.\n+ # This assumes that the size limit wasn't exceeded due to metadata.\n+ exception_proto.marshall(msg.delta.new_element.exception, error)\n msg_str = msg.SerializeToString()\n \n return msg_str\n \n \n-def _convert_msg_to_exception_msg(msg, e):\n- import streamlit.elements.exception_proto as exception_proto\n-\n- delta_id = msg.metadata.delta_id\n- msg.Clear()\n- msg.metadata.delta_id = delta_id\n-\n- exception_proto.marshall(msg.delta.new_element.exception, e)\n-\n-\n def is_url_from_allowed_origins(url):\n \"\"\"Return True if URL is from allowed origins (for CORS purpose).\n", "issue": "st.write throws value is null when string is too big\n# Summary\r\ncalling `st.write` with a string that is too big will result on an error message on the front end with message \"value is null\"\r\n\r\n<img width=\"551\" alt=\"Screen Shot 2019-10-16 at 3 57 57 PM\" src=\"https://user-images.githubusercontent.com/934511/66950384-a17d0e00-f02e-11e9-87bb-1df7158794b5.png\">\r\n\r\n# Steps to reproduce\r\n1. create a script with a variable that holds a string that's more than weights more than 50mb\r\n2. streamlit run yourscript.py\r\n3. message will appear on the front end\r\n\r\n\r\n## Expected behavior:\r\nThe error message should be clearer.\r\n\r\n## Actual behavior:\r\nError message does not point to the string length constrain of `st.write`.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Server related utility functions\"\"\"\n\nfrom typing import Callable, List, Optional, Union\n\nfrom streamlit import config\nfrom streamlit import net_util\nfrom streamlit import type_util\nfrom streamlit import url_util\nfrom streamlit.ForwardMsgCache import populate_hash_if_needed\n\n# Largest message that can be sent via the WebSocket connection.\n# (Limit was picked arbitrarily)\n# TODO: Break message in several chunks if too large.\nMESSAGE_SIZE_LIMIT = 50 * 1e6 # 50MB\n\n\ndef is_cacheable_msg(msg):\n \"\"\"True if the given message qualifies for caching.\n\n Parameters\n ----------\n msg : ForwardMsg\n\n Returns\n -------\n bool\n True if we should cache the message.\n\n \"\"\"\n if msg.WhichOneof(\"type\") in {\"ref_hash\", \"initialize\"}:\n # Some message types never get cached\n return False\n return msg.ByteSize() >= config.get_option(\"global.minCachedMessageSize\")\n\n\ndef 
serialize_forward_msg(msg):\n \"\"\"Serialize a ForwardMsg to send to a client.\n\n If the message is too large, it will be converted to an exception message\n instead.\n\n Parameters\n ----------\n msg : ForwardMsg\n The message to serialize\n\n Returns\n -------\n str\n The serialized byte string to send\n\n \"\"\"\n populate_hash_if_needed(msg)\n msg_str = msg.SerializeToString()\n\n if len(msg_str) > MESSAGE_SIZE_LIMIT:\n _convert_msg_to_exception_msg(msg, RuntimeError(\"Data too large\"))\n msg_str = msg.SerializeToString()\n\n return msg_str\n\n\ndef _convert_msg_to_exception_msg(msg, e):\n import streamlit.elements.exception_proto as exception_proto\n\n delta_id = msg.metadata.delta_id\n msg.Clear()\n msg.metadata.delta_id = delta_id\n\n exception_proto.marshall(msg.delta.new_element.exception, e)\n\n\ndef is_url_from_allowed_origins(url):\n \"\"\"Return True if URL is from allowed origins (for CORS purpose).\n\n Allowed origins:\n 1. localhost\n 2. The internal and external IP addresses of the machine where this\n function was called from.\n 3. The cloud storage domain configured in `s3.bucket`.\n\n If `server.enableCORS` is False, this allows all origins.\n\n Parameters\n ----------\n url : str\n The URL to check\n\n Returns\n -------\n bool\n True if URL is accepted. False otherwise.\n\n \"\"\"\n if not config.get_option(\"server.enableCORS\"):\n # Allow everything when CORS is disabled.\n return True\n\n hostname = url_util.get_hostname(url)\n\n allowed_domains = [ # List[Union[str, Callable[[], Optional[str]]]]\n # Check localhost first.\n \"localhost\",\n \"0.0.0.0\",\n \"127.0.0.1\",\n # Try to avoid making unecessary HTTP requests by checking if the user\n # manually specified a server address.\n _get_server_address_if_manually_set,\n _get_s3_url_host_if_manually_set,\n # Then try the options that depend on HTTP requests or opening sockets.\n net_util.get_internal_ip,\n net_util.get_external_ip,\n lambda: config.get_option(\"s3.bucket\"),\n ]\n\n for allowed_domain in allowed_domains:\n if callable(allowed_domain):\n allowed_domain = allowed_domain()\n\n if allowed_domain is None:\n continue\n\n if hostname == allowed_domain:\n return True\n\n return False\n\n\ndef _get_server_address_if_manually_set() -> Optional[str]:\n if config.is_manually_set(\"browser.serverAddress\"):\n return url_util.get_hostname(config.get_option(\"browser.serverAddress\"))\n return None\n\n\ndef _get_s3_url_host_if_manually_set() -> Optional[str]:\n if config.is_manually_set(\"s3.url\"):\n return url_util.get_hostname(config.get_option(\"s3.url\"))\n return None\n\n\ndef make_url_path_regex(*path, **kwargs):\n \"\"\"Get a regex of the form ^/foo/bar/baz/?$ for a path (foo, bar, baz).\"\"\"\n path = [x.strip(\"/\") for x in path if x] # Filter out falsy components.\n path_format = r\"^/%s/?$\" if kwargs.get(\"trailing_slash\", True) else r\"^/%s$\"\n return path_format % \"/\".join(path)\n", "path": "lib/streamlit/server/server_util.py"}], "after_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language 
governing permissions and\n# limitations under the License.\n\n\"\"\"Server related utility functions\"\"\"\n\nfrom typing import Callable, List, Optional, Union\n\nfrom streamlit import config\nfrom streamlit import net_util\nfrom streamlit import type_util\nfrom streamlit import url_util\nfrom streamlit.ForwardMsgCache import populate_hash_if_needed\n\n# Largest message that can be sent via the WebSocket connection.\n# (Limit was picked arbitrarily)\n# TODO: Break message in several chunks if too large.\nMESSAGE_SIZE_LIMIT = 50 * 1e6 # 50MB\n\n\ndef is_cacheable_msg(msg):\n \"\"\"True if the given message qualifies for caching.\n\n Parameters\n ----------\n msg : ForwardMsg\n\n Returns\n -------\n bool\n True if we should cache the message.\n\n \"\"\"\n if msg.WhichOneof(\"type\") in {\"ref_hash\", \"initialize\"}:\n # Some message types never get cached\n return False\n return msg.ByteSize() >= config.get_option(\"global.minCachedMessageSize\")\n\n\ndef serialize_forward_msg(msg):\n \"\"\"Serialize a ForwardMsg to send to a client.\n\n If the message is too large, it will be converted to an exception message\n instead.\n\n Parameters\n ----------\n msg : ForwardMsg\n The message to serialize\n\n Returns\n -------\n str\n The serialized byte string to send\n\n \"\"\"\n populate_hash_if_needed(msg)\n msg_str = msg.SerializeToString()\n\n if len(msg_str) > MESSAGE_SIZE_LIMIT:\n import streamlit.elements.exception_proto as exception_proto\n\n error = RuntimeError(\n f\"Data of size {len(msg_str)/1e6:.1f}MB exceeds write limit of {MESSAGE_SIZE_LIMIT/1e6}MB\"\n )\n # Overwrite the offending ForwardMsg.delta with an error to display.\n # This assumes that the size limit wasn't exceeded due to metadata.\n exception_proto.marshall(msg.delta.new_element.exception, error)\n msg_str = msg.SerializeToString()\n\n return msg_str\n\n\ndef is_url_from_allowed_origins(url):\n \"\"\"Return True if URL is from allowed origins (for CORS purpose).\n\n Allowed origins:\n 1. localhost\n 2. The internal and external IP addresses of the machine where this\n function was called from.\n 3. The cloud storage domain configured in `s3.bucket`.\n\n If `server.enableCORS` is False, this allows all origins.\n\n Parameters\n ----------\n url : str\n The URL to check\n\n Returns\n -------\n bool\n True if URL is accepted. 
False otherwise.\n\n \"\"\"\n if not config.get_option(\"server.enableCORS\"):\n # Allow everything when CORS is disabled.\n return True\n\n hostname = url_util.get_hostname(url)\n\n allowed_domains = [ # List[Union[str, Callable[[], Optional[str]]]]\n # Check localhost first.\n \"localhost\",\n \"0.0.0.0\",\n \"127.0.0.1\",\n # Try to avoid making unecessary HTTP requests by checking if the user\n # manually specified a server address.\n _get_server_address_if_manually_set,\n _get_s3_url_host_if_manually_set,\n # Then try the options that depend on HTTP requests or opening sockets.\n net_util.get_internal_ip,\n net_util.get_external_ip,\n lambda: config.get_option(\"s3.bucket\"),\n ]\n\n for allowed_domain in allowed_domains:\n if callable(allowed_domain):\n allowed_domain = allowed_domain()\n\n if allowed_domain is None:\n continue\n\n if hostname == allowed_domain:\n return True\n\n return False\n\n\ndef _get_server_address_if_manually_set() -> Optional[str]:\n if config.is_manually_set(\"browser.serverAddress\"):\n return url_util.get_hostname(config.get_option(\"browser.serverAddress\"))\n return None\n\n\ndef _get_s3_url_host_if_manually_set() -> Optional[str]:\n if config.is_manually_set(\"s3.url\"):\n return url_util.get_hostname(config.get_option(\"s3.url\"))\n return None\n\n\ndef make_url_path_regex(*path, **kwargs):\n \"\"\"Get a regex of the form ^/foo/bar/baz/?$ for a path (foo, bar, baz).\"\"\"\n path = [x.strip(\"/\") for x in path if x] # Filter out falsy components.\n path_format = r\"^/%s/?$\" if kwargs.get(\"trailing_slash\", True) else r\"^/%s$\"\n return path_format % \"/\".join(path)\n", "path": "lib/streamlit/server/server_util.py"}]} | 1,945 | 298 |
gh_patches_debug_26174 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-776 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove botan from our library
according to [this](https://github.com/botanio/sdk#py), botan has its own implementation for Python. No need to reinvent the wheel. I suggest we remove it from ptb in the next major (8.0) version.
--- END ISSUE ---
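For users who still want Botan tracking once the class is gone, the same HTTP endpoint can be called directly; a rough standalone sketch follows (the URL template is copied from the module being removed, while the use of `requests` is an assumption, not part of this library):
```python
import requests

URL_TEMPLATE = ('https://api.botan.io/track?token={token}'
                '&uid={uid}&name={name}&src=python-telegram-bot')

def track(token, uid, message_json, event_name='event'):
    # Mirrors the payload the removed Botan class used to send.
    url = URL_TEMPLATE.format(token=token, uid=uid, name=event_name)
    response = requests.post(
        url, data=message_json.encode(),
        headers={'Content-Type': 'application/json'})
    return response.ok
```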
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram/contrib/__init__.py`
Content:
```
1 from .botan import Botan
2
3 __all__ = ['Botan']
4
```
Path: `telegram/contrib/botan.py`
Content:
```
1 import logging
2
3 from future.moves.urllib.parse import quote
4 from future.moves.urllib.error import HTTPError, URLError
5 from future.moves.urllib.request import urlopen, Request
6
7 logging.getLogger(__name__).addHandler(logging.NullHandler())
8
9
10 class Botan(object):
11 """This class helps to send incoming events to your botan analytics account.
12 See more: https://github.com/botanio/sdk#botan-sdk
13 """
14
15 token = ''
16 url_template = 'https://api.botan.io/track?token={token}' \
17 '&uid={uid}&name={name}&src=python-telegram-bot'
18
19 def __init__(self, token):
20 self.token = token
21 self.logger = logging.getLogger(__name__)
22
23 def track(self, message, event_name='event'):
24 try:
25 uid = message.chat_id
26 except AttributeError:
27 self.logger.warn('No chat_id in message')
28 return False
29 data = message.to_json()
30 try:
31 url = self.url_template.format(
32 token=str(self.token), uid=str(uid), name=quote(event_name))
33 request = Request(
34 url, data=data.encode(), headers={'Content-Type': 'application/json'})
35 urlopen(request)
36 return True
37 except HTTPError as error:
38 self.logger.warn('Botan track error ' + str(error.code) + ':' + error.read().decode(
39 'utf-8'))
40 return False
41 except URLError as error:
42 self.logger.warn('Botan track error ' + str(error.reason))
43 return False
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/telegram/contrib/__init__.py b/telegram/contrib/__init__.py
deleted file mode 100644
--- a/telegram/contrib/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .botan import Botan
-
-__all__ = ['Botan']
diff --git a/telegram/contrib/botan.py b/telegram/contrib/botan.py
deleted file mode 100644
--- a/telegram/contrib/botan.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import logging
-
-from future.moves.urllib.parse import quote
-from future.moves.urllib.error import HTTPError, URLError
-from future.moves.urllib.request import urlopen, Request
-
-logging.getLogger(__name__).addHandler(logging.NullHandler())
-
-
-class Botan(object):
- """This class helps to send incoming events to your botan analytics account.
- See more: https://github.com/botanio/sdk#botan-sdk
- """
-
- token = ''
- url_template = 'https://api.botan.io/track?token={token}' \
- '&uid={uid}&name={name}&src=python-telegram-bot'
-
- def __init__(self, token):
- self.token = token
- self.logger = logging.getLogger(__name__)
-
- def track(self, message, event_name='event'):
- try:
- uid = message.chat_id
- except AttributeError:
- self.logger.warn('No chat_id in message')
- return False
- data = message.to_json()
- try:
- url = self.url_template.format(
- token=str(self.token), uid=str(uid), name=quote(event_name))
- request = Request(
- url, data=data.encode(), headers={'Content-Type': 'application/json'})
- urlopen(request)
- return True
- except HTTPError as error:
- self.logger.warn('Botan track error ' + str(error.code) + ':' + error.read().decode(
- 'utf-8'))
- return False
- except URLError as error:
- self.logger.warn('Botan track error ' + str(error.reason))
- return False
| {"golden_diff": "diff --git a/telegram/contrib/__init__.py b/telegram/contrib/__init__.py\ndeleted file mode 100644\n--- a/telegram/contrib/__init__.py\n+++ /dev/null\n@@ -1,3 +0,0 @@\n-from .botan import Botan\n-\n-__all__ = ['Botan']\ndiff --git a/telegram/contrib/botan.py b/telegram/contrib/botan.py\ndeleted file mode 100644\n--- a/telegram/contrib/botan.py\n+++ /dev/null\n@@ -1,43 +0,0 @@\n-import logging\n-\n-from future.moves.urllib.parse import quote\n-from future.moves.urllib.error import HTTPError, URLError\n-from future.moves.urllib.request import urlopen, Request\n-\n-logging.getLogger(__name__).addHandler(logging.NullHandler())\n-\n-\n-class Botan(object):\n- \"\"\"This class helps to send incoming events to your botan analytics account.\n- See more: https://github.com/botanio/sdk#botan-sdk\n- \"\"\"\n-\n- token = ''\n- url_template = 'https://api.botan.io/track?token={token}' \\\n- '&uid={uid}&name={name}&src=python-telegram-bot'\n-\n- def __init__(self, token):\n- self.token = token\n- self.logger = logging.getLogger(__name__)\n-\n- def track(self, message, event_name='event'):\n- try:\n- uid = message.chat_id\n- except AttributeError:\n- self.logger.warn('No chat_id in message')\n- return False\n- data = message.to_json()\n- try:\n- url = self.url_template.format(\n- token=str(self.token), uid=str(uid), name=quote(event_name))\n- request = Request(\n- url, data=data.encode(), headers={'Content-Type': 'application/json'})\n- urlopen(request)\n- return True\n- except HTTPError as error:\n- self.logger.warn('Botan track error ' + str(error.code) + ':' + error.read().decode(\n- 'utf-8'))\n- return False\n- except URLError as error:\n- self.logger.warn('Botan track error ' + str(error.reason))\n- return False\n", "issue": "Remove botan from our library\naccording to [this](https://github.com/botanio/sdk#py) botan has it's own implementation for python. No need to reinvent the wheel. 
I suggest we remove it from ptb in the next major (8.0) version.\n", "before_files": [{"content": "from .botan import Botan\n\n__all__ = ['Botan']\n", "path": "telegram/contrib/__init__.py"}, {"content": "import logging\n\nfrom future.moves.urllib.parse import quote\nfrom future.moves.urllib.error import HTTPError, URLError\nfrom future.moves.urllib.request import urlopen, Request\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n\n\nclass Botan(object):\n \"\"\"This class helps to send incoming events to your botan analytics account.\n See more: https://github.com/botanio/sdk#botan-sdk\n \"\"\"\n\n token = ''\n url_template = 'https://api.botan.io/track?token={token}' \\\n '&uid={uid}&name={name}&src=python-telegram-bot'\n\n def __init__(self, token):\n self.token = token\n self.logger = logging.getLogger(__name__)\n\n def track(self, message, event_name='event'):\n try:\n uid = message.chat_id\n except AttributeError:\n self.logger.warn('No chat_id in message')\n return False\n data = message.to_json()\n try:\n url = self.url_template.format(\n token=str(self.token), uid=str(uid), name=quote(event_name))\n request = Request(\n url, data=data.encode(), headers={'Content-Type': 'application/json'})\n urlopen(request)\n return True\n except HTTPError as error:\n self.logger.warn('Botan track error ' + str(error.code) + ':' + error.read().decode(\n 'utf-8'))\n return False\n except URLError as error:\n self.logger.warn('Botan track error ' + str(error.reason))\n return False\n", "path": "telegram/contrib/botan.py"}], "after_files": [{"content": null, "path": "telegram/contrib/__init__.py"}, {"content": null, "path": "telegram/contrib/botan.py"}]} | 769 | 498 |
gh_patches_debug_16746 | rasdani/github-patches | git_diff | scikit-image__scikit-image-1927 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DOC: Reduce image size in plot_inpaint
In reference to #1920
- Reduce the size of the image in doc/examples/filters/plot_inpaint.py to show the result of the algorithm more clearly.
--- END ISSUE ---
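Concretely, the change amounts to cropping the input image and shifting the defect mask so it fits inside the smaller frame; a sketch of the intended setup (coordinates follow the fix shown later in this row):
```python
import numpy as np
from skimage import data

# Crop the astronaut image to a 200x200 region so defects stay visible.
image_orig = data.astronaut()[0:200, 0:200]

# Three defect regions placed inside the cropped frame.
mask = np.zeros(image_orig.shape[:-1])
mask[20:60, 0:20] = 1
mask[160:180, 70:155] = 1
mask[30:60, 170:195] = 1
```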
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/examples/filters/plot_inpaint.py`
Content:
```
1 """
2 ===========
3 Inpainting
4 ===========
5 Inpainting [1]_ is the process of reconstructing lost or deteriorated
6 parts of images and videos.
7
8 The reconstruction is supposed to be performed in fully automatic way by
9 exploiting the information presented in non-damaged regions.
10
11 In this example, we show how the masked pixels get inpainted by
12 inpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_.
13
14 .. [1] Wikipedia. Inpainting
15 https://en.wikipedia.org/wiki/Inpainting
16 .. [2] Wikipedia. Biharmonic equation
17 https://en.wikipedia.org/wiki/Biharmonic_equation
18 .. [3] N.S.Hoang, S.B.Damelin, "On surface completion and image
19 inpainting by biharmonic functions: numerical aspects",
20 http://www.ima.umn.edu/~damelin/biharmonic
21 """
22
23 import numpy as np
24 import matplotlib.pyplot as plt
25
26 from skimage import data, color
27 from skimage.restoration import inpaint
28
29 image_orig = data.astronaut()
30
31 # Create mask with three defect regions: left, middle, right respectively
32 mask = np.zeros(image_orig.shape[:-1])
33 mask[20:60, 0:20] = 1
34 mask[200:300, 150:170] = 1
35 mask[50:100, 400:430] = 1
36
37 # Defect image over the same region in each color channel
38 image_defect = image_orig.copy()
39 for layer in range(image_defect.shape[-1]):
40 image_defect[np.where(mask)] = 0
41
42 image_result = inpaint.inpaint_biharmonic(image_defect, mask, multichannel=True)
43
44 fig, axes = plt.subplots(ncols=2, nrows=2)
45 ax0, ax1, ax2, ax3 = axes.ravel()
46
47 ax0.set_title('Original image')
48 ax0.imshow(image_orig)
49 ax0.axis('off')
50
51 ax1.set_title('Mask')
52 ax1.imshow(mask, cmap=plt.cm.gray)
53 ax1.axis('off')
54
55 ax2.set_title('Defected image')
56 ax2.imshow(image_defect)
57 ax2.axis('off')
58
59 ax3.set_title('Inpainted image')
60 ax3.imshow(image_result)
61 ax3.axis('off')
62
63 plt.tight_layout()
64 plt.show()
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/doc/examples/filters/plot_inpaint.py b/doc/examples/filters/plot_inpaint.py
--- a/doc/examples/filters/plot_inpaint.py
+++ b/doc/examples/filters/plot_inpaint.py
@@ -26,13 +26,13 @@
from skimage import data, color
from skimage.restoration import inpaint
-image_orig = data.astronaut()
+image_orig = data.astronaut()[0:200, 0:200]
# Create mask with three defect regions: left, middle, right respectively
mask = np.zeros(image_orig.shape[:-1])
mask[20:60, 0:20] = 1
-mask[200:300, 150:170] = 1
-mask[50:100, 400:430] = 1
+mask[160:180, 70:155] = 1
+mask[30:60, 170:195] = 1
# Defect image over the same region in each color channel
image_defect = image_orig.copy()
@@ -60,5 +60,5 @@
ax3.imshow(image_result)
ax3.axis('off')
-plt.tight_layout()
+fig.tight_layout()
plt.show()
| {"golden_diff": "diff --git a/doc/examples/filters/plot_inpaint.py b/doc/examples/filters/plot_inpaint.py\n--- a/doc/examples/filters/plot_inpaint.py\n+++ b/doc/examples/filters/plot_inpaint.py\n@@ -26,13 +26,13 @@\n from skimage import data, color\n from skimage.restoration import inpaint\n \n-image_orig = data.astronaut()\n+image_orig = data.astronaut()[0:200, 0:200]\n \n # Create mask with three defect regions: left, middle, right respectively\n mask = np.zeros(image_orig.shape[:-1])\n mask[20:60, 0:20] = 1\n-mask[200:300, 150:170] = 1\n-mask[50:100, 400:430] = 1\n+mask[160:180, 70:155] = 1\n+mask[30:60, 170:195] = 1\n \n # Defect image over the same region in each color channel\n image_defect = image_orig.copy()\n@@ -60,5 +60,5 @@\n ax3.imshow(image_result)\n ax3.axis('off')\n \n-plt.tight_layout()\n+fig.tight_layout()\n plt.show()\n", "issue": "DOC: Reduce size image in plot_inpaint\nIn reference to #1920 \n- Reduce the size of the image in doc/examples/filters/plot_inpaint.py to show more clearly the result of the algorithm.\n\n", "before_files": [{"content": "\"\"\"\n===========\nInpainting\n===========\nInpainting [1]_ is the process of reconstructing lost or deteriorated\nparts of images and videos.\n\nThe reconstruction is supposed to be performed in fully automatic way by\nexploiting the information presented in non-damaged regions.\n\nIn this example, we show how the masked pixels get inpainted by\ninpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_.\n\n.. [1] Wikipedia. Inpainting\n https://en.wikipedia.org/wiki/Inpainting\n.. [2] Wikipedia. Biharmonic equation\n https://en.wikipedia.org/wiki/Biharmonic_equation\n.. [3] N.S.Hoang, S.B.Damelin, \"On surface completion and image\n inpainting by biharmonic functions: numerical aspects\",\n http://www.ima.umn.edu/~damelin/biharmonic\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import data, color\nfrom skimage.restoration import inpaint\n\nimage_orig = data.astronaut()\n\n# Create mask with three defect regions: left, middle, right respectively\nmask = np.zeros(image_orig.shape[:-1])\nmask[20:60, 0:20] = 1\nmask[200:300, 150:170] = 1\nmask[50:100, 400:430] = 1\n\n# Defect image over the same region in each color channel\nimage_defect = image_orig.copy()\nfor layer in range(image_defect.shape[-1]):\n image_defect[np.where(mask)] = 0\n\nimage_result = inpaint.inpaint_biharmonic(image_defect, mask, multichannel=True)\n\nfig, axes = plt.subplots(ncols=2, nrows=2)\nax0, ax1, ax2, ax3 = axes.ravel()\n\nax0.set_title('Original image')\nax0.imshow(image_orig)\nax0.axis('off')\n\nax1.set_title('Mask')\nax1.imshow(mask, cmap=plt.cm.gray)\nax1.axis('off')\n\nax2.set_title('Defected image')\nax2.imshow(image_defect)\nax2.axis('off')\n\nax3.set_title('Inpainted image')\nax3.imshow(image_result)\nax3.axis('off')\n\nplt.tight_layout()\nplt.show()\n", "path": "doc/examples/filters/plot_inpaint.py"}], "after_files": [{"content": "\"\"\"\n===========\nInpainting\n===========\nInpainting [1]_ is the process of reconstructing lost or deteriorated\nparts of images and videos.\n\nThe reconstruction is supposed to be performed in fully automatic way by\nexploiting the information presented in non-damaged regions.\n\nIn this example, we show how the masked pixels get inpainted by\ninpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_.\n\n.. [1] Wikipedia. Inpainting\n https://en.wikipedia.org/wiki/Inpainting\n.. [2] Wikipedia. 
Biharmonic equation\n https://en.wikipedia.org/wiki/Biharmonic_equation\n.. [3] N.S.Hoang, S.B.Damelin, \"On surface completion and image\n inpainting by biharmonic functions: numerical aspects\",\n http://www.ima.umn.edu/~damelin/biharmonic\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import data, color\nfrom skimage.restoration import inpaint\n\nimage_orig = data.astronaut()[0:200, 0:200]\n\n# Create mask with three defect regions: left, middle, right respectively\nmask = np.zeros(image_orig.shape[:-1])\nmask[20:60, 0:20] = 1\nmask[160:180, 70:155] = 1\nmask[30:60, 170:195] = 1\n\n# Defect image over the same region in each color channel\nimage_defect = image_orig.copy()\nfor layer in range(image_defect.shape[-1]):\n image_defect[np.where(mask)] = 0\n\nimage_result = inpaint.inpaint_biharmonic(image_defect, mask, multichannel=True)\n\nfig, axes = plt.subplots(ncols=2, nrows=2)\nax0, ax1, ax2, ax3 = axes.ravel()\n\nax0.set_title('Original image')\nax0.imshow(image_orig)\nax0.axis('off')\n\nax1.set_title('Mask')\nax1.imshow(mask, cmap=plt.cm.gray)\nax1.axis('off')\n\nax2.set_title('Defected image')\nax2.imshow(image_defect)\nax2.axis('off')\n\nax3.set_title('Inpainted image')\nax3.imshow(image_result)\nax3.axis('off')\n\nfig.tight_layout()\nplt.show()\n", "path": "doc/examples/filters/plot_inpaint.py"}]} | 950 | 300 |
gh_patches_debug_37093 | rasdani/github-patches | git_diff | TheAlgorithms__Python-2221 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
maths/number_of_digits.py is naive!
<code>[maths/number_of_digits.py](https://github.com/TheAlgorithms/Python/blob/master/maths/number_of_digits.py)</code> has a naive method. The suggested method has a time complexity of O(n). We can count number of digits of a number in O(1).
--- END ISSUE ---
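The O(1) count follows directly from the base-10 logarithm; a minimal sketch (note that `abs()` guards against negative input, for which the logarithm is undefined, and that zero would still need a special case):
```python
import math

def num_digits_fast(n: int) -> int:
    # floor(log10(|n|)) + 1 equals the number of decimal digits.
    return math.floor(math.log(abs(n), 10) + 1)

assert num_digits_fast(12345) == 5
assert num_digits_fast(123) == 3
```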
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `maths/number_of_digits.py`
Content:
```
1 def num_digits(n: int) -> int:
2 """
3 Find the number of digits in a number.
4
5 >>> num_digits(12345)
6 5
7 >>> num_digits(123)
8 3
9 """
10 digits = 0
11 while n > 0:
12 n = n // 10
13 digits += 1
14 return digits
15
16
17 if __name__ == "__main__":
18 print(num_digits(12345)) # ===> 5
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/maths/number_of_digits.py b/maths/number_of_digits.py
--- a/maths/number_of_digits.py
+++ b/maths/number_of_digits.py
@@ -1,3 +1,7 @@
+import math
+from timeit import timeit
+
+
def num_digits(n: int) -> int:
"""
Find the number of digits in a number.
@@ -14,5 +18,82 @@
return digits
+def num_digits_fast(n: int) -> int:
+ """
+ Find the number of digits in a number.
+ abs() is used as logarithm for negative numbers is not defined.
+
+ >>> num_digits_fast(12345)
+ 5
+ >>> num_digits_fast(123)
+ 3
+ """
+ return (math.floor(math.log(abs(n), 10) + 1))
+
+
+def num_digits_faster(n: int) -> int:
+ """
+ Find the number of digits in a number.
+ abs() is used for negative numbers
+
+ >>> num_digits_faster(12345)
+ 5
+ >>> num_digits_faster(123)
+ 3
+ """
+ return (len(str(abs(n))))
+
+
+def benchmark() -> None:
+ """
+ Benchmark code for comparing 3 functions,
+ with 3 different length int values.
+ """
+ print('\nFor small_num = ', small_num, ':')
+ print("> num_digits()",
+ '\t\tans =', num_digits(small_num),
+ '\ttime =', timeit("z.num_digits(z.small_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_fast()",
+ '\tans =', num_digits_fast(small_num),
+ '\ttime =', timeit("z.num_digits_fast(z.small_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_faster()",
+ '\tans =', num_digits_faster(small_num),
+ '\ttime =', timeit("z.num_digits_faster(z.small_num)",
+ setup="import __main__ as z"), "seconds")
+
+ print('\nFor medium_num = ', medium_num, ':')
+ print("> num_digits()",
+ '\t\tans =', num_digits(medium_num),
+ '\ttime =', timeit("z.num_digits(z.medium_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_fast()",
+ '\tans =', num_digits_fast(medium_num),
+ '\ttime =', timeit("z.num_digits_fast(z.medium_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_faster()",
+ '\tans =', num_digits_faster(medium_num),
+ '\ttime =', timeit("z.num_digits_faster(z.medium_num)",
+ setup="import __main__ as z"), "seconds")
+
+ print('\nFor large_num = ', large_num, ':')
+ print("> num_digits()",
+ '\t\tans =', num_digits(large_num),
+ '\ttime =', timeit("z.num_digits(z.large_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_fast()",
+ '\tans =', num_digits_fast(large_num),
+ '\ttime =', timeit("z.num_digits_fast(z.large_num)",
+ setup="import __main__ as z"), "seconds")
+ print("> num_digits_faster()",
+ '\tans =', num_digits_faster(large_num),
+ '\ttime =', timeit("z.num_digits_faster(z.large_num)",
+ setup="import __main__ as z"), "seconds")
+
+
if __name__ == "__main__":
- print(num_digits(12345)) # ===> 5
+ small_num = 262144
+ medium_num = 1125899906842624
+ large_num = 1267650600228229401496703205376
+ benchmark()
| {"golden_diff": "diff --git a/maths/number_of_digits.py b/maths/number_of_digits.py\n--- a/maths/number_of_digits.py\n+++ b/maths/number_of_digits.py\n@@ -1,3 +1,7 @@\n+import math\n+from timeit import timeit\n+\n+\n def num_digits(n: int) -> int:\n \"\"\"\n Find the number of digits in a number.\n@@ -14,5 +18,82 @@\n return digits\n \n \n+def num_digits_fast(n: int) -> int:\n+ \"\"\"\n+ Find the number of digits in a number.\n+ abs() is used as logarithm for negative numbers is not defined.\n+\n+ >>> num_digits_fast(12345)\n+ 5\n+ >>> num_digits_fast(123)\n+ 3\n+ \"\"\"\n+ return (math.floor(math.log(abs(n), 10) + 1))\n+\n+\n+def num_digits_faster(n: int) -> int:\n+ \"\"\"\n+ Find the number of digits in a number.\n+ abs() is used for negative numbers\n+\n+ >>> num_digits_faster(12345)\n+ 5\n+ >>> num_digits_faster(123)\n+ 3\n+ \"\"\"\n+ return (len(str(abs(n))))\n+\n+\n+def benchmark() -> None:\n+ \"\"\"\n+ Benchmark code for comparing 3 functions,\n+ with 3 different length int values.\n+ \"\"\"\n+ print('\\nFor small_num = ', small_num, ':')\n+ print(\"> num_digits()\",\n+ '\\t\\tans =', num_digits(small_num),\n+ '\\ttime =', timeit(\"z.num_digits(z.small_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_fast()\",\n+ '\\tans =', num_digits_fast(small_num),\n+ '\\ttime =', timeit(\"z.num_digits_fast(z.small_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_faster()\",\n+ '\\tans =', num_digits_faster(small_num),\n+ '\\ttime =', timeit(\"z.num_digits_faster(z.small_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+\n+ print('\\nFor medium_num = ', medium_num, ':')\n+ print(\"> num_digits()\",\n+ '\\t\\tans =', num_digits(medium_num),\n+ '\\ttime =', timeit(\"z.num_digits(z.medium_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_fast()\",\n+ '\\tans =', num_digits_fast(medium_num),\n+ '\\ttime =', timeit(\"z.num_digits_fast(z.medium_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_faster()\",\n+ '\\tans =', num_digits_faster(medium_num),\n+ '\\ttime =', timeit(\"z.num_digits_faster(z.medium_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+\n+ print('\\nFor large_num = ', large_num, ':')\n+ print(\"> num_digits()\",\n+ '\\t\\tans =', num_digits(large_num),\n+ '\\ttime =', timeit(\"z.num_digits(z.large_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_fast()\",\n+ '\\tans =', num_digits_fast(large_num),\n+ '\\ttime =', timeit(\"z.num_digits_fast(z.large_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+ print(\"> num_digits_faster()\",\n+ '\\tans =', num_digits_faster(large_num),\n+ '\\ttime =', timeit(\"z.num_digits_faster(z.large_num)\",\n+ setup=\"import __main__ as z\"), \"seconds\")\n+\n+\n if __name__ == \"__main__\":\n- print(num_digits(12345)) # ===> 5\n+ small_num = 262144\n+ medium_num = 1125899906842624\n+ large_num = 1267650600228229401496703205376\n+ benchmark()\n", "issue": "maths/number_of_digits.py is naive!\n<code>[maths/number_of_digits.py](https://github.com/TheAlgorithms/Python/blob/master/maths/number_of_digits.py)</code> has a naive method. The suggested method has a time complexity of O(n). 
We can count number of digits of a number in O(1).\n", "before_files": [{"content": "def num_digits(n: int) -> int:\n \"\"\"\n Find the number of digits in a number.\n\n >>> num_digits(12345)\n 5\n >>> num_digits(123)\n 3\n \"\"\"\n digits = 0\n while n > 0:\n n = n // 10\n digits += 1\n return digits\n\n\nif __name__ == \"__main__\":\n print(num_digits(12345)) # ===> 5\n", "path": "maths/number_of_digits.py"}], "after_files": [{"content": "import math\nfrom timeit import timeit\n\n\ndef num_digits(n: int) -> int:\n \"\"\"\n Find the number of digits in a number.\n\n >>> num_digits(12345)\n 5\n >>> num_digits(123)\n 3\n \"\"\"\n digits = 0\n while n > 0:\n n = n // 10\n digits += 1\n return digits\n\n\ndef num_digits_fast(n: int) -> int:\n \"\"\"\n Find the number of digits in a number.\n abs() is used as logarithm for negative numbers is not defined.\n\n >>> num_digits_fast(12345)\n 5\n >>> num_digits_fast(123)\n 3\n \"\"\"\n return (math.floor(math.log(abs(n), 10) + 1))\n\n\ndef num_digits_faster(n: int) -> int:\n \"\"\"\n Find the number of digits in a number.\n abs() is used for negative numbers\n\n >>> num_digits_faster(12345)\n 5\n >>> num_digits_faster(123)\n 3\n \"\"\"\n return (len(str(abs(n))))\n\n\ndef benchmark() -> None:\n \"\"\"\n Benchmark code for comparing 3 functions,\n with 3 different length int values.\n \"\"\"\n print('\\nFor small_num = ', small_num, ':')\n print(\"> num_digits()\",\n '\\t\\tans =', num_digits(small_num),\n '\\ttime =', timeit(\"z.num_digits(z.small_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n print(\"> num_digits_fast()\",\n '\\tans =', num_digits_fast(small_num),\n '\\ttime =', timeit(\"z.num_digits_fast(z.small_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n print(\"> num_digits_faster()\",\n '\\tans =', num_digits_faster(small_num),\n '\\ttime =', timeit(\"z.num_digits_faster(z.small_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n\n print('\\nFor medium_num = ', medium_num, ':')\n print(\"> num_digits()\",\n '\\t\\tans =', num_digits(medium_num),\n '\\ttime =', timeit(\"z.num_digits(z.medium_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n print(\"> num_digits_fast()\",\n '\\tans =', num_digits_fast(medium_num),\n '\\ttime =', timeit(\"z.num_digits_fast(z.medium_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n print(\"> num_digits_faster()\",\n '\\tans =', num_digits_faster(medium_num),\n '\\ttime =', timeit(\"z.num_digits_faster(z.medium_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n\n print('\\nFor large_num = ', large_num, ':')\n print(\"> num_digits()\",\n '\\t\\tans =', num_digits(large_num),\n '\\ttime =', timeit(\"z.num_digits(z.large_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n print(\"> num_digits_fast()\",\n '\\tans =', num_digits_fast(large_num),\n '\\ttime =', timeit(\"z.num_digits_fast(z.large_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n print(\"> num_digits_faster()\",\n '\\tans =', num_digits_faster(large_num),\n '\\ttime =', timeit(\"z.num_digits_faster(z.large_num)\",\n setup=\"import __main__ as z\"), \"seconds\")\n\n\nif __name__ == \"__main__\":\n small_num = 262144\n medium_num = 1125899906842624\n large_num = 1267650600228229401496703205376\n benchmark()\n", "path": "maths/number_of_digits.py"}]} | 474 | 977 |
gh_patches_debug_3043 | rasdani/github-patches | git_diff | docker__docker-py-1250 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
attach is causing an "Invalid Argument" exception from os.read
``` python
stream = client.attach(container, stream=True, stdout=True, stderr=True)
for chunk in stream:
pass
```
Results in:
```
File "/Users/michael/work/oss/marina/marina/build.py", line 695, in watcher
for chunk in stream:
File ".venv/lib/python3.5/site-packages/docker/utils/socket.py", line 67, in frames_iter
yield read(socket, n)
File ".venv/lib/python3.5/site-packages/docker/utils/socket.py", line 25, in read
return os.read(socket.fileno(), n)
OSError: [Errno 22] Invalid argument
```
Using docker-py 1.10.2 on OS X 10.11.6 with docker for mac 1.12.0-rc3. Reverting to 1.9.0 fixes the issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/utils/socket.py`
Content:
```
1 import errno
2 import os
3 import select
4 import struct
5
6 import six
7
8 try:
9 from ..transport import NpipeSocket
10 except ImportError:
11 NpipeSocket = type(None)
12
13
14 class SocketError(Exception):
15 pass
16
17
18 def read(socket, n=4096):
19 """
20 Reads at most n bytes from socket
21 """
22
23 recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
24
25 # wait for data to become available
26 if not isinstance(socket, NpipeSocket):
27 select.select([socket], [], [])
28
29 try:
30 if hasattr(socket, 'recv'):
31 return socket.recv(n)
32 return os.read(socket.fileno(), n)
33 except EnvironmentError as e:
34 if e.errno not in recoverable_errors:
35 raise
36
37
38 def read_exactly(socket, n):
39 """
40 Reads exactly n bytes from socket
41 Raises SocketError if there isn't enough data
42 """
43 data = six.binary_type()
44 while len(data) < n:
45 next_data = read(socket, n - len(data))
46 if not next_data:
47 raise SocketError("Unexpected EOF")
48 data += next_data
49 return data
50
51
52 def next_frame_size(socket):
53 """
54 Returns the size of the next frame of data waiting to be read from socket,
55 according to the protocol defined here:
56
57 https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container
58 """
59 try:
60 data = read_exactly(socket, 8)
61 except SocketError:
62 return 0
63
64 _, actual = struct.unpack('>BxxxL', data)
65 return actual
66
67
68 def frames_iter(socket):
69 """
70 Returns a generator of frames read from socket
71 """
72 n = next_frame_size(socket)
73 while n > 0:
74 yield read(socket, n)
75 n = next_frame_size(socket)
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/utils/socket.py b/docker/utils/socket.py
--- a/docker/utils/socket.py
+++ b/docker/utils/socket.py
@@ -69,7 +69,11 @@
"""
Returns a generator of frames read from socket
"""
- n = next_frame_size(socket)
- while n > 0:
- yield read(socket, n)
+ while True:
n = next_frame_size(socket)
+ if n == 0:
+ break
+ while n > 0:
+ result = read(socket, n)
+ n -= len(result)
+ yield result
| {"golden_diff": "diff --git a/docker/utils/socket.py b/docker/utils/socket.py\n--- a/docker/utils/socket.py\n+++ b/docker/utils/socket.py\n@@ -69,7 +69,11 @@\n \"\"\"\n Returns a generator of frames read from socket\n \"\"\"\n- n = next_frame_size(socket)\n- while n > 0:\n- yield read(socket, n)\n+ while True:\n n = next_frame_size(socket)\n+ if n == 0:\n+ break\n+ while n > 0:\n+ result = read(socket, n)\n+ n -= len(result)\n+ yield result\n", "issue": "attach is causing an \"Invalid Argument\" exception from os.read\n``` python\nstream = client.attach(container, stream=True, stdout=True, stderr=True)\nfor chunk in stream:\n pass\n```\n\nResults in:\n\n```\n File \"/Users/michael/work/oss/marina/marina/build.py\", line 695, in watcher\n for chunk in stream:\n File \".venv/lib/python3.5/site-packages/docker/utils/socket.py\", line 67, in frames_iter\n yield read(socket, n)\n File \".venv/lib/python3.5/site-packages/docker/utils/socket.py\", line 25, in read\n return os.read(socket.fileno(), n)\nOSError: [Errno 22] Invalid argument\n```\n\nUsing docker-py 1.10.2 on OS X 10.11.6 with docker for mac 1.12.0-rc3. Reverting to 1.9.0 fixes the issue.\n\n", "before_files": [{"content": "import errno\nimport os\nimport select\nimport struct\n\nimport six\n\ntry:\n from ..transport import NpipeSocket\nexcept ImportError:\n NpipeSocket = type(None)\n\n\nclass SocketError(Exception):\n pass\n\n\ndef read(socket, n=4096):\n \"\"\"\n Reads at most n bytes from socket\n \"\"\"\n\n recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)\n\n # wait for data to become available\n if not isinstance(socket, NpipeSocket):\n select.select([socket], [], [])\n\n try:\n if hasattr(socket, 'recv'):\n return socket.recv(n)\n return os.read(socket.fileno(), n)\n except EnvironmentError as e:\n if e.errno not in recoverable_errors:\n raise\n\n\ndef read_exactly(socket, n):\n \"\"\"\n Reads exactly n bytes from socket\n Raises SocketError if there isn't enough data\n \"\"\"\n data = six.binary_type()\n while len(data) < n:\n next_data = read(socket, n - len(data))\n if not next_data:\n raise SocketError(\"Unexpected EOF\")\n data += next_data\n return data\n\n\ndef next_frame_size(socket):\n \"\"\"\n Returns the size of the next frame of data waiting to be read from socket,\n according to the protocol defined here:\n\n https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container\n \"\"\"\n try:\n data = read_exactly(socket, 8)\n except SocketError:\n return 0\n\n _, actual = struct.unpack('>BxxxL', data)\n return actual\n\n\ndef frames_iter(socket):\n \"\"\"\n Returns a generator of frames read from socket\n \"\"\"\n n = next_frame_size(socket)\n while n > 0:\n yield read(socket, n)\n n = next_frame_size(socket)\n", "path": "docker/utils/socket.py"}], "after_files": [{"content": "import errno\nimport os\nimport select\nimport struct\n\nimport six\n\ntry:\n from ..transport import NpipeSocket\nexcept ImportError:\n NpipeSocket = type(None)\n\n\nclass SocketError(Exception):\n pass\n\n\ndef read(socket, n=4096):\n \"\"\"\n Reads at most n bytes from socket\n \"\"\"\n\n recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)\n\n # wait for data to become available\n if not isinstance(socket, NpipeSocket):\n select.select([socket], [], [])\n\n try:\n if hasattr(socket, 'recv'):\n return socket.recv(n)\n return os.read(socket.fileno(), n)\n except EnvironmentError as e:\n if e.errno not in recoverable_errors:\n raise\n\n\ndef read_exactly(socket, n):\n \"\"\"\n 
Reads exactly n bytes from socket\n Raises SocketError if there isn't enough data\n \"\"\"\n data = six.binary_type()\n while len(data) < n:\n next_data = read(socket, n - len(data))\n if not next_data:\n raise SocketError(\"Unexpected EOF\")\n data += next_data\n return data\n\n\ndef next_frame_size(socket):\n \"\"\"\n Returns the size of the next frame of data waiting to be read from socket,\n according to the protocol defined here:\n\n https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container\n \"\"\"\n try:\n data = read_exactly(socket, 8)\n except SocketError:\n return 0\n\n _, actual = struct.unpack('>BxxxL', data)\n return actual\n\n\ndef frames_iter(socket):\n \"\"\"\n Returns a generator of frames read from socket\n \"\"\"\n while True:\n n = next_frame_size(socket)\n if n == 0:\n break\n while n > 0:\n result = read(socket, n)\n n -= len(result)\n yield result\n", "path": "docker/utils/socket.py"}]} | 1,028 | 134 |
gh_patches_debug_2193 | rasdani/github-patches | git_diff | ansible-collections__community.general-6695 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
read_csv - Key 'Name' was not found in the CSV header fields
##### SUMMARY
The `read_csv` module fails to identify a field, yet displaces the field in the list of available fields.
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
read_csv
##### ANSIBLE VERSION
```
ansible 2.9.10
config file = /home/anton/git/ansible-deploy-vmware-vm/ansible.cfg
configured module search path = ['/home/anton/git/ansible-deploy-vmware-vm/library']
ansible python module location = /home/anton/.local/lib/python3.6/site-packages/ansible
executable location = /home/anton/.local/bin/ansible
python version = 3.6.9 (default, Apr 18 2020, 01:56:04) [GCC 8.4.0]
```
##### CONFIGURATION
```
# config file for ansible -- http://ansible.com/
# ==============================================
# nearly all parameters can be overridden in ansible-playbook
# or with command line flags. ansible will read ANSIBLE_CONFIG,
# ansible.cfg in the current working directory, .ansible.cfg in
# the home directory or /etc/ansible/ansible.cfg, whichever it
# finds first
[defaults]
host_key_checking = False
host_key_check = False
ansible_python_interpreter=/usr/bin/python3
log_path = ./ansible.log
#bin_ansible_callbacks=True
#stdout_callback = debug
# some basic default values...
library = ./library
# additional paths to search for roles in, colon separated
roles_path = ./roles
[ssh_connection]
# ssh arguments to use
ssh_args = -o StrictHostKeyChecking=no
timeout=60
```
##### OS / ENVIRONMENT
Ubuntu 20:04
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```
---
- name: Right-size VMs
gather_facts: false
hosts: all
connection: local
tasks:
# Read a CSV file and access the first item
- name: Read users from CSV file and return a list
read_csv:
path: "files/vms/6-19-20 Optimization Report - Oversized Virtual Machines Prod2.csv"
key: Name
register: users
- debug:
msg: 'User {{ users.list.2.Name}}'
# msg: 'User {{ users.list.2.Name}} has UID {{ users.list.2.ReclaimablevCPUs}} and GID {{ users.list.2.ReclaimableMemory}}'
# msg: "{{ users }}"
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
Expect to be able to read CSV values by col name (field) as based on module documentation.
##### ACTUAL RESULTS
```
fatal: [localhost]: FAILED! => {"ansible_facts": {"discovered_interpreter_python": "/usr/bin/python"}, "changed": false, "msg": "Key 'Name' was not found in the CSV header fields: Name, Configured-vCPU, ReclaimablevCPUs, ConfiguredMemory, ReclaimableMemory, ParentvCenter"}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/module_utils/csv.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright (c) 2021, Andrew Pantuso (@ajpantuso) <[email protected]>
4 # Copyright (c) 2018, Dag Wieers (@dagwieers) <[email protected]>
5 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
6 # SPDX-License-Identifier: GPL-3.0-or-later
7
8 from __future__ import absolute_import, division, print_function
9 __metaclass__ = type
10
11 import csv
12 from io import BytesIO, StringIO
13
14 from ansible.module_utils.common.text.converters import to_native
15 from ansible.module_utils.six import PY3
16
17
18 class CustomDialectFailureError(Exception):
19 pass
20
21
22 class DialectNotAvailableError(Exception):
23 pass
24
25
26 CSVError = csv.Error
27
28
29 def initialize_dialect(dialect, **kwargs):
30 # Add Unix dialect from Python 3
31 class unix_dialect(csv.Dialect):
32 """Describe the usual properties of Unix-generated CSV files."""
33 delimiter = ','
34 quotechar = '"'
35 doublequote = True
36 skipinitialspace = False
37 lineterminator = '\n'
38 quoting = csv.QUOTE_ALL
39
40 csv.register_dialect("unix", unix_dialect)
41
42 if dialect not in csv.list_dialects():
43 raise DialectNotAvailableError("Dialect '%s' is not supported by your version of python." % dialect)
44
45 # Create a dictionary from only set options
46 dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None)
47 if dialect_params:
48 try:
49 csv.register_dialect('custom', dialect, **dialect_params)
50 except TypeError as e:
51 raise CustomDialectFailureError("Unable to create custom dialect: %s" % to_native(e))
52 dialect = 'custom'
53
54 return dialect
55
56
57 def read_csv(data, dialect, fieldnames=None):
58
59 data = to_native(data, errors='surrogate_or_strict')
60
61 if PY3:
62 fake_fh = StringIO(data)
63 else:
64 fake_fh = BytesIO(data)
65
66 reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)
67
68 return reader
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/module_utils/csv.py b/plugins/module_utils/csv.py
--- a/plugins/module_utils/csv.py
+++ b/plugins/module_utils/csv.py
@@ -55,8 +55,10 @@
 
 
def read_csv(data, dialect, fieldnames=None):
-
+ BOM = to_native(u'\ufeff')
data = to_native(data, errors='surrogate_or_strict')
+ if data.startswith(BOM):
+        data = data[len(BOM):]
 
if PY3:
fake_fh = StringIO(data)
| {"golden_diff": "diff --git a/plugins/module_utils/csv.py b/plugins/module_utils/csv.py\n--- a/plugins/module_utils/csv.py\n+++ b/plugins/module_utils/csv.py\n@@ -55,8 +55,10 @@\n \n \n def read_csv(data, dialect, fieldnames=None):\n-\n+ BOM = to_native(u'\\ufeff')\n data = to_native(data, errors='surrogate_or_strict')\n+ if data.startswith(BOM):\n+ data = data[len(BOM):]\n \n if PY3:\n fake_fh = StringIO(data)\n", "issue": "read_csv - Key 'Name' was not found in the CSV header fields\n##### SUMMARY\r\nThe `read_csv` module fails to identify a field, yet displaces the field in the list of available fields.\r\n\r\n##### ISSUE TYPE\r\n- Bug Report\r\n\r\n##### COMPONENT NAME\r\nread_csv\r\n\r\n##### ANSIBLE VERSION\r\n\r\n```\r\nansible 2.9.10\r\n config file = /home/anton/git/ansible-deploy-vmware-vm/ansible.cfg\r\n configured module search path = ['/home/anton/git/ansible-deploy-vmware-vm/library']\r\n ansible python module location = /home/anton/.local/lib/python3.6/site-packages/ansible\r\n executable location = /home/anton/.local/bin/ansible\r\n python version = 3.6.9 (default, Apr 18 2020, 01:56:04) [GCC 8.4.0]\r\n\r\n```\r\n\r\n##### CONFIGURATION\r\n\r\n```\r\n# config file for ansible -- http://ansible.com/\r\n# ==============================================\r\n\r\n# nearly all parameters can be overridden in ansible-playbook\r\n# or with command line flags. ansible will read ANSIBLE_CONFIG,\r\n# ansible.cfg in the current working directory, .ansible.cfg in\r\n# the home directory or /etc/ansible/ansible.cfg, whichever it\r\n# finds first\r\n\r\n[defaults]\r\nhost_key_checking = False\r\nhost_key_check = False\r\nansible_python_interpreter=/usr/bin/python3\r\nlog_path = ./ansible.log\r\n#bin_ansible_callbacks=True\r\n#stdout_callback = debug\r\n\r\n\r\n# some basic default values...\r\nlibrary = ./library\r\n\r\n# additional paths to search for roles in, colon separated\r\nroles_path = ./roles\r\n\r\n[ssh_connection]\r\n# ssh arguments to use\r\nssh_args = -o StrictHostKeyChecking=no\r\ntimeout=60\r\n\r\n\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\nUbuntu 20:04\r\n\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```\r\n---\r\n- name: Right-size VMs\r\n gather_facts: false\r\n hosts: all\r\n connection: local\r\n tasks:\r\n # Read a CSV file and access the first item\r\n - name: Read users from CSV file and return a list\r\n read_csv:\r\n path: \"files/vms/6-19-20 Optimization Report - Oversized Virtual Machines Prod2.csv\"\r\n key: Name\r\n register: users\r\n\r\n - debug:\r\n msg: 'User {{ users.list.2.Name}}'\r\n # msg: 'User {{ users.list.2.Name}} has UID {{ users.list.2.ReclaimablevCPUs}} and GID {{ users.list.2.ReclaimableMemory}}'\r\n # msg: \"{{ users }}\"\r\n\r\n\r\n\r\n```\r\n\r\n<!--- HINT: You can paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\nExpect to be able to read CSV values by col name (field) as based on module documentation.\r\n\r\n\r\n##### ACTUAL RESULTS\r\n```\r\nfatal: [localhost]: FAILED! 
=> {\"ansible_facts\": {\"discovered_interpreter_python\": \"/usr/bin/python\"}, \"changed\": false, \"msg\": \"Key 'Name' was not found in the CSV header fields: \ufeffName, Configured-vCPU, ReclaimablevCPUs, ConfiguredMemory, ReclaimableMemory, ParentvCenter\"}\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) <[email protected]>\n# Copyright (c) 2018, Dag Wieers (@dagwieers) <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nimport csv\nfrom io import BytesIO, StringIO\n\nfrom ansible.module_utils.common.text.converters import to_native\nfrom ansible.module_utils.six import PY3\n\n\nclass CustomDialectFailureError(Exception):\n pass\n\n\nclass DialectNotAvailableError(Exception):\n pass\n\n\nCSVError = csv.Error\n\n\ndef initialize_dialect(dialect, **kwargs):\n # Add Unix dialect from Python 3\n class unix_dialect(csv.Dialect):\n \"\"\"Describe the usual properties of Unix-generated CSV files.\"\"\"\n delimiter = ','\n quotechar = '\"'\n doublequote = True\n skipinitialspace = False\n lineterminator = '\\n'\n quoting = csv.QUOTE_ALL\n\n csv.register_dialect(\"unix\", unix_dialect)\n\n if dialect not in csv.list_dialects():\n raise DialectNotAvailableError(\"Dialect '%s' is not supported by your version of python.\" % dialect)\n\n # Create a dictionary from only set options\n dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None)\n if dialect_params:\n try:\n csv.register_dialect('custom', dialect, **dialect_params)\n except TypeError as e:\n raise CustomDialectFailureError(\"Unable to create custom dialect: %s\" % to_native(e))\n dialect = 'custom'\n\n return dialect\n\n\ndef read_csv(data, dialect, fieldnames=None):\n\n data = to_native(data, errors='surrogate_or_strict')\n\n if PY3:\n fake_fh = StringIO(data)\n else:\n fake_fh = BytesIO(data)\n\n reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)\n\n return reader\n", "path": "plugins/module_utils/csv.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) <[email protected]>\n# Copyright (c) 2018, Dag Wieers (@dagwieers) <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nimport csv\nfrom io import BytesIO, StringIO\n\nfrom ansible.module_utils.common.text.converters import to_native\nfrom ansible.module_utils.six import PY3\n\n\nclass CustomDialectFailureError(Exception):\n pass\n\n\nclass DialectNotAvailableError(Exception):\n pass\n\n\nCSVError = csv.Error\n\n\ndef initialize_dialect(dialect, **kwargs):\n # Add Unix dialect from Python 3\n class unix_dialect(csv.Dialect):\n \"\"\"Describe the usual properties of Unix-generated CSV files.\"\"\"\n delimiter = ','\n quotechar = '\"'\n doublequote = True\n skipinitialspace = False\n lineterminator = '\\n'\n quoting = csv.QUOTE_ALL\n\n csv.register_dialect(\"unix\", unix_dialect)\n\n if dialect not in csv.list_dialects():\n raise DialectNotAvailableError(\"Dialect '%s' is not supported by your version of python.\" % dialect)\n\n # Create a dictionary from only set options\n 
dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None)\n if dialect_params:\n try:\n csv.register_dialect('custom', dialect, **dialect_params)\n except TypeError as e:\n raise CustomDialectFailureError(\"Unable to create custom dialect: %s\" % to_native(e))\n dialect = 'custom'\n\n return dialect\n\n\ndef read_csv(data, dialect, fieldnames=None):\n BOM = to_native(u'\\ufeff')\n data = to_native(data, errors='surrogate_or_strict')\n if data.startswith(BOM):\n data = data[len(BOM):]\n\n if PY3:\n fake_fh = StringIO(data)\n else:\n fake_fh = BytesIO(data)\n\n reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)\n\n return reader\n", "path": "plugins/module_utils/csv.py"}]} | 1,631 | 116 |
gh_patches_debug_11630 | rasdani/github-patches | git_diff | mozilla__bugbug-407 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Not all training tasks need commits DB
Indeed I think none of the ones we currently run as part of the data pipeline need the commits.
We should:
- Make the trainer script only download the DBs which are necessary;
- Remove the dependency on the commit retrieval task in the data-pipeline.yml.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/trainer.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import argparse
4 import lzma
5 import os
6 import shutil
7 from logging import INFO, basicConfig, getLogger
8 from urllib.request import urlretrieve
9
10 from bugbug.models.component import ComponentModel
11 from bugbug.models.defect_enhancement_task import DefectEnhancementTaskModel
12 from bugbug.models.regression import RegressionModel
13 from bugbug.models.tracking import TrackingModel
14
15 basicConfig(level=INFO)
16 logger = getLogger(__name__)
17
18 BASE_URL = "https://index.taskcluster.net/v1/task/project.relman.bugbug.data_{}.latest/artifacts/public"
19
20
21 class Trainer(object):
22 def decompress_file(self, path):
23 with lzma.open(f"{path}.xz", "rb") as input_f:
24 with open(path, "wb") as output_f:
25 shutil.copyfileobj(input_f, output_f)
26
27 def compress_file(self, path):
28 with open(path, "rb") as input_f:
29 with lzma.open(f"{path}.xz", "wb") as output_f:
30 shutil.copyfileobj(input_f, output_f)
31
32 def train_defect_enhancement_task(self):
33 logger.info("Training *defect vs enhancement vs task* model")
34 model = DefectEnhancementTaskModel()
35 model.train()
36 self.compress_file("defectenhancementtaskmodel")
37
38 def train_component(self):
39 logger.info("Training *component* model")
40 model = ComponentModel()
41 model.train()
42 self.compress_file("componentmodel")
43
44 def train_regression(self):
45 logger.info("Training *regression vs non-regression* model")
46 model = RegressionModel()
47 model.train()
48 self.compress_file("regressionmodel")
49
50 def train_tracking(self):
51 logger.info("Training *tracking* model")
52 model = TrackingModel()
53 model.train()
54 self.compress_file("trackingmodel")
55
56 def go(self, model):
57 # TODO: Stop hard-coding them
58 valid_models = ["defect", "component", "regression", "tracking"]
59
60 if model not in valid_models:
61 exception = (
62 f"Invalid model {model!r} name, use one of {valid_models!r} instead"
63 )
64 raise ValueError(exception)
65
66 # Download datasets that were built by bugbug_data.
67 os.makedirs("data", exist_ok=True)
68
69 # Bugs.json
70 logger.info("Downloading bugs database")
71 bugs_url = BASE_URL.format("bugs")
72 urlretrieve(f"{bugs_url}/bugs.json.xz", "data/bugs.json.xz")
73 logger.info("Decompressing bugs database")
74 self.decompress_file("data/bugs.json")
75
76 # Commits.json
77 logger.info("Downloading commits database")
78 commits_url = BASE_URL.format("commits")
79 urlretrieve(f"{commits_url}/commits.json.xz", "data/commits.json.xz")
80 logger.info("Decompressing commits database")
81 self.decompress_file("data/commits.json")
82
83 if model == "defect":
84 # Train classifier for defect-vs-enhancement-vs-task.
85 self.train_defect_enhancement_task()
86 elif model == "component":
87 # Train classifier for the component of a bug.
88 self.train_component()
89 elif model == "regression":
90 # Train classifier for regression-vs-nonregression.
91 self.train_regression()
92 elif model == "tracking":
93 # Train classifier for tracking bugs.
94 self.train_tracking()
95 else:
96 # We shouldn't be here
97 raise Exception("valid_models is likely not up-to-date anymore")
98
99
100 def main():
101 description = "Train the models"
102 parser = argparse.ArgumentParser(description=description)
103
104 parser.add_argument("model", help="Which model to train.")
105
106 args = parser.parse_args()
107
108 retriever = Trainer()
109 retriever.go(args.model)
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/trainer.py b/scripts/trainer.py
--- a/scripts/trainer.py
+++ b/scripts/trainer.py
@@ -73,13 +73,6 @@
logger.info("Decompressing bugs database")
self.decompress_file("data/bugs.json")
- # Commits.json
- logger.info("Downloading commits database")
- commits_url = BASE_URL.format("commits")
- urlretrieve(f"{commits_url}/commits.json.xz", "data/commits.json.xz")
- logger.info("Decompressing commits database")
- self.decompress_file("data/commits.json")
-
if model == "defect":
# Train classifier for defect-vs-enhancement-vs-task.
self.train_defect_enhancement_task()
| {"golden_diff": "diff --git a/scripts/trainer.py b/scripts/trainer.py\n--- a/scripts/trainer.py\n+++ b/scripts/trainer.py\n@@ -73,13 +73,6 @@\n logger.info(\"Decompressing bugs database\")\n self.decompress_file(\"data/bugs.json\")\n \n- # Commits.json\n- logger.info(\"Downloading commits database\")\n- commits_url = BASE_URL.format(\"commits\")\n- urlretrieve(f\"{commits_url}/commits.json.xz\", \"data/commits.json.xz\")\n- logger.info(\"Decompressing commits database\")\n- self.decompress_file(\"data/commits.json\")\n-\n if model == \"defect\":\n # Train classifier for defect-vs-enhancement-vs-task.\n self.train_defect_enhancement_task()\n", "issue": "Not all training tasks need commits DB\nIndeed I think none of the ones we currently run as part of the data pipeline need the commits.\r\nWe should:\r\n- Make the trainer script only download the DBs which are necessary;\r\n- Remove the dependency on the commit retrieval task in the data-pipeline.yml.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport lzma\nimport os\nimport shutil\nfrom logging import INFO, basicConfig, getLogger\nfrom urllib.request import urlretrieve\n\nfrom bugbug.models.component import ComponentModel\nfrom bugbug.models.defect_enhancement_task import DefectEnhancementTaskModel\nfrom bugbug.models.regression import RegressionModel\nfrom bugbug.models.tracking import TrackingModel\n\nbasicConfig(level=INFO)\nlogger = getLogger(__name__)\n\nBASE_URL = \"https://index.taskcluster.net/v1/task/project.relman.bugbug.data_{}.latest/artifacts/public\"\n\n\nclass Trainer(object):\n def decompress_file(self, path):\n with lzma.open(f\"{path}.xz\", \"rb\") as input_f:\n with open(path, \"wb\") as output_f:\n shutil.copyfileobj(input_f, output_f)\n\n def compress_file(self, path):\n with open(path, \"rb\") as input_f:\n with lzma.open(f\"{path}.xz\", \"wb\") as output_f:\n shutil.copyfileobj(input_f, output_f)\n\n def train_defect_enhancement_task(self):\n logger.info(\"Training *defect vs enhancement vs task* model\")\n model = DefectEnhancementTaskModel()\n model.train()\n self.compress_file(\"defectenhancementtaskmodel\")\n\n def train_component(self):\n logger.info(\"Training *component* model\")\n model = ComponentModel()\n model.train()\n self.compress_file(\"componentmodel\")\n\n def train_regression(self):\n logger.info(\"Training *regression vs non-regression* model\")\n model = RegressionModel()\n model.train()\n self.compress_file(\"regressionmodel\")\n\n def train_tracking(self):\n logger.info(\"Training *tracking* model\")\n model = TrackingModel()\n model.train()\n self.compress_file(\"trackingmodel\")\n\n def go(self, model):\n # TODO: Stop hard-coding them\n valid_models = [\"defect\", \"component\", \"regression\", \"tracking\"]\n\n if model not in valid_models:\n exception = (\n f\"Invalid model {model!r} name, use one of {valid_models!r} instead\"\n )\n raise ValueError(exception)\n\n # Download datasets that were built by bugbug_data.\n os.makedirs(\"data\", exist_ok=True)\n\n # Bugs.json\n logger.info(\"Downloading bugs database\")\n bugs_url = BASE_URL.format(\"bugs\")\n urlretrieve(f\"{bugs_url}/bugs.json.xz\", \"data/bugs.json.xz\")\n logger.info(\"Decompressing bugs database\")\n self.decompress_file(\"data/bugs.json\")\n\n # Commits.json\n logger.info(\"Downloading commits database\")\n commits_url = BASE_URL.format(\"commits\")\n urlretrieve(f\"{commits_url}/commits.json.xz\", \"data/commits.json.xz\")\n logger.info(\"Decompressing commits database\")\n 
self.decompress_file(\"data/commits.json\")\n\n if model == \"defect\":\n # Train classifier for defect-vs-enhancement-vs-task.\n self.train_defect_enhancement_task()\n elif model == \"component\":\n # Train classifier for the component of a bug.\n self.train_component()\n elif model == \"regression\":\n # Train classifier for regression-vs-nonregression.\n self.train_regression()\n elif model == \"tracking\":\n # Train classifier for tracking bugs.\n self.train_tracking()\n else:\n # We shouldn't be here\n raise Exception(\"valid_models is likely not up-to-date anymore\")\n\n\ndef main():\n description = \"Train the models\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"model\", help=\"Which model to train.\")\n\n args = parser.parse_args()\n\n retriever = Trainer()\n retriever.go(args.model)\n", "path": "scripts/trainer.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport lzma\nimport os\nimport shutil\nfrom logging import INFO, basicConfig, getLogger\nfrom urllib.request import urlretrieve\n\nfrom bugbug.models.component import ComponentModel\nfrom bugbug.models.defect_enhancement_task import DefectEnhancementTaskModel\nfrom bugbug.models.regression import RegressionModel\nfrom bugbug.models.tracking import TrackingModel\n\nbasicConfig(level=INFO)\nlogger = getLogger(__name__)\n\nBASE_URL = \"https://index.taskcluster.net/v1/task/project.relman.bugbug.data_{}.latest/artifacts/public\"\n\n\nclass Trainer(object):\n def decompress_file(self, path):\n with lzma.open(f\"{path}.xz\", \"rb\") as input_f:\n with open(path, \"wb\") as output_f:\n shutil.copyfileobj(input_f, output_f)\n\n def compress_file(self, path):\n with open(path, \"rb\") as input_f:\n with lzma.open(f\"{path}.xz\", \"wb\") as output_f:\n shutil.copyfileobj(input_f, output_f)\n\n def train_defect_enhancement_task(self):\n logger.info(\"Training *defect vs enhancement vs task* model\")\n model = DefectEnhancementTaskModel()\n model.train()\n self.compress_file(\"defectenhancementtaskmodel\")\n\n def train_component(self):\n logger.info(\"Training *component* model\")\n model = ComponentModel()\n model.train()\n self.compress_file(\"componentmodel\")\n\n def train_regression(self):\n logger.info(\"Training *regression vs non-regression* model\")\n model = RegressionModel()\n model.train()\n self.compress_file(\"regressionmodel\")\n\n def train_tracking(self):\n logger.info(\"Training *tracking* model\")\n model = TrackingModel()\n model.train()\n self.compress_file(\"trackingmodel\")\n\n def go(self, model):\n # TODO: Stop hard-coding them\n valid_models = [\"defect\", \"component\", \"regression\", \"tracking\"]\n\n if model not in valid_models:\n exception = (\n f\"Invalid model {model!r} name, use one of {valid_models!r} instead\"\n )\n raise ValueError(exception)\n\n # Download datasets that were built by bugbug_data.\n os.makedirs(\"data\", exist_ok=True)\n\n # Bugs.json\n logger.info(\"Downloading bugs database\")\n bugs_url = BASE_URL.format(\"bugs\")\n urlretrieve(f\"{bugs_url}/bugs.json.xz\", \"data/bugs.json.xz\")\n logger.info(\"Decompressing bugs database\")\n self.decompress_file(\"data/bugs.json\")\n\n if model == \"defect\":\n # Train classifier for defect-vs-enhancement-vs-task.\n self.train_defect_enhancement_task()\n elif model == \"component\":\n # Train classifier for the component of a bug.\n self.train_component()\n elif model == \"regression\":\n # Train classifier for regression-vs-nonregression.\n self.train_regression()\n elif 
model == \"tracking\":\n # Train classifier for tracking bugs.\n self.train_tracking()\n else:\n # We shouldn't be here\n raise Exception(\"valid_models is likely not up-to-date anymore\")\n\n\ndef main():\n description = \"Train the models\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"model\", help=\"Which model to train.\")\n\n args = parser.parse_args()\n\n retriever = Trainer()\n retriever.go(args.model)\n", "path": "scripts/trainer.py"}]} | 1,363 | 173 |
gh_patches_debug_10182 | rasdani/github-patches | git_diff | getredash__redash-998 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Embed query description appearing larger than vizualization name
The query description is appearing larger then the visualization name:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/handlers/embed.py`
Content:
```
1 import json
2
3 from funcy import project
4 from flask import render_template, request
5 from flask_login import login_required, current_user
6 from flask_restful import abort
7
8 from redash import models, settings
9 from redash import serializers
10 from redash.utils import json_dumps
11 from redash.handlers import routes
12 from redash.handlers.base import org_scoped_rule
13 from redash.permissions import require_access, view_only
14 from authentication import current_org
15
16
17 @routes.route(org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])
18 @login_required
19 def embed(query_id, visualization_id, org_slug=None):
20 # TODO: add event for embed access
21 query = models.Query.get_by_id_and_org(query_id, current_org)
22 require_access(query.groups, current_user, view_only)
23 vis = query.visualizations.where(models.Visualization.id == visualization_id).first()
24 qr = {}
25
26 if vis is not None:
27 vis = vis.to_dict()
28 qr = query.latest_query_data
29 if qr is None:
30 abort(400, message="No Results for this query")
31 else:
32 qr = qr.to_dict()
33 else:
34 abort(404, message="Visualization not found.")
35
36 client_config = {}
37 client_config.update(settings.COMMON_CLIENT_CONFIG)
38
39 qr = project(qr, ('data', 'id', 'retrieved_at'))
40 vis = project(vis, ('description', 'name', 'id', 'options', 'query', 'type', 'updated_at'))
41 vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))
42
43 return render_template("embed.html",
44
45 client_config=json_dumps(client_config),
46 visualization=json_dumps(vis),
47 query_result=json_dumps(qr))
48
49
50 @routes.route(org_scoped_rule('/public/dashboards/<token>'), methods=['GET'])
51 @login_required
52 def public_dashboard(token, org_slug=None):
53 # TODO: verify object is a dashboard?
54 if not isinstance(current_user, models.ApiUser):
55 api_key = models.ApiKey.get_by_api_key(token)
56 dashboard = api_key.object
57 else:
58 dashboard = current_user.object
59
60 user = {
61 'permissions': [],
62 'apiKey': current_user.id
63 }
64
65 headers = {
66 'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate'
67 }
68
69 response = render_template("public.html",
70 headless='embed' in request.args,
71 user=json.dumps(user),
72 seed_data=json_dumps({
73 'dashboard': serializers.public_dashboard(dashboard)
74 }),
75 client_config=json.dumps(settings.COMMON_CLIENT_CONFIG))
76
77 return response, 200, headers
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/handlers/embed.py b/redash/handlers/embed.py
--- a/redash/handlers/embed.py
+++ b/redash/handlers/embed.py
@@ -41,7 +41,6 @@
     vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))
 
return render_template("embed.html",
-
client_config=json_dumps(client_config),
visualization=json_dumps(vis),
query_result=json_dumps(qr))
| {"golden_diff": "diff --git a/redash/handlers/embed.py b/redash/handlers/embed.py\n--- a/redash/handlers/embed.py\n+++ b/redash/handlers/embed.py\n@@ -41,7 +41,6 @@\n vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))\n \n return render_template(\"embed.html\",\n-\n client_config=json_dumps(client_config),\n visualization=json_dumps(vis),\n query_result=json_dumps(qr))\n", "issue": "Embed query description appearing larger than vizualization name\nThe query description is appearing larger then the visualization name:\n\n\n\n", "before_files": [{"content": "import json\n\nfrom funcy import project\nfrom flask import render_template, request\nfrom flask_login import login_required, current_user\nfrom flask_restful import abort\n\nfrom redash import models, settings\nfrom redash import serializers\nfrom redash.utils import json_dumps\nfrom redash.handlers import routes\nfrom redash.handlers.base import org_scoped_rule\nfrom redash.permissions import require_access, view_only\nfrom authentication import current_org\n\n\[email protected](org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])\n@login_required\ndef embed(query_id, visualization_id, org_slug=None):\n # TODO: add event for embed access\n query = models.Query.get_by_id_and_org(query_id, current_org)\n require_access(query.groups, current_user, view_only)\n vis = query.visualizations.where(models.Visualization.id == visualization_id).first()\n qr = {}\n\n if vis is not None:\n vis = vis.to_dict()\n qr = query.latest_query_data\n if qr is None:\n abort(400, message=\"No Results for this query\")\n else:\n qr = qr.to_dict()\n else:\n abort(404, message=\"Visualization not found.\")\n\n client_config = {}\n client_config.update(settings.COMMON_CLIENT_CONFIG)\n\n qr = project(qr, ('data', 'id', 'retrieved_at'))\n vis = project(vis, ('description', 'name', 'id', 'options', 'query', 'type', 'updated_at'))\n vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))\n\n return render_template(\"embed.html\",\n\n client_config=json_dumps(client_config),\n visualization=json_dumps(vis),\n query_result=json_dumps(qr))\n\n\[email protected](org_scoped_rule('/public/dashboards/<token>'), methods=['GET'])\n@login_required\ndef public_dashboard(token, org_slug=None):\n # TODO: verify object is a dashboard?\n if not isinstance(current_user, models.ApiUser):\n api_key = models.ApiKey.get_by_api_key(token)\n dashboard = api_key.object\n else:\n dashboard = current_user.object\n\n user = {\n 'permissions': [],\n 'apiKey': current_user.id\n }\n\n headers = {\n 'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate'\n }\n\n response = render_template(\"public.html\",\n headless='embed' in request.args,\n user=json.dumps(user),\n seed_data=json_dumps({\n 'dashboard': serializers.public_dashboard(dashboard)\n }),\n client_config=json.dumps(settings.COMMON_CLIENT_CONFIG))\n\n return response, 200, headers\n", "path": "redash/handlers/embed.py"}], "after_files": [{"content": "import json\n\nfrom funcy import project\nfrom flask import render_template, request\nfrom flask_login import login_required, current_user\nfrom flask_restful import abort\n\nfrom redash import models, settings\nfrom redash import serializers\nfrom redash.utils import json_dumps\nfrom redash.handlers import routes\nfrom redash.handlers.base import org_scoped_rule\nfrom redash.permissions 
import require_access, view_only\nfrom authentication import current_org\n\n\[email protected](org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])\n@login_required\ndef embed(query_id, visualization_id, org_slug=None):\n # TODO: add event for embed access\n query = models.Query.get_by_id_and_org(query_id, current_org)\n require_access(query.groups, current_user, view_only)\n vis = query.visualizations.where(models.Visualization.id == visualization_id).first()\n qr = {}\n\n if vis is not None:\n vis = vis.to_dict()\n qr = query.latest_query_data\n if qr is None:\n abort(400, message=\"No Results for this query\")\n else:\n qr = qr.to_dict()\n else:\n abort(404, message=\"Visualization not found.\")\n\n client_config = {}\n client_config.update(settings.COMMON_CLIENT_CONFIG)\n\n qr = project(qr, ('data', 'id', 'retrieved_at'))\n vis = project(vis, ('description', 'name', 'id', 'options', 'query', 'type', 'updated_at'))\n vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))\n\n return render_template(\"embed.html\",\n client_config=json_dumps(client_config),\n visualization=json_dumps(vis),\n query_result=json_dumps(qr))\n\n\[email protected](org_scoped_rule('/public/dashboards/<token>'), methods=['GET'])\n@login_required\ndef public_dashboard(token, org_slug=None):\n # TODO: verify object is a dashboard?\n if not isinstance(current_user, models.ApiUser):\n api_key = models.ApiKey.get_by_api_key(token)\n dashboard = api_key.object\n else:\n dashboard = current_user.object\n\n user = {\n 'permissions': [],\n 'apiKey': current_user.id\n }\n\n headers = {\n 'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate'\n }\n\n response = render_template(\"public.html\",\n headless='embed' in request.args,\n user=json.dumps(user),\n seed_data=json_dumps({\n 'dashboard': serializers.public_dashboard(dashboard)\n }),\n client_config=json.dumps(settings.COMMON_CLIENT_CONFIG))\n\n return response, 200, headers\n", "path": "redash/handlers/embed.py"}]} | 1,097 | 124 |
gh_patches_debug_12620 | rasdani/github-patches | git_diff | kivy__kivy-5187 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Kivy breaks Clipboard
### Versions
* Python: 2.7.12
* OS: Windows 10
* Kivy: 1.9.2-dev0
* Kivy installation method: wheel
### Description
When pasting some data into a `TextInput`, the clipboard breaks across the system, and copying and pasting is not possible until the Kivy app is terminated. Specifically, I found the following steps to reproduce the problem:
1. Try copying a file into the `TextInput` box (nothing will paste in as expected)
2. Try copying some text somewhere else (does not have to be in the `TextInput`)
After step 1, nothing is copied or pasted and the Kivy application must be terminated before the clipboard starts working again.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/core/clipboard/clipboard_winctypes.py`
Content:
```
1 '''
2 Clipboard windows: an implementation of the Clipboard using ctypes.
3 '''
4
5 __all__ = ('ClipboardWindows', )
6
7 from kivy.utils import platform
8 from kivy.core.clipboard import ClipboardBase
9
10 if platform != 'win':
11 raise SystemError('unsupported platform for Windows clipboard')
12
13 import ctypes
14 from ctypes import wintypes
15 user32 = ctypes.windll.user32
16 kernel32 = ctypes.windll.kernel32
17 msvcrt = ctypes.cdll.msvcrt
18 c_char_p = ctypes.c_char_p
19 c_wchar_p = ctypes.c_wchar_p
20
21
22 class ClipboardWindows(ClipboardBase):
23
24 def get(self, mimetype='text/plain'):
25 GetClipboardData = user32.GetClipboardData
26 GetClipboardData.argtypes = [wintypes.UINT]
27 GetClipboardData.restype = wintypes.HANDLE
28
29 user32.OpenClipboard(user32.GetActiveWindow())
30 # 1 is CF_TEXT
31 pcontents = GetClipboardData(13)
32 if not pcontents:
33 return ''
34 data = c_wchar_p(pcontents).value.encode(self._encoding)
35 user32.CloseClipboard()
36 return data
37
38 def put(self, text, mimetype='text/plain'):
39 text = text.decode(self._encoding) # auto converted later
40 text += u'\x00'
41
42 SetClipboardData = user32.SetClipboardData
43 SetClipboardData.argtypes = [wintypes.UINT, wintypes.HANDLE]
44 SetClipboardData.restype = wintypes.HANDLE
45
46 GlobalAlloc = kernel32.GlobalAlloc
47 GlobalAlloc.argtypes = [wintypes.UINT, ctypes.c_size_t]
48 GlobalAlloc.restype = wintypes.HGLOBAL
49
50 CF_UNICODETEXT = 13
51
52 user32.OpenClipboard(user32.GetActiveWindow())
53 user32.EmptyClipboard()
54 hCd = GlobalAlloc(0, len(text) * ctypes.sizeof(ctypes.c_wchar))
55 msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text))
56 SetClipboardData(CF_UNICODETEXT, hCd)
57 user32.CloseClipboard()
58
59 def get_types(self):
60 return ['text/plain']
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kivy/core/clipboard/clipboard_winctypes.py b/kivy/core/clipboard/clipboard_winctypes.py
--- a/kivy/core/clipboard/clipboard_winctypes.py
+++ b/kivy/core/clipboard/clipboard_winctypes.py
@@ -27,9 +27,13 @@
         GetClipboardData.restype = wintypes.HANDLE
 
user32.OpenClipboard(user32.GetActiveWindow())
- # 1 is CF_TEXT
+ # Standard Clipboard Format "1" is "CF_TEXT"
pcontents = GetClipboardData(13)
+
+ # if someone pastes a FILE, the content is None for SCF 13
+ # and the clipboard is locked if not closed properly
if not pcontents:
+ user32.CloseClipboard()
return ''
data = c_wchar_p(pcontents).value.encode(self._encoding)
user32.CloseClipboard()
| {"golden_diff": "diff --git a/kivy/core/clipboard/clipboard_winctypes.py b/kivy/core/clipboard/clipboard_winctypes.py\n--- a/kivy/core/clipboard/clipboard_winctypes.py\n+++ b/kivy/core/clipboard/clipboard_winctypes.py\n@@ -27,9 +27,13 @@\n GetClipboardData.restype = wintypes.HANDLE\n \n user32.OpenClipboard(user32.GetActiveWindow())\n- # 1 is CF_TEXT\n+ # Standard Clipboard Format \"1\" is \"CF_TEXT\"\n pcontents = GetClipboardData(13)\n+\n+ # if someone pastes a FILE, the content is None for SCF 13\n+ # and the clipboard is locked if not closed properly\n if not pcontents:\n+ user32.CloseClipboard()\n return ''\n data = c_wchar_p(pcontents).value.encode(self._encoding)\n user32.CloseClipboard()\n", "issue": "Kivy breaks Clipboard\n### Versions\r\n\r\n* Python: 2.7.12\r\n* OS: Windows 10\r\n* Kivy: 1.9.2-dev0\r\n* Kivy installation method: wheel\r\n\r\n### Description\r\n\r\nWhen pasting some data into a `TextInput`, the clipboard breaks across the system, and copying and pasting is not possible until the Kivy app is terminated. Specifically, I found the following steps to reproduce the problem:\r\n1. Try copying a file into the `TextInput` box (nothing will paste in as expected)\r\n2. Try copying some text somewhere else (does not have to be in the `TextInput`)\r\n\r\nAfter step 1, nothing is copied or pasted and the Kivy application must be terminated before the clipboard starts working again.\n", "before_files": [{"content": "'''\nClipboard windows: an implementation of the Clipboard using ctypes.\n'''\n\n__all__ = ('ClipboardWindows', )\n\nfrom kivy.utils import platform\nfrom kivy.core.clipboard import ClipboardBase\n\nif platform != 'win':\n raise SystemError('unsupported platform for Windows clipboard')\n\nimport ctypes\nfrom ctypes import wintypes\nuser32 = ctypes.windll.user32\nkernel32 = ctypes.windll.kernel32\nmsvcrt = ctypes.cdll.msvcrt\nc_char_p = ctypes.c_char_p\nc_wchar_p = ctypes.c_wchar_p\n\n\nclass ClipboardWindows(ClipboardBase):\n\n def get(self, mimetype='text/plain'):\n GetClipboardData = user32.GetClipboardData\n GetClipboardData.argtypes = [wintypes.UINT]\n GetClipboardData.restype = wintypes.HANDLE\n\n user32.OpenClipboard(user32.GetActiveWindow())\n # 1 is CF_TEXT\n pcontents = GetClipboardData(13)\n if not pcontents:\n return ''\n data = c_wchar_p(pcontents).value.encode(self._encoding)\n user32.CloseClipboard()\n return data\n\n def put(self, text, mimetype='text/plain'):\n text = text.decode(self._encoding) # auto converted later\n text += u'\\x00'\n\n SetClipboardData = user32.SetClipboardData\n SetClipboardData.argtypes = [wintypes.UINT, wintypes.HANDLE]\n SetClipboardData.restype = wintypes.HANDLE\n\n GlobalAlloc = kernel32.GlobalAlloc\n GlobalAlloc.argtypes = [wintypes.UINT, ctypes.c_size_t]\n GlobalAlloc.restype = wintypes.HGLOBAL\n\n CF_UNICODETEXT = 13\n\n user32.OpenClipboard(user32.GetActiveWindow())\n user32.EmptyClipboard()\n hCd = GlobalAlloc(0, len(text) * ctypes.sizeof(ctypes.c_wchar))\n msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text))\n SetClipboardData(CF_UNICODETEXT, hCd)\n user32.CloseClipboard()\n\n def get_types(self):\n return ['text/plain']\n", "path": "kivy/core/clipboard/clipboard_winctypes.py"}], "after_files": [{"content": "'''\nClipboard windows: an implementation of the Clipboard using ctypes.\n'''\n\n__all__ = ('ClipboardWindows', )\n\nfrom kivy.utils import platform\nfrom kivy.core.clipboard import ClipboardBase\n\nif platform != 'win':\n raise SystemError('unsupported platform for Windows clipboard')\n\nimport 
ctypes\nfrom ctypes import wintypes\nuser32 = ctypes.windll.user32\nkernel32 = ctypes.windll.kernel32\nmsvcrt = ctypes.cdll.msvcrt\nc_char_p = ctypes.c_char_p\nc_wchar_p = ctypes.c_wchar_p\n\n\nclass ClipboardWindows(ClipboardBase):\n\n def get(self, mimetype='text/plain'):\n GetClipboardData = user32.GetClipboardData\n GetClipboardData.argtypes = [wintypes.UINT]\n GetClipboardData.restype = wintypes.HANDLE\n\n user32.OpenClipboard(user32.GetActiveWindow())\n # Standard Clipboard Format \"1\" is \"CF_TEXT\"\n pcontents = GetClipboardData(13)\n\n # if someone pastes a FILE, the content is None for SCF 13\n # and the clipboard is locked if not closed properly\n if not pcontents:\n user32.CloseClipboard()\n return ''\n data = c_wchar_p(pcontents).value.encode(self._encoding)\n user32.CloseClipboard()\n return data\n\n def put(self, text, mimetype='text/plain'):\n text = text.decode(self._encoding) # auto converted later\n text += u'\\x00'\n\n SetClipboardData = user32.SetClipboardData\n SetClipboardData.argtypes = [wintypes.UINT, wintypes.HANDLE]\n SetClipboardData.restype = wintypes.HANDLE\n\n GlobalAlloc = kernel32.GlobalAlloc\n GlobalAlloc.argtypes = [wintypes.UINT, ctypes.c_size_t]\n GlobalAlloc.restype = wintypes.HGLOBAL\n\n CF_UNICODETEXT = 13\n\n user32.OpenClipboard(user32.GetActiveWindow())\n user32.EmptyClipboard()\n hCd = GlobalAlloc(0, len(text) * ctypes.sizeof(ctypes.c_wchar))\n msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text))\n SetClipboardData(CF_UNICODETEXT, hCd)\n user32.CloseClipboard()\n\n def get_types(self):\n return ['text/plain']\n", "path": "kivy/core/clipboard/clipboard_winctypes.py"}]} | 1,026 | 204 |
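The clipboard fix in the row above works because `CloseClipboard` now runs on the early-return path too. A minimal, Windows-only sketch of the same pattern (a hypothetical helper, not code from the Kivy repo) uses `try`/`finally` so the handle is released no matter how the read exits:

```python
import ctypes

user32 = ctypes.windll.user32  # Windows-only; requires the Win32 user32 DLL

CF_UNICODETEXT = 13  # clipboard format used by the provider above


def read_clipboard_text():
    user32.OpenClipboard(user32.GetActiveWindow())
    try:
        # GetClipboardData returns NULL for non-text payloads such as a
        # copied file, which is exactly the case that used to leave the
        # clipboard locked.
        handle = user32.GetClipboardData(CF_UNICODETEXT)
        if not handle:
            return ''
        return ctypes.c_wchar_p(handle).value
    finally:
        # Runs on every exit path, so the clipboard is never left open.
        user32.CloseClipboard()
```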
gh_patches_debug_5831 | rasdani/github-patches | git_diff | sherlock-project__sherlock-139 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sites sorting
It may be a good idea to sort the sites in sites.md and data.json alphabetically. When I'm looking for sites to add, I always have to Ctrl+F in this repo or just scroll through the file... Also, when viewing the results, it's just chaos.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `site_list.py`
Content:
```
1 """Sherlock: Supported Site Listing
2
3 This module generates the listing of supported sites.
4 """
5 import json
6
7 with open("data.json", "r", encoding="utf-8") as data_file:
8 data = json.load(data_file)
9
10 with open("sites.md", "w") as site_file:
11 site_file.write(f'## List Of Supported Sites ({len(data)} Sites In Total!)\n')
12
13 index = 1
14 for social_network in data:
15 url_main = data.get(social_network).get("urlMain")
16 site_file.write(f'{index}. [{social_network}]({url_main})\n')
17 index = index + 1
18
19 print("Finished updating supported site listing!")
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/site_list.py b/site_list.py
--- a/site_list.py
+++ b/site_list.py
@@ -7,6 +7,11 @@
with open("data.json", "r", encoding="utf-8") as data_file:
data = json.load(data_file)
+sorted_json_data = json.dumps(data, indent=2, sort_keys=True)
+
+with open("data.json", "w") as data_file:
+ data_file.write(sorted_json_data)
+
with open("sites.md", "w") as site_file:
site_file.write(f'## List Of Supported Sites ({len(data)} Sites In Total!)\n')
| {"golden_diff": "diff --git a/site_list.py b/site_list.py\n--- a/site_list.py\n+++ b/site_list.py\n@@ -7,6 +7,11 @@\n with open(\"data.json\", \"r\", encoding=\"utf-8\") as data_file:\n data = json.load(data_file)\n \n+sorted_json_data = json.dumps(data, indent=2, sort_keys=True)\n+\n+with open(\"data.json\", \"w\") as data_file:\n+ data_file.write(sorted_json_data)\n+\n with open(\"sites.md\", \"w\") as site_file:\n site_file.write(f'## List Of Supported Sites ({len(data)} Sites In Total!)\\n')\n", "issue": "Sites sorting\nIt may be a good idea to sort the sites in sites.md and data.json alphabetically. When I'm looking for sites to add, I always have to Ctrl+F in this repo or just scroll through the file... Also when seeing the results, it's just chaos.\n", "before_files": [{"content": "\"\"\"Sherlock: Supported Site Listing\n\nThis module generates the listing of supported sites.\n\"\"\"\nimport json\n\nwith open(\"data.json\", \"r\", encoding=\"utf-8\") as data_file:\n data = json.load(data_file)\n\nwith open(\"sites.md\", \"w\") as site_file:\n site_file.write(f'## List Of Supported Sites ({len(data)} Sites In Total!)\\n')\n\n index = 1\n for social_network in data:\n url_main = data.get(social_network).get(\"urlMain\")\n site_file.write(f'{index}. [{social_network}]({url_main})\\n')\n index = index + 1\n\nprint(\"Finished updating supported site listing!\")\n", "path": "site_list.py"}], "after_files": [{"content": "\"\"\"Sherlock: Supported Site Listing\n\nThis module generates the listing of supported sites.\n\"\"\"\nimport json\n\nwith open(\"data.json\", \"r\", encoding=\"utf-8\") as data_file:\n data = json.load(data_file)\n\nsorted_json_data = json.dumps(data, indent=2, sort_keys=True)\n\nwith open(\"data.json\", \"w\") as data_file:\n data_file.write(sorted_json_data)\n\nwith open(\"sites.md\", \"w\") as site_file:\n site_file.write(f'## List Of Supported Sites ({len(data)} Sites In Total!)\\n')\n\n index = 1\n for social_network in data:\n url_main = data.get(social_network).get(\"urlMain\")\n site_file.write(f'{index}. [{social_network}]({url_main})\\n')\n index = index + 1\n\nprint(\"Finished updating supported site listing!\")\n", "path": "site_list.py"}]} | 497 | 140 |
gh_patches_debug_5674 | rasdani/github-patches | git_diff | mozilla__bugbug-1214 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Log number of spam/non-spam bugs in SpamBug get_labels
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bugbug/models/spambug.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import xgboost
7 from imblearn.under_sampling import RandomUnderSampler
8 from sklearn.compose import ColumnTransformer
9 from sklearn.feature_extraction import DictVectorizer
10 from sklearn.pipeline import Pipeline
11
12 from bugbug import bug_features, bugzilla, feature_cleanup
13 from bugbug.model import BugModel
14
15
16 class SpamBugModel(BugModel):
17 def __init__(self, lemmatization=False):
18 BugModel.__init__(self, lemmatization)
19
20 self.sampler = RandomUnderSampler(random_state=0)
21
22 feature_extractors = [
23 bug_features.has_str(),
24 bug_features.has_regression_range(),
25 bug_features.severity(),
26 bug_features.is_coverity_issue(),
27 bug_features.has_crash_signature(),
28 bug_features.has_url(),
29 bug_features.has_w3c_url(),
30 bug_features.has_github_url(),
31 bug_features.whiteboard(),
32 bug_features.patches(),
33 bug_features.landings(),
34 bug_features.product(),
35 bug_features.component(),
36 bug_features.num_words_title(),
37 bug_features.num_words_comments(),
38 bug_features.keywords(),
39 ]
40
41 cleanup_functions = [
42 feature_cleanup.fileref(),
43 feature_cleanup.url(),
44 feature_cleanup.synonyms(),
45 ]
46
47 self.extraction_pipeline = Pipeline(
48 [
49 (
50 "bug_extractor",
51 bug_features.BugExtractor(
52 feature_extractors, cleanup_functions, rollback=True
53 ),
54 ),
55 (
56 "union",
57 ColumnTransformer(
58 [
59 ("data", DictVectorizer(), "data"),
60 ("title", self.text_vectorizer(), "title"),
61 ("comments", self.text_vectorizer(), "comments"),
62 ]
63 ),
64 ),
65 ]
66 )
67
68 self.clf = xgboost.XGBClassifier(n_jobs=16)
69 self.clf.set_params(predictor="cpu_predictor")
70
71 def get_labels(self):
72 classes = {}
73
74 for bug_data in bugzilla.get_bugs(include_invalid=True):
75 bug_id = bug_data["id"]
76
77 # Legitimate bugs
78 if bug_data["resolution"] == "FIXED":
79 classes[bug_id] = 0
80
81 # Spam bugs
82 elif (
83 bug_data["product"] == "Invalid Bugs"
84 and bug_data["component"] == "General"
85 ):
86 classes[bug_id] = 1
87
88 return classes, [0, 1]
89
90 def items_gen(self, classes):
91 # Overwriting this method to add include_invalid=True to get_bugs to
92 # include spam bugs.
93 return (
94 (bug, classes[bug["id"]])
95 for bug in bugzilla.get_bugs(include_invalid=True)
96 if bug["id"] in classes
97 )
98
99 def get_feature_names(self):
100 return self.extraction_pipeline.named_steps["union"].get_feature_names()
101
102 def overwrite_classes(self, bugs, classes, probabilities):
103 for (i, bug) in enumerate(bugs):
104 if "@mozilla" in bug["creator"]:
105 if probabilities:
106 classes[i] = [1.0, 0.0]
107 else:
108 classes[i] = 0
109
110 return classes
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bugbug/models/spambug.py b/bugbug/models/spambug.py
--- a/bugbug/models/spambug.py
+++ b/bugbug/models/spambug.py
@@ -85,6 +85,17 @@
):
classes[bug_id] = 1
+ print(
+ "{} bugs are classified as non-spam".format(
+ sum(1 for label in classes.values() if label == 0)
+ )
+ )
+ print(
+ "{} bugs are classified as spam".format(
+ sum(1 for label in classes.values() if label == 1)
+ )
+ )
+
return classes, [0, 1]
def items_gen(self, classes):
| {"golden_diff": "diff --git a/bugbug/models/spambug.py b/bugbug/models/spambug.py\n--- a/bugbug/models/spambug.py\n+++ b/bugbug/models/spambug.py\n@@ -85,6 +85,17 @@\n ):\n classes[bug_id] = 1\n \n+ print(\n+ \"{} bugs are classified as non-spam\".format(\n+ sum(1 for label in classes.values() if label == 0)\n+ )\n+ )\n+ print(\n+ \"{} bugs are classified as spam\".format(\n+ sum(1 for label in classes.values() if label == 1)\n+ )\n+ )\n+\n return classes, [0, 1]\n \n def items_gen(self, classes):\n", "issue": "Log number of spam/non-spam bugs in SpamBug get_labels\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup\nfrom bugbug.model import BugModel\n\n\nclass SpamBugModel(BugModel):\n def __init__(self, lemmatization=False):\n BugModel.__init__(self, lemmatization)\n\n self.sampler = RandomUnderSampler(random_state=0)\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.product(),\n bug_features.component(),\n bug_features.num_words_title(),\n bug_features.num_words_comments(),\n bug_features.keywords(),\n ]\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(\n feature_extractors, cleanup_functions, rollback=True\n ),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(), \"title\"),\n (\"comments\", self.text_vectorizer(), \"comments\"),\n ]\n ),\n ),\n ]\n )\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor=\"cpu_predictor\")\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs(include_invalid=True):\n bug_id = bug_data[\"id\"]\n\n # Legitimate bugs\n if bug_data[\"resolution\"] == \"FIXED\":\n classes[bug_id] = 0\n\n # Spam bugs\n elif (\n bug_data[\"product\"] == \"Invalid Bugs\"\n and bug_data[\"component\"] == \"General\"\n ):\n classes[bug_id] = 1\n\n return classes, [0, 1]\n\n def items_gen(self, classes):\n # Overwriting this method to add include_invalid=True to get_bugs to\n # include spam bugs.\n return (\n (bug, classes[bug[\"id\"]])\n for bug in bugzilla.get_bugs(include_invalid=True)\n if bug[\"id\"] in classes\n )\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names()\n\n def overwrite_classes(self, bugs, classes, probabilities):\n for (i, bug) in enumerate(bugs):\n if \"@mozilla\" in bug[\"creator\"]:\n if probabilities:\n classes[i] = [1.0, 0.0]\n else:\n classes[i] = 0\n\n return classes\n", "path": "bugbug/models/spambug.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of 
the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup\nfrom bugbug.model import BugModel\n\n\nclass SpamBugModel(BugModel):\n def __init__(self, lemmatization=False):\n BugModel.__init__(self, lemmatization)\n\n self.sampler = RandomUnderSampler(random_state=0)\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.product(),\n bug_features.component(),\n bug_features.num_words_title(),\n bug_features.num_words_comments(),\n bug_features.keywords(),\n ]\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(\n feature_extractors, cleanup_functions, rollback=True\n ),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(), \"title\"),\n (\"comments\", self.text_vectorizer(), \"comments\"),\n ]\n ),\n ),\n ]\n )\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor=\"cpu_predictor\")\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs(include_invalid=True):\n bug_id = bug_data[\"id\"]\n\n # Legitimate bugs\n if bug_data[\"resolution\"] == \"FIXED\":\n classes[bug_id] = 0\n\n # Spam bugs\n elif (\n bug_data[\"product\"] == \"Invalid Bugs\"\n and bug_data[\"component\"] == \"General\"\n ):\n classes[bug_id] = 1\n\n print(\n \"{} bugs are classified as non-spam\".format(\n sum(1 for label in classes.values() if label == 0)\n )\n )\n print(\n \"{} bugs are classified as spam\".format(\n sum(1 for label in classes.values() if label == 1)\n )\n )\n\n return classes, [0, 1]\n\n def items_gen(self, classes):\n # Overwriting this method to add include_invalid=True to get_bugs to\n # include spam bugs.\n return (\n (bug, classes[bug[\"id\"]])\n for bug in bugzilla.get_bugs(include_invalid=True)\n if bug[\"id\"] in classes\n )\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names()\n\n def overwrite_classes(self, bugs, classes, probabilities):\n for (i, bug) in enumerate(bugs):\n if \"@mozilla\" in bug[\"creator\"]:\n if probabilities:\n classes[i] = [1.0, 0.0]\n else:\n classes[i] = 0\n\n return classes\n", "path": "bugbug/models/spambug.py"}]} | 1,218 | 167 |
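The added logging in the diff above walks `classes.values()` twice with generator expressions. An equivalent single-pass sketch (hypothetical stand-in data, assuming `classes` maps bug id to a 0/1 label as in `get_labels`) uses `collections.Counter`:

```python
from collections import Counter

classes = {101: 0, 102: 1, 103: 0}  # stand-in for get_labels() output

counts = Counter(classes.values())  # one pass over all labels
print(f"{counts[0]} bugs are classified as non-spam")
print(f"{counts[1]} bugs are classified as spam")
```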
gh_patches_debug_16905 | rasdani/github-patches | git_diff | deeppavlov__DeepPavlov-101 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Downloading requirements
I was trying to install deeppavlov and had a problem following the installation steps.
1) There is no download.py file in the root folder; it is at `deeppavlov/download.py`
``` sh
python download.py [-all]
```
2) Even if I use that file it outputs the error:
``` sh
(env) root@mysexyhost:~/work/ipavlov/DeepPavlov# python3 deeppavlov/download.py
/home/ubuntu/work/ipavlov/env/local/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Using TensorFlow backend.
2018-03-12 07:34:11.490 ERROR in 'deeppavlov.core.models.serializable'['log'] at line 54: LOGGER ERROR: Can not initialise deeppavlov.core.models.serializable logger, logging to the stderr. Error traceback:
Traceback (most recent call last):
File "/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/common/log.py", line 32, in get_logger
with open(log_config_path) as log_config_json:
TypeError: invalid file: PosixPath('/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/log_config.json')
2018-03-12 07:34:11.491 ERROR in 'deeppavlov.core.models.keras_model'['log'] at line 54: LOGGER ERROR: Can not initialise deeppavlov.core.models.keras_model logger, logging to the stderr. Error traceback:
Traceback (most recent call last):
File "/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/common/log.py", line 32, in get_logger
with open(log_config_path) as log_config_json:
TypeError: invalid file: PosixPath('/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/log_config.json')
Traceback (most recent call last):
File "deeppavlov/download.py", line 24, in <module>
from deeppavlov.core.data.utils import download, download_decompress
File "/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/__init__.py", line 1, in <module>
import deeppavlov.core.models.keras_model
File "/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/models/keras_model.py", line 39, in <module>
class KerasModel(NNModel, metaclass=TfModelMeta):
File "/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/models/keras_model.py", line 143, in KerasModel
sample_weight_mode=None, weighted_metrics=None, target_tensors=None):
File "/home/ubuntu/work/ipavlov/env/local/lib/python3.5/site-packages/overrides/overrides.py", line 70, in overrides
method.__name__)
AssertionError: No super class method found for "load"
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram_utils/telegram_ui.py`
Content:
```
1 """
2 Copyright 2017 Neural Networks and Deep Learning lab, MIPT
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15 """
16 import telebot
17
18 from deeppavlov.core.common.file import read_json
19 from deeppavlov.core.commands.infer import build_model_from_config
20
21
22 def init_bot_for_model(token, model):
23 bot = telebot.TeleBot(token)
24
25 model_name = type(model).__name__
26 models_info = read_json('../telegram_utils/models_info.json')
27 model_info = models_info[model_name] if model_name in models_info else models_info['@default']
28
29 @bot.message_handler(commands=['start'])
30 def send_start_message(message):
31 chat_id = message.chat.id
32 out_message = model_info['start_message']
33 if hasattr(model, 'reset'):
34 model.reset()
35 bot.send_message(chat_id, out_message)
36
37 @bot.message_handler(commands=['help'])
38 def send_help_message(message):
39 chat_id = message.chat.id
40 out_message = model_info['help_message']
41 bot.send_message(chat_id, out_message)
42
43 @bot.message_handler()
44 def handle_inference(message):
45 chat_id = message.chat.id
46 context = message.text
47
48 pred = model([context])
49 reply_message = str(pred[0])
50 bot.send_message(chat_id, reply_message)
51
52 bot.polling()
53
54
55 def interact_model_by_telegram(config_path, token):
56 config = read_json(config_path)
57 model = build_model_from_config(config)
58 init_bot_for_model(token, model)
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/telegram_utils/telegram_ui.py b/telegram_utils/telegram_ui.py
--- a/telegram_utils/telegram_ui.py
+++ b/telegram_utils/telegram_ui.py
@@ -13,6 +13,8 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
+from pathlib import Path
+
import telebot
from deeppavlov.core.common.file import read_json
@@ -23,7 +25,8 @@
bot = telebot.TeleBot(token)
model_name = type(model).__name__
- models_info = read_json('../telegram_utils/models_info.json')
+ config_path = Path(__file__).parent / 'models_info.json'
+ models_info = read_json(str(config_path))
model_info = models_info[model_name] if model_name in models_info else models_info['@default']
@bot.message_handler(commands=['start'])
| {"golden_diff": "diff --git a/telegram_utils/telegram_ui.py b/telegram_utils/telegram_ui.py\n--- a/telegram_utils/telegram_ui.py\n+++ b/telegram_utils/telegram_ui.py\n@@ -13,6 +13,8 @@\n See the License for the specific language governing permissions and\n limitations under the License.\n \"\"\"\n+from pathlib import Path\n+\n import telebot\n \n from deeppavlov.core.common.file import read_json\n@@ -23,7 +25,8 @@\n bot = telebot.TeleBot(token)\n \n model_name = type(model).__name__\n- models_info = read_json('../telegram_utils/models_info.json')\n+ config_path = Path(__file__).parent / 'models_info.json'\n+ models_info = read_json(str(config_path))\n model_info = models_info[model_name] if model_name in models_info else models_info['@default']\n \n @bot.message_handler(commands=['start'])\n", "issue": "Downloading requirements\nI was trying to install deeppavlov and had a problem following the installation steps.\r\n\r\n1) There is no download.py file in root folder, it is in `deeppavlov/download.py`\r\n``` sh\r\npython download.py [-all] \r\n```\r\n\r\n2) Even if I use that file it outputs the error:\r\n``` sh\r\n(env) root@mysexyhost:~/work/ipavlov/DeepPavlov# python3 deeppavlov/download.py\r\n/home/ubuntu/work/ipavlov/env/local/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\r\n from ._conv import register_converters as _register_converters\r\nUsing TensorFlow backend.\r\n2018-03-12 07:34:11.490 ERROR in 'deeppavlov.core.models.serializable'['log'] at line 54: LOGGER ERROR: Can not initialise deeppavlov.core.models.serializable logger, logging to the stderr. Error traceback:\r\nTraceback (most recent call last):\r\n File \"/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/common/log.py\", line 32, in get_logger\r\n with open(log_config_path) as log_config_json:\r\nTypeError: invalid file: PosixPath('/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/log_config.json')\r\n2018-03-12 07:34:11.491 ERROR in 'deeppavlov.core.models.keras_model'['log'] at line 54: LOGGER ERROR: Can not initialise deeppavlov.core.models.keras_model logger, logging to the stderr. 
Error traceback:\r\nTraceback (most recent call last):\r\n File \"/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/common/log.py\", line 32, in get_logger\r\n with open(log_config_path) as log_config_json:\r\nTypeError: invalid file: PosixPath('/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/log_config.json')\r\nTraceback (most recent call last):\r\n File \"deeppavlov/download.py\", line 24, in <module>\r\n from deeppavlov.core.data.utils import download, download_decompress\r\n File \"/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/__init__.py\", line 1, in <module>\r\n import deeppavlov.core.models.keras_model\r\n File \"/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/models/keras_model.py\", line 39, in <module>\r\n class KerasModel(NNModel, metaclass=TfModelMeta):\r\n File \"/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/models/keras_model.py\", line 143, in KerasModel\r\n sample_weight_mode=None, weighted_metrics=None, target_tensors=None):\r\n File \"/home/ubuntu/work/ipavlov/env/local/lib/python3.5/site-packages/overrides/overrides.py\", line 70, in overrides\r\n method.__name__)\r\nAssertionError: No super class method found for \"load\"\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright 2017 Neural Networks and Deep Learning lab, MIPT\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport telebot\n\nfrom deeppavlov.core.common.file import read_json\nfrom deeppavlov.core.commands.infer import build_model_from_config\n\n\ndef init_bot_for_model(token, model):\n bot = telebot.TeleBot(token)\n\n model_name = type(model).__name__\n models_info = read_json('../telegram_utils/models_info.json')\n model_info = models_info[model_name] if model_name in models_info else models_info['@default']\n\n @bot.message_handler(commands=['start'])\n def send_start_message(message):\n chat_id = message.chat.id\n out_message = model_info['start_message']\n if hasattr(model, 'reset'):\n model.reset()\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler(commands=['help'])\n def send_help_message(message):\n chat_id = message.chat.id\n out_message = model_info['help_message']\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler()\n def handle_inference(message):\n chat_id = message.chat.id\n context = message.text\n\n pred = model([context])\n reply_message = str(pred[0])\n bot.send_message(chat_id, reply_message)\n\n bot.polling()\n\n\ndef interact_model_by_telegram(config_path, token):\n config = read_json(config_path)\n model = build_model_from_config(config)\n init_bot_for_model(token, model)\n", "path": "telegram_utils/telegram_ui.py"}], "after_files": [{"content": "\"\"\"\nCopyright 2017 Neural Networks and Deep Learning lab, MIPT\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is 
distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom pathlib import Path\n\nimport telebot\n\nfrom deeppavlov.core.common.file import read_json\nfrom deeppavlov.core.commands.infer import build_model_from_config\n\n\ndef init_bot_for_model(token, model):\n bot = telebot.TeleBot(token)\n\n model_name = type(model).__name__\n config_path = Path(__file__).parent / 'models_info.json'\n models_info = read_json(str(config_path))\n model_info = models_info[model_name] if model_name in models_info else models_info['@default']\n\n @bot.message_handler(commands=['start'])\n def send_start_message(message):\n chat_id = message.chat.id\n out_message = model_info['start_message']\n if hasattr(model, 'reset'):\n model.reset()\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler(commands=['help'])\n def send_help_message(message):\n chat_id = message.chat.id\n out_message = model_info['help_message']\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler()\n def handle_inference(message):\n chat_id = message.chat.id\n context = message.text\n\n pred = model([context])\n reply_message = str(pred[0])\n bot.send_message(chat_id, reply_message)\n\n bot.polling()\n\n\ndef interact_model_by_telegram(config_path, token):\n config = read_json(config_path)\n model = build_model_from_config(config)\n init_bot_for_model(token, model)\n", "path": "telegram_utils/telegram_ui.py"}]} | 1,529 | 196 |
gh_patches_debug_11304 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1450 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add example code for overlay segment configuration for workstation
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/workstation_configs/forms.py`
Content:
```
1 from django.forms import ModelForm
2
3 from grandchallenge.core.forms import SaveFormInitMixin
4 from grandchallenge.core.widgets import JSONEditorWidget
5 from grandchallenge.workstation_configs.models import (
6 OVERLAY_SEGMENTS_SCHEMA,
7 WorkstationConfig,
8 )
9
10
11 class WorkstationConfigForm(SaveFormInitMixin, ModelForm):
12 class Meta:
13 model = WorkstationConfig
14 fields = (
15 "title",
16 "description",
17 "window_presets",
18 "default_window_preset",
19 "default_slab_thickness_mm",
20 "default_slab_render_method",
21 "default_orientation",
22 "default_overlay_alpha",
23 "default_overlay_lut",
24 "default_overlay_interpolation",
25 "overlay_segments",
26 "default_zoom_scale",
27 "show_image_info_plugin",
28 "show_display_plugin",
29 "show_invert_tool",
30 "show_flip_tool",
31 "show_window_level_tool",
32 "show_reset_tool",
33 )
34 widgets = {
35 "overlay_segments": JSONEditorWidget(
36 schema=OVERLAY_SEGMENTS_SCHEMA
37 ),
38 }
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/workstation_configs/forms.py b/app/grandchallenge/workstation_configs/forms.py
--- a/app/grandchallenge/workstation_configs/forms.py
+++ b/app/grandchallenge/workstation_configs/forms.py
@@ -36,3 +36,14 @@
schema=OVERLAY_SEGMENTS_SCHEMA
),
}
+ help_texts = {
+ "overlay_segments": (
+ "If an categorical overlay is shown, it is possible to show toggles "
+ "to change the visibility of the different overlay categories. To do "
+ "so, configure the categories that should be displayed. Data from the"
+ " algorithm's output.json can be added as an extra label to each "
+ "toggle using jinja templating. "
+ 'For example: [{ "voxel_value": 0, "name": "Level 0", "visible": '
+ 'false, "metric_template": "{{metrics.volumes[0]}} mm³"},]'
+ ),
+ }
| {"golden_diff": "diff --git a/app/grandchallenge/workstation_configs/forms.py b/app/grandchallenge/workstation_configs/forms.py\n--- a/app/grandchallenge/workstation_configs/forms.py\n+++ b/app/grandchallenge/workstation_configs/forms.py\n@@ -36,3 +36,14 @@\n schema=OVERLAY_SEGMENTS_SCHEMA\n ),\n }\n+ help_texts = {\n+ \"overlay_segments\": (\n+ \"If an categorical overlay is shown, it is possible to show toggles \"\n+ \"to change the visibility of the different overlay categories. To do \"\n+ \"so, configure the categories that should be displayed. Data from the\"\n+ \" algorithm's output.json can be added as an extra label to each \"\n+ \"toggle using jinja templating. \"\n+ 'For example: [{ \"voxel_value\": 0, \"name\": \"Level 0\", \"visible\": '\n+ 'false, \"metric_template\": \"{{metrics.volumes[0]}} mm\u00b3\"},]'\n+ ),\n+ }\n", "issue": "Add example code for overlay segment configuration for workstation\n\n", "before_files": [{"content": "from django.forms import ModelForm\n\nfrom grandchallenge.core.forms import SaveFormInitMixin\nfrom grandchallenge.core.widgets import JSONEditorWidget\nfrom grandchallenge.workstation_configs.models import (\n OVERLAY_SEGMENTS_SCHEMA,\n WorkstationConfig,\n)\n\n\nclass WorkstationConfigForm(SaveFormInitMixin, ModelForm):\n class Meta:\n model = WorkstationConfig\n fields = (\n \"title\",\n \"description\",\n \"window_presets\",\n \"default_window_preset\",\n \"default_slab_thickness_mm\",\n \"default_slab_render_method\",\n \"default_orientation\",\n \"default_overlay_alpha\",\n \"default_overlay_lut\",\n \"default_overlay_interpolation\",\n \"overlay_segments\",\n \"default_zoom_scale\",\n \"show_image_info_plugin\",\n \"show_display_plugin\",\n \"show_invert_tool\",\n \"show_flip_tool\",\n \"show_window_level_tool\",\n \"show_reset_tool\",\n )\n widgets = {\n \"overlay_segments\": JSONEditorWidget(\n schema=OVERLAY_SEGMENTS_SCHEMA\n ),\n }\n", "path": "app/grandchallenge/workstation_configs/forms.py"}], "after_files": [{"content": "from django.forms import ModelForm\n\nfrom grandchallenge.core.forms import SaveFormInitMixin\nfrom grandchallenge.core.widgets import JSONEditorWidget\nfrom grandchallenge.workstation_configs.models import (\n OVERLAY_SEGMENTS_SCHEMA,\n WorkstationConfig,\n)\n\n\nclass WorkstationConfigForm(SaveFormInitMixin, ModelForm):\n class Meta:\n model = WorkstationConfig\n fields = (\n \"title\",\n \"description\",\n \"window_presets\",\n \"default_window_preset\",\n \"default_slab_thickness_mm\",\n \"default_slab_render_method\",\n \"default_orientation\",\n \"default_overlay_alpha\",\n \"default_overlay_lut\",\n \"default_overlay_interpolation\",\n \"overlay_segments\",\n \"default_zoom_scale\",\n \"show_image_info_plugin\",\n \"show_display_plugin\",\n \"show_invert_tool\",\n \"show_flip_tool\",\n \"show_window_level_tool\",\n \"show_reset_tool\",\n )\n widgets = {\n \"overlay_segments\": JSONEditorWidget(\n schema=OVERLAY_SEGMENTS_SCHEMA\n ),\n }\n help_texts = {\n \"overlay_segments\": (\n \"If an categorical overlay is shown, it is possible to show toggles \"\n \"to change the visibility of the different overlay categories. To do \"\n \"so, configure the categories that should be displayed. Data from the\"\n \" algorithm's output.json can be added as an extra label to each \"\n \"toggle using jinja templating. 
\"\n 'For example: [{ \"voxel_value\": 0, \"name\": \"Level 0\", \"visible\": '\n 'false, \"metric_template\": \"{{metrics.volumes[0]}} mm\u00b3\"},]'\n ),\n }\n", "path": "app/grandchallenge/workstation_configs/forms.py"}]} | 564 | 220 |
gh_patches_debug_27161 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-18228 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for nzz.ch
rudolffischer@BueroPC-RF:~$ youtube-dl "http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209" -v
[debug] System config: []
[debug] User config: []
[debug] Command-line args: ['http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209', '-v']
[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2014.12.06.1
[debug] Python version 2.7.6 - Linux-3.13.0-39-generic-x86_64-with-Ubuntu-14.04-trusty
[debug] exe versions: rtmpdump 2.4
[debug] Proxy map: {}
[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Requesting header
WARNING: Falling back on generic information extractor.
[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Downloading webpage
[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Extracting information
ERROR: Unsupported URL: http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py", line 651, in _real_extract
doc = parse_xml(webpage)
File "/usr/local/bin/youtube-dl/youtube_dl/utils.py", line 1425, in parse_xml
tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1300, in XML
parser.feed(text)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1642, in feed
self._raiseerror(v)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
raise err
ParseError: not well-formed (invalid token): line 2, column 42
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 553, in extract_info
ie_result = ie.extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 241, in extract
return self._real_extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py", line 1044, in _real_extract
raise ExtractorError('Unsupported URL: %s' % url)
ExtractorError: Unsupported URL: http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
rudolffischer@BueroPC-RF:~$
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `youtube_dl/extractor/nzz.py`
Content:
```
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5
6 from .common import InfoExtractor
7 from ..utils import (
8 extract_attributes,
9 )
10
11
12 class NZZIE(InfoExtractor):
13 _VALID_URL = r'https?://(?:www\.)?nzz\.ch/(?:[^/]+/)*[^/?#]+-ld\.(?P<id>\d+)'
14 _TEST = {
15 'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',
16 'info_dict': {
17 'id': '9153',
18 },
19 'playlist_mincount': 6,
20 }
21
22 def _real_extract(self, url):
23 page_id = self._match_id(url)
24 webpage = self._download_webpage(url, page_id)
25
26 entries = []
27 for player_element in re.findall(r'(<[^>]+class="kalturaPlayer"[^>]*>)', webpage):
28 player_params = extract_attributes(player_element)
29 if player_params.get('data-type') not in ('kaltura_singleArticle',):
30 self.report_warning('Unsupported player type')
31 continue
32 entry_id = player_params['data-id']
33 entries.append(self.url_result(
34 'kaltura:1750922:' + entry_id, 'Kaltura', entry_id))
35
36 return self.playlist_result(entries, page_id)
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/youtube_dl/extractor/nzz.py b/youtube_dl/extractor/nzz.py
--- a/youtube_dl/extractor/nzz.py
+++ b/youtube_dl/extractor/nzz.py
@@ -11,20 +11,27 @@
class NZZIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?nzz\.ch/(?:[^/]+/)*[^/?#]+-ld\.(?P<id>\d+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',
'info_dict': {
'id': '9153',
},
'playlist_mincount': 6,
- }
+ }, {
+ 'url': 'https://www.nzz.ch/video/nzz-standpunkte/cvp-auf-der-suche-nach-dem-mass-der-mitte-ld.1368112',
+ 'info_dict': {
+ 'id': '1368112',
+ },
+ 'playlist_count': 1,
+ }]
def _real_extract(self, url):
page_id = self._match_id(url)
webpage = self._download_webpage(url, page_id)
entries = []
- for player_element in re.findall(r'(<[^>]+class="kalturaPlayer"[^>]*>)', webpage):
+ for player_element in re.findall(
+ r'(<[^>]+class="kalturaPlayer[^"]*"[^>]*>)', webpage):
player_params = extract_attributes(player_element)
if player_params.get('data-type') not in ('kaltura_singleArticle',):
self.report_warning('Unsupported player type')
| {"golden_diff": "diff --git a/youtube_dl/extractor/nzz.py b/youtube_dl/extractor/nzz.py\n--- a/youtube_dl/extractor/nzz.py\n+++ b/youtube_dl/extractor/nzz.py\n@@ -11,20 +11,27 @@\n \n class NZZIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?nzz\\.ch/(?:[^/]+/)*[^/?#]+-ld\\.(?P<id>\\d+)'\n- _TEST = {\n+ _TESTS = [{\n 'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',\n 'info_dict': {\n 'id': '9153',\n },\n 'playlist_mincount': 6,\n- }\n+ }, {\n+ 'url': 'https://www.nzz.ch/video/nzz-standpunkte/cvp-auf-der-suche-nach-dem-mass-der-mitte-ld.1368112',\n+ 'info_dict': {\n+ 'id': '1368112',\n+ },\n+ 'playlist_count': 1,\n+ }]\n \n def _real_extract(self, url):\n page_id = self._match_id(url)\n webpage = self._download_webpage(url, page_id)\n \n entries = []\n- for player_element in re.findall(r'(<[^>]+class=\"kalturaPlayer\"[^>]*>)', webpage):\n+ for player_element in re.findall(\n+ r'(<[^>]+class=\"kalturaPlayer[^\"]*\"[^>]*>)', webpage):\n player_params = extract_attributes(player_element)\n if player_params.get('data-type') not in ('kaltura_singleArticle',):\n self.report_warning('Unsupported player type')\n", "issue": "Add support for nzz.ch\nrudolffischer@BueroPC-RF:~$ youtube-dl \"http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209\" -v\n[debug] System config: []\n[debug] User config: []\n[debug] Command-line args: ['http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209', '-v']\n[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8\n[debug] youtube-dl version 2014.12.06.1\n[debug] Python version 2.7.6 - Linux-3.13.0-39-generic-x86_64-with-Ubuntu-14.04-trusty\n[debug] exe versions: rtmpdump 2.4\n[debug] Proxy map: {}\n[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Requesting header\nWARNING: Falling back on generic information extractor.\n[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Downloading webpage\n[generic] 30-jahre-herzschmerz-aus-saas-fee-1: Extracting information\nERROR: Unsupported URL: http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. 
Be sure to call youtube-dl with the --verbose flag and include its complete output.\nTraceback (most recent call last):\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py\", line 651, in _real_extract\n doc = parse_xml(webpage)\n File \"/usr/local/bin/youtube-dl/youtube_dl/utils.py\", line 1425, in parse_xml\n tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1300, in XML\n parser.feed(text)\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1642, in feed\n self._raiseerror(v)\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1506, in _raiseerror\n raise err\nParseError: not well-formed (invalid token): line 2, column 42\nTraceback (most recent call last):\n File \"/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 553, in extract_info\n ie_result = ie.extract(url)\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py\", line 241, in extract\n return self._real_extract(url)\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/generic.py\", line 1044, in _real_extract\n raise ExtractorError('Unsupported URL: %s' % url)\nExtractorError: Unsupported URL: http://www.nzz.ch/panorama/30-jahre-herzschmerz-aus-saas-fee-1.18438209; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.\n\nrudolffischer@BueroPC-RF:~$ \n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n extract_attributes,\n)\n\n\nclass NZZIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?nzz\\.ch/(?:[^/]+/)*[^/?#]+-ld\\.(?P<id>\\d+)'\n _TEST = {\n 'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',\n 'info_dict': {\n 'id': '9153',\n },\n 'playlist_mincount': 6,\n }\n\n def _real_extract(self, url):\n page_id = self._match_id(url)\n webpage = self._download_webpage(url, page_id)\n\n entries = []\n for player_element in re.findall(r'(<[^>]+class=\"kalturaPlayer\"[^>]*>)', webpage):\n player_params = extract_attributes(player_element)\n if player_params.get('data-type') not in ('kaltura_singleArticle',):\n self.report_warning('Unsupported player type')\n continue\n entry_id = player_params['data-id']\n entries.append(self.url_result(\n 'kaltura:1750922:' + entry_id, 'Kaltura', entry_id))\n\n return self.playlist_result(entries, page_id)\n", "path": "youtube_dl/extractor/nzz.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n extract_attributes,\n)\n\n\nclass NZZIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?nzz\\.ch/(?:[^/]+/)*[^/?#]+-ld\\.(?P<id>\\d+)'\n _TESTS = [{\n 'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',\n 'info_dict': {\n 'id': '9153',\n },\n 'playlist_mincount': 6,\n }, {\n 'url': 'https://www.nzz.ch/video/nzz-standpunkte/cvp-auf-der-suche-nach-dem-mass-der-mitte-ld.1368112',\n 'info_dict': {\n 'id': '1368112',\n },\n 'playlist_count': 1,\n }]\n\n def _real_extract(self, url):\n page_id = self._match_id(url)\n webpage = self._download_webpage(url, page_id)\n\n entries = []\n for player_element in re.findall(\n r'(<[^>]+class=\"kalturaPlayer[^\"]*\"[^>]*>)', webpage):\n player_params = 
extract_attributes(player_element)\n if player_params.get('data-type') not in ('kaltura_singleArticle',):\n self.report_warning('Unsupported player type')\n continue\n entry_id = player_params['data-id']\n entries.append(self.url_result(\n 'kaltura:1750922:' + entry_id, 'Kaltura', entry_id))\n\n return self.playlist_result(entries, page_id)\n", "path": "youtube_dl/extractor/nzz.py"}]} | 1,537 | 420 |
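The regex change in the diff above is the whole fix: `class="kalturaPlayer"` only matches when the attribute is exactly that string, while `class="kalturaPlayer[^"]*"` also tolerates extra classes after it. A quick illustration (made-up HTML):

```python
import re

html = '<div class="kalturaPlayer js-player" data-id="1_abc"></div>'

strict = re.findall(r'(<[^>]+class="kalturaPlayer"[^>]*>)', html)
loose = re.findall(r'(<[^>]+class="kalturaPlayer[^"]*"[^>]*>)', html)

print(len(strict))  # 0: the extra class breaks the exact match
print(len(loose))   # 1: [^"]* absorbs the rest of the attribute
```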
gh_patches_debug_25079 | rasdani/github-patches | git_diff | Kinto__kinto-630 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enabling the flush endpoint through env vars does not seem to work
I'm running Kinto + postgres with docker-compose (using the example docker-compose.yml in the repo).
Adding `KINTO_FLUSH_ENDPOINT_ENABLED` to the environment section in docker-compose.yml does not enable the flush endpoint for me. Instead, I had to add `kinto.flush_endpoint_enabled = true` to a custom ini file, which worked.
Can the flush endpoint be enabled through an env var like this?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/__init__.py`
Content:
```
1 import pkg_resources
2 import logging
3
4 import kinto.core
5 from pyramid.config import Configurator
6 from pyramid.settings import asbool
7 from pyramid.security import Authenticated
8
9 from kinto.authorization import RouteFactory
10
11 # Module version, as defined in PEP-0396.
12 __version__ = pkg_resources.get_distribution(__package__).version
13
14 # Implemented HTTP API Version
15 HTTP_API_VERSION = '1.5'
16
17 # Main kinto logger
18 logger = logging.getLogger(__name__)
19
20
21 DEFAULT_SETTINGS = {
22 'retry_after_seconds': 3,
23 'cache_backend': 'kinto.core.cache.memory',
24 'permission_backend': 'kinto.core.permission.memory',
25 'storage_backend': 'kinto.core.storage.memory',
26 'project_docs': 'https://kinto.readthedocs.io/',
27 'bucket_create_principals': Authenticated,
28 'multiauth.authorization_policy': (
29 'kinto.authorization.AuthorizationPolicy'),
30 'experimental_collection_schema_validation': 'False',
31 'http_api_version': HTTP_API_VERSION
32 }
33
34
35 def main(global_config, config=None, **settings):
36 if not config:
37 config = Configurator(settings=settings, root_factory=RouteFactory)
38
39 # Force project name, since it determines settings prefix.
40 config.add_settings({'kinto.project_name': 'kinto'})
41
42 kinto.core.initialize(config,
43 version=__version__,
44 default_settings=DEFAULT_SETTINGS)
45
46 settings = config.get_settings()
47
48 # Expose capability
49 schema_enabled = asbool(
50 settings['experimental_collection_schema_validation']
51 )
52 if schema_enabled:
53 config.add_api_capability(
54 "schema",
55 description="Validates collection records with JSON schemas.",
56 url="http://kinto.readthedocs.io/en/latest/api/1.x/"
57 "collections.html#collection-json-schema")
58
59 # Scan Kinto views.
60 kwargs = {}
61 flush_enabled = asbool(settings.get('flush_endpoint_enabled'))
62
63 if flush_enabled:
64 config.add_api_capability(
65 "flush_endpoint",
66 description="The __flush__ endpoint can be used to remove all "
67 "data from all backends.",
68 url="http://kinto.readthedocs.io/en/latest/configuration/"
69 "settings.html#activating-the-flush-endpoint"
70 )
71 else:
72 kwargs['ignore'] = 'kinto.views.flush'
73 config.scan("kinto.views", **kwargs)
74
75 app = config.make_wsgi_app()
76
77 # Install middleware (idempotent if disabled)
78 return kinto.core.install_middlewares(app, settings)
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/__init__.py b/kinto/__init__.py
--- a/kinto/__init__.py
+++ b/kinto/__init__.py
@@ -19,6 +19,7 @@
DEFAULT_SETTINGS = {
+ 'flush_endpoint_enabled': False,
'retry_after_seconds': 3,
'cache_backend': 'kinto.core.cache.memory',
'permission_backend': 'kinto.core.permission.memory',
@@ -58,18 +59,18 @@
# Scan Kinto views.
kwargs = {}
- flush_enabled = asbool(settings.get('flush_endpoint_enabled'))
+ flush_enabled = asbool(settings.get('flush_endpoint_enabled'))
if flush_enabled:
config.add_api_capability(
"flush_endpoint",
description="The __flush__ endpoint can be used to remove all "
"data from all backends.",
url="http://kinto.readthedocs.io/en/latest/configuration/"
- "settings.html#activating-the-flush-endpoint"
- )
+ "settings.html#activating-the-flush-endpoint")
else:
kwargs['ignore'] = 'kinto.views.flush'
+
config.scan("kinto.views", **kwargs)
app = config.make_wsgi_app()
| {"golden_diff": "diff --git a/kinto/__init__.py b/kinto/__init__.py\n--- a/kinto/__init__.py\n+++ b/kinto/__init__.py\n@@ -19,6 +19,7 @@\n \n \n DEFAULT_SETTINGS = {\n+ 'flush_endpoint_enabled': False,\n 'retry_after_seconds': 3,\n 'cache_backend': 'kinto.core.cache.memory',\n 'permission_backend': 'kinto.core.permission.memory',\n@@ -58,18 +59,18 @@\n \n # Scan Kinto views.\n kwargs = {}\n- flush_enabled = asbool(settings.get('flush_endpoint_enabled'))\n \n+ flush_enabled = asbool(settings.get('flush_endpoint_enabled'))\n if flush_enabled:\n config.add_api_capability(\n \"flush_endpoint\",\n description=\"The __flush__ endpoint can be used to remove all \"\n \"data from all backends.\",\n url=\"http://kinto.readthedocs.io/en/latest/configuration/\"\n- \"settings.html#activating-the-flush-endpoint\"\n- )\n+ \"settings.html#activating-the-flush-endpoint\")\n else:\n kwargs['ignore'] = 'kinto.views.flush'\n+\n config.scan(\"kinto.views\", **kwargs)\n \n app = config.make_wsgi_app()\n", "issue": "Enabling the flush endpoint through env vars does not seem to work\nI'm running Kinto + postgres with docker-compose (using the example docker-compose.yml in the repo). \n\nAdding `KINTO_FLUSH_ENDPOINT_ENABLED` to the environment section in docker-compose.yml does not enable the flush endpoint for me. I instead had to add `kinto.flush_endpoint_enabled = true` to a custom ini file, that worked.\n\nCan the flush endpoint be enabled through an env var like this?\n\n", "before_files": [{"content": "import pkg_resources\nimport logging\n\nimport kinto.core\nfrom pyramid.config import Configurator\nfrom pyramid.settings import asbool\nfrom pyramid.security import Authenticated\n\nfrom kinto.authorization import RouteFactory\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution(__package__).version\n\n# Implemented HTTP API Version\nHTTP_API_VERSION = '1.5'\n\n# Main kinto logger\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_SETTINGS = {\n 'retry_after_seconds': 3,\n 'cache_backend': 'kinto.core.cache.memory',\n 'permission_backend': 'kinto.core.permission.memory',\n 'storage_backend': 'kinto.core.storage.memory',\n 'project_docs': 'https://kinto.readthedocs.io/',\n 'bucket_create_principals': Authenticated,\n 'multiauth.authorization_policy': (\n 'kinto.authorization.AuthorizationPolicy'),\n 'experimental_collection_schema_validation': 'False',\n 'http_api_version': HTTP_API_VERSION\n}\n\n\ndef main(global_config, config=None, **settings):\n if not config:\n config = Configurator(settings=settings, root_factory=RouteFactory)\n\n # Force project name, since it determines settings prefix.\n config.add_settings({'kinto.project_name': 'kinto'})\n\n kinto.core.initialize(config,\n version=__version__,\n default_settings=DEFAULT_SETTINGS)\n\n settings = config.get_settings()\n\n # Expose capability\n schema_enabled = asbool(\n settings['experimental_collection_schema_validation']\n )\n if schema_enabled:\n config.add_api_capability(\n \"schema\",\n description=\"Validates collection records with JSON schemas.\",\n url=\"http://kinto.readthedocs.io/en/latest/api/1.x/\"\n \"collections.html#collection-json-schema\")\n\n # Scan Kinto views.\n kwargs = {}\n flush_enabled = asbool(settings.get('flush_endpoint_enabled'))\n\n if flush_enabled:\n config.add_api_capability(\n \"flush_endpoint\",\n description=\"The __flush__ endpoint can be used to remove all \"\n \"data from all backends.\",\n url=\"http://kinto.readthedocs.io/en/latest/configuration/\"\n 
\"settings.html#activating-the-flush-endpoint\"\n )\n else:\n kwargs['ignore'] = 'kinto.views.flush'\n config.scan(\"kinto.views\", **kwargs)\n\n app = config.make_wsgi_app()\n\n # Install middleware (idempotent if disabled)\n return kinto.core.install_middlewares(app, settings)\n", "path": "kinto/__init__.py"}], "after_files": [{"content": "import pkg_resources\nimport logging\n\nimport kinto.core\nfrom pyramid.config import Configurator\nfrom pyramid.settings import asbool\nfrom pyramid.security import Authenticated\n\nfrom kinto.authorization import RouteFactory\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution(__package__).version\n\n# Implemented HTTP API Version\nHTTP_API_VERSION = '1.5'\n\n# Main kinto logger\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_SETTINGS = {\n 'flush_endpoint_enabled': False,\n 'retry_after_seconds': 3,\n 'cache_backend': 'kinto.core.cache.memory',\n 'permission_backend': 'kinto.core.permission.memory',\n 'storage_backend': 'kinto.core.storage.memory',\n 'project_docs': 'https://kinto.readthedocs.io/',\n 'bucket_create_principals': Authenticated,\n 'multiauth.authorization_policy': (\n 'kinto.authorization.AuthorizationPolicy'),\n 'experimental_collection_schema_validation': 'False',\n 'http_api_version': HTTP_API_VERSION\n}\n\n\ndef main(global_config, config=None, **settings):\n if not config:\n config = Configurator(settings=settings, root_factory=RouteFactory)\n\n # Force project name, since it determines settings prefix.\n config.add_settings({'kinto.project_name': 'kinto'})\n\n kinto.core.initialize(config,\n version=__version__,\n default_settings=DEFAULT_SETTINGS)\n\n settings = config.get_settings()\n\n # Expose capability\n schema_enabled = asbool(\n settings['experimental_collection_schema_validation']\n )\n if schema_enabled:\n config.add_api_capability(\n \"schema\",\n description=\"Validates collection records with JSON schemas.\",\n url=\"http://kinto.readthedocs.io/en/latest/api/1.x/\"\n \"collections.html#collection-json-schema\")\n\n # Scan Kinto views.\n kwargs = {}\n\n flush_enabled = asbool(settings.get('flush_endpoint_enabled'))\n if flush_enabled:\n config.add_api_capability(\n \"flush_endpoint\",\n description=\"The __flush__ endpoint can be used to remove all \"\n \"data from all backends.\",\n url=\"http://kinto.readthedocs.io/en/latest/configuration/\"\n \"settings.html#activating-the-flush-endpoint\")\n else:\n kwargs['ignore'] = 'kinto.views.flush'\n\n config.scan(\"kinto.views\", **kwargs)\n\n app = config.make_wsgi_app()\n\n # Install middleware (idempotent if disabled)\n return kinto.core.install_middlewares(app, settings)\n", "path": "kinto/__init__.py"}]} | 1,043 | 272 |
gh_patches_debug_63214 | rasdani/github-patches | git_diff | ManimCommunity__manim-3108 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The documentation for the `--resolution` flag in the cli is wrong
The current documentation of the `--resolution` flag says the format is `(W,H)`, which is confusing because the passed value needs to be of the form `"W,H"`, so the documentation should be updated accordingly such that it reflects the usage `-r "W,H"`, ideally with an example of `-r "1920,1080"`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manim/cli/render/render_options.py`
Content:
```
1 from __future__ import annotations
2
3 import re
4
5 import click
6 from cloup import option, option_group
7
8 from manim.constants import QUALITIES, RendererType
9
10 from ... import logger
11
12
13 def validate_scene_range(ctx, param, value):
14 try:
15 start = int(value)
16 return (start,)
17 except Exception:
18 pass
19
20 if value:
21 try:
22 start, end = map(int, re.split(r"[;,\-]", value))
23 return start, end
24 except Exception:
25 logger.error("Couldn't determine a range for -n option.")
26 exit()
27
28
29 def validate_resolution(ctx, param, value):
30 if value:
31 try:
32 start, end = map(int, re.split(r"[;,\-]", value))
33 return (start, end)
34 except Exception:
35 logger.error("Resolution option is invalid.")
36 exit()
37
38
39 render_options = option_group(
40 "Render Options",
41 option(
42 "-n",
43 "--from_animation_number",
44 callback=validate_scene_range,
45 help="Start rendering from n_0 until n_1. If n_1 is left unspecified, "
46 "renders all scenes after n_0.",
47 default=None,
48 ),
49 option(
50 "-a",
51 "--write_all",
52 is_flag=True,
53 help="Render all scenes in the input file.",
54 default=None,
55 ),
56 option(
57 "--format",
58 type=click.Choice(["png", "gif", "mp4", "webm", "mov"], case_sensitive=False),
59 default=None,
60 ),
61 option("-s", "--save_last_frame", is_flag=True, default=None),
62 option(
63 "-q",
64 "--quality",
65 default=None,
66 type=click.Choice(
67 list(reversed([q["flag"] for q in QUALITIES.values() if q["flag"]])), # type: ignore
68 case_sensitive=False,
69 ),
70 help="Render quality at the follow resolution framerates, respectively: "
71 + ", ".join(
72 reversed(
73 [
74 f'{q["pixel_width"]}x{q["pixel_height"]} {q["frame_rate"]}FPS'
75 for q in QUALITIES.values()
76 if q["flag"]
77 ]
78 )
79 ),
80 ),
81 option(
82 "-r",
83 "--resolution",
84 callback=validate_resolution,
85 default=None,
86 help="Resolution in (W,H) for when 16:9 aspect ratio isn't possible.",
87 ),
88 option(
89 "--fps",
90 "--frame_rate",
91 "frame_rate",
92 type=float,
93 default=None,
94 help="Render at this frame rate.",
95 ),
96 option(
97 "--renderer",
98 type=click.Choice(
99 [renderer_type.value for renderer_type in RendererType],
100 case_sensitive=False,
101 ),
102 help="Select a renderer for your Scene.",
103 default="cairo",
104 ),
105 option(
106 "-g",
107 "--save_pngs",
108 is_flag=True,
109 default=None,
110 help="Save each frame as png (Deprecated).",
111 ),
112 option(
113 "-i",
114 "--save_as_gif",
115 default=None,
116 is_flag=True,
117 help="Save as a gif (Deprecated).",
118 ),
119 option(
120 "--save_sections",
121 default=None,
122 is_flag=True,
123 help="Save section videos in addition to movie file.",
124 ),
125 option(
126 "-s",
127 "--save_last_frame",
128 default=None,
129 is_flag=True,
130 help="Save last frame as png (Deprecated).",
131 ),
132 option(
133 "-t",
134 "--transparent",
135 is_flag=True,
136 help="Render scenes with alpha channel.",
137 ),
138 option(
139 "--use_projection_fill_shaders",
140 is_flag=True,
141 help="Use shaders for OpenGLVMobject fill which are compatible with transformation matrices.",
142 default=None,
143 ),
144 option(
145 "--use_projection_stroke_shaders",
146 is_flag=True,
147 help="Use shaders for OpenGLVMobject stroke which are compatible with transformation matrices.",
148 default=None,
149 ),
150 )
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/manim/cli/render/render_options.py b/manim/cli/render/render_options.py
--- a/manim/cli/render/render_options.py
+++ b/manim/cli/render/render_options.py
@@ -83,7 +83,7 @@
"--resolution",
callback=validate_resolution,
default=None,
- help="Resolution in (W,H) for when 16:9 aspect ratio isn't possible.",
+ help='Resolution in "W,H" for when 16:9 aspect ratio isn\'t possible.',
),
option(
"--fps",
| {"golden_diff": "diff --git a/manim/cli/render/render_options.py b/manim/cli/render/render_options.py\n--- a/manim/cli/render/render_options.py\n+++ b/manim/cli/render/render_options.py\n@@ -83,7 +83,7 @@\n \"--resolution\",\n callback=validate_resolution,\n default=None,\n- help=\"Resolution in (W,H) for when 16:9 aspect ratio isn't possible.\",\n+ help='Resolution in \"W,H\" for when 16:9 aspect ratio isn\\'t possible.',\n ),\n option(\n \"--fps\",\n", "issue": "The documentation for the `--resolution` flag in the cli is wrong\nThe current documentation of the `--resolution` flag says the format is `(W,H)` which is confusing because the passed value needs to be of the form `\"W,H\"`so the documentation should be updatet accordingly such that it reflects the usage `-r \"W,H\"` best with an example of `-r \"1920,1080\"`\n", "before_files": [{"content": "from __future__ import annotations\n\nimport re\n\nimport click\nfrom cloup import option, option_group\n\nfrom manim.constants import QUALITIES, RendererType\n\nfrom ... import logger\n\n\ndef validate_scene_range(ctx, param, value):\n try:\n start = int(value)\n return (start,)\n except Exception:\n pass\n\n if value:\n try:\n start, end = map(int, re.split(r\"[;,\\-]\", value))\n return start, end\n except Exception:\n logger.error(\"Couldn't determine a range for -n option.\")\n exit()\n\n\ndef validate_resolution(ctx, param, value):\n if value:\n try:\n start, end = map(int, re.split(r\"[;,\\-]\", value))\n return (start, end)\n except Exception:\n logger.error(\"Resolution option is invalid.\")\n exit()\n\n\nrender_options = option_group(\n \"Render Options\",\n option(\n \"-n\",\n \"--from_animation_number\",\n callback=validate_scene_range,\n help=\"Start rendering from n_0 until n_1. If n_1 is left unspecified, \"\n \"renders all scenes after n_0.\",\n default=None,\n ),\n option(\n \"-a\",\n \"--write_all\",\n is_flag=True,\n help=\"Render all scenes in the input file.\",\n default=None,\n ),\n option(\n \"--format\",\n type=click.Choice([\"png\", \"gif\", \"mp4\", \"webm\", \"mov\"], case_sensitive=False),\n default=None,\n ),\n option(\"-s\", \"--save_last_frame\", is_flag=True, default=None),\n option(\n \"-q\",\n \"--quality\",\n default=None,\n type=click.Choice(\n list(reversed([q[\"flag\"] for q in QUALITIES.values() if q[\"flag\"]])), # type: ignore\n case_sensitive=False,\n ),\n help=\"Render quality at the follow resolution framerates, respectively: \"\n + \", \".join(\n reversed(\n [\n f'{q[\"pixel_width\"]}x{q[\"pixel_height\"]} {q[\"frame_rate\"]}FPS'\n for q in QUALITIES.values()\n if q[\"flag\"]\n ]\n )\n ),\n ),\n option(\n \"-r\",\n \"--resolution\",\n callback=validate_resolution,\n default=None,\n help=\"Resolution in (W,H) for when 16:9 aspect ratio isn't possible.\",\n ),\n option(\n \"--fps\",\n \"--frame_rate\",\n \"frame_rate\",\n type=float,\n default=None,\n help=\"Render at this frame rate.\",\n ),\n option(\n \"--renderer\",\n type=click.Choice(\n [renderer_type.value for renderer_type in RendererType],\n case_sensitive=False,\n ),\n help=\"Select a renderer for your Scene.\",\n default=\"cairo\",\n ),\n option(\n \"-g\",\n \"--save_pngs\",\n is_flag=True,\n default=None,\n help=\"Save each frame as png (Deprecated).\",\n ),\n option(\n \"-i\",\n \"--save_as_gif\",\n default=None,\n is_flag=True,\n help=\"Save as a gif (Deprecated).\",\n ),\n option(\n \"--save_sections\",\n default=None,\n is_flag=True,\n help=\"Save section videos in addition to movie file.\",\n ),\n option(\n \"-s\",\n 
\"--save_last_frame\",\n default=None,\n is_flag=True,\n help=\"Save last frame as png (Deprecated).\",\n ),\n option(\n \"-t\",\n \"--transparent\",\n is_flag=True,\n help=\"Render scenes with alpha channel.\",\n ),\n option(\n \"--use_projection_fill_shaders\",\n is_flag=True,\n help=\"Use shaders for OpenGLVMobject fill which are compatible with transformation matrices.\",\n default=None,\n ),\n option(\n \"--use_projection_stroke_shaders\",\n is_flag=True,\n help=\"Use shaders for OpenGLVMobject stroke which are compatible with transformation matrices.\",\n default=None,\n ),\n)\n", "path": "manim/cli/render/render_options.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport re\n\nimport click\nfrom cloup import option, option_group\n\nfrom manim.constants import QUALITIES, RendererType\n\nfrom ... import logger\n\n\ndef validate_scene_range(ctx, param, value):\n try:\n start = int(value)\n return (start,)\n except Exception:\n pass\n\n if value:\n try:\n start, end = map(int, re.split(r\"[;,\\-]\", value))\n return start, end\n except Exception:\n logger.error(\"Couldn't determine a range for -n option.\")\n exit()\n\n\ndef validate_resolution(ctx, param, value):\n if value:\n try:\n start, end = map(int, re.split(r\"[;,\\-]\", value))\n return (start, end)\n except Exception:\n logger.error(\"Resolution option is invalid.\")\n exit()\n\n\nrender_options = option_group(\n \"Render Options\",\n option(\n \"-n\",\n \"--from_animation_number\",\n callback=validate_scene_range,\n help=\"Start rendering from n_0 until n_1. If n_1 is left unspecified, \"\n \"renders all scenes after n_0.\",\n default=None,\n ),\n option(\n \"-a\",\n \"--write_all\",\n is_flag=True,\n help=\"Render all scenes in the input file.\",\n default=None,\n ),\n option(\n \"--format\",\n type=click.Choice([\"png\", \"gif\", \"mp4\", \"webm\", \"mov\"], case_sensitive=False),\n default=None,\n ),\n option(\"-s\", \"--save_last_frame\", is_flag=True, default=None),\n option(\n \"-q\",\n \"--quality\",\n default=None,\n type=click.Choice(\n list(reversed([q[\"flag\"] for q in QUALITIES.values() if q[\"flag\"]])), # type: ignore\n case_sensitive=False,\n ),\n help=\"Render quality at the follow resolution framerates, respectively: \"\n + \", \".join(\n reversed(\n [\n f'{q[\"pixel_width\"]}x{q[\"pixel_height\"]} {q[\"frame_rate\"]}FPS'\n for q in QUALITIES.values()\n if q[\"flag\"]\n ]\n )\n ),\n ),\n option(\n \"-r\",\n \"--resolution\",\n callback=validate_resolution,\n default=None,\n help='Resolution in \"W,H\" for when 16:9 aspect ratio isn\\'t possible.',\n ),\n option(\n \"--fps\",\n \"--frame_rate\",\n \"frame_rate\",\n type=float,\n default=None,\n help=\"Render at this frame rate.\",\n ),\n option(\n \"--renderer\",\n type=click.Choice(\n [renderer_type.value for renderer_type in RendererType],\n case_sensitive=False,\n ),\n help=\"Select a renderer for your Scene.\",\n default=\"cairo\",\n ),\n option(\n \"-g\",\n \"--save_pngs\",\n is_flag=True,\n default=None,\n help=\"Save each frame as png (Deprecated).\",\n ),\n option(\n \"-i\",\n \"--save_as_gif\",\n default=None,\n is_flag=True,\n help=\"Save as a gif (Deprecated).\",\n ),\n option(\n \"--save_sections\",\n default=None,\n is_flag=True,\n help=\"Save section videos in addition to movie file.\",\n ),\n option(\n \"-s\",\n \"--save_last_frame\",\n default=None,\n is_flag=True,\n help=\"Save last frame as png (Deprecated).\",\n ),\n option(\n \"-t\",\n \"--transparent\",\n is_flag=True,\n help=\"Render scenes with alpha 
channel.\",\n ),\n option(\n \"--use_projection_fill_shaders\",\n is_flag=True,\n help=\"Use shaders for OpenGLVMobject fill which are compatible with transformation matrices.\",\n default=None,\n ),\n option(\n \"--use_projection_stroke_shaders\",\n is_flag=True,\n help=\"Use shaders for OpenGLVMobject stroke which are compatible with transformation matrices.\",\n default=None,\n ),\n)\n", "path": "manim/cli/render/render_options.py"}]} | 1,551 | 122 |
gh_patches_debug_39951 | rasdani/github-patches | git_diff | liqd__a4-opin-346 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Choose template: Small issues
There are some small wording issues when you choose a template to create a project in the dashboard. See comments in screenshot.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/dashboard/templatetags/dashboard_templatetags.py`
Content:
```
1 from django import template
2
3 register = template.Library()
4
5
6 @register.simple_tag
7 def selected(request, pattern):
8 path = request.path
9 if path == pattern:
10 return 'selected'
11 return ''
12
```
Path: `euth/dashboard/urls.py`
Content:
```
1 from django.conf.urls import url
2
3 from . import views
4
5 urlpatterns = [
6 url(
7 r'^$',
8 views.dashboard,
9 name='dashboard'),
10 url(
11 r'^profile$',
12 views.DashboardProfileView.as_view(),
13 name='dashboard-profile'),
14 url(
15 r'^email/$',
16 views.DashboardEmailView.as_view(),
17 name='dashboard-email'
18 ),
19 url(
20 r'^connections/$',
21 views.DashboardAccountView.as_view(),
22 name='dashboard-connections'
23 ),
24 url(
25 r'^(?P<organisation_slug>[-\w_]+)/$',
26 views.DashboardOrganisationUpdateView.as_view(),
27 name='dashboard-organisation-edit'
28 ),
29 url(
30 r'^(?P<organisation_slug>[-\w_]+)/projects/$',
31 views.DashboardProjectListView.as_view(),
32 name='dashboard-project-list'),
33 url(r'^(?P<organisation_slug>[-\w_]+)/blueprints/$',
34 views.DashboardBlueprintListView.as_view(),
35 name='dashboard-blueprint-list'),
36 url(r'^(?P<organisation_slug>[-\w_]+)/blueprints/'
37 r'(?P<blueprint_slug>[-\w_]+)/$',
38 views.DashboardProjectCreateView.as_view(),
39 name='dashboard-project-create'),
40 url(
41 r'^(?P<organisation_slug>[-\w_]+)/projects/(?P<slug>[-\w_]+)/$',
42 views.DashboardProjectUpdateView.as_view(),
43 name='dashboard-project-edit'
44 ),
45 url(
46 r'^(?P<organisation_slug>[-\w_]+)/projects/(?P<slug>[-\w_]+)/users$',
47 views.DashboardProjectUserView.as_view(),
48 name='dashboard-project-users'
49 ),
50 url(
51 r'^(?P<organisation_slug>[-\w_]+)/projects/'
52 r'(?P<slug>[-\w_]+)/users/invite$',
53 views.DashboardProjectInviteView.as_view(),
54 name='dashboard-project-invite'
55 ),
56 ]
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/euth/dashboard/templatetags/dashboard_templatetags.py b/euth/dashboard/templatetags/dashboard_templatetags.py
deleted file mode 100644
--- a/euth/dashboard/templatetags/dashboard_templatetags.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from django import template
-
-register = template.Library()
-
-
[email protected]_tag
-def selected(request, pattern):
- path = request.path
- if path == pattern:
- return 'selected'
- return ''
diff --git a/euth/dashboard/urls.py b/euth/dashboard/urls.py
--- a/euth/dashboard/urls.py
+++ b/euth/dashboard/urls.py
@@ -10,47 +10,57 @@
url(
r'^profile$',
views.DashboardProfileView.as_view(),
+ {'dashboard_menu_item': 'profile'},
name='dashboard-profile'),
url(
r'^email/$',
views.DashboardEmailView.as_view(),
+ {'dashboard_menu_item': 'email'},
name='dashboard-email'
),
url(
r'^connections/$',
views.DashboardAccountView.as_view(),
+ {'dashboard_menu_item': 'connections'},
name='dashboard-connections'
),
url(
r'^(?P<organisation_slug>[-\w_]+)/$',
views.DashboardOrganisationUpdateView.as_view(),
+ {'dashboard_menu_item': 'organisation'},
name='dashboard-organisation-edit'
),
url(
r'^(?P<organisation_slug>[-\w_]+)/projects/$',
views.DashboardProjectListView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-project-list'),
url(r'^(?P<organisation_slug>[-\w_]+)/blueprints/$',
views.DashboardBlueprintListView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-blueprint-list'),
url(r'^(?P<organisation_slug>[-\w_]+)/blueprints/'
r'(?P<blueprint_slug>[-\w_]+)/$',
views.DashboardProjectCreateView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-project-create'),
url(
r'^(?P<organisation_slug>[-\w_]+)/projects/(?P<slug>[-\w_]+)/$',
views.DashboardProjectUpdateView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-project-edit'
),
url(
r'^(?P<organisation_slug>[-\w_]+)/projects/(?P<slug>[-\w_]+)/users$',
views.DashboardProjectUserView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-project-users'
),
url(
r'^(?P<organisation_slug>[-\w_]+)/projects/'
r'(?P<slug>[-\w_]+)/users/invite$',
views.DashboardProjectInviteView.as_view(),
+ {'dashboard_menu_item': 'project'},
name='dashboard-project-invite'
),
]
| {"golden_diff": "diff --git a/euth/dashboard/templatetags/dashboard_templatetags.py b/euth/dashboard/templatetags/dashboard_templatetags.py\ndeleted file mode 100644\n--- a/euth/dashboard/templatetags/dashboard_templatetags.py\n+++ /dev/null\n@@ -1,11 +0,0 @@\n-from django import template\n-\n-register = template.Library()\n-\n-\[email protected]_tag\n-def selected(request, pattern):\n- path = request.path\n- if path == pattern:\n- return 'selected'\n- return ''\ndiff --git a/euth/dashboard/urls.py b/euth/dashboard/urls.py\n--- a/euth/dashboard/urls.py\n+++ b/euth/dashboard/urls.py\n@@ -10,47 +10,57 @@\n url(\n r'^profile$',\n views.DashboardProfileView.as_view(),\n+ {'dashboard_menu_item': 'profile'},\n name='dashboard-profile'),\n url(\n r'^email/$',\n views.DashboardEmailView.as_view(),\n+ {'dashboard_menu_item': 'email'},\n name='dashboard-email'\n ),\n url(\n r'^connections/$',\n views.DashboardAccountView.as_view(),\n+ {'dashboard_menu_item': 'connections'},\n name='dashboard-connections'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/$',\n views.DashboardOrganisationUpdateView.as_view(),\n+ {'dashboard_menu_item': 'organisation'},\n name='dashboard-organisation-edit'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/$',\n views.DashboardProjectListView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-project-list'),\n url(r'^(?P<organisation_slug>[-\\w_]+)/blueprints/$',\n views.DashboardBlueprintListView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-blueprint-list'),\n url(r'^(?P<organisation_slug>[-\\w_]+)/blueprints/'\n r'(?P<blueprint_slug>[-\\w_]+)/$',\n views.DashboardProjectCreateView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-project-create'),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/(?P<slug>[-\\w_]+)/$',\n views.DashboardProjectUpdateView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-project-edit'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/(?P<slug>[-\\w_]+)/users$',\n views.DashboardProjectUserView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-project-users'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/'\n r'(?P<slug>[-\\w_]+)/users/invite$',\n views.DashboardProjectInviteView.as_view(),\n+ {'dashboard_menu_item': 'project'},\n name='dashboard-project-invite'\n ),\n ]\n", "issue": "Choose template: Small issues\nThere are some small wording issues when you choose a template to create a project in the dashboard. See comments in screenshot.\n\n\n\n", "before_files": [{"content": "from django import template\n\nregister = template.Library()\n\n\[email protected]_tag\ndef selected(request, pattern):\n path = request.path\n if path == pattern:\n return 'selected'\n return ''\n", "path": "euth/dashboard/templatetags/dashboard_templatetags.py"}, {"content": "from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(\n r'^$',\n views.dashboard,\n name='dashboard'),\n url(\n r'^profile$',\n views.DashboardProfileView.as_view(),\n name='dashboard-profile'),\n url(\n r'^email/$',\n views.DashboardEmailView.as_view(),\n name='dashboard-email'\n ),\n url(\n r'^connections/$',\n views.DashboardAccountView.as_view(),\n name='dashboard-connections'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/$',\n views.DashboardOrganisationUpdateView.as_view(),\n name='dashboard-organisation-edit'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/$',\n views.DashboardProjectListView.as_view(),\n name='dashboard-project-list'),\n url(r'^(?P<organisation_slug>[-\\w_]+)/blueprints/$',\n views.DashboardBlueprintListView.as_view(),\n name='dashboard-blueprint-list'),\n url(r'^(?P<organisation_slug>[-\\w_]+)/blueprints/'\n r'(?P<blueprint_slug>[-\\w_]+)/$',\n views.DashboardProjectCreateView.as_view(),\n name='dashboard-project-create'),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/(?P<slug>[-\\w_]+)/$',\n views.DashboardProjectUpdateView.as_view(),\n name='dashboard-project-edit'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/(?P<slug>[-\\w_]+)/users$',\n views.DashboardProjectUserView.as_view(),\n name='dashboard-project-users'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/'\n r'(?P<slug>[-\\w_]+)/users/invite$',\n views.DashboardProjectInviteView.as_view(),\n name='dashboard-project-invite'\n ),\n]\n", "path": "euth/dashboard/urls.py"}], "after_files": [{"content": null, "path": "euth/dashboard/templatetags/dashboard_templatetags.py"}, {"content": "from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(\n r'^$',\n views.dashboard,\n name='dashboard'),\n url(\n r'^profile$',\n views.DashboardProfileView.as_view(),\n {'dashboard_menu_item': 'profile'},\n name='dashboard-profile'),\n url(\n r'^email/$',\n views.DashboardEmailView.as_view(),\n {'dashboard_menu_item': 'email'},\n name='dashboard-email'\n ),\n url(\n r'^connections/$',\n views.DashboardAccountView.as_view(),\n {'dashboard_menu_item': 'connections'},\n name='dashboard-connections'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/$',\n views.DashboardOrganisationUpdateView.as_view(),\n {'dashboard_menu_item': 'organisation'},\n name='dashboard-organisation-edit'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/$',\n views.DashboardProjectListView.as_view(),\n {'dashboard_menu_item': 'project'},\n name='dashboard-project-list'),\n url(r'^(?P<organisation_slug>[-\\w_]+)/blueprints/$',\n views.DashboardBlueprintListView.as_view(),\n {'dashboard_menu_item': 'project'},\n name='dashboard-blueprint-list'),\n url(r'^(?P<organisation_slug>[-\\w_]+)/blueprints/'\n r'(?P<blueprint_slug>[-\\w_]+)/$',\n views.DashboardProjectCreateView.as_view(),\n {'dashboard_menu_item': 'project'},\n name='dashboard-project-create'),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/(?P<slug>[-\\w_]+)/$',\n views.DashboardProjectUpdateView.as_view(),\n {'dashboard_menu_item': 'project'},\n name='dashboard-project-edit'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/(?P<slug>[-\\w_]+)/users$',\n views.DashboardProjectUserView.as_view(),\n {'dashboard_menu_item': 'project'},\n name='dashboard-project-users'\n ),\n url(\n r'^(?P<organisation_slug>[-\\w_]+)/projects/'\n r'(?P<slug>[-\\w_]+)/users/invite$',\n views.DashboardProjectInviteView.as_view(),\n {'dashboard_menu_item': 'project'},\n name='dashboard-project-invite'\n ),\n]\n", "path": "euth/dashboard/urls.py"}]} | 
986 | 685 |
gh_patches_debug_33519 | rasdani/github-patches | git_diff | TheAlgorithms__Python-10121 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve our test coverage
### Feature description
Many of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.
### How to find low-coverage files
Go to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under "Run Tests" and scroll down until you find the section on code coverage:
```
---------- coverage: platform linux, python 3.12.0-final-0 -----------
Name Stmts Miss Cover Missing
-----------------------------------------------------------------------------------------------------------
quantum/q_fourier_transform.py 30 30 0% 14-93
scripts/validate_solutions.py 54 54 0% 2-94
strings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129
...
```
The "Cover" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.
Some files will naturally be hard to write tests for. For example, a file may be poorly written because it lacks any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.
_**When you open your PR, put "Contributes to #9943" in the PR description.**_ Do not use the word "fixes", "resolves", or "closes". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.
### How to add doctests
A doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:
```py
def add(a: int, b: int) -> int:
"""
Adds two non-negative numbers.
>>> add(1, 1)
2
>>> add(2, 5)
7
>>> add(1, 0)
1
>>> add(-1, -1)
Traceback (most recent call last):
...
ValueError: Numbers must be non-negative
"""
```
For every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).
Do not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.
_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `maths/binary_exponentiation_2.py`
Content:
```
1 """
2 * Binary Exponentiation for Powers
3 * This is a method to find a^b in a time complexity of O(log b)
4 * This is one of the most commonly used methods of finding powers.
5 * Also useful in cases where solution to (a^b)%c is required,
6 * where a,b,c can be numbers over the computers calculation limits.
7 * Done using iteration, can also be done using recursion
8
9 * @author chinmoy159
10 * @version 1.0 dated 10/08/2017
11 """
12
13
14 def b_expo(a: int, b: int) -> int:
15 res = 1
16 while b > 0:
17 if b & 1:
18 res *= a
19
20 a *= a
21 b >>= 1
22
23 return res
24
25
26 def b_expo_mod(a: int, b: int, c: int) -> int:
27 res = 1
28 while b > 0:
29 if b & 1:
30 res = ((res % c) * (a % c)) % c
31
32 a *= a
33 b >>= 1
34
35 return res
36
37
38 """
39 * Wondering how this method works !
40 * It's pretty simple.
41 * Let's say you need to calculate a ^ b
42 * RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2
43 * RULE 2 : IF b is ODD, then ---- a ^ b = a * (a ^ (b - 1)) :: where (b - 1) is even.
44 * Once b is even, repeat the process to get a ^ b
45 * Repeat the process till b = 1 OR b = 0, because a^1 = a AND a^0 = 1
46 *
47 * As far as the modulo is concerned,
48 * the fact : (a*b) % c = ((a%c) * (b%c)) % c
49 * Now apply RULE 1 OR 2 whichever is required.
50 """
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/maths/binary_exponentiation_2.py b/maths/binary_exponentiation_2.py
--- a/maths/binary_exponentiation_2.py
+++ b/maths/binary_exponentiation_2.py
@@ -1,17 +1,33 @@
"""
-* Binary Exponentiation for Powers
-* This is a method to find a^b in a time complexity of O(log b)
-* This is one of the most commonly used methods of finding powers.
-* Also useful in cases where solution to (a^b)%c is required,
-* where a,b,c can be numbers over the computers calculation limits.
-* Done using iteration, can also be done using recursion
-
-* @author chinmoy159
-* @version 1.0 dated 10/08/2017
+Binary Exponentiation
+This is a method to find a^b in O(log b) time complexity
+This is one of the most commonly used methods of exponentiation
+It's also useful when the solution to (a^b) % c is required because a, b, c may be
+over the computer's calculation limits
+
+Let's say you need to calculate a ^ b
+- RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2
+- RULE 2 : IF b is odd, then a ^ b = a * (a ^ (b - 1)), where b - 1 is even
+Once b is even, repeat the process until b = 1 or b = 0, because a^1 = a and a^0 = 1
+
+For modular exponentiation, we use the fact that (a*b) % c = ((a%c) * (b%c)) % c
+Now apply RULE 1 or 2 as required
+
+@author chinmoy159
"""
def b_expo(a: int, b: int) -> int:
+ """
+ >>> b_expo(2, 10)
+ 1024
+ >>> b_expo(9, 0)
+ 1
+ >>> b_expo(0, 12)
+ 0
+ >>> b_expo(4, 12)
+ 16777216
+ """
res = 1
while b > 0:
if b & 1:
@@ -24,6 +40,16 @@
def b_expo_mod(a: int, b: int, c: int) -> int:
+ """
+ >>> b_expo_mod(2, 10, 1000000007)
+ 1024
+ >>> b_expo_mod(11, 13, 19)
+ 11
+ >>> b_expo_mod(0, 19, 20)
+ 0
+ >>> b_expo_mod(15, 5, 4)
+ 3
+ """
res = 1
while b > 0:
if b & 1:
@@ -33,18 +59,3 @@
b >>= 1
return res
-
-
-"""
-* Wondering how this method works !
-* It's pretty simple.
-* Let's say you need to calculate a ^ b
-* RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2
-* RULE 2 : IF b is ODD, then ---- a ^ b = a * (a ^ (b - 1)) :: where (b - 1) is even.
-* Once b is even, repeat the process to get a ^ b
-* Repeat the process till b = 1 OR b = 0, because a^1 = a AND a^0 = 1
-*
-* As far as the modulo is concerned,
-* the fact : (a*b) % c = ((a%c) * (b%c)) % c
-* Now apply RULE 1 OR 2 whichever is required.
-"""
| {"golden_diff": "diff --git a/maths/binary_exponentiation_2.py b/maths/binary_exponentiation_2.py\n--- a/maths/binary_exponentiation_2.py\n+++ b/maths/binary_exponentiation_2.py\n@@ -1,17 +1,33 @@\n \"\"\"\n-* Binary Exponentiation for Powers\n-* This is a method to find a^b in a time complexity of O(log b)\n-* This is one of the most commonly used methods of finding powers.\n-* Also useful in cases where solution to (a^b)%c is required,\n-* where a,b,c can be numbers over the computers calculation limits.\n-* Done using iteration, can also be done using recursion\n-\n-* @author chinmoy159\n-* @version 1.0 dated 10/08/2017\n+Binary Exponentiation\n+This is a method to find a^b in O(log b) time complexity\n+This is one of the most commonly used methods of exponentiation\n+It's also useful when the solution to (a^b) % c is required because a, b, c may be\n+over the computer's calculation limits\n+\n+Let's say you need to calculate a ^ b\n+- RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2\n+- RULE 2 : IF b is odd, then a ^ b = a * (a ^ (b - 1)), where b - 1 is even\n+Once b is even, repeat the process until b = 1 or b = 0, because a^1 = a and a^0 = 1\n+\n+For modular exponentiation, we use the fact that (a*b) % c = ((a%c) * (b%c)) % c\n+Now apply RULE 1 or 2 as required\n+\n+@author chinmoy159\n \"\"\"\n \n \n def b_expo(a: int, b: int) -> int:\n+ \"\"\"\n+ >>> b_expo(2, 10)\n+ 1024\n+ >>> b_expo(9, 0)\n+ 1\n+ >>> b_expo(0, 12)\n+ 0\n+ >>> b_expo(4, 12)\n+ 16777216\n+ \"\"\"\n res = 1\n while b > 0:\n if b & 1:\n@@ -24,6 +40,16 @@\n \n \n def b_expo_mod(a: int, b: int, c: int) -> int:\n+ \"\"\"\n+ >>> b_expo_mod(2, 10, 1000000007)\n+ 1024\n+ >>> b_expo_mod(11, 13, 19)\n+ 11\n+ >>> b_expo_mod(0, 19, 20)\n+ 0\n+ >>> b_expo_mod(15, 5, 4)\n+ 3\n+ \"\"\"\n res = 1\n while b > 0:\n if b & 1:\n@@ -33,18 +59,3 @@\n b >>= 1\n \n return res\n-\n-\n-\"\"\"\n-* Wondering how this method works !\n-* It's pretty simple.\n-* Let's say you need to calculate a ^ b\n-* RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2\n-* RULE 2 : IF b is ODD, then ---- a ^ b = a * (a ^ (b - 1)) :: where (b - 1) is even.\n-* Once b is even, repeat the process to get a ^ b\n-* Repeat the process till b = 1 OR b = 0, because a^1 = a AND a^0 = 1\n-*\n-* As far as the modulo is concerned,\n-* the fact : (a*b) % c = ((a%c) * (b%c)) % c\n-* Now apply RULE 1 OR 2 whichever is required.\n-\"\"\"\n", "issue": "Improve our test coverage\n### Feature description\r\n\r\nMany of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.\r\n\r\n### How to find low-coverage files\r\n\r\nGo to the Actions tab in this repository and find the most recent **build** workflow run. 
Open the logs under \"Run Tests\" and scroll down until you find the section on code coverage:\r\n```\r\n---------- coverage: platform linux, python 3.12.0-final-0 -----------\r\nName Stmts Miss Cover Missing\r\n-----------------------------------------------------------------------------------------------------------\r\nquantum/q_fourier_transform.py 30 30 0% 14-93\r\nscripts/validate_solutions.py 54 54 0% 2-94\r\nstrings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129\r\n...\r\n```\r\nThe \"Cover\" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.\r\n\r\nSome files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.\r\n\r\n_**When you open your PR, put \"Contributes to #9943\" in the PR description.**_ Do not use the word \"fixes\", \"resolves\", or \"closes\". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.\r\n\r\n### How to add doctests\r\n\r\nA doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:\r\n```py\r\ndef add(a: int, b: int) -> int:\r\n \"\"\"\r\n Adds two non-negative numbers.\r\n >>> add(1, 1)\r\n 2\r\n >>> add(2, 5)\r\n 7\r\n >>> add(1, 0)\r\n 1\r\n >>> add(-1, -1)\r\n Traceback (most recent last):\r\n ...\r\n ValueError: Numbers must be non-negative\r\n \"\"\"\r\n```\r\nFor every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).\r\n\r\nDo not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. 
**Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.\r\n\r\n_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_\n", "before_files": [{"content": "\"\"\"\n* Binary Exponentiation for Powers\n* This is a method to find a^b in a time complexity of O(log b)\n* This is one of the most commonly used methods of finding powers.\n* Also useful in cases where solution to (a^b)%c is required,\n* where a,b,c can be numbers over the computers calculation limits.\n* Done using iteration, can also be done using recursion\n\n* @author chinmoy159\n* @version 1.0 dated 10/08/2017\n\"\"\"\n\n\ndef b_expo(a: int, b: int) -> int:\n res = 1\n while b > 0:\n if b & 1:\n res *= a\n\n a *= a\n b >>= 1\n\n return res\n\n\ndef b_expo_mod(a: int, b: int, c: int) -> int:\n res = 1\n while b > 0:\n if b & 1:\n res = ((res % c) * (a % c)) % c\n\n a *= a\n b >>= 1\n\n return res\n\n\n\"\"\"\n* Wondering how this method works !\n* It's pretty simple.\n* Let's say you need to calculate a ^ b\n* RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2\n* RULE 2 : IF b is ODD, then ---- a ^ b = a * (a ^ (b - 1)) :: where (b - 1) is even.\n* Once b is even, repeat the process to get a ^ b\n* Repeat the process till b = 1 OR b = 0, because a^1 = a AND a^0 = 1\n*\n* As far as the modulo is concerned,\n* the fact : (a*b) % c = ((a%c) * (b%c)) % c\n* Now apply RULE 1 OR 2 whichever is required.\n\"\"\"\n", "path": "maths/binary_exponentiation_2.py"}], "after_files": [{"content": "\"\"\"\nBinary Exponentiation\nThis is a method to find a^b in O(log b) time complexity\nThis is one of the most commonly used methods of exponentiation\nIt's also useful when the solution to (a^b) % c is required because a, b, c may be\nover the computer's calculation limits\n\nLet's say you need to calculate a ^ b\n- RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2\n- RULE 2 : IF b is odd, then a ^ b = a * (a ^ (b - 1)), where b - 1 is even\nOnce b is even, repeat the process until b = 1 or b = 0, because a^1 = a and a^0 = 1\n\nFor modular exponentiation, we use the fact that (a*b) % c = ((a%c) * (b%c)) % c\nNow apply RULE 1 or 2 as required\n\n@author chinmoy159\n\"\"\"\n\n\ndef b_expo(a: int, b: int) -> int:\n \"\"\"\n >>> b_expo(2, 10)\n 1024\n >>> b_expo(9, 0)\n 1\n >>> b_expo(0, 12)\n 0\n >>> b_expo(4, 12)\n 16777216\n \"\"\"\n res = 1\n while b > 0:\n if b & 1:\n res *= a\n\n a *= a\n b >>= 1\n\n return res\n\n\ndef b_expo_mod(a: int, b: int, c: int) -> int:\n \"\"\"\n >>> b_expo_mod(2, 10, 1000000007)\n 1024\n >>> b_expo_mod(11, 13, 19)\n 11\n >>> b_expo_mod(0, 19, 20)\n 0\n >>> b_expo_mod(15, 5, 4)\n 3\n \"\"\"\n res = 1\n while b > 0:\n if b & 1:\n res = ((res % c) * (a % c)) % c\n\n a *= a\n b >>= 1\n\n return res\n", "path": "maths/binary_exponentiation_2.py"}]} | 1,656 | 955 |
gh_patches_debug_25843 | rasdani/github-patches | git_diff | fidals__shopelectro-965 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create a custom page for 500 error
The page should have an apology and a suggestion to continue the purchase with the help of a consultant.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/urls.py`
Content:
```
1 from datetime import timedelta
2 from collections import OrderedDict
3
4 from django.conf import settings
5 from django.conf.urls import url, include
6 from django.conf.urls.static import static
7 from django.contrib.sitemaps.views import sitemap
8 from django.views.decorators.cache import cache_page, never_cache
9
10 from pages.views import RobotsView, SitemapPage
11 from pages.urls import custom_page_url
12
13 from shopelectro import sitemaps, views
14 from shopelectro.admin import se_admin
15
16
17 def cached_time(*args, **kwargs) -> int:
18 """Return value of time for caching in seconds."""
19 return int(timedelta(*args, **kwargs).total_seconds())
20
21
22 # Orders sitemaps instances
23 sitemaps = OrderedDict([
24 ('index', sitemaps.IndexSitemap),
25 ('category', sitemaps.CategorySitemap),
26 ('category-with-tags', sitemaps.CategoryWithTagsSitemap),
27 ('products', sitemaps.ProductSitemap),
28 ('site', sitemaps.PagesSitemap)
29 ])
30
31 # disable cache
32 if settings.DEBUG:
33 def cache_page(arg): # Ignore PyFlakesBear
34 if callable(arg):
35 return arg
36 return cache_page
37
38 cached_60d = cache_page(cached_time(days=60))
39 cached_2h = cache_page(cached_time(hours=2))
40
41 admin_urls = [
42 url(r'^', se_admin.urls),
43 url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'),
44 url(r'^get-tree-items/$', views.Tree.as_view()),
45 url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()),
46 url(r'^table-editor-api/$', views.TableEditorAPI.as_view()),
47 url(r'^select2/', include('django_select2.urls')),
48 ]
49
50 catalog_urls = [
51 # "category" group
52 url(r'^categories/(?P<slug>[\w-]+)/$',
53 cached_2h(views.CategoryPage.as_view()), name='category'),
54 url(r'^categories/(?P<slug>[\w-]+)/tags/(?P<tags>[\w_-]+)/$',
55 cached_2h(views.CategoryPage.as_view()), name='category'),
56 url(r'^categories/(?P<slug>[\w-]+)/(?P<sorting>[0-9]*)/$',
57 views.CategoryPage.as_view(), name='category'),
58 url(r'^categories/(?P<slug>[\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\w_-]+)/$',
59 views.CategoryPage.as_view(), name='category'),
60 # "load more" group
61 url(r'categories/(?P<slug>[\w-]+)/load-more/'
62 r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$',
63 views.load_more, name='load_more'),
64 url(r'categories/(?P<slug>[\w-]+)/load-more/'
65 r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\w_-]+)/$',
66 views.load_more, name='load_more'),
67 # rest of urls
68 url(r'^no-images/$', views.ProductsWithoutImages.as_view(),
69 name='products_without_images'),
70 url(r'^no-text/$', views.ProductsWithoutText.as_view(),
71 name='products_without_text'),
72 url(r'^products/(?P<product_vendor_code>[0-9]+)/$',
73 views.ProductPage.as_view(), name='product'),
74 ]
75
76 service_urls = [
77 url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'),
78 url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'),
79 url(r'^ya-feedback/redirect/$',
80 views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'),
81 url(r'^ya-feedback/request/$',
82 views.ya_feedback_request, name='ya_feedback_request'),
83 ]
84
85 search_urls = [
86 url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'),
87 ]
88
89 ecommerce_urls = [
90 url(r'^cart-get/$', never_cache(views.Cart.as_view()), name='cart_get'),
91 url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'),
92 url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'),
93 url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'),
94 url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'),
95 url(r'^order-call/$', views.order_call),
96 url(r'^one-click-buy/$', views.one_click_buy),
97 url(r'^yandex-order/$', views.YandexOrder.as_view()),
98 url(r'', include('ecommerce.urls')),
99 ]
100
101 custom_pages = [
102 # can't use just `r'^(?P<page>)$'` with no args to views, because reverse don't work
103 custom_page_url(r'^$', cached_2h(views.IndexPage.as_view()), {'page': ''}, name='index'),
104 custom_page_url(r'^(?P<page>robots\.txt)$', RobotsView.as_view()),
105 custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),
106 custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.category_matrix)),
107 custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),
108 # these pages should show only actual state
109 custom_page_url(r'^shop/(?P<page>order)/$', never_cache(views.OrderPage.as_view())),
110 custom_page_url(r'^shop/(?P<page>order-success)/$', never_cache(views.OrderSuccess.as_view())),
111 ]
112
113 urlpatterns = [
114 url('', include(custom_pages)),
115 url(r'^admin/', include(admin_urls)),
116 url(r'^catalog/', include(catalog_urls)),
117 url(r'^pages/', include('pages.urls')),
118 url(r'^save-feedback/$', views.save_feedback),
119 url(r'^delete-feedback/$', views.delete_feedback),
120 url(r'^set-view-type/$', views.set_view_type, name='set_view_type'),
121 url(r'^shop/', include(ecommerce_urls)),
122 url(r'^search/', include(search_urls)),
123 url(r'^service/', include(service_urls)),
124 url(r'^sitemap\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'),
125 ]
126
127 if settings.DEBUG:
128 import debug_toolbar
129
130 urlpatterns += [
131 url(r'^__debug__/', include(debug_toolbar.urls)),
132 *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),
133 *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),
134 ]
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shopelectro/urls.py b/shopelectro/urls.py
--- a/shopelectro/urls.py
+++ b/shopelectro/urls.py
@@ -1,15 +1,15 @@
-from datetime import timedelta
from collections import OrderedDict
+from datetime import timedelta
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib.sitemaps.views import sitemap
from django.views.decorators.cache import cache_page, never_cache
+from django.views.generic import TemplateView
-from pages.views import RobotsView, SitemapPage
from pages.urls import custom_page_url
-
+from pages.views import RobotsView, SitemapPage
from shopelectro import sitemaps, views
from shopelectro.admin import se_admin
@@ -132,3 +132,11 @@
*static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),
*static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),
]
+
+# Test and Debug environments replace real 404 and 500 error with stack traces.
+# We expose real 404 and 500 pages with separated urls to test them.
+if settings.TEST_ENV or settings.DEBUG:
+ urlpatterns += [
+ url(r'^404/$', TemplateView.as_view(template_name='404.html')),
+ url(r'^500/$', TemplateView.as_view(template_name='500.html')),
+ ]
| {"golden_diff": "diff --git a/shopelectro/urls.py b/shopelectro/urls.py\n--- a/shopelectro/urls.py\n+++ b/shopelectro/urls.py\n@@ -1,15 +1,15 @@\n-from datetime import timedelta\n from collections import OrderedDict\n+from datetime import timedelta\n \n from django.conf import settings\n from django.conf.urls import url, include\n from django.conf.urls.static import static\n from django.contrib.sitemaps.views import sitemap\n from django.views.decorators.cache import cache_page, never_cache\n+from django.views.generic import TemplateView\n \n-from pages.views import RobotsView, SitemapPage\n from pages.urls import custom_page_url\n-\n+from pages.views import RobotsView, SitemapPage\n from shopelectro import sitemaps, views\n from shopelectro.admin import se_admin\n \n@@ -132,3 +132,11 @@\n *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),\n *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),\n ]\n+\n+# Test and Debug environments replace real 404 and 500 error with stack traces.\n+# We expose real 404 and 500 pages with separated urls to test them.\n+if settings.TEST_ENV or settings.DEBUG:\n+ urlpatterns += [\n+ url(r'^404/$', TemplateView.as_view(template_name='404.html')),\n+ url(r'^500/$', TemplateView.as_view(template_name='500.html')),\n+ ]\n", "issue": "Create a custom page for 500 error\nThe page should have an apology and an suggestion to continue the purchase with the help of a consultant\n", "before_files": [{"content": "from datetime import timedelta\nfrom collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.views.decorators.cache import cache_page, never_cache\n\nfrom pages.views import RobotsView, SitemapPage\nfrom pages.urls import custom_page_url\n\nfrom shopelectro import sitemaps, views\nfrom shopelectro.admin import se_admin\n\n\ndef cached_time(*args, **kwargs) -> int:\n \"\"\"Return value of time for caching in seconds.\"\"\"\n return int(timedelta(*args, **kwargs).total_seconds())\n\n\n# Orders sitemaps instances\nsitemaps = OrderedDict([\n ('index', sitemaps.IndexSitemap),\n ('category', sitemaps.CategorySitemap),\n ('category-with-tags', sitemaps.CategoryWithTagsSitemap),\n ('products', sitemaps.ProductSitemap),\n ('site', sitemaps.PagesSitemap)\n])\n\n# disable cache\nif settings.DEBUG:\n def cache_page(arg): # Ignore PyFlakesBear\n if callable(arg):\n return arg\n return cache_page\n\ncached_60d = cache_page(cached_time(days=60))\ncached_2h = cache_page(cached_time(hours=2))\n\nadmin_urls = [\n url(r'^', se_admin.urls),\n url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'),\n url(r'^get-tree-items/$', views.Tree.as_view()),\n url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()),\n url(r'^table-editor-api/$', views.TableEditorAPI.as_view()),\n url(r'^select2/', include('django_select2.urls')),\n]\n\ncatalog_urls = [\n # \"category\" group\n url(r'^categories/(?P<slug>[\\w-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/tags/(?P<tags>[\\w_-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/$',\n views.CategoryPage.as_view(), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w_-]+)/$',\n views.CategoryPage.as_view(), name='category'),\n # \"load more\" 
group\n url(r'categories/(?P<slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$',\n views.load_more, name='load_more'),\n url(r'categories/(?P<slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w_-]+)/$',\n views.load_more, name='load_more'),\n # rest of urls\n url(r'^no-images/$', views.ProductsWithoutImages.as_view(),\n name='products_without_images'),\n url(r'^no-text/$', views.ProductsWithoutText.as_view(),\n name='products_without_text'),\n url(r'^products/(?P<product_vendor_code>[0-9]+)/$',\n views.ProductPage.as_view(), name='product'),\n]\n\nservice_urls = [\n url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'),\n url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'),\n url(r'^ya-feedback/redirect/$',\n views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'),\n url(r'^ya-feedback/request/$',\n views.ya_feedback_request, name='ya_feedback_request'),\n]\n\nsearch_urls = [\n url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'),\n]\n\necommerce_urls = [\n url(r'^cart-get/$', never_cache(views.Cart.as_view()), name='cart_get'),\n url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'),\n url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'),\n url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'),\n url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'),\n url(r'^order-call/$', views.order_call),\n url(r'^one-click-buy/$', views.one_click_buy),\n url(r'^yandex-order/$', views.YandexOrder.as_view()),\n url(r'', include('ecommerce.urls')),\n]\n\ncustom_pages = [\n # can't use just `r'^(?P<page>)$'` with no args to views, because reverse don't work\n custom_page_url(r'^$', cached_2h(views.IndexPage.as_view()), {'page': ''}, name='index'),\n custom_page_url(r'^(?P<page>robots\\.txt)$', RobotsView.as_view()),\n custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),\n custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.category_matrix)),\n custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),\n # these pages should show only actual state\n custom_page_url(r'^shop/(?P<page>order)/$', never_cache(views.OrderPage.as_view())),\n custom_page_url(r'^shop/(?P<page>order-success)/$', never_cache(views.OrderSuccess.as_view())),\n]\n\nurlpatterns = [\n url('', include(custom_pages)),\n url(r'^admin/', include(admin_urls)),\n url(r'^catalog/', include(catalog_urls)),\n url(r'^pages/', include('pages.urls')),\n url(r'^save-feedback/$', views.save_feedback),\n url(r'^delete-feedback/$', views.delete_feedback),\n url(r'^set-view-type/$', views.set_view_type, name='set_view_type'),\n url(r'^shop/', include(ecommerce_urls)),\n url(r'^search/', include(search_urls)),\n url(r'^service/', include(service_urls)),\n url(r'^sitemap\\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns += [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),\n *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),\n ]\n", "path": "shopelectro/urls.py"}], "after_files": [{"content": "from collections import OrderedDict\nfrom datetime import timedelta\n\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.views.decorators.cache import cache_page, 
never_cache\nfrom django.views.generic import TemplateView\n\nfrom pages.urls import custom_page_url\nfrom pages.views import RobotsView, SitemapPage\nfrom shopelectro import sitemaps, views\nfrom shopelectro.admin import se_admin\n\n\ndef cached_time(*args, **kwargs) -> int:\n \"\"\"Return value of time for caching in seconds.\"\"\"\n return int(timedelta(*args, **kwargs).total_seconds())\n\n\n# Orders sitemaps instances\nsitemaps = OrderedDict([\n ('index', sitemaps.IndexSitemap),\n ('category', sitemaps.CategorySitemap),\n ('category-with-tags', sitemaps.CategoryWithTagsSitemap),\n ('products', sitemaps.ProductSitemap),\n ('site', sitemaps.PagesSitemap)\n])\n\n# disable cache\nif settings.DEBUG:\n def cache_page(arg): # Ignore PyFlakesBear\n if callable(arg):\n return arg\n return cache_page\n\ncached_60d = cache_page(cached_time(days=60))\ncached_2h = cache_page(cached_time(hours=2))\n\nadmin_urls = [\n url(r'^', se_admin.urls),\n url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'),\n url(r'^get-tree-items/$', views.Tree.as_view()),\n url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()),\n url(r'^table-editor-api/$', views.TableEditorAPI.as_view()),\n url(r'^select2/', include('django_select2.urls')),\n]\n\ncatalog_urls = [\n # \"category\" group\n url(r'^categories/(?P<slug>[\\w-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/tags/(?P<tags>[\\w_-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/$',\n views.CategoryPage.as_view(), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w_-]+)/$',\n views.CategoryPage.as_view(), name='category'),\n # \"load more\" group\n url(r'categories/(?P<slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$',\n views.load_more, name='load_more'),\n url(r'categories/(?P<slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w_-]+)/$',\n views.load_more, name='load_more'),\n # rest of urls\n url(r'^no-images/$', views.ProductsWithoutImages.as_view(),\n name='products_without_images'),\n url(r'^no-text/$', views.ProductsWithoutText.as_view(),\n name='products_without_text'),\n url(r'^products/(?P<product_vendor_code>[0-9]+)/$',\n views.ProductPage.as_view(), name='product'),\n]\n\nservice_urls = [\n url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'),\n url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'),\n url(r'^ya-feedback/redirect/$',\n views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'),\n url(r'^ya-feedback/request/$',\n views.ya_feedback_request, name='ya_feedback_request'),\n]\n\nsearch_urls = [\n url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'),\n]\n\necommerce_urls = [\n url(r'^cart-get/$', never_cache(views.Cart.as_view()), name='cart_get'),\n url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'),\n url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'),\n url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'),\n url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'),\n url(r'^order-call/$', views.order_call),\n url(r'^one-click-buy/$', views.one_click_buy),\n url(r'^yandex-order/$', views.YandexOrder.as_view()),\n url(r'', include('ecommerce.urls')),\n]\n\ncustom_pages = [\n # can't use just `r'^(?P<page>)$'` with no args to views, because 
reverse don't work\n custom_page_url(r'^$', cached_2h(views.IndexPage.as_view()), {'page': ''}, name='index'),\n custom_page_url(r'^(?P<page>robots\\.txt)$', RobotsView.as_view()),\n custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),\n custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.category_matrix)),\n custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),\n # these pages should show only actual state\n custom_page_url(r'^shop/(?P<page>order)/$', never_cache(views.OrderPage.as_view())),\n custom_page_url(r'^shop/(?P<page>order-success)/$', never_cache(views.OrderSuccess.as_view())),\n]\n\nurlpatterns = [\n url('', include(custom_pages)),\n url(r'^admin/', include(admin_urls)),\n url(r'^catalog/', include(catalog_urls)),\n url(r'^pages/', include('pages.urls')),\n url(r'^save-feedback/$', views.save_feedback),\n url(r'^delete-feedback/$', views.delete_feedback),\n url(r'^set-view-type/$', views.set_view_type, name='set_view_type'),\n url(r'^shop/', include(ecommerce_urls)),\n url(r'^search/', include(search_urls)),\n url(r'^service/', include(service_urls)),\n url(r'^sitemap\\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns += [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),\n *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),\n ]\n\n# Test and Debug environments replace real 404 and 500 error with stack traces.\n# We expose real 404 and 500 pages with separated urls to test them.\nif settings.TEST_ENV or settings.DEBUG:\n urlpatterns += [\n url(r'^404/$', TemplateView.as_view(template_name='404.html')),\n url(r'^500/$', TemplateView.as_view(template_name='500.html')),\n ]\n", "path": "shopelectro/urls.py"}]} | 2,018 | 324 |
gh_patches_debug_56718 | rasdani/github-patches | git_diff | mosaicml__composer-293 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ResNet56 default num_classes argument
## 🚀 Feature Request
The `num_classes` argument for [ResNet56_cifar10](https://github.com/mosaicml/composer/blob/main/composer/models/resnet56_cifar10/model.py) should have a default value `num_classes=10`.
## Motivation
It felt silly when writing a demo notebook to have to specify `num_classes=10` when calling `composer.models.CIFAR10_ResNet56(num_classes=10)`. The model has "cifar10" in its name, and even if it didn't, its most common use is for CIFAR-10.
## Implementation
Does it require any changes beyond the `__init__()` signature?
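For illustration, a minimal sketch of what the change could look like — only the default in the signature moves; the imports mirror the model file shown below:

```python
from typing import List, Optional

from composer.models.base import MosaicClassifier
from composer.models.model_hparams import Initializer


class CIFAR10_ResNet56(MosaicClassifier):
    def __init__(
        self,
        num_classes: int = 10,  # CIFAR-10 has ten classes
        initializers: Optional[List[Initializer]] = None,
    ) -> None:
        ...  # body unchanged
```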
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `composer/models/resnet56_cifar10/model.py`
Content:
```
1 # Copyright 2021 MosaicML. All Rights Reserved.
2
3 from typing import List, Optional
4
5 from composer.models.base import MosaicClassifier
6 from composer.models.model_hparams import Initializer
7 from composer.models.resnets import CIFAR_ResNet
8
9
10 class CIFAR10_ResNet56(MosaicClassifier):
11 """A ResNet-56 model extending :class:`MosaicClassifier`.
12
13 See this `paper <https://arxiv.org/abs/1512.03385>`_ for details
14 on the residual network architecture.
15
16 Args:
17 num_classes (int): The number of classes for the model.
18 initializers (List[Initializer], optional): Initializers
19 for the model. ``None`` for no initialization.
20 (default: ``None``)
21 """
22
23 def __init__(
24 self,
25 num_classes: int,
26 initializers: Optional[List[Initializer]] = None,
27 ) -> None:
28 if initializers is None:
29 initializers = []
30
31 model = CIFAR_ResNet.get_model_from_name(
32 "cifar_resnet_56",
33 initializers,
34 num_classes,
35 )
36 super().__init__(module=model)
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/composer/models/resnet56_cifar10/model.py b/composer/models/resnet56_cifar10/model.py
--- a/composer/models/resnet56_cifar10/model.py
+++ b/composer/models/resnet56_cifar10/model.py
@@ -22,7 +22,7 @@
def __init__(
self,
- num_classes: int,
+ num_classes: int = 10,
initializers: Optional[List[Initializer]] = None,
) -> None:
if initializers is None:
| {"golden_diff": "diff --git a/composer/models/resnet56_cifar10/model.py b/composer/models/resnet56_cifar10/model.py\n--- a/composer/models/resnet56_cifar10/model.py\n+++ b/composer/models/resnet56_cifar10/model.py\n@@ -22,7 +22,7 @@\n \n def __init__(\n self,\n- num_classes: int,\n+ num_classes: int = 10,\n initializers: Optional[List[Initializer]] = None,\n ) -> None:\n if initializers is None:\n", "issue": "ResNet56 default num_classes argument\n## \ud83d\ude80 Feature Request\r\nThe `num_classes` argument for [ResNet56_cifar10](https://github.com/mosaicml/composer/blob/main/composer/models/resnet56_cifar10/model.py) should have a default value `num_classes=10`.\r\n\r\n## Motivation\r\n\r\nIt felt silly when writing a demo notebook to have to specify `num_classes=10` when calling `composer.models.CIFAR10_ResNet56(num_classes=10)`. The model has \"cifar10\" in its name, and even if it didn't, it's most common use is for cifar10.\r\n\r\n## Implementation\r\n\r\nDoes it require any changes beyond the `__init__()` signature?\n", "before_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nfrom typing import List, Optional\n\nfrom composer.models.base import MosaicClassifier\nfrom composer.models.model_hparams import Initializer\nfrom composer.models.resnets import CIFAR_ResNet\n\n\nclass CIFAR10_ResNet56(MosaicClassifier):\n \"\"\"A ResNet-56 model extending :class:`MosaicClassifier`.\n\n See this `paper <https://arxiv.org/abs/1512.03385>`_ for details\n on the residual network architecture.\n\n Args:\n num_classes (int): The number of classes for the model.\n initializers (List[Initializer], optional): Initializers\n for the model. ``None`` for no initialization.\n (default: ``None``)\n \"\"\"\n\n def __init__(\n self,\n num_classes: int,\n initializers: Optional[List[Initializer]] = None,\n ) -> None:\n if initializers is None:\n initializers = []\n\n model = CIFAR_ResNet.get_model_from_name(\n \"cifar_resnet_56\",\n initializers,\n num_classes,\n )\n super().__init__(module=model)\n", "path": "composer/models/resnet56_cifar10/model.py"}], "after_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nfrom typing import List, Optional\n\nfrom composer.models.base import MosaicClassifier\nfrom composer.models.model_hparams import Initializer\nfrom composer.models.resnets import CIFAR_ResNet\n\n\nclass CIFAR10_ResNet56(MosaicClassifier):\n \"\"\"A ResNet-56 model extending :class:`MosaicClassifier`.\n\n See this `paper <https://arxiv.org/abs/1512.03385>`_ for details\n on the residual network architecture.\n\n Args:\n num_classes (int): The number of classes for the model.\n initializers (List[Initializer], optional): Initializers\n for the model. ``None`` for no initialization.\n (default: ``None``)\n \"\"\"\n\n def __init__(\n self,\n num_classes: int = 10,\n initializers: Optional[List[Initializer]] = None,\n ) -> None:\n if initializers is None:\n initializers = []\n\n model = CIFAR_ResNet.get_model_from_name(\n \"cifar_resnet_56\",\n initializers,\n num_classes,\n )\n super().__init__(module=model)\n", "path": "composer/models/resnet56_cifar10/model.py"}]} | 756 | 127 |
gh_patches_debug_1114 | rasdani/github-patches | git_diff | Pylons__pyramid-2224 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update to Sphinx 1.3.4 when released
There is a [bug in Sphinx 1.3.3 and 1.3.1](https://github.com/sphinx-doc/sphinx/issues/2189) (I haven't tried 1.3.2) where next and previous links in Sphinx documentation are broken when going into children and across sibling directories.
When 1.3.4 is released, we need to pin sphinx to 1.3.4, which will include the commit made 8 days after the 1.3.3 release.
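For reference, a hedged sketch of the corresponding pin in `setup.py` — the list mirrors `docs_extras` from the file shown below, with only the version floor raised:

```python
docs_extras = [
    'Sphinx >= 1.3.4',  # 1.3.1/1.3.3 ship broken next/previous links
    'docutils',
    'repoze.sphinx.autointerface',
    'pylons_sphinx_latesturl',
    'pylons-sphinx-themes',
    'sphinxcontrib-programoutput',
]
```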
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 ##############################################################################
2 #
3 # Copyright (c) 2008-2013 Agendaless Consulting and Contributors.
4 # All Rights Reserved.
5 #
6 # This software is subject to the provisions of the BSD-like license at
7 # http://www.repoze.org/LICENSE.txt. A copy of the license should accompany
8 # this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL
9 # EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,
10 # THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND
11 # FITNESS FOR A PARTICULAR PURPOSE
12 #
13 ##############################################################################
14
15 import os
16 import sys
17
18 from setuptools import setup, find_packages
19
20 py_version = sys.version_info[:2]
21
22 PY3 = py_version[0] == 3
23
24 if PY3:
25 if py_version < (3, 2):
26 raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better')
27 else:
28 if py_version < (2, 6):
29 raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')
30
31 here = os.path.abspath(os.path.dirname(__file__))
32 try:
33 with open(os.path.join(here, 'README.rst')) as f:
34 README = f.read()
35 with open(os.path.join(here, 'CHANGES.txt')) as f:
36 CHANGES = f.read()
37 except IOError:
38 README = CHANGES = ''
39
40 install_requires=[
41 'setuptools',
42 'WebOb >= 1.3.1', # request.domain and CookieProfile
43 'repoze.lru >= 0.4', # py3 compat
44 'zope.interface >= 3.8.0', # has zope.interface.registry
45 'zope.deprecation >= 3.5.0', # py3 compat
46 'venusian >= 1.0a3', # ``ignore``
47 'translationstring >= 0.4', # py3 compat
48 'PasteDeploy >= 1.5.0', # py3 compat
49 ]
50
51 tests_require = [
52 'WebTest >= 1.3.1', # py3 compat
53 ]
54
55 if not PY3:
56 tests_require.append('zope.component>=3.11.0')
57
58 docs_extras = [
59 'Sphinx >= 1.3.1',
60 'docutils',
61 'repoze.sphinx.autointerface',
62 'pylons_sphinx_latesturl',
63 'pylons-sphinx-themes',
64 'sphinxcontrib-programoutput',
65 ]
66
67 testing_extras = tests_require + [
68 'nose',
69 'coverage',
70 'virtualenv', # for scaffolding tests
71 ]
72
73 setup(name='pyramid',
74 version='1.7.dev0',
75 description='The Pyramid Web Framework, a Pylons project',
76 long_description=README + '\n\n' + CHANGES,
77 classifiers=[
78 "Development Status :: 6 - Mature",
79 "Intended Audience :: Developers",
80 "Programming Language :: Python",
81 "Programming Language :: Python :: 2.6",
82 "Programming Language :: Python :: 2.7",
83 "Programming Language :: Python :: 3",
84 "Programming Language :: Python :: 3.2",
85 "Programming Language :: Python :: 3.3",
86 "Programming Language :: Python :: 3.4",
87 "Programming Language :: Python :: 3.5",
88 "Programming Language :: Python :: Implementation :: CPython",
89 "Programming Language :: Python :: Implementation :: PyPy",
90 "Framework :: Pyramid",
91 "Topic :: Internet :: WWW/HTTP",
92 "Topic :: Internet :: WWW/HTTP :: WSGI",
93 "License :: Repoze Public License",
94 ],
95 keywords='web wsgi pylons pyramid',
96 author="Chris McDonough, Agendaless Consulting",
97 author_email="[email protected]",
98 url="http://docs.pylonsproject.org/en/latest/docs/pyramid.html",
99 license="BSD-derived (http://www.repoze.org/LICENSE.txt)",
100 packages=find_packages(),
101 include_package_data=True,
102 zip_safe=False,
103 install_requires = install_requires,
104 extras_require = {
105 'testing':testing_extras,
106 'docs':docs_extras,
107 },
108 tests_require = tests_require,
109 test_suite="pyramid.tests",
110 entry_points = """\
111 [pyramid.scaffold]
112 starter=pyramid.scaffolds:StarterProjectTemplate
113 zodb=pyramid.scaffolds:ZODBProjectTemplate
114 alchemy=pyramid.scaffolds:AlchemyProjectTemplate
115 [pyramid.pshell_runner]
116 python=pyramid.scripts.pshell:python_shell_runner
117 [console_scripts]
118 pcreate = pyramid.scripts.pcreate:main
119 pserve = pyramid.scripts.pserve:main
120 pshell = pyramid.scripts.pshell:main
121 proutes = pyramid.scripts.proutes:main
122 pviews = pyramid.scripts.pviews:main
123 ptweens = pyramid.scripts.ptweens:main
124 prequest = pyramid.scripts.prequest:main
125 pdistreport = pyramid.scripts.pdistreport:main
126 [paste.server_runner]
127 wsgiref = pyramid.scripts.pserve:wsgiref_server_runner
128 cherrypy = pyramid.scripts.pserve:cherrypy_server_runner
129 """
130 )
131
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -56,7 +56,7 @@
tests_require.append('zope.component>=3.11.0')
docs_extras = [
- 'Sphinx >= 1.3.1',
+ 'Sphinx >= 1.3.4',
'docutils',
'repoze.sphinx.autointerface',
'pylons_sphinx_latesturl',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -56,7 +56,7 @@\n tests_require.append('zope.component>=3.11.0')\n \n docs_extras = [\n- 'Sphinx >= 1.3.1',\n+ 'Sphinx >= 1.3.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n", "issue": "Update to Sphinx 1.3.4 when released\nThere is a [bug in Sphinx 1.3.3 and 1.3.1](https://github.com/sphinx-doc/sphinx/issues/2189) (I haven't tried 1.3.2) where next and previous links in Sphinx documentation are broken when going into children and across sibling directories.\n\nWhen 1.3.4 is released, we need to pin sphinx to 1.3.4, which will include the commit made 8 days after the 1.3.3 release.\n\n", "before_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\npy_version = sys.version_info[:2]\n\nPY3 = py_version[0] == 3\n\nif PY3:\n if py_version < (3, 2):\n raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better')\nelse:\n if py_version < (2, 6):\n raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires=[\n 'setuptools',\n 'WebOb >= 1.3.1', # request.domain and CookieProfile\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n ]\n\nif not PY3:\n tests_require.append('zope.component>=3.11.0')\n\ndocs_extras = [\n 'Sphinx >= 1.3.1',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-programoutput',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.7.dev0',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n 
\"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"http://docs.pylonsproject.org/en/latest/docs/pyramid.html\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires = install_requires,\n extras_require = {\n 'testing':testing_extras,\n 'docs':docs_extras,\n },\n tests_require = tests_require,\n test_suite=\"pyramid.tests\",\n entry_points = \"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n\n", "path": "setup.py"}], "after_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. 
THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\npy_version = sys.version_info[:2]\n\nPY3 = py_version[0] == 3\n\nif PY3:\n if py_version < (3, 2):\n raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better')\nelse:\n if py_version < (2, 6):\n raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires=[\n 'setuptools',\n 'WebOb >= 1.3.1', # request.domain and CookieProfile\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n ]\n\nif not PY3:\n tests_require.append('zope.component>=3.11.0')\n\ndocs_extras = [\n 'Sphinx >= 1.3.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-programoutput',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.7.dev0',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"http://docs.pylonsproject.org/en/latest/docs/pyramid.html\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires = install_requires,\n extras_require = {\n 'testing':testing_extras,\n 'docs':docs_extras,\n },\n tests_require = tests_require,\n test_suite=\"pyramid.tests\",\n entry_points = \"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n 
pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n\n", "path": "setup.py"}]} | 1,823 | 106 |
gh_patches_debug_25116 | rasdani/github-patches | git_diff | lutris__lutris-2682 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Failure to read Steam's config.vdf due to wrong case
Lutris can't read Steam's config.vdf file because the "Steam" value is actually lowercase when Lutris expects it to be uppercase.

Same as #1966.
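A self-contained sketch of one way to make the lookup tolerant of key casing; the nested dict below is a stand-in for a parsed `config.vdf`:

```python
def get_entry_case_insensitive(config_dict, path):
    """Walk ``config_dict`` along ``path``, matching keys case-insensitively."""
    for key, value in config_dict.items():
        if key.lower() == path[0].lower():
            if len(path) <= 1:
                return value
            return get_entry_case_insensitive(value, path[1:])
    raise KeyError(path[0])


# Stand-in for a parsed config.vdf where the key was written lowercase.
config = {"InstallConfigStore": {"Software": {"valve": {"steam": {}}}}}
print(get_entry_case_insensitive(
    config, ["InstallConfigStore", "Software", "Valve", "Steam"]))  # -> {}
```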
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/util/steam/config.py`
Content:
```
1 """Handle Steam configuration"""
2 import os
3 from collections import OrderedDict, defaultdict
4
5 from lutris.util import system
6 from lutris.util.log import logger
7 from lutris.util.steam.vdf import vdf_parse
8
9
10 def get_default_acf(appid, name):
11 """Return a default configuration usable to
12 create a runnable game in Steam"""
13
14 userconfig = OrderedDict()
15 userconfig["name"] = name
16 userconfig["gameid"] = appid
17
18 appstate = OrderedDict()
19 appstate["appID"] = appid
20 appstate["Universe"] = "1"
21 appstate["StateFlags"] = "1026"
22 appstate["installdir"] = name
23 appstate["UserConfig"] = userconfig
24 return {"AppState": appstate}
25
26
27 def read_config(steam_data_dir):
28 """Read the Steam configuration and return it as an object"""
29 config_filename = os.path.join(steam_data_dir, "config/config.vdf")
30 if not system.path_exists(config_filename):
31 return None
32 with open(config_filename, "r") as steam_config_file:
33 config = vdf_parse(steam_config_file, {})
34 try:
35 return config["InstallConfigStore"]["Software"]["Valve"]["Steam"]
36 except KeyError:
37 try:
38 return config["InstallConfigStore"]["Software"]["valve"]["Steam"]
39 except KeyError as ex:
40 logger.error("Steam config %s is empty: %s", config_filename, ex)
41
42
43 def get_steamapps_paths_for_platform(platform_name):
44 """
45 """
46 from lutris.runners import winesteam, steam
47
48 runners = {"linux": steam.steam, "windows": winesteam.winesteam}
49 runner = runners[platform_name]()
50 return runner.get_steamapps_dirs()
51
52
53 def get_steamapps_paths(flat=False, platform=None):
54 base_platforms = ["linux", "windows"]
55 if flat:
56 steamapps_paths = []
57 else:
58 steamapps_paths = defaultdict(list)
59
60 if platform:
61 if platform not in base_platforms:
62 raise ValueError("Illegal value for Steam platform: %s" % platform)
63 platforms = [platform]
64 else:
65 platforms = base_platforms
66
67 for _platform in platforms:
68 folders = get_steamapps_paths_for_platform(_platform)
69 if flat:
70 steamapps_paths += folders
71 else:
72 steamapps_paths[_platform] = folders
73
74 return steamapps_paths
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/util/steam/config.py b/lutris/util/steam/config.py
--- a/lutris/util/steam/config.py
+++ b/lutris/util/steam/config.py
@@ -26,18 +26,25 @@
def read_config(steam_data_dir):
"""Read the Steam configuration and return it as an object"""
+
+ def get_entry_case_insensitive(config_dict, path):
+ for key, value in config_dict.items():
+ if key.lower() == path[0].lower():
+ if len(path) <= 1:
+ return config_dict[key]
+
+ return get_entry_case_insensitive(config_dict[key], path[1:])
+ raise KeyError(path[0])
+
config_filename = os.path.join(steam_data_dir, "config/config.vdf")
if not system.path_exists(config_filename):
return None
with open(config_filename, "r") as steam_config_file:
config = vdf_parse(steam_config_file, {})
try:
- return config["InstallConfigStore"]["Software"]["Valve"]["Steam"]
- except KeyError:
- try:
- return config["InstallConfigStore"]["Software"]["valve"]["Steam"]
- except KeyError as ex:
- logger.error("Steam config %s is empty: %s", config_filename, ex)
+ return get_entry_case_insensitive(config, ["InstallConfigStore", "Software", "Valve", "Steam"])
+ except KeyError as ex:
+ logger.error("Steam config %s is empty: %s", config_filename, ex)
def get_steamapps_paths_for_platform(platform_name):
| {"golden_diff": "diff --git a/lutris/util/steam/config.py b/lutris/util/steam/config.py\n--- a/lutris/util/steam/config.py\n+++ b/lutris/util/steam/config.py\n@@ -26,18 +26,25 @@\n \n def read_config(steam_data_dir):\n \"\"\"Read the Steam configuration and return it as an object\"\"\"\n+\n+ def get_entry_case_insensitive(config_dict, path):\n+ for key, value in config_dict.items():\n+ if key.lower() == path[0].lower():\n+ if len(path) <= 1:\n+ return config_dict[key]\n+\n+ return get_entry_case_insensitive(config_dict[key], path[1:])\n+ raise KeyError(path[0])\n+\n config_filename = os.path.join(steam_data_dir, \"config/config.vdf\")\n if not system.path_exists(config_filename):\n return None\n with open(config_filename, \"r\") as steam_config_file:\n config = vdf_parse(steam_config_file, {})\n try:\n- return config[\"InstallConfigStore\"][\"Software\"][\"Valve\"][\"Steam\"]\n- except KeyError:\n- try:\n- return config[\"InstallConfigStore\"][\"Software\"][\"valve\"][\"Steam\"]\n- except KeyError as ex:\n- logger.error(\"Steam config %s is empty: %s\", config_filename, ex)\n+ return get_entry_case_insensitive(config, [\"InstallConfigStore\", \"Software\", \"Valve\", \"Steam\"])\n+ except KeyError as ex:\n+ logger.error(\"Steam config %s is empty: %s\", config_filename, ex)\n \n \n def get_steamapps_paths_for_platform(platform_name):\n", "issue": "Failure to read Steam's config.vdf due to wrong case\nLutris can't read Steam's config.vdf file because the \"Steam\" value is actually lowercase when Lutris expects it to be uppercase.\r\n\r\n\r\n\r\nSame as #1966.\nFailure to read Steam's config.vdf due to wrong case\nLutris can't read Steam's config.vdf file because the \"Steam\" value is actually lowercase when Lutris expects it to be uppercase.\r\n\r\n\r\n\r\nSame as #1966.\n", "before_files": [{"content": "\"\"\"Handle Steam configuration\"\"\"\nimport os\nfrom collections import OrderedDict, defaultdict\n\nfrom lutris.util import system\nfrom lutris.util.log import logger\nfrom lutris.util.steam.vdf import vdf_parse\n\n\ndef get_default_acf(appid, name):\n \"\"\"Return a default configuration usable to\n create a runnable game in Steam\"\"\"\n\n userconfig = OrderedDict()\n userconfig[\"name\"] = name\n userconfig[\"gameid\"] = appid\n\n appstate = OrderedDict()\n appstate[\"appID\"] = appid\n appstate[\"Universe\"] = \"1\"\n appstate[\"StateFlags\"] = \"1026\"\n appstate[\"installdir\"] = name\n appstate[\"UserConfig\"] = userconfig\n return {\"AppState\": appstate}\n\n\ndef read_config(steam_data_dir):\n \"\"\"Read the Steam configuration and return it as an object\"\"\"\n config_filename = os.path.join(steam_data_dir, \"config/config.vdf\")\n if not system.path_exists(config_filename):\n return None\n with open(config_filename, \"r\") as steam_config_file:\n config = vdf_parse(steam_config_file, {})\n try:\n return config[\"InstallConfigStore\"][\"Software\"][\"Valve\"][\"Steam\"]\n except KeyError:\n try:\n return config[\"InstallConfigStore\"][\"Software\"][\"valve\"][\"Steam\"]\n except KeyError as ex:\n logger.error(\"Steam config %s is empty: %s\", config_filename, ex)\n\n\ndef get_steamapps_paths_for_platform(platform_name):\n \"\"\"\n \"\"\"\n from lutris.runners import winesteam, steam\n\n runners = {\"linux\": steam.steam, \"windows\": winesteam.winesteam}\n runner = runners[platform_name]()\n return runner.get_steamapps_dirs()\n\n\ndef get_steamapps_paths(flat=False, platform=None):\n base_platforms = [\"linux\", \"windows\"]\n if flat:\n steamapps_paths = []\n else:\n 
steamapps_paths = defaultdict(list)\n\n if platform:\n if platform not in base_platforms:\n raise ValueError(\"Illegal value for Steam platform: %s\" % platform)\n platforms = [platform]\n else:\n platforms = base_platforms\n\n for _platform in platforms:\n folders = get_steamapps_paths_for_platform(_platform)\n if flat:\n steamapps_paths += folders\n else:\n steamapps_paths[_platform] = folders\n\n return steamapps_paths\n", "path": "lutris/util/steam/config.py"}], "after_files": [{"content": "\"\"\"Handle Steam configuration\"\"\"\nimport os\nfrom collections import OrderedDict, defaultdict\n\nfrom lutris.util import system\nfrom lutris.util.log import logger\nfrom lutris.util.steam.vdf import vdf_parse\n\n\ndef get_default_acf(appid, name):\n \"\"\"Return a default configuration usable to\n create a runnable game in Steam\"\"\"\n\n userconfig = OrderedDict()\n userconfig[\"name\"] = name\n userconfig[\"gameid\"] = appid\n\n appstate = OrderedDict()\n appstate[\"appID\"] = appid\n appstate[\"Universe\"] = \"1\"\n appstate[\"StateFlags\"] = \"1026\"\n appstate[\"installdir\"] = name\n appstate[\"UserConfig\"] = userconfig\n return {\"AppState\": appstate}\n\n\ndef read_config(steam_data_dir):\n \"\"\"Read the Steam configuration and return it as an object\"\"\"\n\n def get_entry_case_insensitive(config_dict, path):\n for key, value in config_dict.items():\n if key.lower() == path[0].lower():\n if len(path) <= 1:\n return config_dict[key]\n\n return get_entry_case_insensitive(config_dict[key], path[1:])\n raise KeyError(path[0])\n\n config_filename = os.path.join(steam_data_dir, \"config/config.vdf\")\n if not system.path_exists(config_filename):\n return None\n with open(config_filename, \"r\") as steam_config_file:\n config = vdf_parse(steam_config_file, {})\n try:\n return get_entry_case_insensitive(config, [\"InstallConfigStore\", \"Software\", \"Valve\", \"Steam\"])\n except KeyError as ex:\n logger.error(\"Steam config %s is empty: %s\", config_filename, ex)\n\n\ndef get_steamapps_paths_for_platform(platform_name):\n \"\"\"\n \"\"\"\n from lutris.runners import winesteam, steam\n\n runners = {\"linux\": steam.steam, \"windows\": winesteam.winesteam}\n runner = runners[platform_name]()\n return runner.get_steamapps_dirs()\n\n\ndef get_steamapps_paths(flat=False, platform=None):\n base_platforms = [\"linux\", \"windows\"]\n if flat:\n steamapps_paths = []\n else:\n steamapps_paths = defaultdict(list)\n\n if platform:\n if platform not in base_platforms:\n raise ValueError(\"Illegal value for Steam platform: %s\" % platform)\n platforms = [platform]\n else:\n platforms = base_platforms\n\n for _platform in platforms:\n folders = get_steamapps_paths_for_platform(_platform)\n if flat:\n steamapps_paths += folders\n else:\n steamapps_paths[_platform] = folders\n\n return steamapps_paths\n", "path": "lutris/util/steam/config.py"}]} | 1,171 | 353 |
gh_patches_debug_29875 | rasdani/github-patches | git_diff | streamlink__streamlink-1727 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Request: Add more functions to facebook plugin
### Checklist
- [x] This is a bug report.
- [ ] This is a feature request.
- [x] This is a plugin (improvement) request.
- [ ] I have read the contribution guidelines.
### Description
Reminder that with the new initial support for MPEG-DASH, #880 and #990 might be fixable now, depending on what Streamlink supports and how Facebook's videos and livestreaming have changed since this was last looked at.
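For context, a minimal hedged sketch of handing a DASH manifest to Streamlink's MPEG-DASH support; the manifest URL is a placeholder, and `DASHStream.parse_manifest` is assumed from Streamlink's stream API:

```python
from streamlink import Streamlink
from streamlink.stream import DASHStream

session = Streamlink()
# Returns a mapping of quality name -> DASHStream for the given manifest.
streams = DASHStream.parse_manifest(session, "https://example.com/video.mpd")
```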
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/facebook.py`
Content:
```
1 import re
2
3 from streamlink.plugin import Plugin
4 from streamlink.stream import HLSStream
5
6 _playlist_url = "https://www.facebook.com/video/playback/playlist.m3u8?v={0}"
7
8 _url_re = re.compile(r"http(s)?://(www\.)?facebook\.com/[^/]+/videos/(?P<video_id>\d+)")
9
10
11 class Facebook(Plugin):
12 @classmethod
13 def can_handle_url(cls, url):
14 return _url_re.match(url)
15
16 @Plugin.broken(990)
17 def _get_streams(self):
18 match = _url_re.match(self.url)
19 video = match.group("video_id")
20
21 playlist = _playlist_url.format(video)
22
23 return HLSStream.parse_variant_playlist(self.session, playlist)
24
25
26 __plugin__ = Facebook
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/facebook.py b/src/streamlink/plugins/facebook.py
--- a/src/streamlink/plugins/facebook.py
+++ b/src/streamlink/plugins/facebook.py
@@ -1,26 +1,42 @@
import re
from streamlink.plugin import Plugin
-from streamlink.stream import HLSStream
-
-_playlist_url = "https://www.facebook.com/video/playback/playlist.m3u8?v={0}"
-
-_url_re = re.compile(r"http(s)?://(www\.)?facebook\.com/[^/]+/videos/(?P<video_id>\d+)")
+from streamlink.plugin.api import http, useragents
+from streamlink.stream import DASHStream, HTTPStream
+from streamlink.utils import parse_json
class Facebook(Plugin):
+ _url_re = re.compile(r"https?://(?:www\.)?facebook\.com/[^/]+/videos")
+ _mpd_re = re.compile(r'''(sd|hd)_src["']?\s*:\s*(?P<quote>["'])(?P<url>.+?)(?P=quote)''')
+ _playlist_re = re.compile(r'''video:\[({url:".+?}\])''')
+ _plurl_re = re.compile(r'''url:"(.*?)"''')
+
@classmethod
def can_handle_url(cls, url):
- return _url_re.match(url)
+ return cls._url_re.match(url)
- @Plugin.broken(990)
def _get_streams(self):
- match = _url_re.match(self.url)
- video = match.group("video_id")
+ res = http.get(self.url, headers={"User-Agent": useragents.CHROME})
+ with open("temp.html", "w") as f:
+ f.write(res.text)
+
+ for match in self._mpd_re.finditer(res.text):
+ manifest_url = match.group("url")
+ if "\\/" in manifest_url:
+ # if the URL is json encoded, decode it
+ manifest_url = parse_json("\"{}\"".format(manifest_url))
+ for s in DASHStream.parse_manifest(self.session, manifest_url).items():
+ yield s
+ else:
+ match = self._playlist_re.search(res.text)
+ playlist = match and match.group(1)
+ if playlist:
+ for url in {url.group(1) for url in self._plurl_re.finditer(playlist)}:
+ yield "live", HTTPStream(self.session, url)
+
- playlist = _playlist_url.format(video)
- return HLSStream.parse_variant_playlist(self.session, playlist)
__plugin__ = Facebook
| {"golden_diff": "diff --git a/src/streamlink/plugins/facebook.py b/src/streamlink/plugins/facebook.py\n--- a/src/streamlink/plugins/facebook.py\n+++ b/src/streamlink/plugins/facebook.py\n@@ -1,26 +1,42 @@\n import re\n \n from streamlink.plugin import Plugin\n-from streamlink.stream import HLSStream\n-\n-_playlist_url = \"https://www.facebook.com/video/playback/playlist.m3u8?v={0}\"\n-\n-_url_re = re.compile(r\"http(s)?://(www\\.)?facebook\\.com/[^/]+/videos/(?P<video_id>\\d+)\")\n+from streamlink.plugin.api import http, useragents\n+from streamlink.stream import DASHStream, HTTPStream\n+from streamlink.utils import parse_json\n \n \n class Facebook(Plugin):\n+ _url_re = re.compile(r\"https?://(?:www\\.)?facebook\\.com/[^/]+/videos\")\n+ _mpd_re = re.compile(r'''(sd|hd)_src[\"']?\\s*:\\s*(?P<quote>[\"'])(?P<url>.+?)(?P=quote)''')\n+ _playlist_re = re.compile(r'''video:\\[({url:\".+?}\\])''')\n+ _plurl_re = re.compile(r'''url:\"(.*?)\"''')\n+\n @classmethod\n def can_handle_url(cls, url):\n- return _url_re.match(url)\n+ return cls._url_re.match(url)\n \n- @Plugin.broken(990)\n def _get_streams(self):\n- match = _url_re.match(self.url)\n- video = match.group(\"video_id\")\n+ res = http.get(self.url, headers={\"User-Agent\": useragents.CHROME})\n+ with open(\"temp.html\", \"w\") as f:\n+ f.write(res.text)\n+\n+ for match in self._mpd_re.finditer(res.text):\n+ manifest_url = match.group(\"url\")\n+ if \"\\\\/\" in manifest_url:\n+ # if the URL is json encoded, decode it\n+ manifest_url = parse_json(\"\\\"{}\\\"\".format(manifest_url))\n+ for s in DASHStream.parse_manifest(self.session, manifest_url).items():\n+ yield s\n+ else:\n+ match = self._playlist_re.search(res.text)\n+ playlist = match and match.group(1)\n+ if playlist:\n+ for url in {url.group(1) for url in self._plurl_re.finditer(playlist)}:\n+ yield \"live\", HTTPStream(self.session, url)\n+\n \n- playlist = _playlist_url.format(video)\n \n- return HLSStream.parse_variant_playlist(self.session, playlist)\n \n \n __plugin__ = Facebook\n", "issue": "Request: Add more functions to facebook plugin\n### Checklist\r\n\r\n- [x] This is a bug report.\r\n- [ ] This is a feature request.\r\n- [x] This is a plugin (improvement) request.\r\n- [ ] I have read the contribution guidelines.\r\n\r\n### Description\r\nReminder that with the new initial support of Mpeg Dash #880 and #990 might be fixable now, depending on what streamlink supports and how Facebook's videos and livestreaming has changed since this was last looked it.\r\n\n", "before_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.stream import HLSStream\n\n_playlist_url = \"https://www.facebook.com/video/playback/playlist.m3u8?v={0}\"\n\n_url_re = re.compile(r\"http(s)?://(www\\.)?facebook\\.com/[^/]+/videos/(?P<video_id>\\d+)\")\n\n\nclass Facebook(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n @Plugin.broken(990)\n def _get_streams(self):\n match = _url_re.match(self.url)\n video = match.group(\"video_id\")\n\n playlist = _playlist_url.format(video)\n\n return HLSStream.parse_variant_playlist(self.session, playlist)\n\n\n__plugin__ = Facebook\n", "path": "src/streamlink/plugins/facebook.py"}], "after_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, useragents\nfrom streamlink.stream import DASHStream, HTTPStream\nfrom streamlink.utils import parse_json\n\n\nclass Facebook(Plugin):\n _url_re = 
re.compile(r\"https?://(?:www\\.)?facebook\\.com/[^/]+/videos\")\n _mpd_re = re.compile(r'''(sd|hd)_src[\"']?\\s*:\\s*(?P<quote>[\"'])(?P<url>.+?)(?P=quote)''')\n _playlist_re = re.compile(r'''video:\\[({url:\".+?}\\])''')\n _plurl_re = re.compile(r'''url:\"(.*?)\"''')\n\n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url)\n\n def _get_streams(self):\n res = http.get(self.url, headers={\"User-Agent\": useragents.CHROME})\n with open(\"temp.html\", \"w\") as f:\n f.write(res.text)\n\n for match in self._mpd_re.finditer(res.text):\n manifest_url = match.group(\"url\")\n if \"\\\\/\" in manifest_url:\n # if the URL is json encoded, decode it\n manifest_url = parse_json(\"\\\"{}\\\"\".format(manifest_url))\n for s in DASHStream.parse_manifest(self.session, manifest_url).items():\n yield s\n else:\n match = self._playlist_re.search(res.text)\n playlist = match and match.group(1)\n if playlist:\n for url in {url.group(1) for url in self._plurl_re.finditer(playlist)}:\n yield \"live\", HTTPStream(self.session, url)\n\n\n\n\n\n__plugin__ = Facebook\n", "path": "src/streamlink/plugins/facebook.py"}]} | 590 | 584 |
gh_patches_debug_126 | rasdani/github-patches | git_diff | holoviz__panel-3990 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clearing value of a DatetimePicker
#### Description of expected behavior and the observed behavior
Not sure if this is a bug or a feature request for Panel. Let's say I have a layout consisting of a button named "Edit", a DatetimePicker that starts disabled with no default value, and a button named "Submit". At initialization, the value of the DatetimePicker is `None`. The way these objects interact is as follows:
- Click "Edit" button, DatetimePicker is enabled so user can select a specific time value.
- Click "Submit" button, the selected time value will be pushed to the DB, and the DatetimePicker will be disabled and reset back to Null.
I have tried several ways with no success in clearing the value of the DatetimePicker.
#### Complete, minimal, self-contained example code that reproduces the issue
```
import datetime
import panel as pn

time_widget = pn.widgets.DatetimePicker(disabled=True)
time_widget.value = datetime.datetime.now()

# how to set the value back to None?
time_widget.value = None  # None, pandas.NaT and np.nan all raise errors
```
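For completeness, a hedged sketch of what clearing would look like if the underlying Bokeh `value` property accepted `None` (i.e., were declared `Nullable`):

```python
import panel as pn

time_widget = pn.widgets.DatetimePicker(disabled=True)
time_widget.value = None  # valid once the model property is Nullable(String)
```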
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/models/datetime_picker.py`
Content:
```
1 from bokeh.core.enums import CalendarPosition
2 from bokeh.core.properties import (
3 Bool, Date, Datetime, Either, Enum, List, Nullable, String, Tuple,
4 )
5 from bokeh.models.widgets.inputs import InputWidget
6
7
8 class DatetimePicker(InputWidget):
9 ''' Calendar-based date picker widget.
10
11 '''
12
13 value = String(help="""
14 The initial or picked date.
15 """)
16
17 min_date = Nullable(Either(Date, Datetime), help="""
18 Optional earliest allowable date.
19 """)
20
21 max_date = Nullable(Either(Date, Datetime), help="""
22 Optional latest allowable date.
23 """)
24
25 disabled_dates = List(Either(Date, Datetime, Tuple(Date, Date), Tuple(Datetime, Datetime)), default=[], help="""
26 A list of dates of ``(start, end)`` date ranges to make unavailable for
27     selection. All other dates will be available.
28
29 .. note::
30 Only one of ``disabled_dates`` and ``enabled_dates`` should be specified.
31 """)
32
33 enabled_dates = List(Either(Date, Datetime, Tuple(Date, Date), Tuple(Datetime, Datetime)), default=[], help="""
34 A list of dates of ``(start, end)`` date ranges to make available for
35 selection. All other dates will be unavailable.
36
37 .. note::
38 Only one of ``disabled_dates`` and ``enabled_dates`` should be specified.
39 """)
40
41 position = Enum(CalendarPosition, default="auto", help="""
42 Where the calendar is rendered relative to the input when ``inline`` is False.
43 """)
44
45 inline = Bool(default=False, help="""
46     Whether the calendar should be displayed inline.
47 """)
48
49 enable_time = Bool(default=True)
50
51 enable_seconds = Bool(default=True)
52
53 military_time = Bool(default=True)
54
55 date_format = String("Y-m-d H:i:S")
56
57 mode = String(default="single", help="""
58 Should either be "single" or "range".""")
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/models/datetime_picker.py b/panel/models/datetime_picker.py
--- a/panel/models/datetime_picker.py
+++ b/panel/models/datetime_picker.py
@@ -10,7 +10,7 @@
'''
- value = String(help="""
+ value = Nullable(String, help="""
The initial or picked date.
""")
| {"golden_diff": "diff --git a/panel/models/datetime_picker.py b/panel/models/datetime_picker.py\n--- a/panel/models/datetime_picker.py\n+++ b/panel/models/datetime_picker.py\n@@ -10,7 +10,7 @@\n \n '''\n \n- value = String(help=\"\"\"\n+ value = Nullable(String, help=\"\"\"\n The initial or picked date.\n \"\"\")\n", "issue": "Clearing value of a DatetimePicker\n#### Description of expected behavior and the observed behavior\r\nNot sure if this is a bug or a new feature to Panel. Let's say I have a layout consisting of a button named \"Edit\", a DatetimePicker disabled with no default value, and a button named \"Submit\". At the time of initialization, the value of DatetimePicker is Null. The way these objects interact is as follows:\r\n- Click \"Edit\" button, DatetimePicker is enabled so user can select a specific time value.\r\n- Click \"Submit\" button, the selected time value will be pushed to the DB, and the DatetimePicker will be disabled and reset back to Null.\r\n\r\nI have tried several ways with no success in clearing the value of the DatetimePicker.\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n\r\n```\r\ntime_widget = pn.widgets.DatetimePicker(disabled=True)\r\ntime_widget.value = now()\r\n\r\n# how to set value back to None?\r\ntime_widget.value = None/pandas.NaT/np.nan => all causes error\r\n```\r\n\n", "before_files": [{"content": "from bokeh.core.enums import CalendarPosition\nfrom bokeh.core.properties import (\n Bool, Date, Datetime, Either, Enum, List, Nullable, String, Tuple,\n)\nfrom bokeh.models.widgets.inputs import InputWidget\n\n\nclass DatetimePicker(InputWidget):\n ''' Calendar-based date picker widget.\n\n '''\n\n value = String(help=\"\"\"\n The initial or picked date.\n \"\"\")\n\n min_date = Nullable(Either(Date, Datetime), help=\"\"\"\n Optional earliest allowable date.\n \"\"\")\n\n max_date = Nullable(Either(Date, Datetime), help=\"\"\"\n Optional latest allowable date.\n \"\"\")\n\n disabled_dates = List(Either(Date, Datetime, Tuple(Date, Date), Tuple(Datetime, Datetime)), default=[], help=\"\"\"\n A list of dates of ``(start, end)`` date ranges to make unavailable for\n selection. All other dates will be avalable.\n\n .. note::\n Only one of ``disabled_dates`` and ``enabled_dates`` should be specified.\n \"\"\")\n\n enabled_dates = List(Either(Date, Datetime, Tuple(Date, Date), Tuple(Datetime, Datetime)), default=[], help=\"\"\"\n A list of dates of ``(start, end)`` date ranges to make available for\n selection. All other dates will be unavailable.\n\n .. 
note::\n Only one of ``disabled_dates`` and ``enabled_dates`` should be specified.\n \"\"\")\n\n position = Enum(CalendarPosition, default=\"auto\", help=\"\"\"\n Where the calendar is rendered relative to the input when ``inline`` is False.\n \"\"\")\n\n inline = Bool(default=False, help=\"\"\"\n Whether the calendar sholud be displayed inline.\n \"\"\")\n\n enable_time = Bool(default=True)\n\n enable_seconds = Bool(default=True)\n\n military_time = Bool(default=True)\n\n date_format = String(\"Y-m-d H:i:S\")\n\n mode = String(default=\"single\", help=\"\"\"\n Should either be \"single\" or \"range\".\"\"\")\n", "path": "panel/models/datetime_picker.py"}], "after_files": [{"content": "from bokeh.core.enums import CalendarPosition\nfrom bokeh.core.properties import (\n Bool, Date, Datetime, Either, Enum, List, Nullable, String, Tuple,\n)\nfrom bokeh.models.widgets.inputs import InputWidget\n\n\nclass DatetimePicker(InputWidget):\n ''' Calendar-based date picker widget.\n\n '''\n\n value = Nullable(String, help=\"\"\"\n The initial or picked date.\n \"\"\")\n\n min_date = Nullable(Either(Date, Datetime), help=\"\"\"\n Optional earliest allowable date.\n \"\"\")\n\n max_date = Nullable(Either(Date, Datetime), help=\"\"\"\n Optional latest allowable date.\n \"\"\")\n\n disabled_dates = List(Either(Date, Datetime, Tuple(Date, Date), Tuple(Datetime, Datetime)), default=[], help=\"\"\"\n A list of dates of ``(start, end)`` date ranges to make unavailable for\n selection. All other dates will be avalable.\n\n .. note::\n Only one of ``disabled_dates`` and ``enabled_dates`` should be specified.\n \"\"\")\n\n enabled_dates = List(Either(Date, Datetime, Tuple(Date, Date), Tuple(Datetime, Datetime)), default=[], help=\"\"\"\n A list of dates of ``(start, end)`` date ranges to make available for\n selection. All other dates will be unavailable.\n\n .. note::\n Only one of ``disabled_dates`` and ``enabled_dates`` should be specified.\n \"\"\")\n\n position = Enum(CalendarPosition, default=\"auto\", help=\"\"\"\n Where the calendar is rendered relative to the input when ``inline`` is False.\n \"\"\")\n\n inline = Bool(default=False, help=\"\"\"\n Whether the calendar sholud be displayed inline.\n \"\"\")\n\n enable_time = Bool(default=True)\n\n enable_seconds = Bool(default=True)\n\n military_time = Bool(default=True)\n\n date_format = String(\"Y-m-d H:i:S\")\n\n mode = String(default=\"single\", help=\"\"\"\n Should either be \"single\" or \"range\".\"\"\")\n", "path": "panel/models/datetime_picker.py"}]} | 1,024 | 84 |
gh_patches_debug_8704 | rasdani/github-patches | git_diff | sublimelsp__LSP-1557 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[regression] lsp_execute does nothing due to empty session
Since this [commit](https://github.com/sublimelsp/LSP/commit/7d05794fa3cc4ecd3931d09a90e801addc70d9fa), the `capability` class attribute got deleted, which means that `self.best_session(self.capability)` is unable to find a session.
The consequence is that the commands defined in [LSP-metals.sublime-commands](https://github.com/scalameta/metals-sublime/blob/master/LSP-metals.sublime-commands) aren't executed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/execute_command.py`
Content:
```
1 import sublime
2 from .core.protocol import Error
3 from .core.protocol import ExecuteCommandParams
4 from .core.registry import LspTextCommand
5 from .core.registry import windows
6 from .core.typing import List, Optional, Any
7 from .core.views import uri_from_view, offset_to_point, region_to_range, text_document_identifier
8
9
10 class LspExecuteCommand(LspTextCommand):
11
12 def run(self,
13 edit: sublime.Edit,
14 command_name: Optional[str] = None,
15 command_args: Optional[List[Any]] = None,
16 session_name: Optional[str] = None,
17 event: Optional[dict] = None) -> None:
18 # Handle VSCode-specific command for triggering AC/sighelp
19 if command_name == "editor.action.triggerSuggest":
20 # Triggered from set_timeout as suggestions popup doesn't trigger otherwise.
21 return sublime.set_timeout(lambda: self.view.run_command("auto_complete"))
22 if command_name == "editor.action.triggerParameterHints":
23
24 def run_async() -> None:
25 listener = windows.listener_for_view(self.view)
26 if listener:
27 listener.do_signature_help_async(manual=False)
28
29 return sublime.set_timeout_async(run_async)
30 session = self.session_by_name(session_name) if session_name else self.best_session(self.capability)
31 if session and command_name:
32 if command_args:
33 self._expand_variables(command_args)
34 params = {"command": command_name} # type: ExecuteCommandParams
35 if command_args:
36 params["arguments"] = command_args
37
38 def handle_response(response: Any) -> None:
39 assert command_name
40 if isinstance(response, Error):
41 sublime.message_dialog("command {} failed. Reason: {}".format(command_name, str(response)))
42 return
43 msg = "command {} completed".format(command_name)
44 if response:
45 msg += "with response: {}".format(response)
46 window = self.view.window()
47 if window:
48 window.status_message(msg)
49
50 session.execute_command(params, progress=True).then(handle_response)
51
52 def _expand_variables(self, command_args: List[Any]) -> None:
53 region = self.view.sel()[0]
54 for i, arg in enumerate(command_args):
55 if arg in ["$document_id", "${document_id}"]:
56 command_args[i] = text_document_identifier(self.view)
57 if arg in ["$file_uri", "${file_uri}"]:
58 command_args[i] = uri_from_view(self.view)
59 elif arg in ["$selection", "${selection}"]:
60 command_args[i] = self.view.substr(region)
61 elif arg in ["$offset", "${offset}"]:
62 command_args[i] = region.b
63 elif arg in ["$selection_begin", "${selection_begin}"]:
64 command_args[i] = region.begin()
65 elif arg in ["$selection_end", "${selection_end}"]:
66 command_args[i] = region.end()
67 elif arg in ["$position", "${position}"]:
68 command_args[i] = offset_to_point(self.view, region.b).to_lsp()
69 elif arg in ["$range", "${range}"]:
70 command_args[i] = region_to_range(self.view, region).to_lsp()
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/execute_command.py b/plugin/execute_command.py
--- a/plugin/execute_command.py
+++ b/plugin/execute_command.py
@@ -27,7 +27,7 @@
listener.do_signature_help_async(manual=False)
return sublime.set_timeout_async(run_async)
- session = self.session_by_name(session_name) if session_name else self.best_session(self.capability)
+ session = self.session_by_name(session_name if session_name else self.session_name)
if session and command_name:
if command_args:
self._expand_variables(command_args)
| {"golden_diff": "diff --git a/plugin/execute_command.py b/plugin/execute_command.py\n--- a/plugin/execute_command.py\n+++ b/plugin/execute_command.py\n@@ -27,7 +27,7 @@\n listener.do_signature_help_async(manual=False)\n \n return sublime.set_timeout_async(run_async)\n- session = self.session_by_name(session_name) if session_name else self.best_session(self.capability)\n+ session = self.session_by_name(session_name if session_name else self.session_name)\n if session and command_name:\n if command_args:\n self._expand_variables(command_args)\n", "issue": "[regression] lsp_execute does nothing due to empty session\nSince this [commit](https://github.com/sublimelsp/LSP/commit/7d05794fa3cc4ecd3931d09a90e801addc70d9fa) the `capability` variable got deleted which means that `self.best_session(self.capability)` is unable to find session.\r\n\r\nThe consequence is that [LSP-metals.sublime-commands](https://github.com/scalameta/metals-sublime/blob/master/LSP-metals.sublime-commands) aren't executed.\r\n\r\n \r\n\n", "before_files": [{"content": "import sublime\nfrom .core.protocol import Error\nfrom .core.protocol import ExecuteCommandParams\nfrom .core.registry import LspTextCommand\nfrom .core.registry import windows\nfrom .core.typing import List, Optional, Any\nfrom .core.views import uri_from_view, offset_to_point, region_to_range, text_document_identifier\n\n\nclass LspExecuteCommand(LspTextCommand):\n\n def run(self,\n edit: sublime.Edit,\n command_name: Optional[str] = None,\n command_args: Optional[List[Any]] = None,\n session_name: Optional[str] = None,\n event: Optional[dict] = None) -> None:\n # Handle VSCode-specific command for triggering AC/sighelp\n if command_name == \"editor.action.triggerSuggest\":\n # Triggered from set_timeout as suggestions popup doesn't trigger otherwise.\n return sublime.set_timeout(lambda: self.view.run_command(\"auto_complete\"))\n if command_name == \"editor.action.triggerParameterHints\":\n\n def run_async() -> None:\n listener = windows.listener_for_view(self.view)\n if listener:\n listener.do_signature_help_async(manual=False)\n\n return sublime.set_timeout_async(run_async)\n session = self.session_by_name(session_name) if session_name else self.best_session(self.capability)\n if session and command_name:\n if command_args:\n self._expand_variables(command_args)\n params = {\"command\": command_name} # type: ExecuteCommandParams\n if command_args:\n params[\"arguments\"] = command_args\n\n def handle_response(response: Any) -> None:\n assert command_name\n if isinstance(response, Error):\n sublime.message_dialog(\"command {} failed. 
Reason: {}\".format(command_name, str(response)))\n return\n msg = \"command {} completed\".format(command_name)\n if response:\n msg += \"with response: {}\".format(response)\n window = self.view.window()\n if window:\n window.status_message(msg)\n\n session.execute_command(params, progress=True).then(handle_response)\n\n def _expand_variables(self, command_args: List[Any]) -> None:\n region = self.view.sel()[0]\n for i, arg in enumerate(command_args):\n if arg in [\"$document_id\", \"${document_id}\"]:\n command_args[i] = text_document_identifier(self.view)\n if arg in [\"$file_uri\", \"${file_uri}\"]:\n command_args[i] = uri_from_view(self.view)\n elif arg in [\"$selection\", \"${selection}\"]:\n command_args[i] = self.view.substr(region)\n elif arg in [\"$offset\", \"${offset}\"]:\n command_args[i] = region.b\n elif arg in [\"$selection_begin\", \"${selection_begin}\"]:\n command_args[i] = region.begin()\n elif arg in [\"$selection_end\", \"${selection_end}\"]:\n command_args[i] = region.end()\n elif arg in [\"$position\", \"${position}\"]:\n command_args[i] = offset_to_point(self.view, region.b).to_lsp()\n elif arg in [\"$range\", \"${range}\"]:\n command_args[i] = region_to_range(self.view, region).to_lsp()\n", "path": "plugin/execute_command.py"}], "after_files": [{"content": "import sublime\nfrom .core.protocol import Error\nfrom .core.protocol import ExecuteCommandParams\nfrom .core.registry import LspTextCommand\nfrom .core.registry import windows\nfrom .core.typing import List, Optional, Any\nfrom .core.views import uri_from_view, offset_to_point, region_to_range, text_document_identifier\n\n\nclass LspExecuteCommand(LspTextCommand):\n\n def run(self,\n edit: sublime.Edit,\n command_name: Optional[str] = None,\n command_args: Optional[List[Any]] = None,\n session_name: Optional[str] = None,\n event: Optional[dict] = None) -> None:\n # Handle VSCode-specific command for triggering AC/sighelp\n if command_name == \"editor.action.triggerSuggest\":\n # Triggered from set_timeout as suggestions popup doesn't trigger otherwise.\n return sublime.set_timeout(lambda: self.view.run_command(\"auto_complete\"))\n if command_name == \"editor.action.triggerParameterHints\":\n\n def run_async() -> None:\n listener = windows.listener_for_view(self.view)\n if listener:\n listener.do_signature_help_async(manual=False)\n\n return sublime.set_timeout_async(run_async)\n session = self.session_by_name(session_name if session_name else self.session_name)\n if session and command_name:\n if command_args:\n self._expand_variables(command_args)\n params = {\"command\": command_name} # type: ExecuteCommandParams\n if command_args:\n params[\"arguments\"] = command_args\n\n def handle_response(response: Any) -> None:\n assert command_name\n if isinstance(response, Error):\n sublime.message_dialog(\"command {} failed. 
Reason: {}\".format(command_name, str(response)))\n return\n msg = \"command {} completed\".format(command_name)\n if response:\n msg += \"with response: {}\".format(response)\n window = self.view.window()\n if window:\n window.status_message(msg)\n\n session.execute_command(params, progress=True).then(handle_response)\n\n def _expand_variables(self, command_args: List[Any]) -> None:\n region = self.view.sel()[0]\n for i, arg in enumerate(command_args):\n if arg in [\"$document_id\", \"${document_id}\"]:\n command_args[i] = text_document_identifier(self.view)\n if arg in [\"$file_uri\", \"${file_uri}\"]:\n command_args[i] = uri_from_view(self.view)\n elif arg in [\"$selection\", \"${selection}\"]:\n command_args[i] = self.view.substr(region)\n elif arg in [\"$offset\", \"${offset}\"]:\n command_args[i] = region.b\n elif arg in [\"$selection_begin\", \"${selection_begin}\"]:\n command_args[i] = region.begin()\n elif arg in [\"$selection_end\", \"${selection_end}\"]:\n command_args[i] = region.end()\n elif arg in [\"$position\", \"${position}\"]:\n command_args[i] = offset_to_point(self.view, region.b).to_lsp()\n elif arg in [\"$range\", \"${range}\"]:\n command_args[i] = region_to_range(self.view, region).to_lsp()\n", "path": "plugin/execute_command.py"}]} | 1,189 | 124 |
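
The one-line fix above changes which fallback is used when no explicit `session_name` argument is passed. A stripped-down sketch of that control flow follows; the `Command` class and `SESSIONS` registry are hypothetical stand-ins for LSP's plugin machinery, not its real API:

```python
SESSIONS = {"metals": "metals-session", "pylsp": "pylsp-session"}  # hypothetical registry


class Command:
    # In LSP this attribute comes from the LspTextCommand subclass (e.g. LSP-metals).
    session_name = "metals"

    def session_by_name(self, name):
        return SESSIONS.get(name)

    def run(self, command_name, session_name=None):
        # Broken version: `... if session_name else self.best_session(self.capability)`
        # -- with `capability` deleted, the fallback found no session, so the
        # command silently did nothing.
        session = self.session_by_name(session_name if session_name else self.session_name)
        if session and command_name:
            return "{} executes {}".format(session, command_name)
        return None


cmd = Command()
assert cmd.run("metals.build-import") == "metals-session executes metals.build-import"
assert cmd.run("foo", session_name="pylsp") == "pylsp-session executes foo"
```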
gh_patches_debug_38287 | rasdani/github-patches | git_diff | microsoft__MLOS-358 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
flake8 and/or prettier setup for devcontainer
Per comments in #354 and #340
We should "just" add flake8 and some type of auto prettier to the devcontainer setup so that those types of things are caught during dev cycle instead of review nits as much as possible.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlos_core/setup.py`
Content:
```
1 #
2 # Copyright (c) Microsoft Corporation.
3 # Licensed under the MIT License.
4 #
5 """
6 Setup instructions for the mlos_core package.
7 """
8
9 from itertools import chain
10 from logging import warning
11 from typing import Dict, List
12
13 from setuptools import setup, find_packages
14
15 from _version import _VERSION # pylint: disable=import-private-name
16
17 try:
18 from setuptools_scm import get_version
19 version = get_version(root='..', relative_to=__file__)
20 if version is not None:
21 _VERSION = version
22 except ImportError:
23 warning("setuptools_scm not found, using version from _version.py")
24 except LookupError as e:
25 warning(f"setuptools_scm failed to find git version, using version from _version.py: {e}")
26
27
28 extra_requires: Dict[str, List[str]] = {
29 'emukit': ['emukit'],
30 'skopt': ['scikit-optimize<=0.9.0'], # FIXME: temporarily work around some version mismatch issues (PR 850)
31 }
32
33 # construct special 'full' extra that adds requirements for all built-in
34 # backend integrations and additional extra features.
35 extra_requires['full'] = list(set(chain(*extra_requires.values())))
36
37 extra_requires['full-tests'] = extra_requires['full'] + [
38 'pytest',
39 'pytest-forked',
40 'pytest-xdist',
41 'pytest-cov',
42 'pytest-local-badge',
43 ]
44
45 # pylint: disable=duplicate-code
46 MODULE_BASE_NAME = 'mlos_core'
47 setup(
48 name='mlos-core',
49 version=_VERSION,
50 packages=find_packages(exclude=[f"{MODULE_BASE_NAME}.tests", f"{MODULE_BASE_NAME}.tests.*"]),
51 package_data={
52 '': ['py.typed', '**/*.pyi'],
53 },
54 install_requires=[
55 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)
56 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released
57 'scipy>=1.3.2',
58 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)
59 'pandas>=1.0.3',
60 'ConfigSpace>=0.6.1',
61 ],
62 extras_require=extra_requires,
63 author='Microsoft',
64 author_email='[email protected]',
65 description=('MLOS Core Python interface for parameter optimization.'),
66 license='MIT',
67 keywords='',
68 url='https://aka.ms/mlos-core',
69 python_requires='>=3.8',
70 )
71
```
Path: `mlos_bench/setup.py`
Content:
```
1 #
2 # Copyright (c) Microsoft Corporation.
3 # Licensed under the MIT License.
4 #
5 """
6 Setup instructions for the mlos_bench package.
7 """
8
9 from logging import warning
10 from itertools import chain
11 from typing import Dict, List
12
13 from setuptools import setup, find_packages
14
15 from _version import _VERSION # pylint: disable=import-private-name
16
17 try:
18 from setuptools_scm import get_version
19 version = get_version(root='..', relative_to=__file__)
20 if version is not None:
21 _VERSION = version
22 except ImportError:
23 warning("setuptools_scm not found, using version from _version.py")
24 except LookupError as e:
25 warning(f"setuptools_scm failed to find git version, using version from _version.py: {e}")
26
27
28 extra_requires: Dict[str, List[str]] = {
29 # Additional tools for extra functionality.
30 'azure': ['azure-storage-file-share'],
31 'storage-sql-duckdb': ['sqlalchemy', 'duckdb_engine'],
32 'storage-sql-mysql': ['sqlalchemy', 'mysql-connector-python'],
33 'storage-sql-postgres': ['sqlalchemy', 'psycopg2'],
34 'storage-sql-sqlite': ['sqlalchemy'], # sqlite3 comes with python, so we don't need to install it.
35 # Transitive extra_requires from mlos-core.
36 'emukit': ['emukit'],
37 'skopt': ['scikit-optimize'],
38 }
39
40 # construct special 'full' extra that adds requirements for all built-in
41 # backend integrations and additional extra features.
42 extra_requires['full'] = list(set(chain(*extra_requires.values())))
43
44 extra_requires['full-tests'] = extra_requires['full'] + [
45 'pytest',
46 'pytest-forked',
47 'pytest-xdist',
48 'pytest-cov',
49 'pytest-local-badge',
50 ]
51
52 # pylint: disable=duplicate-code
53 MODULE_BASE_NAME = 'mlos_bench'
54 setup(
55 name='mlos-bench',
56 version=_VERSION,
57 packages=find_packages(exclude=[f"{MODULE_BASE_NAME}.tests", f"{MODULE_BASE_NAME}.tests.*"]),
58 package_data={
59 '': ['py.typed', '**/*.pyi'],
60 'mlos_bench': [
61 'config/**/*.md',
62 'config/**/*.jsonc',
63 'config/**/*.json',
64 'config/**/*.py',
65 'config/**/*.sh',
66 'config/**/*.cmd',
67 'config/**/*.ps1',
68 ],
69 },
70 entry_points={
71 'console_scripts': [
72 'mlos_bench = mlos_bench.run:_main',
73 ],
74 },
75 install_requires=[
76 'mlos-core==' + _VERSION,
77 'requests',
78 'json5',
79 'jsonschema',
80 'importlib_resources;python_version<"3.10"',
81 ] + extra_requires['storage-sql-sqlite'], # NOTE: For now sqlite is a fallback storage backend, so we always install it.
82 extras_require=extra_requires,
83 author='Microsoft',
84 author_email='[email protected]',
85 description=('MLOS Bench Python interface for benchmark automation and optimization.'),
86 license='MIT',
87 keywords='',
88 url='https://aka.ms/mlos-core',
89 python_requires='>=3.8',
90 )
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlos_bench/setup.py b/mlos_bench/setup.py
--- a/mlos_bench/setup.py
+++ b/mlos_bench/setup.py
@@ -18,14 +18,14 @@
from setuptools_scm import get_version
version = get_version(root='..', relative_to=__file__)
if version is not None:
- _VERSION = version
+ _VERSION = version # noqa: F811
except ImportError:
warning("setuptools_scm not found, using version from _version.py")
except LookupError as e:
warning(f"setuptools_scm failed to find git version, using version from _version.py: {e}")
-extra_requires: Dict[str, List[str]] = {
+extra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass
# Additional tools for extra functionality.
'azure': ['azure-storage-file-share'],
'storage-sql-duckdb': ['sqlalchemy', 'duckdb_engine'],
diff --git a/mlos_core/setup.py b/mlos_core/setup.py
--- a/mlos_core/setup.py
+++ b/mlos_core/setup.py
@@ -18,14 +18,14 @@
from setuptools_scm import get_version
version = get_version(root='..', relative_to=__file__)
if version is not None:
- _VERSION = version
+ _VERSION = version # noqa: F811
except ImportError:
warning("setuptools_scm not found, using version from _version.py")
except LookupError as e:
warning(f"setuptools_scm failed to find git version, using version from _version.py: {e}")
-extra_requires: Dict[str, List[str]] = {
+extra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass
'emukit': ['emukit'],
'skopt': ['scikit-optimize<=0.9.0'], # FIXME: temporarily work around some version mismatch issues (PR 850)
}
@@ -52,10 +52,10 @@
'': ['py.typed', '**/*.pyi'],
},
install_requires=[
- 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)
- 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released
+ 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)
+ 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released
'scipy>=1.3.2',
- 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)
+ 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)
'pandas>=1.0.3',
'ConfigSpace>=0.6.1',
],
| {"golden_diff": "diff --git a/mlos_bench/setup.py b/mlos_bench/setup.py\n--- a/mlos_bench/setup.py\n+++ b/mlos_bench/setup.py\n@@ -18,14 +18,14 @@\n from setuptools_scm import get_version\n version = get_version(root='..', relative_to=__file__)\n if version is not None:\n- _VERSION = version\n+ _VERSION = version # noqa: F811\n except ImportError:\n warning(\"setuptools_scm not found, using version from _version.py\")\n except LookupError as e:\n warning(f\"setuptools_scm failed to find git version, using version from _version.py: {e}\")\n \n \n-extra_requires: Dict[str, List[str]] = {\n+extra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass\n # Additional tools for extra functionality.\n 'azure': ['azure-storage-file-share'],\n 'storage-sql-duckdb': ['sqlalchemy', 'duckdb_engine'],\ndiff --git a/mlos_core/setup.py b/mlos_core/setup.py\n--- a/mlos_core/setup.py\n+++ b/mlos_core/setup.py\n@@ -18,14 +18,14 @@\n from setuptools_scm import get_version\n version = get_version(root='..', relative_to=__file__)\n if version is not None:\n- _VERSION = version\n+ _VERSION = version # noqa: F811\n except ImportError:\n warning(\"setuptools_scm not found, using version from _version.py\")\n except LookupError as e:\n warning(f\"setuptools_scm failed to find git version, using version from _version.py: {e}\")\n \n \n-extra_requires: Dict[str, List[str]] = {\n+extra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass\n 'emukit': ['emukit'],\n 'skopt': ['scikit-optimize<=0.9.0'], # FIXME: temporarily work around some version mismatch issues (PR 850)\n }\n@@ -52,10 +52,10 @@\n '': ['py.typed', '**/*.pyi'],\n },\n install_requires=[\n- 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)\n- 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released\n+ 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)\n+ 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released\n 'scipy>=1.3.2',\n- 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)\n+ 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)\n 'pandas>=1.0.3',\n 'ConfigSpace>=0.6.1',\n ],\n", "issue": "flake8 and/or prettier setup for devcontainer\nPer comments in #354 and #340 \r\nWe should \"just\" add flake8 and some type of auto prettier to the devcontainer setup so that those types of things are caught during dev cycle instead of review nits as much as possible.\n", "before_files": [{"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nSetup instructions for the mlos_core package.\n\"\"\"\n\nfrom itertools import chain\nfrom logging import warning\nfrom typing import Dict, List\n\nfrom setuptools import setup, find_packages\n\nfrom _version import _VERSION # pylint: disable=import-private-name\n\ntry:\n from setuptools_scm import get_version\n version = get_version(root='..', relative_to=__file__)\n if version is not None:\n _VERSION = version\nexcept ImportError:\n warning(\"setuptools_scm not found, using version from _version.py\")\nexcept LookupError as e:\n warning(f\"setuptools_scm failed to find git version, using version from _version.py: {e}\")\n\n\nextra_requires: Dict[str, List[str]] = {\n 'emukit': ['emukit'],\n 'skopt': ['scikit-optimize<=0.9.0'], # FIXME: 
temporarily work around some version mismatch issues (PR 850)\n}\n\n# construct special 'full' extra that adds requirements for all built-in\n# backend integrations and additional extra features.\nextra_requires['full'] = list(set(chain(*extra_requires.values())))\n\nextra_requires['full-tests'] = extra_requires['full'] + [\n 'pytest',\n 'pytest-forked',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-local-badge',\n]\n\n# pylint: disable=duplicate-code\nMODULE_BASE_NAME = 'mlos_core'\nsetup(\n name='mlos-core',\n version=_VERSION,\n packages=find_packages(exclude=[f\"{MODULE_BASE_NAME}.tests\", f\"{MODULE_BASE_NAME}.tests.*\"]),\n package_data={\n '': ['py.typed', '**/*.pyi'],\n },\n install_requires=[\n 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)\n 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released\n 'scipy>=1.3.2',\n 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)\n 'pandas>=1.0.3',\n 'ConfigSpace>=0.6.1',\n ],\n extras_require=extra_requires,\n author='Microsoft',\n author_email='[email protected]',\n description=('MLOS Core Python interface for parameter optimization.'),\n license='MIT',\n keywords='',\n url='https://aka.ms/mlos-core',\n python_requires='>=3.8',\n)\n", "path": "mlos_core/setup.py"}, {"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nSetup instructions for the mlos_bench package.\n\"\"\"\n\nfrom logging import warning\nfrom itertools import chain\nfrom typing import Dict, List\n\nfrom setuptools import setup, find_packages\n\nfrom _version import _VERSION # pylint: disable=import-private-name\n\ntry:\n from setuptools_scm import get_version\n version = get_version(root='..', relative_to=__file__)\n if version is not None:\n _VERSION = version\nexcept ImportError:\n warning(\"setuptools_scm not found, using version from _version.py\")\nexcept LookupError as e:\n warning(f\"setuptools_scm failed to find git version, using version from _version.py: {e}\")\n\n\nextra_requires: Dict[str, List[str]] = {\n # Additional tools for extra functionality.\n 'azure': ['azure-storage-file-share'],\n 'storage-sql-duckdb': ['sqlalchemy', 'duckdb_engine'],\n 'storage-sql-mysql': ['sqlalchemy', 'mysql-connector-python'],\n 'storage-sql-postgres': ['sqlalchemy', 'psycopg2'],\n 'storage-sql-sqlite': ['sqlalchemy'], # sqlite3 comes with python, so we don't need to install it.\n # Transitive extra_requires from mlos-core.\n 'emukit': ['emukit'],\n 'skopt': ['scikit-optimize'],\n}\n\n# construct special 'full' extra that adds requirements for all built-in\n# backend integrations and additional extra features.\nextra_requires['full'] = list(set(chain(*extra_requires.values())))\n\nextra_requires['full-tests'] = extra_requires['full'] + [\n 'pytest',\n 'pytest-forked',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-local-badge',\n]\n\n# pylint: disable=duplicate-code\nMODULE_BASE_NAME = 'mlos_bench'\nsetup(\n name='mlos-bench',\n version=_VERSION,\n packages=find_packages(exclude=[f\"{MODULE_BASE_NAME}.tests\", f\"{MODULE_BASE_NAME}.tests.*\"]),\n package_data={\n '': ['py.typed', '**/*.pyi'],\n 'mlos_bench': [\n 'config/**/*.md',\n 'config/**/*.jsonc',\n 'config/**/*.json',\n 'config/**/*.py',\n 'config/**/*.sh',\n 'config/**/*.cmd',\n 'config/**/*.ps1',\n ],\n },\n entry_points={\n 'console_scripts': [\n 'mlos_bench = mlos_bench.run:_main',\n ],\n },\n install_requires=[\n 'mlos-core==' + _VERSION,\n 
'requests',\n 'json5',\n 'jsonschema',\n 'importlib_resources;python_version<\"3.10\"',\n ] + extra_requires['storage-sql-sqlite'], # NOTE: For now sqlite is a fallback storage backend, so we always install it.\n extras_require=extra_requires,\n author='Microsoft',\n author_email='[email protected]',\n description=('MLOS Bench Python interface for benchmark automation and optimization.'),\n license='MIT',\n keywords='',\n url='https://aka.ms/mlos-core',\n python_requires='>=3.8',\n)\n", "path": "mlos_bench/setup.py"}], "after_files": [{"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nSetup instructions for the mlos_core package.\n\"\"\"\n\nfrom itertools import chain\nfrom logging import warning\nfrom typing import Dict, List\n\nfrom setuptools import setup, find_packages\n\nfrom _version import _VERSION # pylint: disable=import-private-name\n\ntry:\n from setuptools_scm import get_version\n version = get_version(root='..', relative_to=__file__)\n if version is not None:\n _VERSION = version # noqa: F811\nexcept ImportError:\n warning(\"setuptools_scm not found, using version from _version.py\")\nexcept LookupError as e:\n warning(f\"setuptools_scm failed to find git version, using version from _version.py: {e}\")\n\n\nextra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass\n 'emukit': ['emukit'],\n 'skopt': ['scikit-optimize<=0.9.0'], # FIXME: temporarily work around some version mismatch issues (PR 850)\n}\n\n# construct special 'full' extra that adds requirements for all built-in\n# backend integrations and additional extra features.\nextra_requires['full'] = list(set(chain(*extra_requires.values())))\n\nextra_requires['full-tests'] = extra_requires['full'] + [\n 'pytest',\n 'pytest-forked',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-local-badge',\n]\n\n# pylint: disable=duplicate-code\nMODULE_BASE_NAME = 'mlos_core'\nsetup(\n name='mlos-core',\n version=_VERSION,\n packages=find_packages(exclude=[f\"{MODULE_BASE_NAME}.tests\", f\"{MODULE_BASE_NAME}.tests.*\"]),\n package_data={\n '': ['py.typed', '**/*.pyi'],\n },\n install_requires=[\n 'scikit-learn<1.2', # FIXME: temporarily work around some version mismatch issues (PR 850)\n 'joblib>=1.1.1', # CVE-2022-21797: scikit-learn dependency, addressed in 1.2.0dev0, which isn't currently released\n 'scipy>=1.3.2',\n 'numpy<1.24', # FIXME: temporarily work around some version mismatch issues (PR 850)\n 'pandas>=1.0.3',\n 'ConfigSpace>=0.6.1',\n ],\n extras_require=extra_requires,\n author='Microsoft',\n author_email='[email protected]',\n description=('MLOS Core Python interface for parameter optimization.'),\n license='MIT',\n keywords='',\n url='https://aka.ms/mlos-core',\n python_requires='>=3.8',\n)\n", "path": "mlos_core/setup.py"}, {"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nSetup instructions for the mlos_bench package.\n\"\"\"\n\nfrom logging import warning\nfrom itertools import chain\nfrom typing import Dict, List\n\nfrom setuptools import setup, find_packages\n\nfrom _version import _VERSION # pylint: disable=import-private-name\n\ntry:\n from setuptools_scm import get_version\n version = get_version(root='..', relative_to=__file__)\n if version is not None:\n _VERSION = version # noqa: F811\nexcept ImportError:\n warning(\"setuptools_scm not found, using version from _version.py\")\nexcept LookupError as e:\n warning(f\"setuptools_scm failed to find git version, using version from 
_version.py: {e}\")\n\n\nextra_requires: Dict[str, List[str]] = { # pylint: disable=consider-using-namedtuple-or-dataclass\n # Additional tools for extra functionality.\n 'azure': ['azure-storage-file-share'],\n 'storage-sql-duckdb': ['sqlalchemy', 'duckdb_engine'],\n 'storage-sql-mysql': ['sqlalchemy', 'mysql-connector-python'],\n 'storage-sql-postgres': ['sqlalchemy', 'psycopg2'],\n 'storage-sql-sqlite': ['sqlalchemy'], # sqlite3 comes with python, so we don't need to install it.\n # Transitive extra_requires from mlos-core.\n 'emukit': ['emukit'],\n 'skopt': ['scikit-optimize'],\n}\n\n# construct special 'full' extra that adds requirements for all built-in\n# backend integrations and additional extra features.\nextra_requires['full'] = list(set(chain(*extra_requires.values())))\n\nextra_requires['full-tests'] = extra_requires['full'] + [\n 'pytest',\n 'pytest-forked',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-local-badge',\n]\n\n# pylint: disable=duplicate-code\nMODULE_BASE_NAME = 'mlos_bench'\nsetup(\n name='mlos-bench',\n version=_VERSION,\n packages=find_packages(exclude=[f\"{MODULE_BASE_NAME}.tests\", f\"{MODULE_BASE_NAME}.tests.*\"]),\n package_data={\n '': ['py.typed', '**/*.pyi'],\n 'mlos_bench': [\n 'config/**/*.md',\n 'config/**/*.jsonc',\n 'config/**/*.json',\n 'config/**/*.py',\n 'config/**/*.sh',\n 'config/**/*.cmd',\n 'config/**/*.ps1',\n ],\n },\n entry_points={\n 'console_scripts': [\n 'mlos_bench = mlos_bench.run:_main',\n ],\n },\n install_requires=[\n 'mlos-core==' + _VERSION,\n 'requests',\n 'json5',\n 'jsonschema',\n 'importlib_resources;python_version<\"3.10\"',\n ] + extra_requires['storage-sql-sqlite'], # NOTE: For now sqlite is a fallback storage backend, so we always install it.\n extras_require=extra_requires,\n author='Microsoft',\n author_email='[email protected]',\n description=('MLOS Bench Python interface for benchmark automation and optimization.'),\n license='MIT',\n keywords='',\n url='https://aka.ms/mlos-core',\n python_requires='>=3.8',\n)\n", "path": "mlos_bench/setup.py"}]} | 1,935 | 744 |
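
Although the issue asks for flake8 in the devcontainer, the patch itself is mostly flake8 appeasement: consistent spacing before inline comments, and a `# noqa: F811` where `_VERSION` is first bound by an import and then conditionally rebound, which flake8 reads as a redefinition of an unused name. A self-contained sketch of that pattern follows; the fallback assignment below stands in for `from _version import _VERSION`, so the F811 trigger is only described, not reproduced:

```python
from logging import warning

# In the real setup.py the first binding is `from _version import _VERSION`;
# that import is what makes the later assignment an F811 ("redefinition of
# unused name") for flake8. A literal stands in here so this sketch runs
# without the package.
_VERSION = "0.0.0"

try:
    from setuptools_scm import get_version  # may be absent; handled below
    version = get_version(root="..", relative_to=__file__)
    if version is not None:
        _VERSION = version  # noqa: F811  (the suppression the patch adds)
except ImportError:
    warning("setuptools_scm not found, using version from _version.py")
except LookupError as e:
    warning("setuptools_scm failed to find git version: %s", e)

print(_VERSION)
```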
gh_patches_debug_20127 | rasdani/github-patches | git_diff | rotki__rotki-591 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sign In Failed - TypeError
Good evening!
I'm here on Linux, I've just tried to log in to my Rotki database created with 1.0.4 (using 1.0.5 now). After I type in my password and log in, I get the message
> **Sign In Failed**
> TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
Now when I attempt to go back to 1.0.4 I get
> **Sign In Failed**
> DBUpgradeError: Your database version is newer than the version expected by the executable. Did you perhaps try to revert to an older rotkehlchen version?Please only use the latest version of the software.
No big worries, I'm still evaluating the software to see if it can do what I need so there's not a ton of data in there. Just thought you should know. I'll be happy to help with debugging if I can. But kids, so... I'll do my best!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rotkehlchen/db/settings.py`
Content:
```
1 from typing import Any, Dict, NamedTuple, Union
2
3 from rotkehlchen.constants.assets import S_USD
4 from rotkehlchen.constants.timing import YEAR_IN_SECONDS
5 from rotkehlchen.db.utils import str_to_bool
6 from rotkehlchen.errors import DeserializationError
7 from rotkehlchen.typing import FiatAsset, Timestamp
8 from rotkehlchen.user_messages import MessagesAggregator
9
10 ROTKEHLCHEN_DB_VERSION = 8
11 DEFAULT_TAXFREE_AFTER_PERIOD = YEAR_IN_SECONDS
12 DEFAULT_INCLUDE_CRYPTO2CRYPTO = True
13 DEFAULT_INCLUDE_GAS_COSTS = True
14 DEFAULT_ANONYMIZED_LOGS = False
15 DEFAULT_PREMIUM_SHOULD_SYNC = False
16 DEFAULT_START_DATE = '01/08/2015'
17 DEFAULT_UI_FLOATING_PRECISION = 2
18 DEFAULT_BALANCE_SAVE_FREQUENCY = 24
19 DEFAULT_MAIN_CURRENCY = S_USD
20 DEFAULT_DATE_DISPLAY_FORMAT = '%d/%m/%Y %H:%M:%S %Z'
21 DEFAULT_SUBMIT_USAGE_ANALYTICS = True
22
23
24 class DBSettings(NamedTuple):
25 version: int = ROTKEHLCHEN_DB_VERSION
26 last_write_ts: Timestamp = Timestamp(0)
27 premium_should_sync: bool = DEFAULT_PREMIUM_SHOULD_SYNC
28 include_crypto2crypto: bool = DEFAULT_INCLUDE_CRYPTO2CRYPTO
29 anonymized_logs: bool = DEFAULT_ANONYMIZED_LOGS
30 last_data_upload_ts: Timestamp = Timestamp(0)
31 ui_floating_precision: int = DEFAULT_UI_FLOATING_PRECISION
32 taxfree_after_period: int = DEFAULT_TAXFREE_AFTER_PERIOD
33 balance_save_frequency: int = DEFAULT_BALANCE_SAVE_FREQUENCY
34 include_gas_costs: bool = DEFAULT_INCLUDE_GAS_COSTS
35 historical_data_start: str = DEFAULT_START_DATE
36 eth_rpc_endpoint: str = 'http://localhost:8545'
37 main_currency: FiatAsset = DEFAULT_MAIN_CURRENCY
38 date_display_format: str = DEFAULT_DATE_DISPLAY_FORMAT
39 last_balance_save: Timestamp = Timestamp(0)
40 submit_usage_analytics: bool = DEFAULT_SUBMIT_USAGE_ANALYTICS
41
42
43 def read_boolean(value: Union[str, bool]) -> bool:
44 if isinstance(value, bool):
45 return value
46 elif isinstance(value, str):
47 return str_to_bool(value)
48
49 raise DeserializationError(
50 f'Failed to read a boolean from {value} which is of type {type(value)}',
51 )
52
53
54 def db_settings_from_dict(
55 settings_dict: Dict[str, Any],
56 msg_aggregator: MessagesAggregator,
57 ) -> DBSettings:
58 specified_args: Dict[str, Any] = {}
59 for key, value in settings_dict.items():
60 if key == 'version':
61 specified_args[key] = int(value)
62 elif key == 'historical_data_start':
63 specified_args[key] = str(value)
64 elif key == 'eth_rpc_endpoint':
65 specified_args[key] = str(value)
66 elif key == 'ui_floating_precision':
67 specified_args[key] = int(value)
68 elif key == 'include_crypto2crypto':
69 specified_args[key] = read_boolean(value)
70 elif key == 'taxfree_after_period':
71 specified_args[key] = int(value)
72 elif key == 'balance_save_frequency':
73 specified_args[key] = int(value)
74 elif key == 'main_currency':
75 specified_args[key] = FiatAsset(str(value))
76 elif key == 'anonymized_logs':
77 specified_args[key] = read_boolean(value)
78 elif key == 'include_gas_costs':
79 specified_args[key] = read_boolean(value)
80 elif key == 'date_display_format':
81 specified_args[key] = str(value)
82 elif key == 'premium_should_sync':
83 specified_args[key] = read_boolean(value)
84 elif key == 'last_write_ts':
85 specified_args[key] = Timestamp(int(value))
86 elif key == 'last_data_upload_ts':
87 specified_args[key] = Timestamp(int(value))
88 elif key == 'last_balance_save':
89 specified_args[key] = Timestamp(int(value))
90 elif key == 'submit_usage_analytics':
91 specified_args[key] = read_boolean(value)
92 else:
93 msg_aggregator.add_warning(
94 f'Unknown DB setting {key} given. Ignoring it. Should not '
95 f'happen so please open an issue in Github.',
96 )
97
98 return DBSettings(**specified_args)
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rotkehlchen/db/settings.py b/rotkehlchen/db/settings.py
--- a/rotkehlchen/db/settings.py
+++ b/rotkehlchen/db/settings.py
@@ -68,7 +68,23 @@
elif key == 'include_crypto2crypto':
specified_args[key] = read_boolean(value)
elif key == 'taxfree_after_period':
- specified_args[key] = int(value)
+ # taxfree_after_period can also be None, to signify disabled setting
+ if value is None:
+ specified_args[key] = value
+ else:
+ int_value = int(value)
+ if int_value <= 0:
+ value = None
+ msg_aggregator.add_warning(
+ f'A negative or zero value ({int_value}) for taxfree_after_period '
+ f'ended up in the DB. Setting it to None. Please open an issue in '
+ f'Github: https://github.com/rotki/rotki/issues/new/choose',
+ )
+
+ else:
+ value = int_value
+
+ specified_args[key] = value
elif key == 'balance_save_frequency':
specified_args[key] = int(value)
elif key == 'main_currency':
| {"golden_diff": "diff --git a/rotkehlchen/db/settings.py b/rotkehlchen/db/settings.py\n--- a/rotkehlchen/db/settings.py\n+++ b/rotkehlchen/db/settings.py\n@@ -68,7 +68,23 @@\n elif key == 'include_crypto2crypto':\n specified_args[key] = read_boolean(value)\n elif key == 'taxfree_after_period':\n- specified_args[key] = int(value)\n+ # taxfree_after_period can also be None, to signify disabled setting\n+ if value is None:\n+ specified_args[key] = value\n+ else:\n+ int_value = int(value)\n+ if int_value <= 0:\n+ value = None\n+ msg_aggregator.add_warning(\n+ f'A negative or zero value ({int_value}) for taxfree_after_period '\n+ f'ended up in the DB. Setting it to None. Please open an issue in '\n+ f'Github: https://github.com/rotki/rotki/issues/new/choose',\n+ )\n+\n+ else:\n+ value = int_value\n+\n+ specified_args[key] = value\n elif key == 'balance_save_frequency':\n specified_args[key] = int(value)\n elif key == 'main_currency':\n", "issue": "Sign In Failed - TypeError\nGood evening!\r\n\r\nI'm here on Linux, I've just tried to log in to my Rotki database created with 1.0.4 (using 1.0.5 now). After I type in my password and log in, I get the message\r\n\r\n> **Sign In Failed**\r\n> TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'\r\n\r\nNow when I attempt to go back to 1.0.4 I get\r\n> **Sign In Failed**\r\n> DBUpgradeError: Your database version is newer than the version expected by the executable. Did you perhaps try to revert to an older rotkehlchen version?Please only use the latest version of the software.\r\n\r\nNo big worries, I'm still evaluating the software to see if it can do what I need so there's not a ton of data in there. Just thought you should know. I'll be happy to help with debugging if I can. But kids, so... 
I'll do my best!\n", "before_files": [{"content": "from typing import Any, Dict, NamedTuple, Union\n\nfrom rotkehlchen.constants.assets import S_USD\nfrom rotkehlchen.constants.timing import YEAR_IN_SECONDS\nfrom rotkehlchen.db.utils import str_to_bool\nfrom rotkehlchen.errors import DeserializationError\nfrom rotkehlchen.typing import FiatAsset, Timestamp\nfrom rotkehlchen.user_messages import MessagesAggregator\n\nROTKEHLCHEN_DB_VERSION = 8\nDEFAULT_TAXFREE_AFTER_PERIOD = YEAR_IN_SECONDS\nDEFAULT_INCLUDE_CRYPTO2CRYPTO = True\nDEFAULT_INCLUDE_GAS_COSTS = True\nDEFAULT_ANONYMIZED_LOGS = False\nDEFAULT_PREMIUM_SHOULD_SYNC = False\nDEFAULT_START_DATE = '01/08/2015'\nDEFAULT_UI_FLOATING_PRECISION = 2\nDEFAULT_BALANCE_SAVE_FREQUENCY = 24\nDEFAULT_MAIN_CURRENCY = S_USD\nDEFAULT_DATE_DISPLAY_FORMAT = '%d/%m/%Y %H:%M:%S %Z'\nDEFAULT_SUBMIT_USAGE_ANALYTICS = True\n\n\nclass DBSettings(NamedTuple):\n version: int = ROTKEHLCHEN_DB_VERSION\n last_write_ts: Timestamp = Timestamp(0)\n premium_should_sync: bool = DEFAULT_PREMIUM_SHOULD_SYNC\n include_crypto2crypto: bool = DEFAULT_INCLUDE_CRYPTO2CRYPTO\n anonymized_logs: bool = DEFAULT_ANONYMIZED_LOGS\n last_data_upload_ts: Timestamp = Timestamp(0)\n ui_floating_precision: int = DEFAULT_UI_FLOATING_PRECISION\n taxfree_after_period: int = DEFAULT_TAXFREE_AFTER_PERIOD\n balance_save_frequency: int = DEFAULT_BALANCE_SAVE_FREQUENCY\n include_gas_costs: bool = DEFAULT_INCLUDE_GAS_COSTS\n historical_data_start: str = DEFAULT_START_DATE\n eth_rpc_endpoint: str = 'http://localhost:8545'\n main_currency: FiatAsset = DEFAULT_MAIN_CURRENCY\n date_display_format: str = DEFAULT_DATE_DISPLAY_FORMAT\n last_balance_save: Timestamp = Timestamp(0)\n submit_usage_analytics: bool = DEFAULT_SUBMIT_USAGE_ANALYTICS\n\n\ndef read_boolean(value: Union[str, bool]) -> bool:\n if isinstance(value, bool):\n return value\n elif isinstance(value, str):\n return str_to_bool(value)\n\n raise DeserializationError(\n f'Failed to read a boolean from {value} which is of type {type(value)}',\n )\n\n\ndef db_settings_from_dict(\n settings_dict: Dict[str, Any],\n msg_aggregator: MessagesAggregator,\n) -> DBSettings:\n specified_args: Dict[str, Any] = {}\n for key, value in settings_dict.items():\n if key == 'version':\n specified_args[key] = int(value)\n elif key == 'historical_data_start':\n specified_args[key] = str(value)\n elif key == 'eth_rpc_endpoint':\n specified_args[key] = str(value)\n elif key == 'ui_floating_precision':\n specified_args[key] = int(value)\n elif key == 'include_crypto2crypto':\n specified_args[key] = read_boolean(value)\n elif key == 'taxfree_after_period':\n specified_args[key] = int(value)\n elif key == 'balance_save_frequency':\n specified_args[key] = int(value)\n elif key == 'main_currency':\n specified_args[key] = FiatAsset(str(value))\n elif key == 'anonymized_logs':\n specified_args[key] = read_boolean(value)\n elif key == 'include_gas_costs':\n specified_args[key] = read_boolean(value)\n elif key == 'date_display_format':\n specified_args[key] = str(value)\n elif key == 'premium_should_sync':\n specified_args[key] = read_boolean(value)\n elif key == 'last_write_ts':\n specified_args[key] = Timestamp(int(value))\n elif key == 'last_data_upload_ts':\n specified_args[key] = Timestamp(int(value))\n elif key == 'last_balance_save':\n specified_args[key] = Timestamp(int(value))\n elif key == 'submit_usage_analytics':\n specified_args[key] = read_boolean(value)\n else:\n msg_aggregator.add_warning(\n f'Unknown DB setting {key} given. Ignoring it. 
Should not '\n f'happen so please open an issue in Github.',\n )\n\n return DBSettings(**specified_args)\n", "path": "rotkehlchen/db/settings.py"}], "after_files": [{"content": "from typing import Any, Dict, NamedTuple, Union\n\nfrom rotkehlchen.constants.assets import S_USD\nfrom rotkehlchen.constants.timing import YEAR_IN_SECONDS\nfrom rotkehlchen.db.utils import str_to_bool\nfrom rotkehlchen.errors import DeserializationError\nfrom rotkehlchen.typing import FiatAsset, Timestamp\nfrom rotkehlchen.user_messages import MessagesAggregator\n\nROTKEHLCHEN_DB_VERSION = 8\nDEFAULT_TAXFREE_AFTER_PERIOD = YEAR_IN_SECONDS\nDEFAULT_INCLUDE_CRYPTO2CRYPTO = True\nDEFAULT_INCLUDE_GAS_COSTS = True\nDEFAULT_ANONYMIZED_LOGS = False\nDEFAULT_PREMIUM_SHOULD_SYNC = False\nDEFAULT_START_DATE = '01/08/2015'\nDEFAULT_UI_FLOATING_PRECISION = 2\nDEFAULT_BALANCE_SAVE_FREQUENCY = 24\nDEFAULT_MAIN_CURRENCY = S_USD\nDEFAULT_DATE_DISPLAY_FORMAT = '%d/%m/%Y %H:%M:%S %Z'\nDEFAULT_SUBMIT_USAGE_ANALYTICS = True\n\n\nclass DBSettings(NamedTuple):\n version: int = ROTKEHLCHEN_DB_VERSION\n last_write_ts: Timestamp = Timestamp(0)\n premium_should_sync: bool = DEFAULT_PREMIUM_SHOULD_SYNC\n include_crypto2crypto: bool = DEFAULT_INCLUDE_CRYPTO2CRYPTO\n anonymized_logs: bool = DEFAULT_ANONYMIZED_LOGS\n last_data_upload_ts: Timestamp = Timestamp(0)\n ui_floating_precision: int = DEFAULT_UI_FLOATING_PRECISION\n taxfree_after_period: int = DEFAULT_TAXFREE_AFTER_PERIOD\n balance_save_frequency: int = DEFAULT_BALANCE_SAVE_FREQUENCY\n include_gas_costs: bool = DEFAULT_INCLUDE_GAS_COSTS\n historical_data_start: str = DEFAULT_START_DATE\n eth_rpc_endpoint: str = 'http://localhost:8545'\n main_currency: FiatAsset = DEFAULT_MAIN_CURRENCY\n date_display_format: str = DEFAULT_DATE_DISPLAY_FORMAT\n last_balance_save: Timestamp = Timestamp(0)\n submit_usage_analytics: bool = DEFAULT_SUBMIT_USAGE_ANALYTICS\n\n\ndef read_boolean(value: Union[str, bool]) -> bool:\n if isinstance(value, bool):\n return value\n elif isinstance(value, str):\n return str_to_bool(value)\n\n raise DeserializationError(\n f'Failed to read a boolean from {value} which is of type {type(value)}',\n )\n\n\ndef db_settings_from_dict(\n settings_dict: Dict[str, Any],\n msg_aggregator: MessagesAggregator,\n) -> DBSettings:\n specified_args: Dict[str, Any] = {}\n for key, value in settings_dict.items():\n if key == 'version':\n specified_args[key] = int(value)\n elif key == 'historical_data_start':\n specified_args[key] = str(value)\n elif key == 'eth_rpc_endpoint':\n specified_args[key] = str(value)\n elif key == 'ui_floating_precision':\n specified_args[key] = int(value)\n elif key == 'include_crypto2crypto':\n specified_args[key] = read_boolean(value)\n elif key == 'taxfree_after_period':\n # taxfree_after_period can also be None, to signify disabled setting\n if value is None:\n specified_args[key] = value\n else:\n int_value = int(value)\n if int_value <= 0:\n value = None\n msg_aggregator.add_warning(\n f'A negative or zero value ({int_value}) for taxfree_after_period '\n f'ended up in the DB. Setting it to None. 
Please open an issue in '\n f'Github: https://github.com/rotki/rotki/issues/new/choose',\n )\n\n else:\n value = int_value\n\n specified_args[key] = value\n elif key == 'balance_save_frequency':\n specified_args[key] = int(value)\n elif key == 'main_currency':\n specified_args[key] = FiatAsset(str(value))\n elif key == 'anonymized_logs':\n specified_args[key] = read_boolean(value)\n elif key == 'include_gas_costs':\n specified_args[key] = read_boolean(value)\n elif key == 'date_display_format':\n specified_args[key] = str(value)\n elif key == 'premium_should_sync':\n specified_args[key] = read_boolean(value)\n elif key == 'last_write_ts':\n specified_args[key] = Timestamp(int(value))\n elif key == 'last_data_upload_ts':\n specified_args[key] = Timestamp(int(value))\n elif key == 'last_balance_save':\n specified_args[key] = Timestamp(int(value))\n elif key == 'submit_usage_analytics':\n specified_args[key] = read_boolean(value)\n else:\n msg_aggregator.add_warning(\n f'Unknown DB setting {key} given. Ignoring it. Should not '\n f'happen so please open an issue in Github.',\n )\n\n return DBSettings(**specified_args)\n", "path": "rotkehlchen/db/settings.py"}]} | 1,588 | 276 |
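
The heart of the rotki patch is a deserialization branch that treats `None` (and non-positive stored values) as "setting disabled" instead of passing `None` straight to `int()`, which is what raised the reported `TypeError`. Extracted as a standalone helper for illustration; the function name and warning mechanism below are assumptions, not rotki's API:

```python
import warnings


def deserialize_taxfree_after_period(value):
    """Return the stored period as an int, or None when the setting is disabled."""
    if value is None:
        # The old code called int(value) unconditionally -- int(None) is the
        # reported "Sign In Failed" TypeError.
        return None
    int_value = int(value)
    if int_value <= 0:
        warnings.warn(
            "non-positive taxfree_after_period ({}) found in the DB; "
            "treating it as disabled".format(int_value)
        )
        return None
    return int_value


assert deserialize_taxfree_after_period(None) is None
assert deserialize_taxfree_after_period("31536000") == 31536000
assert deserialize_taxfree_after_period(-1) is None
```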
gh_patches_debug_27214 | rasdani/github-patches | git_diff | mdn__kuma-6134 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove contributor notification post account creation
Once a user has successfully signed up, we show a banner similar to the one below either just below the page header, or generally at the top of the page.
[screenshot of the post-signup notification banner]
Because of the changes to account roles, these no longer make sense and should be removed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/users/signal_handlers.py`
Content:
```
1 from allauth.account.signals import email_confirmed, user_signed_up
2 from allauth.socialaccount.signals import social_account_removed
3 from django.contrib import messages
4 from django.core.exceptions import ObjectDoesNotExist
5 from django.db import transaction
6 from django.db.models.signals import post_delete, post_save, pre_delete
7 from django.dispatch import receiver
8 from django.utils.translation import ugettext_lazy as _
9 from waffle import switch_is_active
10
11 from kuma.core.urlresolvers import reverse
12 from kuma.payments.utils import cancel_stripe_customer_subscription
13 from kuma.wiki.jobs import DocumentContributorsJob
14
15 from .models import User, UserBan
16 from .tasks import send_welcome_email
17
18
19 @receiver(user_signed_up, dispatch_uid='users.user_signed_up')
20 def on_user_signed_up(sender, request, user, **kwargs):
21 """
22 Signal handler to be called when a given user has signed up.
23 """
24 url = reverse('wiki.document', args=['MDN/Getting_started'])
25 msg = _('You have completed the first step of '
26 '<a href="%s">getting started with MDN</a>') % url
27 messages.success(request, msg)
28 if switch_is_active('welcome_email'):
29 # only send if the user has already verified
30 # at least one email address
31 if user.emailaddress_set.filter(verified=True).exists():
32 transaction.on_commit(
33 lambda: send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)
34 )
35
36
37 @receiver(email_confirmed, dispatch_uid='users.email_confirmed')
38 def on_email_confirmed(sender, request, email_address, **kwargs):
39 """
40 Signal handler to be called when a given email address was confirmed
41 by a user.
42 """
43 if switch_is_active('welcome_email'):
44 # only send if the user has exactly one verified (the given)
45 # email address, in other words if it was just confirmed
46 user = email_address.user
47 previous_emails = user.emailaddress_set.exclude(pk=email_address.pk)
48 if not previous_emails.exists():
49 transaction.on_commit(
50 lambda: send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)
51 )
52
53
54 @receiver(social_account_removed, dispatch_uid='users.social_account_removed')
55 def on_social_account_removed(sender, request, socialaccount, **kwargs):
56 """
57 Invoked just after a user successfully removed a social account
58
59 We use it to reset the name of the socialaccount provider in
60 the user's session to one that he also has.
61 """
62 user = socialaccount.user
63 try:
64 all_socialaccounts = user.socialaccount_set.all()
65 next_socialaccount = all_socialaccounts[0]
66 request.session['sociallogin_provider'] = next_socialaccount.provider
67 request.session.modified = True
68 except (ObjectDoesNotExist, IndexError):
69 pass
70
71
72 @receiver(post_save, sender=UserBan, dispatch_uid='users.user_ban.save')
73 def on_ban_save(sender, instance, **kwargs):
74 """
75 Signal handler to be called when a given user ban is saved.
76 """
77 user = instance.user
78 user.is_active = not instance.is_active
79 user.save()
80 invalidate_document_contribution(user)
81
82
83 @receiver(post_delete, sender=UserBan, dispatch_uid='users.user_ban.delete')
84 def on_ban_delete(sender, instance, **kwargs):
85 """
86 Signal handler to be called when a user ban is deleted.
87 """
88 user = instance.user
89 user.is_active = True
90 user.save()
91 invalidate_document_contribution(user)
92
93
94 def invalidate_document_contribution(user):
95 """
96 Invalidate the contributor list for Documents the user has edited.
97
98 This will remove them if they have been banned, and add them if they
99 have been unbanned.
100 """
101 revisions = user.created_revisions
102 doc_ids = set(revisions.values_list('document_id', flat=True))
103 job = DocumentContributorsJob()
104 for doc_id in doc_ids:
105 job.invalidate(doc_id)
106
107
108 @receiver(pre_delete, sender=User, dispatch_uid='users.unsubscribe_payments')
109 def unsubscribe_payments_on_user_delete(sender, instance, **kwargs):
110 """Cancel Stripe subscriptions before deleting User."""
111 user = instance
112 if user.stripe_customer_id:
113 # This may raise an exception if the Stripe API call fails.
114 # This will stop User deletion while an admin investigates.
115 cancel_stripe_customer_subscription(user.stripe_customer_id)
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/users/signal_handlers.py b/kuma/users/signal_handlers.py
--- a/kuma/users/signal_handlers.py
+++ b/kuma/users/signal_handlers.py
@@ -1,14 +1,11 @@
from allauth.account.signals import email_confirmed, user_signed_up
from allauth.socialaccount.signals import social_account_removed
-from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.db.models.signals import post_delete, post_save, pre_delete
from django.dispatch import receiver
-from django.utils.translation import ugettext_lazy as _
from waffle import switch_is_active
-from kuma.core.urlresolvers import reverse
from kuma.payments.utils import cancel_stripe_customer_subscription
from kuma.wiki.jobs import DocumentContributorsJob
@@ -21,10 +18,6 @@
"""
Signal handler to be called when a given user has signed up.
"""
- url = reverse('wiki.document', args=['MDN/Getting_started'])
- msg = _('You have completed the first step of '
- '<a href="%s">getting started with MDN</a>') % url
- messages.success(request, msg)
if switch_is_active('welcome_email'):
# only send if the user has already verified
# at least one email address
| {"golden_diff": "diff --git a/kuma/users/signal_handlers.py b/kuma/users/signal_handlers.py\n--- a/kuma/users/signal_handlers.py\n+++ b/kuma/users/signal_handlers.py\n@@ -1,14 +1,11 @@\n from allauth.account.signals import email_confirmed, user_signed_up\n from allauth.socialaccount.signals import social_account_removed\n-from django.contrib import messages\n from django.core.exceptions import ObjectDoesNotExist\n from django.db import transaction\n from django.db.models.signals import post_delete, post_save, pre_delete\n from django.dispatch import receiver\n-from django.utils.translation import ugettext_lazy as _\n from waffle import switch_is_active\n \n-from kuma.core.urlresolvers import reverse\n from kuma.payments.utils import cancel_stripe_customer_subscription\n from kuma.wiki.jobs import DocumentContributorsJob\n \n@@ -21,10 +18,6 @@\n \"\"\"\n Signal handler to be called when a given user has signed up.\n \"\"\"\n- url = reverse('wiki.document', args=['MDN/Getting_started'])\n- msg = _('You have completed the first step of '\n- '<a href=\"%s\">getting started with MDN</a>') % url\n- messages.success(request, msg)\n if switch_is_active('welcome_email'):\n # only send if the user has already verified\n # at least one email address\n", "issue": "Remove contributor notification post account creation\nOnce a user has successfully signed up, we show a banner similar to the one below either just below the page header, or generally at the top of the page.\r\n\r\n\r\n\r\n\r\nBecause of the changes to account roles, these no longer makes sense and should be removed.\n", "before_files": [{"content": "from allauth.account.signals import email_confirmed, user_signed_up\nfrom allauth.socialaccount.signals import social_account_removed\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import transaction\nfrom django.db.models.signals import post_delete, post_save, pre_delete\nfrom django.dispatch import receiver\nfrom django.utils.translation import ugettext_lazy as _\nfrom waffle import switch_is_active\n\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.payments.utils import cancel_stripe_customer_subscription\nfrom kuma.wiki.jobs import DocumentContributorsJob\n\nfrom .models import User, UserBan\nfrom .tasks import send_welcome_email\n\n\n@receiver(user_signed_up, dispatch_uid='users.user_signed_up')\ndef on_user_signed_up(sender, request, user, **kwargs):\n \"\"\"\n Signal handler to be called when a given user has signed up.\n \"\"\"\n url = reverse('wiki.document', args=['MDN/Getting_started'])\n msg = _('You have completed the first step of '\n '<a href=\"%s\">getting started with MDN</a>') % url\n messages.success(request, msg)\n if switch_is_active('welcome_email'):\n # only send if the user has already verified\n # at least one email address\n if user.emailaddress_set.filter(verified=True).exists():\n transaction.on_commit(\n lambda: send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)\n )\n\n\n@receiver(email_confirmed, dispatch_uid='users.email_confirmed')\ndef on_email_confirmed(sender, request, email_address, **kwargs):\n \"\"\"\n Signal handler to be called when a given email address was confirmed\n by a user.\n \"\"\"\n if switch_is_active('welcome_email'):\n # only send if the user has exactly one verified (the given)\n # email address, in other words if it was just confirmed\n user = email_address.user\n previous_emails = user.emailaddress_set.exclude(pk=email_address.pk)\n if not previous_emails.exists():\n 
transaction.on_commit(\n lambda: send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)\n )\n\n\n@receiver(social_account_removed, dispatch_uid='users.social_account_removed')\ndef on_social_account_removed(sender, request, socialaccount, **kwargs):\n \"\"\"\n Invoked just after a user successfully removed a social account\n\n We use it to reset the name of the socialaccount provider in\n the user's session to one that he also has.\n \"\"\"\n user = socialaccount.user\n try:\n all_socialaccounts = user.socialaccount_set.all()\n next_socialaccount = all_socialaccounts[0]\n request.session['sociallogin_provider'] = next_socialaccount.provider\n request.session.modified = True\n except (ObjectDoesNotExist, IndexError):\n pass\n\n\n@receiver(post_save, sender=UserBan, dispatch_uid='users.user_ban.save')\ndef on_ban_save(sender, instance, **kwargs):\n \"\"\"\n Signal handler to be called when a given user ban is saved.\n \"\"\"\n user = instance.user\n user.is_active = not instance.is_active\n user.save()\n invalidate_document_contribution(user)\n\n\n@receiver(post_delete, sender=UserBan, dispatch_uid='users.user_ban.delete')\ndef on_ban_delete(sender, instance, **kwargs):\n \"\"\"\n Signal handler to be called when a user ban is deleted.\n \"\"\"\n user = instance.user\n user.is_active = True\n user.save()\n invalidate_document_contribution(user)\n\n\ndef invalidate_document_contribution(user):\n \"\"\"\n Invalidate the contributor list for Documents the user has edited.\n\n This will remove them if they have been banned, and add them if they\n have been unbanned.\n \"\"\"\n revisions = user.created_revisions\n doc_ids = set(revisions.values_list('document_id', flat=True))\n job = DocumentContributorsJob()\n for doc_id in doc_ids:\n job.invalidate(doc_id)\n\n\n@receiver(pre_delete, sender=User, dispatch_uid='users.unsubscribe_payments')\ndef unsubscribe_payments_on_user_delete(sender, instance, **kwargs):\n \"\"\"Cancel Stripe subscriptions before deleting User.\"\"\"\n user = instance\n if user.stripe_customer_id:\n # This may raise an exception if the Stripe API call fails.\n # This will stop User deletion while an admin investigates.\n cancel_stripe_customer_subscription(user.stripe_customer_id)\n", "path": "kuma/users/signal_handlers.py"}], "after_files": [{"content": "from allauth.account.signals import email_confirmed, user_signed_up\nfrom allauth.socialaccount.signals import social_account_removed\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import transaction\nfrom django.db.models.signals import post_delete, post_save, pre_delete\nfrom django.dispatch import receiver\nfrom waffle import switch_is_active\n\nfrom kuma.payments.utils import cancel_stripe_customer_subscription\nfrom kuma.wiki.jobs import DocumentContributorsJob\n\nfrom .models import User, UserBan\nfrom .tasks import send_welcome_email\n\n\n@receiver(user_signed_up, dispatch_uid='users.user_signed_up')\ndef on_user_signed_up(sender, request, user, **kwargs):\n \"\"\"\n Signal handler to be called when a given user has signed up.\n \"\"\"\n if switch_is_active('welcome_email'):\n # only send if the user has already verified\n # at least one email address\n if user.emailaddress_set.filter(verified=True).exists():\n transaction.on_commit(\n lambda: send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)\n )\n\n\n@receiver(email_confirmed, dispatch_uid='users.email_confirmed')\ndef on_email_confirmed(sender, request, email_address, **kwargs):\n \"\"\"\n Signal handler to be called when a given 
email address was confirmed\n by a user.\n \"\"\"\n if switch_is_active('welcome_email'):\n # only send if the user has exactly one verified (the given)\n # email address, in other words if it was just confirmed\n user = email_address.user\n previous_emails = user.emailaddress_set.exclude(pk=email_address.pk)\n if not previous_emails.exists():\n transaction.on_commit(\n lambda: send_welcome_email.delay(user.pk, request.LANGUAGE_CODE)\n )\n\n\n@receiver(social_account_removed, dispatch_uid='users.social_account_removed')\ndef on_social_account_removed(sender, request, socialaccount, **kwargs):\n \"\"\"\n Invoked just after a user successfully removed a social account\n\n We use it to reset the name of the socialaccount provider in\n the user's session to one that he also has.\n \"\"\"\n user = socialaccount.user\n try:\n all_socialaccounts = user.socialaccount_set.all()\n next_socialaccount = all_socialaccounts[0]\n request.session['sociallogin_provider'] = next_socialaccount.provider\n request.session.modified = True\n except (ObjectDoesNotExist, IndexError):\n pass\n\n\n@receiver(post_save, sender=UserBan, dispatch_uid='users.user_ban.save')\ndef on_ban_save(sender, instance, **kwargs):\n \"\"\"\n Signal handler to be called when a given user ban is saved.\n \"\"\"\n user = instance.user\n user.is_active = not instance.is_active\n user.save()\n invalidate_document_contribution(user)\n\n\n@receiver(post_delete, sender=UserBan, dispatch_uid='users.user_ban.delete')\ndef on_ban_delete(sender, instance, **kwargs):\n \"\"\"\n Signal handler to be called when a user ban is deleted.\n \"\"\"\n user = instance.user\n user.is_active = True\n user.save()\n invalidate_document_contribution(user)\n\n\ndef invalidate_document_contribution(user):\n \"\"\"\n Invalidate the contributor list for Documents the user has edited.\n\n This will remove them if they have been banned, and add them if they\n have been unbanned.\n \"\"\"\n revisions = user.created_revisions\n doc_ids = set(revisions.values_list('document_id', flat=True))\n job = DocumentContributorsJob()\n for doc_id in doc_ids:\n job.invalidate(doc_id)\n\n\n@receiver(pre_delete, sender=User, dispatch_uid='users.unsubscribe_payments')\ndef unsubscribe_payments_on_user_delete(sender, instance, **kwargs):\n \"\"\"Cancel Stripe subscriptions before deleting User.\"\"\"\n user = instance\n if user.stripe_customer_id:\n # This may raise an exception if the Stripe API call fails.\n # This will stop User deletion while an admin investigates.\n cancel_stripe_customer_subscription(user.stripe_customer_id)\n", "path": "kuma/users/signal_handlers.py"}]} | 1,577 | 288 |
gh_patches_debug_11004 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-py-1841 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Alpha channel and grayscale in image-to-text with -image_channel_size=3
For training image-to-text, the argument `-image_channel_size=3` implies that the images already have the correct number of channels. However, some of my images are black and white and saved with only one channel, or saved in RGB but with an alpha channel.
I could fix it with a change in `onmt/inputters/image_dataset.py` [here](https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/inputters/image_dataset.py#L78):
from this:
```
if self.channel_size == 1:
img = transforms.ToTensor()(
Image.fromarray(cv2.imread(img_path, 0)))
else:
img = transforms.ToTensor()(Image.open(img_path))
```
to this:
```
if self.channel_size == 1:
img = transforms.ToTensor()(
Image.fromarray(cv2.imread(img_path, 0)))
else:
img = transforms.ToTensor()(
Image.fromarray(cv2.imread(img_path, 1)))
```
The flag value 1 (`cv2.IMREAD_COLOR`) tells `cv2.imread` to decode the image with three channels no matter what the original image is. (Note that OpenCV returns channels in BGR order, so a colour source would still need `cv2.cvtColor(..., cv2.COLOR_BGR2RGB)` before `Image.fromarray` to keep red and blue from being swapped.)
Should I open a PR?
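For comparison, here is a minimal PIL-only sketch that sidesteps OpenCV's channel ordering entirely; the helper name `load_rgb_tensor` is illustrative and not from the repository:
```python
from PIL import Image
from torchvision import transforms

def load_rgb_tensor(img_path):
    # convert("RGB") drops an alpha channel and expands single-channel
    # (grayscale or palette) images, so the result is always 3 x H x W.
    img = Image.open(img_path).convert("RGB")
    return transforms.ToTensor()(img)
```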
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `onmt/inputters/image_dataset.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import os
4
5 import torch
6 from torchtext.data import Field
7
8 from onmt.inputters.datareader_base import DataReaderBase
9
10 # domain specific dependencies
11 try:
12 from PIL import Image
13 from torchvision import transforms
14 import cv2
15 except ImportError:
16 Image, transforms, cv2 = None, None, None
17
18
19 class ImageDataReader(DataReaderBase):
20 """Read image data from disk.
21
22 Args:
23 truncate (tuple[int] or NoneType): maximum img size. Use
24 ``(0,0)`` or ``None`` for unlimited.
25 channel_size (int): Number of channels per image.
26
27 Raises:
28 onmt.inputters.datareader_base.MissingDependencyException: If
29 importing any of ``PIL``, ``torchvision``, or ``cv2`` fail.
30 """
31
32 def __init__(self, truncate=None, channel_size=3):
33 self._check_deps()
34 self.truncate = truncate
35 self.channel_size = channel_size
36
37 @classmethod
38 def from_opt(cls, opt):
39 return cls(channel_size=opt.image_channel_size)
40
41 @classmethod
42 def _check_deps(cls):
43 if any([Image is None, transforms is None, cv2 is None]):
44 cls._raise_missing_dep(
45 "PIL", "torchvision", "cv2")
46
47 def read(self, images, side, img_dir=None):
48 """Read data into dicts.
49
50 Args:
51 images (str or Iterable[str]): Sequence of image paths or
52         path to a file containing image paths.
53 In either case, the filenames may be relative to ``src_dir``
54 (default behavior) or absolute.
55 side (str): Prefix used in return dict. Usually
56 ``"src"`` or ``"tgt"``.
57 img_dir (str): Location of source image files. See ``images``.
58
59 Yields:
60 a dictionary containing image data, path and index for each line.
61 """
62 if isinstance(images, str):
63 images = DataReaderBase._read_file(images)
64
65 for i, filename in enumerate(images):
66 filename = filename.decode("utf-8").strip()
67 img_path = os.path.join(img_dir, filename)
68 if not os.path.exists(img_path):
69 img_path = filename
70
71 assert os.path.exists(img_path), \
72 'img path %s not found' % filename
73
74 if self.channel_size == 1:
75 img = transforms.ToTensor()(
76 Image.fromarray(cv2.imread(img_path, 0)))
77 else:
78 img = transforms.ToTensor()(Image.open(img_path))
79 if self.truncate and self.truncate != (0, 0):
80 if not (img.size(1) <= self.truncate[0]
81 and img.size(2) <= self.truncate[1]):
82 continue
83 yield {side: img, side + '_path': filename, 'indices': i}
84
85
86 def img_sort_key(ex):
87 """Sort using the size of the image: (width, height)."""
88 return ex.src.size(2), ex.src.size(1)
89
90
91 def batch_img(data, vocab):
92 """Pad and batch a sequence of images."""
93 c = data[0].size(0)
94 h = max([t.size(1) for t in data])
95 w = max([t.size(2) for t in data])
96 imgs = torch.zeros(len(data), c, h, w).fill_(1)
97 for i, img in enumerate(data):
98 imgs[i, :, 0:img.size(1), 0:img.size(2)] = img
99 return imgs
100
101
102 def image_fields(**kwargs):
103 img = Field(
104 use_vocab=False, dtype=torch.float,
105 postprocessing=batch_img, sequential=False)
106 return img
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/onmt/inputters/image_dataset.py b/onmt/inputters/image_dataset.py
--- a/onmt/inputters/image_dataset.py
+++ b/onmt/inputters/image_dataset.py
@@ -75,7 +75,8 @@
img = transforms.ToTensor()(
Image.fromarray(cv2.imread(img_path, 0)))
else:
- img = transforms.ToTensor()(Image.open(img_path))
+ img = Image.open(img_path).convert('RGB')
+ img = transforms.ToTensor()(img)
if self.truncate and self.truncate != (0, 0):
if not (img.size(1) <= self.truncate[0]
and img.size(2) <= self.truncate[1]):
| {"golden_diff": "diff --git a/onmt/inputters/image_dataset.py b/onmt/inputters/image_dataset.py\n--- a/onmt/inputters/image_dataset.py\n+++ b/onmt/inputters/image_dataset.py\n@@ -75,7 +75,8 @@\n img = transforms.ToTensor()(\n Image.fromarray(cv2.imread(img_path, 0)))\n else:\n- img = transforms.ToTensor()(Image.open(img_path))\n+ img = Image.open(img_path).convert('RGB')\n+ img = transforms.ToTensor()(img)\n if self.truncate and self.truncate != (0, 0):\n if not (img.size(1) <= self.truncate[0]\n and img.size(2) <= self.truncate[1]):\n", "issue": "Alpha channel and grayscale in image-to-text with -image_channel_size=3\nFor training image to text, the argument `-image_channel_size=3` imply that the images already have the good number of channel. However, some of my images are black and white and saved with only one channel or saved in RGB but with the alpha channel.\r\nI could fix it with a change in `onmt/inputters/image_dataset.py` [here](https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/inputters/image_dataset.py#L78):\r\n\r\nfrom this:\r\n```\r\n if self.channel_size == 1:\r\n img = transforms.ToTensor()(\r\n Image.fromarray(cv2.imread(img_path, 0)))\r\n else:\r\n img = transforms.ToTensor()(Image.open(img_path))\r\n```\r\nto this:\r\n```\r\n if self.channel_size == 1:\r\n img = transforms.ToTensor()(\r\n Image.fromarray(cv2.imread(img_path, 0)))\r\n else:\r\n img = transforms.ToTensor()(\r\n Image.fromarray(cv2.imread(img_path, 1)))\r\n```\r\nThe flag in `cv2.imread` with value of 1 tell cv2 to convert to RGB no matter what the original image is.\r\n\r\nShould I do a PR ?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\n\nimport torch\nfrom torchtext.data import Field\n\nfrom onmt.inputters.datareader_base import DataReaderBase\n\n# domain specific dependencies\ntry:\n from PIL import Image\n from torchvision import transforms\n import cv2\nexcept ImportError:\n Image, transforms, cv2 = None, None, None\n\n\nclass ImageDataReader(DataReaderBase):\n \"\"\"Read image data from disk.\n\n Args:\n truncate (tuple[int] or NoneType): maximum img size. Use\n ``(0,0)`` or ``None`` for unlimited.\n channel_size (int): Number of channels per image.\n\n Raises:\n onmt.inputters.datareader_base.MissingDependencyException: If\n importing any of ``PIL``, ``torchvision``, or ``cv2`` fail.\n \"\"\"\n\n def __init__(self, truncate=None, channel_size=3):\n self._check_deps()\n self.truncate = truncate\n self.channel_size = channel_size\n\n @classmethod\n def from_opt(cls, opt):\n return cls(channel_size=opt.image_channel_size)\n\n @classmethod\n def _check_deps(cls):\n if any([Image is None, transforms is None, cv2 is None]):\n cls._raise_missing_dep(\n \"PIL\", \"torchvision\", \"cv2\")\n\n def read(self, images, side, img_dir=None):\n \"\"\"Read data into dicts.\n\n Args:\n images (str or Iterable[str]): Sequence of image paths or\n path to file containing audio paths.\n In either case, the filenames may be relative to ``src_dir``\n (default behavior) or absolute.\n side (str): Prefix used in return dict. Usually\n ``\"src\"`` or ``\"tgt\"``.\n img_dir (str): Location of source image files. 
See ``images``.\n\n Yields:\n a dictionary containing image data, path and index for each line.\n \"\"\"\n if isinstance(images, str):\n images = DataReaderBase._read_file(images)\n\n for i, filename in enumerate(images):\n filename = filename.decode(\"utf-8\").strip()\n img_path = os.path.join(img_dir, filename)\n if not os.path.exists(img_path):\n img_path = filename\n\n assert os.path.exists(img_path), \\\n 'img path %s not found' % filename\n\n if self.channel_size == 1:\n img = transforms.ToTensor()(\n Image.fromarray(cv2.imread(img_path, 0)))\n else:\n img = transforms.ToTensor()(Image.open(img_path))\n if self.truncate and self.truncate != (0, 0):\n if not (img.size(1) <= self.truncate[0]\n and img.size(2) <= self.truncate[1]):\n continue\n yield {side: img, side + '_path': filename, 'indices': i}\n\n\ndef img_sort_key(ex):\n \"\"\"Sort using the size of the image: (width, height).\"\"\"\n return ex.src.size(2), ex.src.size(1)\n\n\ndef batch_img(data, vocab):\n \"\"\"Pad and batch a sequence of images.\"\"\"\n c = data[0].size(0)\n h = max([t.size(1) for t in data])\n w = max([t.size(2) for t in data])\n imgs = torch.zeros(len(data), c, h, w).fill_(1)\n for i, img in enumerate(data):\n imgs[i, :, 0:img.size(1), 0:img.size(2)] = img\n return imgs\n\n\ndef image_fields(**kwargs):\n img = Field(\n use_vocab=False, dtype=torch.float,\n postprocessing=batch_img, sequential=False)\n return img\n", "path": "onmt/inputters/image_dataset.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\n\nimport torch\nfrom torchtext.data import Field\n\nfrom onmt.inputters.datareader_base import DataReaderBase\n\n# domain specific dependencies\ntry:\n from PIL import Image\n from torchvision import transforms\n import cv2\nexcept ImportError:\n Image, transforms, cv2 = None, None, None\n\n\nclass ImageDataReader(DataReaderBase):\n \"\"\"Read image data from disk.\n\n Args:\n truncate (tuple[int] or NoneType): maximum img size. Use\n ``(0,0)`` or ``None`` for unlimited.\n channel_size (int): Number of channels per image.\n\n Raises:\n onmt.inputters.datareader_base.MissingDependencyException: If\n importing any of ``PIL``, ``torchvision``, or ``cv2`` fail.\n \"\"\"\n\n def __init__(self, truncate=None, channel_size=3):\n self._check_deps()\n self.truncate = truncate\n self.channel_size = channel_size\n\n @classmethod\n def from_opt(cls, opt):\n return cls(channel_size=opt.image_channel_size)\n\n @classmethod\n def _check_deps(cls):\n if any([Image is None, transforms is None, cv2 is None]):\n cls._raise_missing_dep(\n \"PIL\", \"torchvision\", \"cv2\")\n\n def read(self, images, side, img_dir=None):\n \"\"\"Read data into dicts.\n\n Args:\n images (str or Iterable[str]): Sequence of image paths or\n path to file containing audio paths.\n In either case, the filenames may be relative to ``src_dir``\n (default behavior) or absolute.\n side (str): Prefix used in return dict. Usually\n ``\"src\"`` or ``\"tgt\"``.\n img_dir (str): Location of source image files. 
See ``images``.\n\n Yields:\n a dictionary containing image data, path and index for each line.\n \"\"\"\n if isinstance(images, str):\n images = DataReaderBase._read_file(images)\n\n for i, filename in enumerate(images):\n filename = filename.decode(\"utf-8\").strip()\n img_path = os.path.join(img_dir, filename)\n if not os.path.exists(img_path):\n img_path = filename\n\n assert os.path.exists(img_path), \\\n 'img path %s not found' % filename\n\n if self.channel_size == 1:\n img = transforms.ToTensor()(\n Image.fromarray(cv2.imread(img_path, 0)))\n else:\n img = Image.open(img_path).convert('RGB')\n img = transforms.ToTensor()(img)\n if self.truncate and self.truncate != (0, 0):\n if not (img.size(1) <= self.truncate[0]\n and img.size(2) <= self.truncate[1]):\n continue\n yield {side: img, side + '_path': filename, 'indices': i}\n\n\ndef img_sort_key(ex):\n \"\"\"Sort using the size of the image: (width, height).\"\"\"\n return ex.src.size(2), ex.src.size(1)\n\n\ndef batch_img(data, vocab):\n \"\"\"Pad and batch a sequence of images.\"\"\"\n c = data[0].size(0)\n h = max([t.size(1) for t in data])\n w = max([t.size(2) for t in data])\n imgs = torch.zeros(len(data), c, h, w).fill_(1)\n for i, img in enumerate(data):\n imgs[i, :, 0:img.size(1), 0:img.size(2)] = img\n return imgs\n\n\ndef image_fields(**kwargs):\n img = Field(\n use_vocab=False, dtype=torch.float,\n postprocessing=batch_img, sequential=False)\n return img\n", "path": "onmt/inputters/image_dataset.py"}]} | 1,553 | 158 |
gh_patches_debug_39793 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2965 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider tijuanaflats is broken
During the global build at 2021-05-26-14-42-23, spider **tijuanaflats** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tijuanaflats.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tijuanaflats.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tijuanaflats.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/tijuanaflats.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import re
4
5 from locations.items import GeojsonPointItem
6
7
8 class TijuanaFlatsSpider(scrapy.Spider):
9 name = "tijuanaflats"
10 item_attributes = { 'brand': "Tijuana Flats" }
11 allowed_domains = ['tijuanaflats.com']
12 start_urls = (
13 'https://tijuanaflats.com/wpsl_stores-sitemap.xml',
14 )
15
16 def parse(self, response):
17 response.selector.remove_namespaces()
18 city_urls = response.xpath('//url/loc/text()').extract()
19 for path in city_urls:
20 yield scrapy.Request(
21 path.strip(),
22 callback=self.parse_store,
23 )
24
25 def parse_store(self, response):
26
27 if response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract():
28 storeHours = str(response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract())
29 storeHours = storeHours.replace('[','').replace(']','').replace("'",'').replace(',',' - ')
30 else:
31 storeHours = response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract()
32
33
34 properties = {
35 'name': response.xpath('//h1[@class="entry-title"]/text()').extract_first(),
36 'website': response.request.url,
37 'ref': response.xpath('//h1[@class="entry-title"]/text()').extract_first(),
38 'addr_full': response.xpath('//div[@class="wpsl-location-address"]/span[1]/text()').extract_first() + " " + response.xpath('//div[@class="wpsl-location-address"]/span[2]/text()').extract_first(),
39 'city': response.xpath('//div[@class="wpsl-location-address"]/span[3]/text()').extract_first().rstrip(', '),
40 'state': response.xpath('//div[@class="wpsl-location-address"]/span[4]/text()').extract_first().strip(),
41 'postcode': response.xpath('//div[@class="wpsl-location-address"]/span[5]/text()').extract_first().strip(),
42 'opening_hours': storeHours,
43 'lat': float(response.xpath('//script/text()').extract()[-3].split('"lat":"')[1].split('"')[0]),
44 'lon': float(response.xpath('//script/text()').extract()[-3].split('"lng":"')[1].split('"')[0]),
45 }
46
47 yield GeojsonPointItem(**properties)
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/tijuanaflats.py b/locations/spiders/tijuanaflats.py
--- a/locations/spiders/tijuanaflats.py
+++ b/locations/spiders/tijuanaflats.py
@@ -1,47 +1,45 @@
# -*- coding: utf-8 -*-
+import json
+
import scrapy
-import re
from locations.items import GeojsonPointItem
class TijuanaFlatsSpider(scrapy.Spider):
name = "tijuanaflats"
- item_attributes = { 'brand': "Tijuana Flats" }
- allowed_domains = ['tijuanaflats.com']
- start_urls = (
- 'https://tijuanaflats.com/wpsl_stores-sitemap.xml',
- )
+ item_attributes = {"brand": "Tijuana Flats", "brand_wikidata": "Q7801833"}
+ allowed_domains = ["tijuanaflats.com"]
+ start_urls = ("https://www.tijuanaflats.com/locations",)
def parse(self, response):
- response.selector.remove_namespaces()
- city_urls = response.xpath('//url/loc/text()').extract()
- for path in city_urls:
- yield scrapy.Request(
- path.strip(),
- callback=self.parse_store,
+ data = json.loads(
+ response.xpath(
+ '//tjs-view-locations/attribute::*[name()=":locations"]'
+ ).extract_first()
+ )
+ for row in data:
+ for ent in row["yoast_json_ld"][0]["@graph"]:
+ if ent["@type"] == "WebPage" and row["slug"] in ent["url"]:
+ name = ent["name"]
+
+ # extract text from html snippet
+ hours_of_operation = scrapy.Selector(text=row["acf"]["hours_of_operation"])
+ opening_hours = "; ".join(
+ a.strip() for a in hours_of_operation.xpath("//text()").extract()
)
- def parse_store(self, response):
-
- if response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract():
- storeHours = str(response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract())
- storeHours = storeHours.replace('[','').replace(']','').replace("'",'').replace(',',' - ')
- else:
- storeHours = response.xpath('//table[@class="wpsl-opening-hours"]/tr').extract()
-
-
- properties = {
- 'name': response.xpath('//h1[@class="entry-title"]/text()').extract_first(),
- 'website': response.request.url,
- 'ref': response.xpath('//h1[@class="entry-title"]/text()').extract_first(),
- 'addr_full': response.xpath('//div[@class="wpsl-location-address"]/span[1]/text()').extract_first() + " " + response.xpath('//div[@class="wpsl-location-address"]/span[2]/text()').extract_first(),
- 'city': response.xpath('//div[@class="wpsl-location-address"]/span[3]/text()').extract_first().rstrip(', '),
- 'state': response.xpath('//div[@class="wpsl-location-address"]/span[4]/text()').extract_first().strip(),
- 'postcode': response.xpath('//div[@class="wpsl-location-address"]/span[5]/text()').extract_first().strip(),
- 'opening_hours': storeHours,
- 'lat': float(response.xpath('//script/text()').extract()[-3].split('"lat":"')[1].split('"')[0]),
- 'lon': float(response.xpath('//script/text()').extract()[-3].split('"lng":"')[1].split('"')[0]),
- }
-
- yield GeojsonPointItem(**properties)
\ No newline at end of file
+ properties = {
+ "ref": row["slug"],
+ "name": name,
+ "lat": row["acf"]["physical_location"]["lat"],
+ "lon": row["acf"]["physical_location"]["lng"],
+ "addr_full": row["acf"]["address_1"],
+ "city": row["acf"]["city"],
+ "state": row["acf"]["state"],
+ "postcode": row["acf"]["zip"],
+ "phone": row["acf"]["contact_phone"],
+ "website": f'https://www.tijuanaflats.com/locations/{row["slug"]}',
+ "opening_hours": opening_hours,
+ }
+ yield GeojsonPointItem(**properties)
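Editorial note: the rewritten `parse` pulls a JSON payload out of a Vue-style `:locations` attribute. A minimal self-contained sketch of that extraction pattern follows; the HTML sample is invented for illustration.
```python
import json

import scrapy

html = '<tjs-view-locations :locations=\'[{"slug": "demo-store", "acf": {"city": "Orlando"}}]\'></tjs-view-locations>'
sel = scrapy.Selector(text=html)
# The attribute::*[name()=":locations"] form is needed because ":locations"
# is not a valid XPath QName and cannot be addressed as @:locations.
raw = sel.xpath('//tjs-view-locations/attribute::*[name()=":locations"]').get()
locations = json.loads(raw)
print(locations[0]["slug"])  # demo-store
```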
| {"golden_diff": "diff --git a/locations/spiders/tijuanaflats.py b/locations/spiders/tijuanaflats.py\n--- a/locations/spiders/tijuanaflats.py\n+++ b/locations/spiders/tijuanaflats.py\n@@ -1,47 +1,45 @@\n # -*- coding: utf-8 -*-\n+import json\n+\n import scrapy\n-import re\n \n from locations.items import GeojsonPointItem\n \n \n class TijuanaFlatsSpider(scrapy.Spider):\n name = \"tijuanaflats\"\n- item_attributes = { 'brand': \"Tijuana Flats\" }\n- allowed_domains = ['tijuanaflats.com']\n- start_urls = (\n- 'https://tijuanaflats.com/wpsl_stores-sitemap.xml',\n- )\n+ item_attributes = {\"brand\": \"Tijuana Flats\", \"brand_wikidata\": \"Q7801833\"}\n+ allowed_domains = [\"tijuanaflats.com\"]\n+ start_urls = (\"https://www.tijuanaflats.com/locations\",)\n \n def parse(self, response):\n- response.selector.remove_namespaces()\n- city_urls = response.xpath('//url/loc/text()').extract()\n- for path in city_urls:\n- yield scrapy.Request(\n- path.strip(),\n- callback=self.parse_store,\n+ data = json.loads(\n+ response.xpath(\n+ '//tjs-view-locations/attribute::*[name()=\":locations\"]'\n+ ).extract_first()\n+ )\n+ for row in data:\n+ for ent in row[\"yoast_json_ld\"][0][\"@graph\"]:\n+ if ent[\"@type\"] == \"WebPage\" and row[\"slug\"] in ent[\"url\"]:\n+ name = ent[\"name\"]\n+\n+ # extract text from html snippet\n+ hours_of_operation = scrapy.Selector(text=row[\"acf\"][\"hours_of_operation\"])\n+ opening_hours = \"; \".join(\n+ a.strip() for a in hours_of_operation.xpath(\"//text()\").extract()\n )\n \n- def parse_store(self, response):\n-\n- if response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract():\n- storeHours = str(response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract())\n- storeHours = storeHours.replace('[','').replace(']','').replace(\"'\",'').replace(',',' - ')\n- else:\n- storeHours = response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract()\n-\n-\n- properties = {\n- 'name': response.xpath('//h1[@class=\"entry-title\"]/text()').extract_first(),\n- 'website': response.request.url,\n- 'ref': response.xpath('//h1[@class=\"entry-title\"]/text()').extract_first(),\n- 'addr_full': response.xpath('//div[@class=\"wpsl-location-address\"]/span[1]/text()').extract_first() + \" \" + response.xpath('//div[@class=\"wpsl-location-address\"]/span[2]/text()').extract_first(),\n- 'city': response.xpath('//div[@class=\"wpsl-location-address\"]/span[3]/text()').extract_first().rstrip(', '),\n- 'state': response.xpath('//div[@class=\"wpsl-location-address\"]/span[4]/text()').extract_first().strip(),\n- 'postcode': response.xpath('//div[@class=\"wpsl-location-address\"]/span[5]/text()').extract_first().strip(),\n- 'opening_hours': storeHours,\n- 'lat': float(response.xpath('//script/text()').extract()[-3].split('\"lat\":\"')[1].split('\"')[0]),\n- 'lon': float(response.xpath('//script/text()').extract()[-3].split('\"lng\":\"')[1].split('\"')[0]),\n- }\n-\n- yield GeojsonPointItem(**properties)\n\\ No newline at end of file\n+ properties = {\n+ \"ref\": row[\"slug\"],\n+ \"name\": name,\n+ \"lat\": row[\"acf\"][\"physical_location\"][\"lat\"],\n+ \"lon\": row[\"acf\"][\"physical_location\"][\"lng\"],\n+ \"addr_full\": row[\"acf\"][\"address_1\"],\n+ \"city\": row[\"acf\"][\"city\"],\n+ \"state\": row[\"acf\"][\"state\"],\n+ \"postcode\": row[\"acf\"][\"zip\"],\n+ \"phone\": row[\"acf\"][\"contact_phone\"],\n+ \"website\": f'https://www.tijuanaflats.com/locations/{row[\"slug\"]}',\n+ \"opening_hours\": opening_hours,\n+ }\n+ yield GeojsonPointItem(**properties)\n", 
"issue": "Spider tijuanaflats is broken\nDuring the global build at 2021-05-26-14-42-23, spider **tijuanaflats** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tijuanaflats.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tijuanaflats.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tijuanaflats.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass TijuanaFlatsSpider(scrapy.Spider):\n name = \"tijuanaflats\"\n item_attributes = { 'brand': \"Tijuana Flats\" }\n allowed_domains = ['tijuanaflats.com']\n start_urls = (\n 'https://tijuanaflats.com/wpsl_stores-sitemap.xml',\n )\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_store(self, response):\n\n if response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract():\n storeHours = str(response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract())\n storeHours = storeHours.replace('[','').replace(']','').replace(\"'\",'').replace(',',' - ')\n else:\n storeHours = response.xpath('//table[@class=\"wpsl-opening-hours\"]/tr').extract()\n\n\n properties = {\n 'name': response.xpath('//h1[@class=\"entry-title\"]/text()').extract_first(),\n 'website': response.request.url,\n 'ref': response.xpath('//h1[@class=\"entry-title\"]/text()').extract_first(),\n 'addr_full': response.xpath('//div[@class=\"wpsl-location-address\"]/span[1]/text()').extract_first() + \" \" + response.xpath('//div[@class=\"wpsl-location-address\"]/span[2]/text()').extract_first(),\n 'city': response.xpath('//div[@class=\"wpsl-location-address\"]/span[3]/text()').extract_first().rstrip(', '),\n 'state': response.xpath('//div[@class=\"wpsl-location-address\"]/span[4]/text()').extract_first().strip(),\n 'postcode': response.xpath('//div[@class=\"wpsl-location-address\"]/span[5]/text()').extract_first().strip(),\n 'opening_hours': storeHours,\n 'lat': float(response.xpath('//script/text()').extract()[-3].split('\"lat\":\"')[1].split('\"')[0]),\n 'lon': float(response.xpath('//script/text()').extract()[-3].split('\"lng\":\"')[1].split('\"')[0]),\n }\n\n yield GeojsonPointItem(**properties)", "path": "locations/spiders/tijuanaflats.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass TijuanaFlatsSpider(scrapy.Spider):\n name = \"tijuanaflats\"\n item_attributes = {\"brand\": \"Tijuana Flats\", \"brand_wikidata\": \"Q7801833\"}\n allowed_domains = [\"tijuanaflats.com\"]\n start_urls = (\"https://www.tijuanaflats.com/locations\",)\n\n def parse(self, response):\n data = json.loads(\n response.xpath(\n '//tjs-view-locations/attribute::*[name()=\":locations\"]'\n ).extract_first()\n )\n for row in data:\n for ent in row[\"yoast_json_ld\"][0][\"@graph\"]:\n if ent[\"@type\"] == \"WebPage\" and row[\"slug\"] in ent[\"url\"]:\n name = ent[\"name\"]\n\n # extract text from html snippet\n hours_of_operation = scrapy.Selector(text=row[\"acf\"][\"hours_of_operation\"])\n opening_hours = \"; \".join(\n a.strip() for a in hours_of_operation.xpath(\"//text()\").extract()\n )\n\n properties = {\n \"ref\": 
row[\"slug\"],\n \"name\": name,\n \"lat\": row[\"acf\"][\"physical_location\"][\"lat\"],\n \"lon\": row[\"acf\"][\"physical_location\"][\"lng\"],\n \"addr_full\": row[\"acf\"][\"address_1\"],\n \"city\": row[\"acf\"][\"city\"],\n \"state\": row[\"acf\"][\"state\"],\n \"postcode\": row[\"acf\"][\"zip\"],\n \"phone\": row[\"acf\"][\"contact_phone\"],\n \"website\": f'https://www.tijuanaflats.com/locations/{row[\"slug\"]}',\n \"opening_hours\": opening_hours,\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/tijuanaflats.py"}]} | 1,075 | 1,003 |
gh_patches_debug_836 | rasdani/github-patches | git_diff | lutris__lutris-2472 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Don't show Steam Linux Runtime when importing games
Link to the tool on steamdb: https://steamdb.info/app/1070560/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/services/steam.py`
Content:
```
1 """Steam service"""
2 import os
3 import re
4
5 from lutris import pga
6 from lutris.config import make_game_config_id, LutrisConfig
7 from lutris.util.steam.appmanifest import AppManifest, get_appmanifests
8 from lutris.util.steam.config import get_steamapps_paths
9 from lutris.services.service_game import ServiceGame
10
11 NAME = "Steam"
12 ICON = "steam"
13 ONLINE = False
14
15
16 class SteamGame(ServiceGame):
17 """ServiceGame for Steam games"""
18
19 store = "steam"
20 installer_slug = "steam"
21 excluded_appids = [
22 "228980", # Steamworks Common Redistributables
23 ]
24
25 @classmethod
26 def new_from_steam_game(cls, appmanifest, game_id=None):
27 """Return a Steam game instance from an AppManifest"""
28 steam_game = SteamGame()
29 steam_game.appid = str(appmanifest.steamid)
30 steam_game.game_id = game_id
31 steam_game.name = appmanifest.name
32 steam_game.slug = appmanifest.slug
33 steam_game.runner = appmanifest.get_runner_name()
34 return steam_game
35
36 @classmethod
37 def new_from_lutris_id(cls, game_id):
38 steam_game = SteamGame()
39 steam_game.game_id = game_id
40 return steam_game
41
42 @property
43 def config_id(self):
44 return make_game_config_id(self.slug)
45
46 @classmethod
47 def is_importable(cls, appmanifest):
48 """Return whether a Steam game should be imported"""
49 if not appmanifest.is_installed():
50 return False
51 if appmanifest.steamid in cls.excluded_appids:
52 return False
53 if re.match(r"^Proton \d*", appmanifest.name):
54 return False
55 return True
56
57 def install(self, updated_info=None):
58 """Add an installed game to the library
59
60 Params:
61             updated_info (dict): Optional dictionary containing existing data not to overwrite
62 """
63 if updated_info:
64 name = updated_info["name"]
65 slug = updated_info["slug"]
66 else:
67 name = self.name
68 slug = self.slug
69 self.game_id = pga.add_or_update(
70 id=self.game_id,
71 name=name,
72 runner=self.runner,
73 slug=slug,
74 steamid=int(self.appid),
75 installed=1,
76 configpath=self.config_id,
77 installer_slug=self.installer_slug,
78 )
79 self.create_config()
80 return self.game_id
81
82 def create_config(self):
83 """Create the game configuration for a Steam game"""
84 game_config = LutrisConfig(
85 runner_slug=self.runner, game_config_id=self.config_id
86 )
87 game_config.raw_game_config.update({"appid": self.appid})
88 game_config.save()
89
90
91 class SteamSyncer:
92 platform = "linux"
93
94 def __init__(self):
95 self._lutris_games = None
96 self._lutris_steamids = None
97
98 @property
99 def runner(self):
100 return "steam" if self.platform == "linux" else "winesteam"
101
102 @property
103 def lutris_games(self):
104 if not self._lutris_games:
105 self._lutris_games = pga.get_games_where(
106 steamid__isnull=False, steamid__not=""
107 )
108 return self._lutris_games
109
110 @property
111 def lutris_steamids(self):
112 if not self._lutris_steamids:
113 self._lutris_steamids = {str(game["steamid"]) for game in self.lutris_games}
114 return self._lutris_steamids
115
116 def load(self, force_reload=False):
117 """Return importable Steam games"""
118 games = []
119 steamapps_paths = get_steamapps_paths()
120 for steamapps_path in steamapps_paths[self.platform]:
121 for appmanifest_file in get_appmanifests(steamapps_path):
122 app_manifest = AppManifest(
123 os.path.join(steamapps_path, appmanifest_file)
124 )
125 if SteamGame.is_importable(app_manifest):
126 games.append(SteamGame.new_from_steam_game(app_manifest))
127 return games
128
129 def get_pga_game(self, game):
130 """Return a PGA game if one is found"""
131 for pga_game in self.lutris_games:
132 if (
133 str(pga_game["steamid"]) == game.appid
134 and (pga_game["runner"] == self.runner or not pga_game["runner"])
135 and not pga_game["installed"]
136 ):
137 return pga_game
138
139 def sync(self, games, full=False):
140 """Syncs Steam games to Lutris"""
141 available_ids = set() # Set of Steam appids seen while browsing AppManifests
142 added_games = []
143 for game in games:
144 steamid = game.appid
145 available_ids.add(steamid)
146 pga_game = self.get_pga_game(game)
147
148 if pga_game:
149 if (
150 steamid in self.lutris_steamids
151 and pga_game["installed"] != 1
152 and pga_game["installed"]
153 ):
154 added_games.append(game.install())
155
156 if steamid not in self.lutris_steamids:
157 added_games.append(game.install())
158 else:
159 if pga_game:
160 added_games.append(game.install(pga_game))
161
162 if not full:
163 return added_games, games
164
165 removed_games = []
166 unavailable_ids = self.lutris_steamids.difference(available_ids)
167 for steamid in unavailable_ids:
168 for pga_game in self.lutris_games:
169 if (
170 str(pga_game["steamid"]) == steamid
171 and pga_game["installed"]
172 and pga_game["runner"] == self.runner
173 ):
174 game = SteamGame.new_from_lutris_id(pga_game["id"])
175 game.uninstall()
176 removed_games.append(pga_game["id"])
177 return (added_games, removed_games)
178
179
180 SYNCER = SteamSyncer
181
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/services/steam.py b/lutris/services/steam.py
--- a/lutris/services/steam.py
+++ b/lutris/services/steam.py
@@ -20,6 +20,7 @@
installer_slug = "steam"
excluded_appids = [
"228980", # Steamworks Common Redistributables
+ "1070560", # Steam Linux Runtime
]
@classmethod
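For illustration only (stub logic, not Lutris code): the practical effect of the new list entry is that the importability filter now rejects the Steam Linux Runtime the same way it already rejects Steamworks redistributables and Proton builds.
```python
import re

EXCLUDED_APPIDS = {"228980", "1070560"}  # Steamworks Common Redistributables, Steam Linux Runtime

def is_importable(appid, name, installed=True):
    if not installed:
        return False
    if appid in EXCLUDED_APPIDS:
        return False
    if re.match(r"^Proton \d*", name):
        return False
    return True

print(is_importable("1070560", "Steam Linux Runtime"))  # False
print(is_importable("1091500", "Cyberpunk 2077"))       # True
```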
| {"golden_diff": "diff --git a/lutris/services/steam.py b/lutris/services/steam.py\n--- a/lutris/services/steam.py\n+++ b/lutris/services/steam.py\n@@ -20,6 +20,7 @@\n installer_slug = \"steam\"\n excluded_appids = [\n \"228980\", # Steamworks Common Redistributables\n+ \"1070560\", # Steam Linux Runtime\n ]\n \n @classmethod\n", "issue": "Don't show Steam Linux Runtime when importing games\nLink to the tool on steamdb: https://steamdb.info/app/1070560/\n", "before_files": [{"content": "\"\"\"Steam service\"\"\"\nimport os\nimport re\n\nfrom lutris import pga\nfrom lutris.config import make_game_config_id, LutrisConfig\nfrom lutris.util.steam.appmanifest import AppManifest, get_appmanifests\nfrom lutris.util.steam.config import get_steamapps_paths\nfrom lutris.services.service_game import ServiceGame\n\nNAME = \"Steam\"\nICON = \"steam\"\nONLINE = False\n\n\nclass SteamGame(ServiceGame):\n \"\"\"ServiceGame for Steam games\"\"\"\n\n store = \"steam\"\n installer_slug = \"steam\"\n excluded_appids = [\n \"228980\", # Steamworks Common Redistributables\n ]\n\n @classmethod\n def new_from_steam_game(cls, appmanifest, game_id=None):\n \"\"\"Return a Steam game instance from an AppManifest\"\"\"\n steam_game = SteamGame()\n steam_game.appid = str(appmanifest.steamid)\n steam_game.game_id = game_id\n steam_game.name = appmanifest.name\n steam_game.slug = appmanifest.slug\n steam_game.runner = appmanifest.get_runner_name()\n return steam_game\n\n @classmethod\n def new_from_lutris_id(cls, game_id):\n steam_game = SteamGame()\n steam_game.game_id = game_id\n return steam_game\n\n @property\n def config_id(self):\n return make_game_config_id(self.slug)\n\n @classmethod\n def is_importable(cls, appmanifest):\n \"\"\"Return whether a Steam game should be imported\"\"\"\n if not appmanifest.is_installed():\n return False\n if appmanifest.steamid in cls.excluded_appids:\n return False\n if re.match(r\"^Proton \\d*\", appmanifest.name):\n return False\n return True\n\n def install(self, updated_info=None):\n \"\"\"Add an installed game to the library\n\n Params:\n updated_info (dict): Optional dictonary containing existing data not to overwrite\n \"\"\"\n if updated_info:\n name = updated_info[\"name\"]\n slug = updated_info[\"slug\"]\n else:\n name = self.name\n slug = self.slug\n self.game_id = pga.add_or_update(\n id=self.game_id,\n name=name,\n runner=self.runner,\n slug=slug,\n steamid=int(self.appid),\n installed=1,\n configpath=self.config_id,\n installer_slug=self.installer_slug,\n )\n self.create_config()\n return self.game_id\n\n def create_config(self):\n \"\"\"Create the game configuration for a Steam game\"\"\"\n game_config = LutrisConfig(\n runner_slug=self.runner, game_config_id=self.config_id\n )\n game_config.raw_game_config.update({\"appid\": self.appid})\n game_config.save()\n\n\nclass SteamSyncer:\n platform = \"linux\"\n\n def __init__(self):\n self._lutris_games = None\n self._lutris_steamids = None\n\n @property\n def runner(self):\n return \"steam\" if self.platform == \"linux\" else \"winesteam\"\n\n @property\n def lutris_games(self):\n if not self._lutris_games:\n self._lutris_games = pga.get_games_where(\n steamid__isnull=False, steamid__not=\"\"\n )\n return self._lutris_games\n\n @property\n def lutris_steamids(self):\n if not self._lutris_steamids:\n self._lutris_steamids = {str(game[\"steamid\"]) for game in self.lutris_games}\n return self._lutris_steamids\n\n def load(self, force_reload=False):\n \"\"\"Return importable Steam games\"\"\"\n games = []\n steamapps_paths 
= get_steamapps_paths()\n for steamapps_path in steamapps_paths[self.platform]:\n for appmanifest_file in get_appmanifests(steamapps_path):\n app_manifest = AppManifest(\n os.path.join(steamapps_path, appmanifest_file)\n )\n if SteamGame.is_importable(app_manifest):\n games.append(SteamGame.new_from_steam_game(app_manifest))\n return games\n\n def get_pga_game(self, game):\n \"\"\"Return a PGA game if one is found\"\"\"\n for pga_game in self.lutris_games:\n if (\n str(pga_game[\"steamid\"]) == game.appid\n and (pga_game[\"runner\"] == self.runner or not pga_game[\"runner\"])\n and not pga_game[\"installed\"]\n ):\n return pga_game\n\n def sync(self, games, full=False):\n \"\"\"Syncs Steam games to Lutris\"\"\"\n available_ids = set() # Set of Steam appids seen while browsing AppManifests\n added_games = []\n for game in games:\n steamid = game.appid\n available_ids.add(steamid)\n pga_game = self.get_pga_game(game)\n\n if pga_game:\n if (\n steamid in self.lutris_steamids\n and pga_game[\"installed\"] != 1\n and pga_game[\"installed\"]\n ):\n added_games.append(game.install())\n\n if steamid not in self.lutris_steamids:\n added_games.append(game.install())\n else:\n if pga_game:\n added_games.append(game.install(pga_game))\n\n if not full:\n return added_games, games\n\n removed_games = []\n unavailable_ids = self.lutris_steamids.difference(available_ids)\n for steamid in unavailable_ids:\n for pga_game in self.lutris_games:\n if (\n str(pga_game[\"steamid\"]) == steamid\n and pga_game[\"installed\"]\n and pga_game[\"runner\"] == self.runner\n ):\n game = SteamGame.new_from_lutris_id(pga_game[\"id\"])\n game.uninstall()\n removed_games.append(pga_game[\"id\"])\n return (added_games, removed_games)\n\n\nSYNCER = SteamSyncer\n", "path": "lutris/services/steam.py"}], "after_files": [{"content": "\"\"\"Steam service\"\"\"\nimport os\nimport re\n\nfrom lutris import pga\nfrom lutris.config import make_game_config_id, LutrisConfig\nfrom lutris.util.steam.appmanifest import AppManifest, get_appmanifests\nfrom lutris.util.steam.config import get_steamapps_paths\nfrom lutris.services.service_game import ServiceGame\n\nNAME = \"Steam\"\nICON = \"steam\"\nONLINE = False\n\n\nclass SteamGame(ServiceGame):\n \"\"\"ServiceGame for Steam games\"\"\"\n\n store = \"steam\"\n installer_slug = \"steam\"\n excluded_appids = [\n \"228980\", # Steamworks Common Redistributables\n \"1070560\", # Steam Linux Runtime\n ]\n\n @classmethod\n def new_from_steam_game(cls, appmanifest, game_id=None):\n \"\"\"Return a Steam game instance from an AppManifest\"\"\"\n steam_game = SteamGame()\n steam_game.appid = str(appmanifest.steamid)\n steam_game.game_id = game_id\n steam_game.name = appmanifest.name\n steam_game.slug = appmanifest.slug\n steam_game.runner = appmanifest.get_runner_name()\n return steam_game\n\n @classmethod\n def new_from_lutris_id(cls, game_id):\n steam_game = SteamGame()\n steam_game.game_id = game_id\n return steam_game\n\n @property\n def config_id(self):\n return make_game_config_id(self.slug)\n\n @classmethod\n def is_importable(cls, appmanifest):\n \"\"\"Return whether a Steam game should be imported\"\"\"\n if not appmanifest.is_installed():\n return False\n if appmanifest.steamid in cls.excluded_appids:\n return False\n if re.match(r\"^Proton \\d*\", appmanifest.name):\n return False\n return True\n\n def install(self, updated_info=None):\n \"\"\"Add an installed game to the library\n\n Params:\n updated_info (dict): Optional dictonary containing existing data not to overwrite\n \"\"\"\n if 
updated_info:\n name = updated_info[\"name\"]\n slug = updated_info[\"slug\"]\n else:\n name = self.name\n slug = self.slug\n self.game_id = pga.add_or_update(\n id=self.game_id,\n name=name,\n runner=self.runner,\n slug=slug,\n steamid=int(self.appid),\n installed=1,\n configpath=self.config_id,\n installer_slug=self.installer_slug,\n )\n self.create_config()\n return self.game_id\n\n def create_config(self):\n \"\"\"Create the game configuration for a Steam game\"\"\"\n game_config = LutrisConfig(\n runner_slug=self.runner, game_config_id=self.config_id\n )\n game_config.raw_game_config.update({\"appid\": self.appid})\n game_config.save()\n\n\nclass SteamSyncer:\n platform = \"linux\"\n\n def __init__(self):\n self._lutris_games = None\n self._lutris_steamids = None\n\n @property\n def runner(self):\n return \"steam\" if self.platform == \"linux\" else \"winesteam\"\n\n @property\n def lutris_games(self):\n if not self._lutris_games:\n self._lutris_games = pga.get_games_where(\n steamid__isnull=False, steamid__not=\"\"\n )\n return self._lutris_games\n\n @property\n def lutris_steamids(self):\n if not self._lutris_steamids:\n self._lutris_steamids = {str(game[\"steamid\"]) for game in self.lutris_games}\n return self._lutris_steamids\n\n def load(self, force_reload=False):\n \"\"\"Return importable Steam games\"\"\"\n games = []\n steamapps_paths = get_steamapps_paths()\n for steamapps_path in steamapps_paths[self.platform]:\n for appmanifest_file in get_appmanifests(steamapps_path):\n app_manifest = AppManifest(\n os.path.join(steamapps_path, appmanifest_file)\n )\n if SteamGame.is_importable(app_manifest):\n games.append(SteamGame.new_from_steam_game(app_manifest))\n return games\n\n def get_pga_game(self, game):\n \"\"\"Return a PGA game if one is found\"\"\"\n for pga_game in self.lutris_games:\n if (\n str(pga_game[\"steamid\"]) == game.appid\n and (pga_game[\"runner\"] == self.runner or not pga_game[\"runner\"])\n and not pga_game[\"installed\"]\n ):\n return pga_game\n\n def sync(self, games, full=False):\n \"\"\"Syncs Steam games to Lutris\"\"\"\n available_ids = set() # Set of Steam appids seen while browsing AppManifests\n added_games = []\n for game in games:\n steamid = game.appid\n available_ids.add(steamid)\n pga_game = self.get_pga_game(game)\n\n if pga_game:\n if (\n steamid in self.lutris_steamids\n and pga_game[\"installed\"] != 1\n and pga_game[\"installed\"]\n ):\n added_games.append(game.install())\n\n if steamid not in self.lutris_steamids:\n added_games.append(game.install())\n else:\n if pga_game:\n added_games.append(game.install(pga_game))\n\n if not full:\n return added_games, games\n\n removed_games = []\n unavailable_ids = self.lutris_steamids.difference(available_ids)\n for steamid in unavailable_ids:\n for pga_game in self.lutris_games:\n if (\n str(pga_game[\"steamid\"]) == steamid\n and pga_game[\"installed\"]\n and pga_game[\"runner\"] == self.runner\n ):\n game = SteamGame.new_from_lutris_id(pga_game[\"id\"])\n game.uninstall()\n removed_games.append(pga_game[\"id\"])\n return (added_games, removed_games)\n\n\nSYNCER = SteamSyncer\n", "path": "lutris/services/steam.py"}]} | 2,020 | 107 |
gh_patches_debug_7645 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-3327 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `colossalai/context/random/__init__.py`
Content:
```
1 from ._helper import (seed, set_mode, with_seed, add_seed, get_seeds, get_states, get_current_mode, set_seed_states,
2 sync_states, moe_set_seed, reset_seeds)
3
4 __all__ = [
5 'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', 'get_states', 'get_current_mode', 'set_seed_states',
6 'sync_states', 'moe_set_seed', 'reset_seeds'
7 ]
8
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/colossalai/context/random/__init__.py b/colossalai/context/random/__init__.py
--- a/colossalai/context/random/__init__.py
+++ b/colossalai/context/random/__init__.py
@@ -1,5 +1,16 @@
-from ._helper import (seed, set_mode, with_seed, add_seed, get_seeds, get_states, get_current_mode, set_seed_states,
- sync_states, moe_set_seed, reset_seeds)
+from ._helper import (
+ add_seed,
+ get_current_mode,
+ get_seeds,
+ get_states,
+ moe_set_seed,
+ reset_seeds,
+ seed,
+ set_mode,
+ set_seed_states,
+ sync_states,
+ with_seed,
+)
__all__ = [
'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', 'get_states', 'get_current_mode', 'set_seed_states',
| {"golden_diff": "diff --git a/colossalai/context/random/__init__.py b/colossalai/context/random/__init__.py\n--- a/colossalai/context/random/__init__.py\n+++ b/colossalai/context/random/__init__.py\n@@ -1,5 +1,16 @@\n-from ._helper import (seed, set_mode, with_seed, add_seed, get_seeds, get_states, get_current_mode, set_seed_states,\n- sync_states, moe_set_seed, reset_seeds)\n+from ._helper import (\n+ add_seed,\n+ get_current_mode,\n+ get_seeds,\n+ get_states,\n+ moe_set_seed,\n+ reset_seeds,\n+ seed,\n+ set_mode,\n+ set_seed_states,\n+ sync_states,\n+ with_seed,\n+)\n \n __all__ = [\n 'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', 'get_states', 'get_current_mode', 'set_seed_states',\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from ._helper import (seed, set_mode, with_seed, add_seed, get_seeds, get_states, get_current_mode, set_seed_states,\n sync_states, moe_set_seed, reset_seeds)\n\n__all__ = [\n 'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', 'get_states', 'get_current_mode', 'set_seed_states',\n 'sync_states', 'moe_set_seed', 'reset_seeds'\n]\n", "path": "colossalai/context/random/__init__.py"}], "after_files": [{"content": "from ._helper import (\n add_seed,\n get_current_mode,\n get_seeds,\n get_states,\n moe_set_seed,\n reset_seeds,\n seed,\n set_mode,\n set_seed_states,\n sync_states,\n with_seed,\n)\n\n__all__ = [\n 'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', 'get_states', 'get_current_mode', 'set_seed_states',\n 'sync_states', 'moe_set_seed', 'reset_seeds'\n]\n", "path": "colossalai/context/random/__init__.py"}]} | 397 | 217 |
gh_patches_debug_1703 | rasdani/github-patches | git_diff | unionai-oss__pandera-1591 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error Importing Pandera with Polars extra
**Describe the bug**
I get an error when importing pandera after installing the latest 0.19.0b2 version with the polars extra in a clean environment. I can import it successfully if I install without the polars extra.
- [x] I have checked that this issue has not already been reported.
- [x] I have confirmed this bug exists on the latest version of pandera.
- [ ] (optional) I have confirmed this bug exists on the main branch of pandera.
#### Code Sample, a copy-pastable example
I installed pandera 0.19.0b2 in a clean virtual environment using `pip install pandera[polars]==0.19.0b2` and attempted to import pandera:
```python
import pandera as pa
```
I got the following error message:
```
>>> import pandera as pa
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".venv/lib/python3.11/site-packages/pandera/__init__.py", line 6, in <module>
from pandera import errors, external_config, typing
File ".venv/lib/python3.11/site-packages/pandera/external_config.py", line 23, in <module>
import pyspark.pandas
ModuleNotFoundError: No module named 'pyspark'
```
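A quick way to confirm the root cause (an illustrative check, not part of the original report): in a polars-only environment the `pyspark` module spec is simply absent, so the unguarded import in `external_config.py` has to fail.
```python
import importlib.util

print(importlib.util.find_spec("pyspark"))  # None when the pyspark extra is not installed
```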
#### Versions:
- Pandera: 0.19.0b2
- Python: 3.11.7
- Ubuntu: 22.04
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandera/external_config.py`
Content:
```
1 """Configuration for external packages."""
2
3 import os
4
5 is_spark_local_ip_dirty = False
6 is_pyarrow_ignore_timezone_dirty = False
7
8 try:
9 # try importing pyspark to see if it exists. This is important because the
10 # pandera.typing module defines a Series type that inherits from
11 # pandas.Series, and pyspark v1+ injects a __getitem__ method to pandas
12 # Series and DataFrames to support type hinting:
13 # https://spark.apache.org/docs/3.2.0/api/python/user_guide/pandas_on_spark/typehints.html#type-hinting-with-names
14 # pylint: disable=unused-import
15 if os.getenv("SPARK_LOCAL_IP") is None:
16 is_spark_local_ip_dirty = True
17 os.environ["SPARK_LOCAL_IP"] = "127.0.0.1"
18 if os.getenv("PYARROW_IGNORE_TIMEZONE") is None:
19 is_pyarrow_ignore_timezone_dirty = True
20 # This can be overriden by the user
21 os.environ["PYARROW_IGNORE_TIMEZONE"] = "1"
22
23 import pyspark.pandas
24 finally:
25 if is_spark_local_ip_dirty:
26 os.environ.pop("SPARK_LOCAL_IP")
27 if is_pyarrow_ignore_timezone_dirty:
28 os.environ.pop("PYARROW_IGNORE_TIMEZONE")
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pandera/external_config.py b/pandera/external_config.py
--- a/pandera/external_config.py
+++ b/pandera/external_config.py
@@ -21,6 +21,8 @@
os.environ["PYARROW_IGNORE_TIMEZONE"] = "1"
import pyspark.pandas
+except (ImportError, ModuleNotFoundError):
+ pass
finally:
if is_spark_local_ip_dirty:
os.environ.pop("SPARK_LOCAL_IP")
| {"golden_diff": "diff --git a/pandera/external_config.py b/pandera/external_config.py\n--- a/pandera/external_config.py\n+++ b/pandera/external_config.py\n@@ -21,6 +21,8 @@\n os.environ[\"PYARROW_IGNORE_TIMEZONE\"] = \"1\"\n \n import pyspark.pandas\n+except (ImportError, ModuleNotFoundError):\n+ pass\n finally:\n if is_spark_local_ip_dirty:\n os.environ.pop(\"SPARK_LOCAL_IP\")\n", "issue": "Error Importing Pandera with Polars extra\n**Describe the bug**\r\nI get an error when importing pandera after installing the latest 0.19.0b2 version with the polars extra in a clean environment. I can import it successfully if I install without the polars extra.\r\n\r\n- [x] I have checked that this issue has not already been reported.\r\n- [x] I have confirmed this bug exists on the latest version of pandera.\r\n- [ ] (optional) I have confirmed this bug exists on the main branch of pandera.\r\n\r\n#### Code Sample, a copy-pastable example\r\n\r\nI installed pandera 0.19.0b2 in a clean virtual environment using `pip install pandera[polars]==0.19.0b2` and attempted to import pandera:\r\n\r\n```python\r\nimport pandera as pa\r\n```\r\n\r\nI got the following error message:\r\n```\r\n>>> import pandera as pa\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \".venv/lib/python3.11/site-packages/pandera/__init__.py\", line 6, in <module>\r\n from pandera import errors, external_config, typing\r\n File \".venv/lib/python3.11/site-packages/pandera/external_config.py\", line 23, in <module>\r\n import pyspark.pandas\r\nModuleNotFoundError: No module named 'pyspark'\r\n```\r\n\r\n#### Versions:\r\n\r\n - Pandera: 0.19.0b2\r\n - Python: 3.11.7\r\n - Ubuntu: 22.04\r\n\n", "before_files": [{"content": "\"\"\"Configuration for external packages.\"\"\"\n\nimport os\n\nis_spark_local_ip_dirty = False\nis_pyarrow_ignore_timezone_dirty = False\n\ntry:\n # try importing pyspark to see if it exists. This is important because the\n # pandera.typing module defines a Series type that inherits from\n # pandas.Series, and pyspark v1+ injects a __getitem__ method to pandas\n # Series and DataFrames to support type hinting:\n # https://spark.apache.org/docs/3.2.0/api/python/user_guide/pandas_on_spark/typehints.html#type-hinting-with-names\n # pylint: disable=unused-import\n if os.getenv(\"SPARK_LOCAL_IP\") is None:\n is_spark_local_ip_dirty = True\n os.environ[\"SPARK_LOCAL_IP\"] = \"127.0.0.1\"\n if os.getenv(\"PYARROW_IGNORE_TIMEZONE\") is None:\n is_pyarrow_ignore_timezone_dirty = True\n # This can be overriden by the user\n os.environ[\"PYARROW_IGNORE_TIMEZONE\"] = \"1\"\n\n import pyspark.pandas\nfinally:\n if is_spark_local_ip_dirty:\n os.environ.pop(\"SPARK_LOCAL_IP\")\n if is_pyarrow_ignore_timezone_dirty:\n os.environ.pop(\"PYARROW_IGNORE_TIMEZONE\")\n", "path": "pandera/external_config.py"}], "after_files": [{"content": "\"\"\"Configuration for external packages.\"\"\"\n\nimport os\n\nis_spark_local_ip_dirty = False\nis_pyarrow_ignore_timezone_dirty = False\n\ntry:\n # try importing pyspark to see if it exists. 
This is important because the\n # pandera.typing module defines a Series type that inherits from\n # pandas.Series, and pyspark v1+ injects a __getitem__ method to pandas\n # Series and DataFrames to support type hinting:\n # https://spark.apache.org/docs/3.2.0/api/python/user_guide/pandas_on_spark/typehints.html#type-hinting-with-names\n # pylint: disable=unused-import\n if os.getenv(\"SPARK_LOCAL_IP\") is None:\n is_spark_local_ip_dirty = True\n os.environ[\"SPARK_LOCAL_IP\"] = \"127.0.0.1\"\n if os.getenv(\"PYARROW_IGNORE_TIMEZONE\") is None:\n is_pyarrow_ignore_timezone_dirty = True\n # This can be overriden by the user\n os.environ[\"PYARROW_IGNORE_TIMEZONE\"] = \"1\"\n\n import pyspark.pandas\nexcept (ImportError, ModuleNotFoundError):\n pass\nfinally:\n if is_spark_local_ip_dirty:\n os.environ.pop(\"SPARK_LOCAL_IP\")\n if is_pyarrow_ignore_timezone_dirty:\n os.environ.pop(\"PYARROW_IGNORE_TIMEZONE\")\n", "path": "pandera/external_config.py"}]} | 945 | 109 |
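The pandera patch above is an instance of the standard guarded optional-import pattern: environment tweaks go in `try`, the optional dependency may simply be absent, and `finally` restores the environment either way. A minimal self-contained sketch of that pattern (only `pyspark` is assumed optional; everything else is stdlib):

```python
import os

_spark_ip_was_unset = False
try:
    # Set a temporary environment variable only if the user has not.
    if os.getenv("SPARK_LOCAL_IP") is None:
        _spark_ip_was_unset = True
        os.environ["SPARK_LOCAL_IP"] = "127.0.0.1"
    import pyspark.pandas  # noqa: F401  # optional dependency
except ImportError:
    pass  # pyspark not installed: carry on without it
finally:
    # Restore the environment whether or not the import succeeded.
    if _spark_ip_was_unset:
        os.environ.pop("SPARK_LOCAL_IP")
```

Since `ModuleNotFoundError` is a subclass of `ImportError`, the single `except ImportError` clause above covers both names in the patch's `(ImportError, ModuleNotFoundError)` tuple.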
gh_patches_debug_22260 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1392 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
E0002 bug when using parameters for DynamoDB AttributeDefinitions
*cfn-lint version: 0.28.2*
*Description of issue.*
Rule E3039 (added in 0.28.0) doesn't support Refs and results in an E0002 error for the template. 
Repeatable with this template snippet:
```
AWSTemplateFormatVersion: '2010-09-09'
Parameters:
HashKeyName:
Description: Primary Key Name
Type: String
AllowedPattern: '[a-zA-Z0-9]*'
MinLength: '1'
MaxLength: '2048'
    ConstraintDescription: must contain only alphanumeric characters
HashKeyType:
Description: Primary Key Type
Type: String
Default: S
AllowedPattern: '[S|N]'
MinLength: '1'
MaxLength: '1'
ConstraintDescription: must be either S or N
RangeKeyName:
Description: Sort Key Name
Type: String
Default: 'NA'
AllowedPattern: '[a-zA-Z0-9]*'
MinLength: '0'
MaxLength: '2048'
    ConstraintDescription: must contain only alphanumeric characters
RangeKeyType:
Description: Sort Key Type
Type: String
Default: S
AllowedPattern: '[S|N]'
MinLength: '0'
MaxLength: '1'
    ConstraintDescription: must be either S or N
Conditions:
isRangeKeyAvailable: !Not [ !Equals [ !Ref RangeKeyName, 'NA' ] ]
Resources:
DynamoDBTable:
DeletionPolicy: Delete
UpdateReplacePolicy: Delete
Type: AWS::DynamoDB::Table
Properties:
AttributeDefinitions: !If
- isRangeKeyAvailable
- - AttributeName: !Ref HashKeyName
AttributeType: !Ref HashKeyType
- AttributeName: !Ref RangeKeyName
AttributeType: !Ref RangeKeyType
- - AttributeName: !Ref HashKeyName
AttributeType: !Ref HashKeyType
KeySchema: !If
- isRangeKeyAvailable
- - AttributeName: !Ref HashKeyName
KeyType: HASH
- AttributeName: !Ref RangeKeyName
KeyType: RANGE
- - AttributeName: !Ref HashKeyName
KeyType: HASH
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import six
6 from cfnlint.decode.node import list_node
7 from cfnlint.rules import CloudFormationLintRule
8 from cfnlint.rules import RuleMatch
9
10
11 class AttributeMismatch(CloudFormationLintRule):
12 """Check DynamoDB Attributes"""
13 id = 'E3039'
14 shortdesc = 'AttributeDefinitions / KeySchemas mismatch'
15 description = 'Verify the set of Attributes in AttributeDefinitions and KeySchemas match'
16 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html'
17 tags = ['resources', 'dynamodb']
18
19 def __init__(self):
20 """Init"""
21 super(AttributeMismatch, self).__init__()
22 self.resource_property_types = ['AWS::DynamoDB::Table']
23
24 def _get_key_schema_attributes(self, key_schemas_sets):
25 """ Get Key Schema attributes """
26 keys = set()
27
28 for properties, _ in key_schemas_sets:
29 for key in properties:
30 attribute_name = key.get_safe('AttributeName', type_t=six.string_types)
31 if attribute_name:
32 keys.add(key.get('AttributeName'))
33 return keys
34
35 def _get_attribute_secondary(self, property_sets):
36 """ Get the key schemas from secondary indexes """
37 keys = set()
38
39 for properties, _ in property_sets:
40 for index in properties:
41 keys = keys.union(
42 self._get_key_schema_attributes(
43 index.get_safe('KeySchema', list_node([], None, None), [], list)
44 )
45 )
46
47 return keys
48
49 def check_property_set(self, property_set, path):
50 """ Check a property set """
51 matches = []
52 properties = property_set.get('Object')
53
54 keys = set()
55 attributes = set()
56
57 for attribute in properties.get('AttributeDefinitions', []):
58 attribute_name = attribute.get('AttributeName')
59 if isinstance(attribute_name, six.string_types):
60 attributes.add(attribute.get('AttributeName'))
61 else:
62 self.logger.info('attribute definitions is not using just strings')
63 return matches
64 keys = keys.union(
65 self._get_key_schema_attributes(
66 properties.get_safe('KeySchema', list_node([], None, None), [], list)
67 )
68 )
69 keys = keys.union(self._get_attribute_secondary(
70 properties.get_safe('GlobalSecondaryIndexes', list_node([], None, None), path, list
71 ))) # pylint: disable=bad-continuation
72 keys = keys.union(self._get_attribute_secondary(
73 properties.get_safe('LocalSecondaryIndexes', list_node([], None, None), path, list
74 ))) # pylint: disable=bad-continuation
75
76 if attributes != keys:
77 message = 'The set of Attributes in AttributeDefinitions: {0} and KeySchemas: {1} must match at {2}'
78 matches.append(RuleMatch(
79 path,
80 message.format(sorted(list(attributes)), sorted(list(keys)), '/'.join(map(str, path)))
81 ))
82
83 return matches
84
85 def check(self, properties, path, cfn):
86 """Check itself"""
87 matches = []
88
89 property_sets = cfn.get_object_without_conditions(properties, path)
90 for property_set in property_sets:
91 matches.extend(self.check_property_set(property_set, path))
92 return matches
93
94 def match_resource_properties(self, properties, _, path, cfn):
95 """Match for sub properties"""
96 matches = []
97 matches.extend(self.check(properties, path, cfn))
98 return matches
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py b/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py
--- a/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py
+++ b/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py
@@ -77,7 +77,8 @@
message = 'The set of Attributes in AttributeDefinitions: {0} and KeySchemas: {1} must match at {2}'
matches.append(RuleMatch(
path,
- message.format(sorted(list(attributes)), sorted(list(keys)), '/'.join(map(str, path)))
+ message.format(sorted(list(attributes)), sorted(
+ list(keys)), '/'.join(map(str, path)))
))
return matches
@@ -86,7 +87,8 @@
"""Check itself"""
matches = []
- property_sets = cfn.get_object_without_conditions(properties, path)
+ property_sets = cfn.get_object_without_conditions(
+ properties, ['AttributeDefinitions', 'KeySchema', 'GlobalSecondaryIndexes', 'LocalSecondaryIndexes'])
for property_set in property_sets:
matches.extend(self.check_property_set(property_set, path))
return matches
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py b/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py\n--- a/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py\n+++ b/src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py\n@@ -77,7 +77,8 @@\n message = 'The set of Attributes in AttributeDefinitions: {0} and KeySchemas: {1} must match at {2}'\n matches.append(RuleMatch(\n path,\n- message.format(sorted(list(attributes)), sorted(list(keys)), '/'.join(map(str, path)))\n+ message.format(sorted(list(attributes)), sorted(\n+ list(keys)), '/'.join(map(str, path)))\n ))\n \n return matches\n@@ -86,7 +87,8 @@\n \"\"\"Check itself\"\"\"\n matches = []\n \n- property_sets = cfn.get_object_without_conditions(properties, path)\n+ property_sets = cfn.get_object_without_conditions(\n+ properties, ['AttributeDefinitions', 'KeySchema', 'GlobalSecondaryIndexes', 'LocalSecondaryIndexes'])\n for property_set in property_sets:\n matches.extend(self.check_property_set(property_set, path))\n return matches\n", "issue": "E0002 bug when using parameters for DynamoDB AttributeDefinitions\n*cfn-lint version: 0.28.2*\r\n\r\n*Description of issue.*\r\n\r\nRule E3039 (added in 0.28.0) doesn't support Refs and results in a E0002 error for the template. \r\n\r\nRepeatable with this template snippet:\r\n\r\n```\r\nAWSTemplateFormatVersion: '2010-09-09'\r\n\r\nParameters:\r\n HashKeyName:\r\n Description: Primary Key Name\r\n Type: String\r\n AllowedPattern: '[a-zA-Z0-9]*'\r\n MinLength: '1'\r\n MaxLength: '2048'\r\n ConstraintDescription: must contain only alphanumberic characters\r\n\r\n HashKeyType:\r\n Description: Primary Key Type\r\n Type: String\r\n Default: S\r\n AllowedPattern: '[S|N]'\r\n MinLength: '1'\r\n MaxLength: '1'\r\n ConstraintDescription: must be either S or N\r\n\r\n RangeKeyName:\r\n Description: Sort Key Name\r\n Type: String\r\n Default: 'NA'\r\n AllowedPattern: '[a-zA-Z0-9]*'\r\n MinLength: '0'\r\n MaxLength: '2048'\r\n ConstraintDescription: must contain only alphanumberic characters\r\n\r\n RangeKeyType:\r\n Description: Sort Key Type\r\n Type: String\r\n Default: S\r\n AllowedPattern: '[S|N]'\r\n MinLength: '0'\r\n MaxLength: '1'\r\n ConstraintDescription: must be either S or Ns\r\n\r\nConditions:\r\n isRangeKeyAvailable: !Not [ !Equals [ !Ref RangeKeyName, 'NA' ] ]\r\n\r\nResources:\r\n DynamoDBTable:\r\n DeletionPolicy: Delete\r\n UpdateReplacePolicy: Delete\r\n Type: AWS::DynamoDB::Table\r\n Properties:\r\n AttributeDefinitions: !If\r\n - isRangeKeyAvailable\r\n - - AttributeName: !Ref HashKeyName\r\n AttributeType: !Ref HashKeyType\r\n - AttributeName: !Ref RangeKeyName\r\n AttributeType: !Ref RangeKeyType\r\n - - AttributeName: !Ref HashKeyName\r\n AttributeType: !Ref HashKeyType\r\n KeySchema: !If\r\n - isRangeKeyAvailable\r\n - - AttributeName: !Ref HashKeyName\r\n KeyType: HASH\r\n - AttributeName: !Ref RangeKeyName\r\n KeyType: RANGE\r\n - - AttributeName: !Ref HashKeyName\r\n KeyType: HASH\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport six\nfrom cfnlint.decode.node import list_node\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass AttributeMismatch(CloudFormationLintRule):\n \"\"\"Check DynamoDB Attributes\"\"\"\n id = 'E3039'\n shortdesc = 'AttributeDefinitions / KeySchemas mismatch'\n description = 'Verify the set of Attributes in AttributeDefinitions and KeySchemas match'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html'\n tags = ['resources', 'dynamodb']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(AttributeMismatch, self).__init__()\n self.resource_property_types = ['AWS::DynamoDB::Table']\n\n def _get_key_schema_attributes(self, key_schemas_sets):\n \"\"\" Get Key Schema attributes \"\"\"\n keys = set()\n\n for properties, _ in key_schemas_sets:\n for key in properties:\n attribute_name = key.get_safe('AttributeName', type_t=six.string_types)\n if attribute_name:\n keys.add(key.get('AttributeName'))\n return keys\n\n def _get_attribute_secondary(self, property_sets):\n \"\"\" Get the key schemas from secondary indexes \"\"\"\n keys = set()\n\n for properties, _ in property_sets:\n for index in properties:\n keys = keys.union(\n self._get_key_schema_attributes(\n index.get_safe('KeySchema', list_node([], None, None), [], list)\n )\n )\n\n return keys\n\n def check_property_set(self, property_set, path):\n \"\"\" Check a property set \"\"\"\n matches = []\n properties = property_set.get('Object')\n\n keys = set()\n attributes = set()\n\n for attribute in properties.get('AttributeDefinitions', []):\n attribute_name = attribute.get('AttributeName')\n if isinstance(attribute_name, six.string_types):\n attributes.add(attribute.get('AttributeName'))\n else:\n self.logger.info('attribute definitions is not using just strings')\n return matches\n keys = keys.union(\n self._get_key_schema_attributes(\n properties.get_safe('KeySchema', list_node([], None, None), [], list)\n )\n )\n keys = keys.union(self._get_attribute_secondary(\n properties.get_safe('GlobalSecondaryIndexes', list_node([], None, None), path, list\n ))) # pylint: disable=bad-continuation\n keys = keys.union(self._get_attribute_secondary(\n properties.get_safe('LocalSecondaryIndexes', list_node([], None, None), path, list\n ))) # pylint: disable=bad-continuation\n\n if attributes != keys:\n message = 'The set of Attributes in AttributeDefinitions: {0} and KeySchemas: {1} must match at {2}'\n matches.append(RuleMatch(\n path,\n message.format(sorted(list(attributes)), sorted(list(keys)), '/'.join(map(str, path)))\n ))\n\n return matches\n\n def check(self, properties, path, cfn):\n \"\"\"Check itself\"\"\"\n matches = []\n\n property_sets = cfn.get_object_without_conditions(properties, path)\n for property_set in property_sets:\n matches.extend(self.check_property_set(property_set, path))\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n matches.extend(self.check(properties, path, cfn))\n return matches\n", "path": "src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py"}], "after_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport six\nfrom cfnlint.decode.node import list_node\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass AttributeMismatch(CloudFormationLintRule):\n \"\"\"Check DynamoDB Attributes\"\"\"\n id = 'E3039'\n shortdesc = 'AttributeDefinitions / KeySchemas mismatch'\n description = 'Verify the set of Attributes in AttributeDefinitions and KeySchemas match'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html'\n tags = ['resources', 'dynamodb']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(AttributeMismatch, self).__init__()\n self.resource_property_types = ['AWS::DynamoDB::Table']\n\n def _get_key_schema_attributes(self, key_schemas_sets):\n \"\"\" Get Key Schema attributes \"\"\"\n keys = set()\n\n for properties, _ in key_schemas_sets:\n for key in properties:\n attribute_name = key.get_safe('AttributeName', type_t=six.string_types)\n if attribute_name:\n keys.add(key.get('AttributeName'))\n return keys\n\n def _get_attribute_secondary(self, property_sets):\n \"\"\" Get the key schemas from secondary indexes \"\"\"\n keys = set()\n\n for properties, _ in property_sets:\n for index in properties:\n keys = keys.union(\n self._get_key_schema_attributes(\n index.get_safe('KeySchema', list_node([], None, None), [], list)\n )\n )\n\n return keys\n\n def check_property_set(self, property_set, path):\n \"\"\" Check a property set \"\"\"\n matches = []\n properties = property_set.get('Object')\n\n keys = set()\n attributes = set()\n\n for attribute in properties.get('AttributeDefinitions', []):\n attribute_name = attribute.get('AttributeName')\n if isinstance(attribute_name, six.string_types):\n attributes.add(attribute.get('AttributeName'))\n else:\n self.logger.info('attribute definitions is not using just strings')\n return matches\n keys = keys.union(\n self._get_key_schema_attributes(\n properties.get_safe('KeySchema', list_node([], None, None), [], list)\n )\n )\n keys = keys.union(self._get_attribute_secondary(\n properties.get_safe('GlobalSecondaryIndexes', list_node([], None, None), path, list\n ))) # pylint: disable=bad-continuation\n keys = keys.union(self._get_attribute_secondary(\n properties.get_safe('LocalSecondaryIndexes', list_node([], None, None), path, list\n ))) # pylint: disable=bad-continuation\n\n if attributes != keys:\n message = 'The set of Attributes in AttributeDefinitions: {0} and KeySchemas: {1} must match at {2}'\n matches.append(RuleMatch(\n path,\n message.format(sorted(list(attributes)), sorted(\n list(keys)), '/'.join(map(str, path)))\n ))\n\n return matches\n\n def check(self, properties, path, cfn):\n \"\"\"Check itself\"\"\"\n matches = []\n\n property_sets = cfn.get_object_without_conditions(\n properties, ['AttributeDefinitions', 'KeySchema', 'GlobalSecondaryIndexes', 'LocalSecondaryIndexes'])\n for property_set in property_sets:\n matches.extend(self.check_property_set(property_set, path))\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n matches.extend(self.check(properties, path, cfn))\n return matches\n", "path": "src/cfnlint/rules/resources/dynamodb/AttributeMismatch.py"}]} | 1,753 | 256 |
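The cfn-lint fix above works because `get_object_without_conditions` expands `Fn::If` into concrete property sets before the attribute/key comparison runs. The toy resolver below is hypothetical — it is not cfn-lint's implementation — but it shows the essential idea: evaluate every condition scenario consistently across all listed properties, then compare within each scenario:

```python
from itertools import product


def expand_conditions(properties, condition_names):
    """Hypothetical mini-resolver: yield (scenario, concrete properties)
    for every true/false assignment of the named conditions."""

    def resolve(value, scenario):
        if isinstance(value, dict) and "Fn::If" in value:
            name, if_true, if_false = value["Fn::If"]
            return resolve(if_true if scenario[name] else if_false, scenario)
        return value

    for values in product([True, False], repeat=len(condition_names)):
        scenario = dict(zip(condition_names, values))
        yield scenario, {k: resolve(v, scenario) for k, v in properties.items()}


table = {
    "AttributeDefinitions": {"Fn::If": ["isRangeKeyAvailable",
                                        [{"AttributeName": "pk"}, {"AttributeName": "sk"}],
                                        [{"AttributeName": "pk"}]]},
    "KeySchema": {"Fn::If": ["isRangeKeyAvailable",
                             [{"AttributeName": "pk"}, {"AttributeName": "sk"}],
                             [{"AttributeName": "pk"}]]},
}

for scenario, props in expand_conditions(table, ["isRangeKeyAvailable"]):
    attrs = {a["AttributeName"] for a in props["AttributeDefinitions"]}
    keys = {k["AttributeName"] for k in props["KeySchema"]}
    print(scenario, attrs == keys)  # True in both scenarios once expanded
```

Resolving per scenario keeps the two `Fn::If` branches consistent, which is what passing the explicit property-name list to `get_object_without_conditions` achieves in the patch.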
gh_patches_debug_5432 | rasdani/github-patches | git_diff | lhotse-speech__lhotse-240 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cut concatenate doesn't consider the first sample in each batch
Found in #234
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lhotse/dataset/cut_transforms/concatenate.py`
Content:
```
1 from typing import Optional, Sequence
2
3 from lhotse import CutSet
4 from lhotse.cut import AnyCut
5 from lhotse.utils import Seconds
6
7
8 class CutConcatenate:
9 """
10 A transform on batch of cuts (``CutSet``) that concatenates the cuts to minimize the total amount of padding;
11 e.g. instead of creating a batch with 40 examples, we will merge some of the examples together
12 adding some silence between them to avoid a large number of padding frames that waste the computation.
13 """
14
15 def __init__(
16 self,
17 gap: Seconds = 1.0,
18 duration_factor: float = 1.0
19 ) -> None:
20 """
21 CutConcatenate's constructor.
22
23 :param gap: The duration of silence in seconds that is inserted between the cuts;
24 it's goal is to let the model "know" that there are separate utterances in a single example.
25 :param duration_factor: Determines the maximum duration of the concatenated cuts;
26 by default it's 1, setting the limit at the duration of the longest cut in the batch.
27 """
28 self.gap = gap
29 self.duration_factor = duration_factor
30
31 def __call__(self, cuts: CutSet) -> CutSet:
32 cuts = cuts.sort_by_duration(ascending=False)
33 return concat_cuts(
34 cuts,
35 gap=self.gap,
36 max_duration=cuts[0].duration * self.duration_factor
37 )
38
39
40 def concat_cuts(
41 cuts: Sequence[AnyCut],
42 gap: Seconds = 1.0,
43 max_duration: Optional[Seconds] = None
44 ) -> CutSet:
45 """
46 We're going to concatenate the cuts to minimize the amount of total padding frames used.
47 This means that some samples in the batch will be merged together into one sample,
48 separated by an interval of silence.
49 This is actually solving a knapsack problem.
50 In this initial implementation we're using a greedy approach:
51 going from the back (i.e. the shortest cuts) we'll try to concat them to the longest cut
52 that still has some "space" at the end.
53
54 :param cuts: a list of cuts to pack.
55 :param gap: the duration of silence inserted between concatenated cuts.
56 :param max_duration: the maximum duration for the concatenated cuts
57 (by default set to the duration of the first cut).
58 :return a list of packed cuts.
59 """
60 if len(cuts) <= 1:
61 # Nothing to do.
62 return CutSet.from_cuts(cuts)
63 cuts = sorted(cuts, key=lambda c: c.duration, reverse=True)
64 max_duration = cuts[0].duration if max_duration is None else max_duration
65 current_idx = 1
66 while True:
67 can_fit = False
68 shortest = cuts[-1]
69 for idx in range(current_idx, len(cuts) - 1):
70 cut = cuts[current_idx]
71 can_fit = cut.duration + gap + shortest.duration <= max_duration
72 if can_fit:
73 cuts[current_idx] = cut.pad(cut.duration + gap).append(shortest)
74 cuts = cuts[:-1]
75 break
76 current_idx += 1
77 if not can_fit:
78 break
79 return CutSet.from_cuts(cuts)
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lhotse/dataset/cut_transforms/concatenate.py b/lhotse/dataset/cut_transforms/concatenate.py
--- a/lhotse/dataset/cut_transforms/concatenate.py
+++ b/lhotse/dataset/cut_transforms/concatenate.py
@@ -62,7 +62,7 @@
return CutSet.from_cuts(cuts)
cuts = sorted(cuts, key=lambda c: c.duration, reverse=True)
max_duration = cuts[0].duration if max_duration is None else max_duration
- current_idx = 1
+ current_idx = 0
while True:
can_fit = False
shortest = cuts[-1]
| {"golden_diff": "diff --git a/lhotse/dataset/cut_transforms/concatenate.py b/lhotse/dataset/cut_transforms/concatenate.py\n--- a/lhotse/dataset/cut_transforms/concatenate.py\n+++ b/lhotse/dataset/cut_transforms/concatenate.py\n@@ -62,7 +62,7 @@\n return CutSet.from_cuts(cuts)\n cuts = sorted(cuts, key=lambda c: c.duration, reverse=True)\n max_duration = cuts[0].duration if max_duration is None else max_duration\n- current_idx = 1\n+ current_idx = 0\n while True:\n can_fit = False\n shortest = cuts[-1]\n", "issue": "Cut concatenate doesn't consider the first sample in each batch\nFound in #234 \n", "before_files": [{"content": "from typing import Optional, Sequence\n\nfrom lhotse import CutSet\nfrom lhotse.cut import AnyCut\nfrom lhotse.utils import Seconds\n\n\nclass CutConcatenate:\n \"\"\"\n A transform on batch of cuts (``CutSet``) that concatenates the cuts to minimize the total amount of padding;\n e.g. instead of creating a batch with 40 examples, we will merge some of the examples together\n adding some silence between them to avoid a large number of padding frames that waste the computation.\n \"\"\"\n\n def __init__(\n self,\n gap: Seconds = 1.0,\n duration_factor: float = 1.0\n ) -> None:\n \"\"\"\n CutConcatenate's constructor.\n\n :param gap: The duration of silence in seconds that is inserted between the cuts;\n it's goal is to let the model \"know\" that there are separate utterances in a single example.\n :param duration_factor: Determines the maximum duration of the concatenated cuts;\n by default it's 1, setting the limit at the duration of the longest cut in the batch.\n \"\"\"\n self.gap = gap\n self.duration_factor = duration_factor\n\n def __call__(self, cuts: CutSet) -> CutSet:\n cuts = cuts.sort_by_duration(ascending=False)\n return concat_cuts(\n cuts,\n gap=self.gap,\n max_duration=cuts[0].duration * self.duration_factor\n )\n\n\ndef concat_cuts(\n cuts: Sequence[AnyCut],\n gap: Seconds = 1.0,\n max_duration: Optional[Seconds] = None\n) -> CutSet:\n \"\"\"\n We're going to concatenate the cuts to minimize the amount of total padding frames used.\n This means that some samples in the batch will be merged together into one sample,\n separated by an interval of silence.\n This is actually solving a knapsack problem.\n In this initial implementation we're using a greedy approach:\n going from the back (i.e. 
the shortest cuts) we'll try to concat them to the longest cut\n that still has some \"space\" at the end.\n\n :param cuts: a list of cuts to pack.\n :param gap: the duration of silence inserted between concatenated cuts.\n :param max_duration: the maximum duration for the concatenated cuts\n (by default set to the duration of the first cut).\n :return a list of packed cuts.\n \"\"\"\n if len(cuts) <= 1:\n # Nothing to do.\n return CutSet.from_cuts(cuts)\n cuts = sorted(cuts, key=lambda c: c.duration, reverse=True)\n max_duration = cuts[0].duration if max_duration is None else max_duration\n current_idx = 1\n while True:\n can_fit = False\n shortest = cuts[-1]\n for idx in range(current_idx, len(cuts) - 1):\n cut = cuts[current_idx]\n can_fit = cut.duration + gap + shortest.duration <= max_duration\n if can_fit:\n cuts[current_idx] = cut.pad(cut.duration + gap).append(shortest)\n cuts = cuts[:-1]\n break\n current_idx += 1\n if not can_fit:\n break\n return CutSet.from_cuts(cuts)\n", "path": "lhotse/dataset/cut_transforms/concatenate.py"}], "after_files": [{"content": "from typing import Optional, Sequence\n\nfrom lhotse import CutSet\nfrom lhotse.cut import AnyCut\nfrom lhotse.utils import Seconds\n\n\nclass CutConcatenate:\n \"\"\"\n A transform on batch of cuts (``CutSet``) that concatenates the cuts to minimize the total amount of padding;\n e.g. instead of creating a batch with 40 examples, we will merge some of the examples together\n adding some silence between them to avoid a large number of padding frames that waste the computation.\n \"\"\"\n\n def __init__(\n self,\n gap: Seconds = 1.0,\n duration_factor: float = 1.0\n ) -> None:\n \"\"\"\n CutConcatenate's constructor.\n\n :param gap: The duration of silence in seconds that is inserted between the cuts;\n it's goal is to let the model \"know\" that there are separate utterances in a single example.\n :param duration_factor: Determines the maximum duration of the concatenated cuts;\n by default it's 1, setting the limit at the duration of the longest cut in the batch.\n \"\"\"\n self.gap = gap\n self.duration_factor = duration_factor\n\n def __call__(self, cuts: CutSet) -> CutSet:\n cuts = cuts.sort_by_duration(ascending=False)\n return concat_cuts(\n cuts,\n gap=self.gap,\n max_duration=cuts[0].duration * self.duration_factor\n )\n\n\ndef concat_cuts(\n cuts: Sequence[AnyCut],\n gap: Seconds = 1.0,\n max_duration: Optional[Seconds] = None\n) -> CutSet:\n \"\"\"\n We're going to concatenate the cuts to minimize the amount of total padding frames used.\n This means that some samples in the batch will be merged together into one sample,\n separated by an interval of silence.\n This is actually solving a knapsack problem.\n In this initial implementation we're using a greedy approach:\n going from the back (i.e. 
the shortest cuts) we'll try to concat them to the longest cut\n that still has some \"space\" at the end.\n\n :param cuts: a list of cuts to pack.\n :param gap: the duration of silence inserted between concatenated cuts.\n :param max_duration: the maximum duration for the concatenated cuts\n (by default set to the duration of the first cut).\n :return a list of packed cuts.\n \"\"\"\n if len(cuts) <= 1:\n # Nothing to do.\n return CutSet.from_cuts(cuts)\n cuts = sorted(cuts, key=lambda c: c.duration, reverse=True)\n max_duration = cuts[0].duration if max_duration is None else max_duration\n current_idx = 0\n while True:\n can_fit = False\n shortest = cuts[-1]\n for idx in range(current_idx, len(cuts) - 1):\n cut = cuts[current_idx]\n can_fit = cut.duration + gap + shortest.duration <= max_duration\n if can_fit:\n cuts[current_idx] = cut.pad(cut.duration + gap).append(shortest)\n cuts = cuts[:-1]\n break\n current_idx += 1\n if not can_fit:\n break\n return CutSet.from_cuts(cuts)\n", "path": "lhotse/dataset/cut_transforms/concatenate.py"}]} | 1,140 | 154 |
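To see the lhotse off-by-one concretely, here is a duration-only toy model of the greedy loop (hypothetical numbers, not lhotse's API). With the buggy `start=1`, a two-cut batch never even enters the inner loop — `range(1, len(cuts) - 1)` is empty — so the longest cut is never a packing target:

```python
def pack(durations, gap, max_duration, start):
    """Duration-only sketch of concat_cuts' greedy packing loop."""
    cuts = sorted(durations, reverse=True)
    if len(cuts) <= 1:
        return cuts
    current_idx = start
    while True:
        can_fit = False
        shortest = cuts[-1]
        for _ in range(current_idx, len(cuts) - 1):
            cut = cuts[current_idx]
            can_fit = cut + gap + shortest <= max_duration
            if can_fit:
                cuts[current_idx] = cut + gap + shortest  # stands in for pad+append
                cuts = cuts[:-1]
                break
            current_idx += 1
        if not can_fit:
            break
    return cuts


print(pack([4.0, 1.0], gap=1.0, max_duration=10.0, start=1))  # [4.0, 1.0] -- nothing packed
print(pack([4.0, 1.0], gap=1.0, max_duration=10.0, start=0))  # [6.0] -- cuts merged
```

(`max_duration=10.0` models a `duration_factor` greater than 1; with the default factor of 1 the longest cut can never absorb another cut plus the gap, so the difference only shows once the duration budget allows it.)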
gh_patches_debug_34601 | rasdani/github-patches | git_diff | sunpy__sunpy-7316 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Resampling Maps in the example gallery gives a confusing example for the superpixel method
### Provide a general description of the issue or problem.
Perhaps a minor thing, but checking this [page](https://docs.sunpy.org/en/stable/generated/gallery/map/map_resampling_and_superpixels.html) I got confused by the example for the superpixel method.
It says:
`new_dimensions = u.Quantity(aia_map.dimensions) / 16`
`aia_superpixel_map = aia_map.superpixel(new_dimensions)`
The first line should instead be, e.g.:
`new_dimensions = [16, 16] * u.pixel`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/map/map_resampling_and_superpixels.py`
Content:
```
1 """
2 ===============
3 Resampling Maps
4 ===============
5
6 How to resample a map using the resample method, which implements interpolation, or
7 using superpixels, which combines pixels.
8 """
9 import matplotlib.pyplot as plt
10
11 import astropy.units as u
12
13 import sunpy.data.sample
14 import sunpy.map
15
16 ###############################################################################
17 # We start with the sample data.
18
19 aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)
20
21 ##############################################################################
22 # To reduce the angular resolution of the map you can use the `~sunpy.map.GenericMap.resample` method,
23 # specifying the new dimensions in pixels. By default, this method uses linear interpolation
24 # but this can be changed with the ``method`` argument ('nearest', 'linear' or 'spline').
25
26 new_dimensions = [40, 40] * u.pixel
27 aia_resampled_map = aia_map.resample(new_dimensions)
28
29 ##############################################################################
30 # Let's plot the result.
31
32 fig = plt.figure()
33 ax = fig.add_subplot(projection=aia_resampled_map)
34 aia_resampled_map.plot(axes=ax)
35 plt.show()
36
37 ##############################################################################
38 # Another way to resample is by using the `~sunpy.map.GenericMap.superpixel` method.
39 # This can be used to increase the signal to noise ratio by reducing the
40 # resolution of the image by combining pixels. This means that the new dimension
41 # must divide the original size exactly.
42 # For example you can reduce the AIA map resolution by a factor of 16.
43
44 new_dimensions = u.Quantity(aia_map.dimensions) / 16
45 aia_superpixel_map = aia_map.superpixel(new_dimensions)
46
47 ##############################################################################
48 # Let's plot the result.
49
50 fig = plt.figure()
51 ax = fig.add_subplot(projection=aia_superpixel_map)
52 aia_superpixel_map.plot(axes=ax)
53 plt.show()
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/map/map_resampling_and_superpixels.py b/examples/map/map_resampling_and_superpixels.py
--- a/examples/map/map_resampling_and_superpixels.py
+++ b/examples/map/map_resampling_and_superpixels.py
@@ -13,15 +13,16 @@
import sunpy.data.sample
import sunpy.map
-###############################################################################
+##############################################################################
# We start with the sample data.
aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)
##############################################################################
-# To reduce the angular resolution of the map you can use the `~sunpy.map.GenericMap.resample` method,
-# specifying the new dimensions in pixels. By default, this method uses linear interpolation
-# but this can be changed with the ``method`` argument ('nearest', 'linear' or 'spline').
+# To reduce the angular resolution of the map, you can use the
+# :meth:`~sunpy.map.GenericMap.resample` method, specifying the new dimensions
+# in pixels. By default, this method uses linear interpolation but this can be
+# changed with the ``method`` argument ('nearest', 'linear' or 'spline').
new_dimensions = [40, 40] * u.pixel
aia_resampled_map = aia_map.resample(new_dimensions)
@@ -35,14 +36,15 @@
plt.show()
##############################################################################
-# Another way to resample is by using the `~sunpy.map.GenericMap.superpixel` method.
-# This can be used to increase the signal to noise ratio by reducing the
-# resolution of the image by combining pixels. This means that the new dimension
-# must divide the original size exactly.
-# For example you can reduce the AIA map resolution by a factor of 16.
-
-new_dimensions = u.Quantity(aia_map.dimensions) / 16
-aia_superpixel_map = aia_map.superpixel(new_dimensions)
+# Another way to reduce the angular resolution of the map is by using the
+# :meth:`~sunpy.map.GenericMap.superpixel` method, which combines pixels.
+# The superpixel dimensions do not need to be square, and the intensity of
+# each superpixel defaults to the sum of the constituent pixels. For example,
+# you can reduce the AIA map resolution by a factor of 16 by specifying 16x16
+# superpixels.
+
+superpixel_size = [16, 16] * u.pixel
+aia_superpixel_map = aia_map.superpixel(superpixel_size)
##############################################################################
# Let's plot the result.
| {"golden_diff": "diff --git a/examples/map/map_resampling_and_superpixels.py b/examples/map/map_resampling_and_superpixels.py\n--- a/examples/map/map_resampling_and_superpixels.py\n+++ b/examples/map/map_resampling_and_superpixels.py\n@@ -13,15 +13,16 @@\n import sunpy.data.sample\n import sunpy.map\n \n-###############################################################################\n+##############################################################################\n # We start with the sample data.\n \n aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)\n \n ##############################################################################\n-# To reduce the angular resolution of the map you can use the `~sunpy.map.GenericMap.resample` method,\n-# specifying the new dimensions in pixels. By default, this method uses linear interpolation\n-# but this can be changed with the ``method`` argument ('nearest', 'linear' or 'spline').\n+# To reduce the angular resolution of the map, you can use the\n+# :meth:`~sunpy.map.GenericMap.resample` method, specifying the new dimensions\n+# in pixels. By default, this method uses linear interpolation but this can be\n+# changed with the ``method`` argument ('nearest', 'linear' or 'spline').\n \n new_dimensions = [40, 40] * u.pixel\n aia_resampled_map = aia_map.resample(new_dimensions)\n@@ -35,14 +36,15 @@\n plt.show()\n \n ##############################################################################\n-# Another way to resample is by using the `~sunpy.map.GenericMap.superpixel` method.\n-# This can be used to increase the signal to noise ratio by reducing the\n-# resolution of the image by combining pixels. This means that the new dimension\n-# must divide the original size exactly.\n-# For example you can reduce the AIA map resolution by a factor of 16.\n-\n-new_dimensions = u.Quantity(aia_map.dimensions) / 16\n-aia_superpixel_map = aia_map.superpixel(new_dimensions)\n+# Another way to reduce the angular resolution of the map is by using the\n+# :meth:`~sunpy.map.GenericMap.superpixel` method, which combines pixels.\n+# The superpixel dimensions do not need to be square, and the intensity of\n+# each superpixel defaults to the sum of the constituent pixels. For example,\n+# you can reduce the AIA map resolution by a factor of 16 by specifying 16x16\n+# superpixels.\n+\n+superpixel_size = [16, 16] * u.pixel\n+aia_superpixel_map = aia_map.superpixel(superpixel_size)\n \n ##############################################################################\n # Let's plot the result.\n", "issue": "Resampling Maps in the example gallery gives a confusing example for the superpixel method\n### Provide a general description of the issue or problem.\n\nThat's a minor thing perhaps but checking this [page](https://docs.sunpy.org/en/stable/generated/gallery/map/map_resampling_and_superpixels.html) I got confused by the example for the superpixel method. 
\r\nIt says:\r\n`new_dimensions = u.Quantity(aia_map.dimensions) / 16`\r\n`aia_superpixel_map = aia_map.superpixel([new_dimensions]`\r\n\r\nThe first line should be instead e.g.:\r\n`new_dimensions=[16,16]*u.pixel `\n", "before_files": [{"content": "\"\"\"\n===============\nResampling Maps\n===============\n\nHow to resample a map using the resample method, which implements interpolation, or\nusing superpixels, which combines pixels.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\n\nimport sunpy.data.sample\nimport sunpy.map\n\n###############################################################################\n# We start with the sample data.\n\naia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)\n\n##############################################################################\n# To reduce the angular resolution of the map you can use the `~sunpy.map.GenericMap.resample` method,\n# specifying the new dimensions in pixels. By default, this method uses linear interpolation\n# but this can be changed with the ``method`` argument ('nearest', 'linear' or 'spline').\n\nnew_dimensions = [40, 40] * u.pixel\naia_resampled_map = aia_map.resample(new_dimensions)\n\n##############################################################################\n# Let's plot the result.\n\nfig = plt.figure()\nax = fig.add_subplot(projection=aia_resampled_map)\naia_resampled_map.plot(axes=ax)\nplt.show()\n\n##############################################################################\n# Another way to resample is by using the `~sunpy.map.GenericMap.superpixel` method.\n# This can be used to increase the signal to noise ratio by reducing the\n# resolution of the image by combining pixels. This means that the new dimension\n# must divide the original size exactly.\n# For example you can reduce the AIA map resolution by a factor of 16.\n\nnew_dimensions = u.Quantity(aia_map.dimensions) / 16\naia_superpixel_map = aia_map.superpixel(new_dimensions)\n\n##############################################################################\n# Let's plot the result.\n\nfig = plt.figure()\nax = fig.add_subplot(projection=aia_superpixel_map)\naia_superpixel_map.plot(axes=ax)\nplt.show()\n", "path": "examples/map/map_resampling_and_superpixels.py"}], "after_files": [{"content": "\"\"\"\n===============\nResampling Maps\n===============\n\nHow to resample a map using the resample method, which implements interpolation, or\nusing superpixels, which combines pixels.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\n\nimport sunpy.data.sample\nimport sunpy.map\n\n##############################################################################\n# We start with the sample data.\n\naia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)\n\n##############################################################################\n# To reduce the angular resolution of the map, you can use the\n# :meth:`~sunpy.map.GenericMap.resample` method, specifying the new dimensions\n# in pixels. 
By default, this method uses linear interpolation but this can be\n# changed with the ``method`` argument ('nearest', 'linear' or 'spline').\n\nnew_dimensions = [40, 40] * u.pixel\naia_resampled_map = aia_map.resample(new_dimensions)\n\n##############################################################################\n# Let's plot the result.\n\nfig = plt.figure()\nax = fig.add_subplot(projection=aia_resampled_map)\naia_resampled_map.plot(axes=ax)\nplt.show()\n\n##############################################################################\n# Another way to reduce the angular resolution of the map is by using the\n# :meth:`~sunpy.map.GenericMap.superpixel` method, which combines pixels.\n# The superpixel dimensions do not need to be square, and the intensity of\n# each superpixel defaults to the sum of the constituent pixels. For example,\n# you can reduce the AIA map resolution by a factor of 16 by specifying 16x16\n# superpixels.\n\nsuperpixel_size = [16, 16] * u.pixel\naia_superpixel_map = aia_map.superpixel(superpixel_size)\n\n##############################################################################\n# Let's plot the result.\n\nfig = plt.figure()\nax = fig.add_subplot(projection=aia_superpixel_map)\naia_superpixel_map.plot(axes=ax)\nplt.show()\n", "path": "examples/map/map_resampling_and_superpixels.py"}]} | 888 | 554 |
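The distinction the sunpy patch draws is between output dimensions (`resample`) and per-superpixel size (`superpixel`). A short sketch, assuming the standard 1024x1024 AIA sample image:

```python
import astropy.units as u

import sunpy.data.sample
import sunpy.map

aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)  # 1024 x 1024 pixels

# resample takes the desired *output* dimensions:
resampled = aia_map.resample([64, 64] * u.pixel)       # -> a 64 x 64 map

# superpixel takes the size of *each* superpixel:
superpixeled = aia_map.superpixel([16, 16] * u.pixel)  # -> 1024/16 = 64 x 64 map
```

Both calls produce 64x64 maps, but by different routes: `resample` interpolates, while `superpixel` sums (by default) each 16x16 block of constituent pixels — the signal-to-noise motivation the gallery text gives.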
gh_patches_debug_11324 | rasdani/github-patches | git_diff | spack__spack-5006 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Issue while building mpiP
Trying to build this on an Ubuntu desktop gives (config.log):
```
configure:4289: /home/guest/workarena/softwares/sources/spack/lib/spack/env/gcc/gcc -c conftest.c >&5
conftest.c:11:17: fatal error: mpi.h: No such file or directory
compilation terminated.
configure:4289: $? = 1
configure: failed program was:
| /* confdefs.h */
| #define PACKAGE_NAME "mpiP"
| #define PACKAGE_TARNAME "mpip"
| #define PACKAGE_VERSION "3.3"
| #define PACKAGE_STRING "mpiP 3.3"
| #define PACKAGE_BUGREPORT "[email protected]"
| #define PACKAGE_URL ""
| #define DEFAULT_REPORT_FORMAT mpiPi_style_verbose
| #define HAVE_LIBM 1
| /* end confdefs.h. */
| #include "mpi.h"
|
| int
| main ()
| {
| #ifndef MPI_Init
| #ifdef __cplusplus
| (void) MPI_Init;
| #else
| (void) MPI_Init;
| #endif
| #endif
|
| ;
| return 0;
| }
configure:4289: result: no
configure:4294: error: "Failed to find declaration for MPI_Init!"
```
I am creating a PR with an additional `--with-cc` option to configure, but then I see the next error:
```
  File "/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py", line 712, in StandardFileHeader
    olist.append("/* Creator: " + os.environ["LOGNAME"] + " */\n")
  File "/usr/lib/python2.7/UserDict.py", line 40, in __getitem__
    raise KeyError(key)
KeyError: 'LOGNAME'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/mpip/package.py`
Content:
```
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the NOTICE and LICENSE files for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 from spack import *
26 import os
27
28
29 class Mpip(AutotoolsPackage):
30 """mpiP: Lightweight, Scalable MPI Profiling"""
31 homepage = "http://mpip.sourceforge.net/"
32 url = "http://downloads.sourceforge.net/project/mpip/mpiP/mpiP-3.4.1/mpiP-3.4.1.tar.gz"
33
34 version("3.4.1", "1168adc83777ac31d6ebd385823aabbd")
35
36 depends_on("libelf", type="build")
37 depends_on("libdwarf", type="build")
38 depends_on('libunwind', when=os.uname()[4] == "x86_64", type="build")
39 depends_on("mpi", type="build")
40
41 def configure_args(self):
42 return ['--without-f77']
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/var/spack/repos/builtin/packages/mpip/package.py b/var/spack/repos/builtin/packages/mpip/package.py
--- a/var/spack/repos/builtin/packages/mpip/package.py
+++ b/var/spack/repos/builtin/packages/mpip/package.py
@@ -33,10 +33,14 @@
version("3.4.1", "1168adc83777ac31d6ebd385823aabbd")
- depends_on("libelf", type="build")
- depends_on("libdwarf", type="build")
- depends_on('libunwind', when=os.uname()[4] == "x86_64", type="build")
- depends_on("mpi", type="build")
+ depends_on("libelf")
+ depends_on("libdwarf")
+ depends_on('libunwind', when=os.uname()[4] == "x86_64")
+ depends_on("mpi")
def configure_args(self):
- return ['--without-f77']
+ config_args = ['--without-f77']
+ config_args.append("--with-cc=%s" % self.spec['mpi'].mpicc)
+ config_args.append("--with-cxx=%s" % self.spec['mpi'].mpicxx)
+
+ return config_args
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/mpip/package.py b/var/spack/repos/builtin/packages/mpip/package.py\n--- a/var/spack/repos/builtin/packages/mpip/package.py\n+++ b/var/spack/repos/builtin/packages/mpip/package.py\n@@ -33,10 +33,14 @@\n \n version(\"3.4.1\", \"1168adc83777ac31d6ebd385823aabbd\")\n \n- depends_on(\"libelf\", type=\"build\")\n- depends_on(\"libdwarf\", type=\"build\")\n- depends_on('libunwind', when=os.uname()[4] == \"x86_64\", type=\"build\")\n- depends_on(\"mpi\", type=\"build\")\n+ depends_on(\"libelf\")\n+ depends_on(\"libdwarf\")\n+ depends_on('libunwind', when=os.uname()[4] == \"x86_64\")\n+ depends_on(\"mpi\")\n \n def configure_args(self):\n- return ['--without-f77']\n+ config_args = ['--without-f77']\n+ config_args.append(\"--with-cc=%s\" % self.spec['mpi'].mpicc)\n+ config_args.append(\"--with-cxx=%s\" % self.spec['mpi'].mpicxx)\n+\n+ return config_args\n", "issue": "Issue while building mpiP\nTrying to build this on ubuntu desktop gives (config.log):\r\n\r\n```\r\nconfigure:4289: /home/guest/workarena/softwares/sources/spack/lib/spack/env/gcc/gcc -c conftest.c >&5\r\nconftest.c:11:17: fatal error: mpi.h: No such file or directory\r\ncompilation terminated.\r\nconfigure:4289: $? = 1\r\nconfigure: failed program was:\r\n| /* confdefs.h */\r\n| #define PACKAGE_NAME \"mpiP\"\r\n| #define PACKAGE_TARNAME \"mpip\"\r\n| #define PACKAGE_VERSION \"3.3\"\r\n| #define PACKAGE_STRING \"mpiP 3.3\"\r\n| #define PACKAGE_BUGREPORT \"[email protected]\"\r\n| #define PACKAGE_URL \"\"\r\n| #define DEFAULT_REPORT_FORMAT mpiPi_style_verbose\r\n| #define HAVE_LIBM 1\r\n| /* end confdefs.h. */\r\n| #include \"mpi.h\"\r\n|\r\n| int\r\n| main ()\r\n| {\r\n| #ifndef MPI_Init\r\n| #ifdef __cplusplus\r\n| (void) MPI_Init;\r\n| #else\r\n| (void) MPI_Init;\r\n| #endif\r\n| #endif\r\n|\r\n| ;\r\n| return 0;\r\n| }\r\nconfigure:4289: result: no\r\nconfigure:4294: error: \"Failed to find declaration for MPI_Init!\"\r\n```\r\n\r\nI am creating PR with additional `--with-cc` option to configure but then seeing next error:\r\n\r\n```\r\n File \"/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py\", line 712, in StandardFileHeader\r\n olist = StandardFileHeader(sname)\r\n File \"/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py\", line 712, in StandardFileHeader\r\n olist.append(\"/* Creator: \" + os.environ[\"LOGNAME\"] + \" */\\n\")\r\n File \"/usr/lib/python2.7/UserDict.py\", line 40, in __getitem__\r\n olist.append(\"/* Creator: \" + os.environ[\"LOGNAME\"] + \" */\\n\")\r\n raise KeyError(key)\r\n File \"/usr/lib/python2.7/UserDict.py\", line 40, in __getitem__\r\nKeyError: 'LOGNAME' raise KeyError(key)\r\n```\r\n\nIssue while building mpiP\nTrying to build this on ubuntu desktop gives (config.log):\r\n\r\n```\r\nconfigure:4289: /home/guest/workarena/softwares/sources/spack/lib/spack/env/gcc/gcc -c conftest.c >&5\r\nconftest.c:11:17: fatal error: mpi.h: No such file or directory\r\ncompilation terminated.\r\nconfigure:4289: $? 
= 1\r\nconfigure: failed program was:\r\n| /* confdefs.h */\r\n| #define PACKAGE_NAME \"mpiP\"\r\n| #define PACKAGE_TARNAME \"mpip\"\r\n| #define PACKAGE_VERSION \"3.3\"\r\n| #define PACKAGE_STRING \"mpiP 3.3\"\r\n| #define PACKAGE_BUGREPORT \"[email protected]\"\r\n| #define PACKAGE_URL \"\"\r\n| #define DEFAULT_REPORT_FORMAT mpiPi_style_verbose\r\n| #define HAVE_LIBM 1\r\n| /* end confdefs.h. */\r\n| #include \"mpi.h\"\r\n|\r\n| int\r\n| main ()\r\n| {\r\n| #ifndef MPI_Init\r\n| #ifdef __cplusplus\r\n| (void) MPI_Init;\r\n| #else\r\n| (void) MPI_Init;\r\n| #endif\r\n| #endif\r\n|\r\n| ;\r\n| return 0;\r\n| }\r\nconfigure:4289: result: no\r\nconfigure:4294: error: \"Failed to find declaration for MPI_Init!\"\r\n```\r\n\r\nI am creating PR with additional `--with-cc` option to configure but then seeing next error:\r\n\r\n```\r\n File \"/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py\", line 712, in StandardFileHeader\r\n olist = StandardFileHeader(sname)\r\n File \"/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py\", line 712, in StandardFileHeader\r\n olist.append(\"/* Creator: \" + os.environ[\"LOGNAME\"] + \" */\\n\")\r\n File \"/usr/lib/python2.7/UserDict.py\", line 40, in __getitem__\r\n olist.append(\"/* Creator: \" + os.environ[\"LOGNAME\"] + \" */\\n\")\r\n raise KeyError(key)\r\n File \"/usr/lib/python2.7/UserDict.py\", line 40, in __getitem__\r\nKeyError: 'LOGNAME' raise KeyError(key)\r\n```\r\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\nimport os\n\n\nclass Mpip(AutotoolsPackage):\n \"\"\"mpiP: Lightweight, Scalable MPI Profiling\"\"\"\n homepage = \"http://mpip.sourceforge.net/\"\n url = \"http://downloads.sourceforge.net/project/mpip/mpiP/mpiP-3.4.1/mpiP-3.4.1.tar.gz\"\n\n version(\"3.4.1\", \"1168adc83777ac31d6ebd385823aabbd\")\n\n depends_on(\"libelf\", type=\"build\")\n depends_on(\"libdwarf\", type=\"build\")\n depends_on('libunwind', when=os.uname()[4] == \"x86_64\", type=\"build\")\n depends_on(\"mpi\", type=\"build\")\n\n def configure_args(self):\n return ['--without-f77']\n", "path": "var/spack/repos/builtin/packages/mpip/package.py"}], "after_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\nimport os\n\n\nclass Mpip(AutotoolsPackage):\n \"\"\"mpiP: Lightweight, Scalable MPI Profiling\"\"\"\n homepage = \"http://mpip.sourceforge.net/\"\n url = \"http://downloads.sourceforge.net/project/mpip/mpiP/mpiP-3.4.1/mpiP-3.4.1.tar.gz\"\n\n version(\"3.4.1\", \"1168adc83777ac31d6ebd385823aabbd\")\n\n depends_on(\"libelf\")\n depends_on(\"libdwarf\")\n depends_on('libunwind', when=os.uname()[4] == \"x86_64\")\n depends_on(\"mpi\")\n\n def configure_args(self):\n config_args = ['--without-f77']\n config_args.append(\"--with-cc=%s\" % self.spec['mpi'].mpicc)\n config_args.append(\"--with-cxx=%s\" % self.spec['mpi'].mpicxx)\n\n return config_args\n", "path": "var/spack/repos/builtin/packages/mpip/package.py"}]} | 2,008 | 301 |
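For quick reference, the essential change in the golden diff above is the `configure_args` pattern sketched below. This mirrors the diff itself — `self.spec['mpi'].mpicc` and `.mpicxx` are Spack's MPI compiler wrapper paths, and passing them to configure is what lets mpiP find `mpi.h` at build time:

```python
from spack import *  # assumed Spack package context, as in the record above


class Mpip(AutotoolsPackage):
    """Minimal sketch mirroring the golden diff above."""

    def configure_args(self):
        # Point mpiP's configure script at the MPI compiler wrappers so
        # mpi.h can be found during the build.
        config_args = ['--without-f77']
        config_args.append("--with-cc=%s" % self.spec['mpi'].mpicc)
        config_args.append("--with-cxx=%s" % self.spec['mpi'].mpicxx)
        return config_args
```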
gh_patches_debug_12235 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-2303 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
_OTEL_METRICS_EXPORTER env var should be OTEL_METRICS_EXPORTER
The environment variable `_OTEL_METRICS_EXPORTER` is prefixed with an underscore, but there's no need for it, as that environment variable is marked as stable in the specification: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#exporter-selection
--- END ISSUE ---
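As a minimal illustration of the intended outcome (assumed usage, not code from the repo): once the constant drops the leading underscore, application code can reference the stable variable through a public name.

```python
# Illustrative sketch only: reading the stable env var via a public constant.
import os

OTEL_METRICS_EXPORTER = "OTEL_METRICS_EXPORTER"

exporter = os.environ.get(OTEL_METRICS_EXPORTER)
if exporter:
    print(f"metrics exporter selected: {exporter}")
```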
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-api/src/opentelemetry/environment_variables.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 OTEL_PROPAGATORS = "OTEL_PROPAGATORS"
16 """
17 .. envvar:: OTEL_PROPAGATORS
18 """
19
20 OTEL_PYTHON_CONTEXT = "OTEL_PYTHON_CONTEXT"
21 """
22 .. envvar:: OTEL_PYTHON_CONTEXT
23 """
24
25 OTEL_PYTHON_ID_GENERATOR = "OTEL_PYTHON_ID_GENERATOR"
26 """
27 .. envvar:: OTEL_PYTHON_ID_GENERATOR
28 """
29
30 OTEL_TRACES_EXPORTER = "OTEL_TRACES_EXPORTER"
31 """
32 .. envvar:: OTEL_TRACES_EXPORTER
33 """
34
35 OTEL_PYTHON_TRACER_PROVIDER = "OTEL_PYTHON_TRACER_PROVIDER"
36 """
37 .. envvar:: OTEL_PYTHON_TRACER_PROVIDER
38 """
39
40 _OTEL_PYTHON_METER_PROVIDER = "OTEL_PYTHON_METER_PROVIDER"
41 """
42 .. envvar:: OTEL_PYTHON_METER_PROVIDER
43 """
44
45 _OTEL_METRICS_EXPORTER = "OTEL_METRICS_EXPORTER"
46 """
47 .. envvar:: OTEL_METRICS_EXPORTER
48
49 """
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opentelemetry-api/src/opentelemetry/environment_variables.py b/opentelemetry-api/src/opentelemetry/environment_variables.py
--- a/opentelemetry-api/src/opentelemetry/environment_variables.py
+++ b/opentelemetry-api/src/opentelemetry/environment_variables.py
@@ -12,6 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+OTEL_METRICS_EXPORTER = "OTEL_METRICS_EXPORTER"
+"""
+.. envvar:: OTEL_METRICS_EXPORTER
+
+"""
+
OTEL_PROPAGATORS = "OTEL_PROPAGATORS"
"""
.. envvar:: OTEL_PROPAGATORS
@@ -41,9 +47,3 @@
"""
.. envvar:: OTEL_PYTHON_METER_PROVIDER
"""
-
-_OTEL_METRICS_EXPORTER = "OTEL_METRICS_EXPORTER"
-"""
-.. envvar:: OTEL_METRICS_EXPORTER
-
-"""
| {"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/environment_variables.py b/opentelemetry-api/src/opentelemetry/environment_variables.py\n--- a/opentelemetry-api/src/opentelemetry/environment_variables.py\n+++ b/opentelemetry-api/src/opentelemetry/environment_variables.py\n@@ -12,6 +12,12 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+OTEL_METRICS_EXPORTER = \"OTEL_METRICS_EXPORTER\"\n+\"\"\"\n+.. envvar:: OTEL_METRICS_EXPORTER\n+\n+\"\"\"\n+\n OTEL_PROPAGATORS = \"OTEL_PROPAGATORS\"\n \"\"\"\n .. envvar:: OTEL_PROPAGATORS\n@@ -41,9 +47,3 @@\n \"\"\"\n .. envvar:: OTEL_PYTHON_METER_PROVIDER\n \"\"\"\n-\n-_OTEL_METRICS_EXPORTER = \"OTEL_METRICS_EXPORTER\"\n-\"\"\"\n-.. envvar:: OTEL_METRICS_EXPORTER\n-\n-\"\"\"\n", "issue": "_OTEL_METRICS_EXPORTER env var should be OTEL_METRICS_EXPORTER\nThe environment variable `_OTEL_METRICS_EXPORTER` is prefixed with an underscore, but there's no need for it as that environment variable is marked as stable in the specification https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#exporter-selection\r\n\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nOTEL_PROPAGATORS = \"OTEL_PROPAGATORS\"\n\"\"\"\n.. envvar:: OTEL_PROPAGATORS\n\"\"\"\n\nOTEL_PYTHON_CONTEXT = \"OTEL_PYTHON_CONTEXT\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_CONTEXT\n\"\"\"\n\nOTEL_PYTHON_ID_GENERATOR = \"OTEL_PYTHON_ID_GENERATOR\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_ID_GENERATOR\n\"\"\"\n\nOTEL_TRACES_EXPORTER = \"OTEL_TRACES_EXPORTER\"\n\"\"\"\n.. envvar:: OTEL_TRACES_EXPORTER\n\"\"\"\n\nOTEL_PYTHON_TRACER_PROVIDER = \"OTEL_PYTHON_TRACER_PROVIDER\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_TRACER_PROVIDER\n\"\"\"\n\n_OTEL_PYTHON_METER_PROVIDER = \"OTEL_PYTHON_METER_PROVIDER\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_METER_PROVIDER\n\"\"\"\n\n_OTEL_METRICS_EXPORTER = \"OTEL_METRICS_EXPORTER\"\n\"\"\"\n.. envvar:: OTEL_METRICS_EXPORTER\n\n\"\"\"\n", "path": "opentelemetry-api/src/opentelemetry/environment_variables.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nOTEL_METRICS_EXPORTER = \"OTEL_METRICS_EXPORTER\"\n\"\"\"\n.. envvar:: OTEL_METRICS_EXPORTER\n\n\"\"\"\n\nOTEL_PROPAGATORS = \"OTEL_PROPAGATORS\"\n\"\"\"\n.. envvar:: OTEL_PROPAGATORS\n\"\"\"\n\nOTEL_PYTHON_CONTEXT = \"OTEL_PYTHON_CONTEXT\"\n\"\"\"\n.. 
envvar:: OTEL_PYTHON_CONTEXT\n\"\"\"\n\nOTEL_PYTHON_ID_GENERATOR = \"OTEL_PYTHON_ID_GENERATOR\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_ID_GENERATOR\n\"\"\"\n\nOTEL_TRACES_EXPORTER = \"OTEL_TRACES_EXPORTER\"\n\"\"\"\n.. envvar:: OTEL_TRACES_EXPORTER\n\"\"\"\n\nOTEL_PYTHON_TRACER_PROVIDER = \"OTEL_PYTHON_TRACER_PROVIDER\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_TRACER_PROVIDER\n\"\"\"\n\n_OTEL_PYTHON_METER_PROVIDER = \"OTEL_PYTHON_METER_PROVIDER\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_METER_PROVIDER\n\"\"\"\n", "path": "opentelemetry-api/src/opentelemetry/environment_variables.py"}]} | 777 | 206 |
gh_patches_debug_42145 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-1140 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Walmart Spider Error
Something with the Walmart spider appears to be failing. When importing the GeoJSON file from alltheplaces.xyz into QGIS or geojson.io, a large number of locations are missing in the western US.

--- END ISSUE ---
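To make the failure mode concrete, here is a minimal sketch of the script-tag extraction approach — the same regex the golden diff later in this record uses. `script_text` is a hypothetical input standing in for the raw `<script>` element the spider extracts:

```python
# Minimal sketch, mirroring the golden diff below: pull Walmart's embedded
# JSON state out of the page instead of relying on itemprop metadata.
import json
import re


def extract_store_data(script_text):
    match = re.search(
        r'window.__WML_REDUX_INITIAL_STATE__ = (.*);</script>',
        script_text,
        flags=re.IGNORECASE | re.DOTALL,
    )
    if match is None:
        return None  # page loaded before the state script was rendered
    return json.loads(match.group(1)).get('store')
```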
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/walmart.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4
5 from locations.items import GeojsonPointItem
6
7
8 class WalmartSpider(scrapy.Spider):
9 name = "walmart"
10 allowed_domains = ["walmart.com"]
11 start_urls = (
12 'https://www.walmart.com/sitemap_store_main.xml',
13 )
14
15 def store_hours(self, store_hours):
16 if store_hours == 'Mo-Su':
17 return u'24/7'
18 elif store_hours is None:
19 return None
20 else:
21 return store_hours
22
23 def parse(self, response):
24 response.selector.remove_namespaces()
25 for u in response.xpath('//loc/text()').extract():
26 if u.endswith('/details'):
27 yield scrapy.Request(u.strip(), callback=self.parse_store)
28
29 def parse_store(self, response):
30 addr = response.xpath('//div[@itemprop="address"]')[0]
31 yield GeojsonPointItem(
32 lat=response.xpath('//meta[@itemprop="latitude"]/@content').extract_first(),
33 lon=response.xpath('//meta[@itemprop="longitude"]/@content').extract_first(),
34 ref=response.url.split('/')[4],
35 phone=response.xpath('//meta[@itemprop="telephone"]/@content').extract_first(),
36 name=response.xpath('//meta[@itemprop="name"]/@content').extract_first(),
37 opening_hours=self.store_hours(response.xpath('//meta[@itemprop="openingHours"]/@content').extract_first()),
38 addr_full=addr.xpath('//span[@itemprop="streetAddress"]/text()').extract_first(),
39 city=addr.xpath('//span[@itemprop="locality"]/text()').extract_first(),
40 state=addr.xpath('//span[@itemprop="addressRegion"]/text()').extract_first(),
41 postcode=addr.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
42 )
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/walmart.py b/locations/spiders/walmart.py
--- a/locations/spiders/walmart.py
+++ b/locations/spiders/walmart.py
@@ -1,7 +1,9 @@
# -*- coding: utf-8 -*-
import scrapy
import json
+import re
+from collections import defaultdict
from locations.items import GeojsonPointItem
@@ -11,14 +13,39 @@
start_urls = (
'https://www.walmart.com/sitemap_store_main.xml',
)
+ retries = defaultdict(int)
def store_hours(self, store_hours):
- if store_hours == 'Mo-Su':
+ if store_hours.get('operationalHours').get('open24Hours') is True:
return u'24/7'
- elif store_hours is None:
+ elif not store_hours.get('operationalHoursCombined'):
return None
else:
- return store_hours
+ op_hours = store_hours.get('operationalHoursCombined')
+ open_hours = []
+ for op_hour in op_hours:
+ if op_hour.get('dailyHours').get('closed') is True:
+ continue
+
+ if op_hour.get('dailyHours').get('openFullDay') is True:
+ start_hr = '00:00'
+ end_hr = '24:00'
+ else:
+ start_hr = op_hour.get('dailyHours').get('startHr')
+ end_hr = op_hour.get('dailyHours').get('endHr')
+
+ start_day = op_hour.get('startDayName')
+ end_day = op_hour.get('endDayName')
+
+ if end_day is None:
+ end_day = ''
+
+ hours = start_day+'-'+end_day+' '+start_hr+'-'+end_hr
+ open_hours.append(hours)
+
+ hours_combined = '; '.join(open_hours)
+
+ return hours_combined
def parse(self, response):
response.selector.remove_namespaces()
@@ -27,16 +54,30 @@
yield scrapy.Request(u.strip(), callback=self.parse_store)
def parse_store(self, response):
- addr = response.xpath('//div[@itemprop="address"]')[0]
+ script = response.xpath("//script[contains(.,'WML_REDUX_INITIAL_STATE')]").extract_first()
+ # In rare cases will hit page before script tag loads with content
+ if script is None:
+ if self.retries.get(response.url, 0) <= 2:
+ self.retries[response.url] += 1
+ yield scrapy.Request(response.url, callback=self.parse_store) # Try again
+ else:
+ raise Exception('Retried too many times')
+
+ script_content = re.search(r'window.__WML_REDUX_INITIAL_STATE__ = (.*);</script>', script,
+ flags=re.IGNORECASE | re.DOTALL).group(1)
+
+ store_data = json.loads(script_content).get('store')
+
yield GeojsonPointItem(
- lat=response.xpath('//meta[@itemprop="latitude"]/@content').extract_first(),
- lon=response.xpath('//meta[@itemprop="longitude"]/@content').extract_first(),
- ref=response.url.split('/')[4],
- phone=response.xpath('//meta[@itemprop="telephone"]/@content').extract_first(),
- name=response.xpath('//meta[@itemprop="name"]/@content').extract_first(),
- opening_hours=self.store_hours(response.xpath('//meta[@itemprop="openingHours"]/@content').extract_first()),
- addr_full=addr.xpath('//span[@itemprop="streetAddress"]/text()').extract_first(),
- city=addr.xpath('//span[@itemprop="locality"]/text()').extract_first(),
- state=addr.xpath('//span[@itemprop="addressRegion"]/text()').extract_first(),
- postcode=addr.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
+ lat=store_data.get('geoPoint').get('latitude'),
+ lon=store_data.get('geoPoint').get('longitude'),
+ ref=store_data.get('id'),
+ phone=store_data.get('phone'),
+ name=store_data.get('displayName'),
+ opening_hours=self.store_hours(store_data),
+ addr_full=store_data.get('address').get('streetAddress'),
+ city=store_data.get('address').get('city'),
+ state=store_data.get('address').get('state'),
+ postcode=store_data.get('address').get('postalCode'),
+ website=store_data.get('detailsPageURL'),
)
| {"golden_diff": "diff --git a/locations/spiders/walmart.py b/locations/spiders/walmart.py\n--- a/locations/spiders/walmart.py\n+++ b/locations/spiders/walmart.py\n@@ -1,7 +1,9 @@\n # -*- coding: utf-8 -*-\n import scrapy\n import json\n+import re\n \n+from collections import defaultdict\n from locations.items import GeojsonPointItem\n \n \n@@ -11,14 +13,39 @@\n start_urls = (\n 'https://www.walmart.com/sitemap_store_main.xml',\n )\n+ retries = defaultdict(int)\n \n def store_hours(self, store_hours):\n- if store_hours == 'Mo-Su':\n+ if store_hours.get('operationalHours').get('open24Hours') is True:\n return u'24/7'\n- elif store_hours is None:\n+ elif not store_hours.get('operationalHoursCombined'):\n return None\n else:\n- return store_hours\n+ op_hours = store_hours.get('operationalHoursCombined')\n+ open_hours = []\n+ for op_hour in op_hours:\n+ if op_hour.get('dailyHours').get('closed') is True:\n+ continue\n+\n+ if op_hour.get('dailyHours').get('openFullDay') is True:\n+ start_hr = '00:00'\n+ end_hr = '24:00'\n+ else:\n+ start_hr = op_hour.get('dailyHours').get('startHr')\n+ end_hr = op_hour.get('dailyHours').get('endHr')\n+\n+ start_day = op_hour.get('startDayName')\n+ end_day = op_hour.get('endDayName')\n+\n+ if end_day is None:\n+ end_day = ''\n+\n+ hours = start_day+'-'+end_day+' '+start_hr+'-'+end_hr\n+ open_hours.append(hours)\n+\n+ hours_combined = '; '.join(open_hours)\n+\n+ return hours_combined\n \n def parse(self, response):\n response.selector.remove_namespaces()\n@@ -27,16 +54,30 @@\n yield scrapy.Request(u.strip(), callback=self.parse_store)\n \n def parse_store(self, response):\n- addr = response.xpath('//div[@itemprop=\"address\"]')[0]\n+ script = response.xpath(\"//script[contains(.,'WML_REDUX_INITIAL_STATE')]\").extract_first()\n+ # In rare cases will hit page before script tag loads with content\n+ if script is None:\n+ if self.retries.get(response.url, 0) <= 2:\n+ self.retries[response.url] += 1\n+ yield scrapy.Request(response.url, callback=self.parse_store) # Try again\n+ else:\n+ raise Exception('Retried too many times')\n+\n+ script_content = re.search(r'window.__WML_REDUX_INITIAL_STATE__ = (.*);</script>', script,\n+ flags=re.IGNORECASE | re.DOTALL).group(1)\n+\n+ store_data = json.loads(script_content).get('store')\n+\n yield GeojsonPointItem(\n- lat=response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first(),\n- lon=response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first(),\n- ref=response.url.split('/')[4],\n- phone=response.xpath('//meta[@itemprop=\"telephone\"]/@content').extract_first(),\n- name=response.xpath('//meta[@itemprop=\"name\"]/@content').extract_first(),\n- opening_hours=self.store_hours(response.xpath('//meta[@itemprop=\"openingHours\"]/@content').extract_first()),\n- addr_full=addr.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first(),\n- city=addr.xpath('//span[@itemprop=\"locality\"]/text()').extract_first(),\n- state=addr.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first(),\n- postcode=addr.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n+ lat=store_data.get('geoPoint').get('latitude'),\n+ lon=store_data.get('geoPoint').get('longitude'),\n+ ref=store_data.get('id'),\n+ phone=store_data.get('phone'),\n+ name=store_data.get('displayName'),\n+ opening_hours=self.store_hours(store_data),\n+ addr_full=store_data.get('address').get('streetAddress'),\n+ city=store_data.get('address').get('city'),\n+ state=store_data.get('address').get('state'),\n+ 
postcode=store_data.get('address').get('postalCode'),\n+ website=store_data.get('detailsPageURL'),\n )\n", "issue": "Walmart Spider Error\nSomething with the Walmart spider appears to be failing. When importing the geojson file from alltheplaces.xyz to qgis or geojson.io, there are a large number of locations missing in the western US.\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\n\n\nclass WalmartSpider(scrapy.Spider):\n name = \"walmart\"\n allowed_domains = [\"walmart.com\"]\n start_urls = (\n 'https://www.walmart.com/sitemap_store_main.xml',\n )\n\n def store_hours(self, store_hours):\n if store_hours == 'Mo-Su':\n return u'24/7'\n elif store_hours is None:\n return None\n else:\n return store_hours\n\n def parse(self, response):\n response.selector.remove_namespaces()\n for u in response.xpath('//loc/text()').extract():\n if u.endswith('/details'):\n yield scrapy.Request(u.strip(), callback=self.parse_store)\n\n def parse_store(self, response):\n addr = response.xpath('//div[@itemprop=\"address\"]')[0]\n yield GeojsonPointItem(\n lat=response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first(),\n lon=response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first(),\n ref=response.url.split('/')[4],\n phone=response.xpath('//meta[@itemprop=\"telephone\"]/@content').extract_first(),\n name=response.xpath('//meta[@itemprop=\"name\"]/@content').extract_first(),\n opening_hours=self.store_hours(response.xpath('//meta[@itemprop=\"openingHours\"]/@content').extract_first()),\n addr_full=addr.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first(),\n city=addr.xpath('//span[@itemprop=\"locality\"]/text()').extract_first(),\n state=addr.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first(),\n postcode=addr.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n )\n", "path": "locations/spiders/walmart.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\n\nfrom collections import defaultdict\nfrom locations.items import GeojsonPointItem\n\n\nclass WalmartSpider(scrapy.Spider):\n name = \"walmart\"\n allowed_domains = [\"walmart.com\"]\n start_urls = (\n 'https://www.walmart.com/sitemap_store_main.xml',\n )\n retries = defaultdict(int)\n\n def store_hours(self, store_hours):\n if store_hours.get('operationalHours').get('open24Hours') is True:\n return u'24/7'\n elif not store_hours.get('operationalHoursCombined'):\n return None\n else:\n op_hours = store_hours.get('operationalHoursCombined')\n open_hours = []\n for op_hour in op_hours:\n if op_hour.get('dailyHours').get('closed') is True:\n continue\n\n if op_hour.get('dailyHours').get('openFullDay') is True:\n start_hr = '00:00'\n end_hr = '24:00'\n else:\n start_hr = op_hour.get('dailyHours').get('startHr')\n end_hr = op_hour.get('dailyHours').get('endHr')\n\n start_day = op_hour.get('startDayName')\n end_day = op_hour.get('endDayName')\n\n if end_day is None:\n end_day = ''\n\n hours = start_day+'-'+end_day+' '+start_hr+'-'+end_hr\n open_hours.append(hours)\n\n hours_combined = '; '.join(open_hours)\n\n return hours_combined\n\n def parse(self, response):\n response.selector.remove_namespaces()\n for u in response.xpath('//loc/text()').extract():\n if u.endswith('/details'):\n yield scrapy.Request(u.strip(), callback=self.parse_store)\n\n def parse_store(self, response):\n script = 
response.xpath(\"//script[contains(.,'WML_REDUX_INITIAL_STATE')]\").extract_first()\n # In rare cases will hit page before script tag loads with content\n if script is None:\n if self.retries.get(response.url, 0) <= 2:\n self.retries[response.url] += 1\n yield scrapy.Request(response.url, callback=self.parse_store) # Try again\n else:\n raise Exception('Retried too many times')\n\n script_content = re.search(r'window.__WML_REDUX_INITIAL_STATE__ = (.*);</script>', script,\n flags=re.IGNORECASE | re.DOTALL).group(1)\n\n store_data = json.loads(script_content).get('store')\n\n yield GeojsonPointItem(\n lat=store_data.get('geoPoint').get('latitude'),\n lon=store_data.get('geoPoint').get('longitude'),\n ref=store_data.get('id'),\n phone=store_data.get('phone'),\n name=store_data.get('displayName'),\n opening_hours=self.store_hours(store_data),\n addr_full=store_data.get('address').get('streetAddress'),\n city=store_data.get('address').get('city'),\n state=store_data.get('address').get('state'),\n postcode=store_data.get('address').get('postalCode'),\n website=store_data.get('detailsPageURL'),\n )\n", "path": "locations/spiders/walmart.py"}]} | 834 | 1,012 |
gh_patches_debug_12789 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-1463 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error validating return types when using a Generic in Union
There seems to be an issue with types.
Generics worked fine and dandy until I tried to use them in a Union.
```python
@strawberry.type
class FaultyType:
code_sent: bool
result: Union[Pet, ErrorNode[Codes]]
```
Here, returning an ErrorNode in the "result" field fails the type check.
It fails with strawberry.exceptions.UnallowedReturnTypeForUnion: The type "<class 'strawberry_sample.ErrorNode'>" of the field "result" is not in the list of the types of the union: "['CodesErrorNode', 'Pet']"
Returning the same generic without a union in the type declaration works fine.
Full sample:
https://gist.github.com/MeRuslan/5bd179f28fc5ae05e815429ee02ebdf6
--- END ISSUE ---
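A hedged reconstruction of the failing setup follows, with stub definitions standing in for the gist's `Pet`, `ErrorNode`, and `Codes` types — the stubs are assumptions; only `FaultyType` is quoted from the issue:

```python
from enum import Enum
from typing import Generic, TypeVar, Union

import strawberry

T = TypeVar("T")


@strawberry.enum
class Codes(Enum):  # stub: real values live in the linked gist
    NOT_FOUND = "not_found"


@strawberry.type
class Pet:  # stub
    name: str


@strawberry.type
class ErrorNode(Generic[T]):  # generic error wrapper, as in the gist
    code: T


@strawberry.type
class FaultyType:
    code_sent: bool
    # Resolving this field to an ErrorNode raised
    # UnallowedReturnTypeForUnion before the fix.
    result: Union[Pet, ErrorNode[Codes]]
```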
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/types/types.py`
Content:
```
1 from __future__ import annotations
2
3 import dataclasses
4 from typing import (
5 TYPE_CHECKING,
6 List,
7 Mapping,
8 Optional,
9 Sequence,
10 Type,
11 TypeVar,
12 Union,
13 )
14
15 from strawberry.type import StrawberryType, StrawberryTypeVar
16 from strawberry.utils.typing import is_generic as is_type_generic
17
18
19 if TYPE_CHECKING:
20 from strawberry.field import StrawberryField
21 from strawberry.schema_directive import StrawberrySchemaDirective
22
23
24 @dataclasses.dataclass(eq=False)
25 class TypeDefinition(StrawberryType):
26 name: str
27 is_input: bool
28 is_interface: bool
29 origin: Type
30 description: Optional[str]
31 interfaces: List["TypeDefinition"]
32 extend: bool
33 directives: Optional[Sequence[StrawberrySchemaDirective]]
34
35 _fields: List["StrawberryField"]
36
37 concrete_of: Optional["TypeDefinition"] = None
38 """Concrete implementations of Generic TypeDefinitions fill this in"""
39 type_var_map: Mapping[TypeVar, Union[StrawberryType, type]] = dataclasses.field(
40 default_factory=dict
41 )
42
43 # TODO: remove wrapped cls when we "merge" this with `StrawberryObject`
44 def resolve_generic(self, wrapped_cls: type) -> type:
45 from strawberry.annotation import StrawberryAnnotation
46
47 passed_types = wrapped_cls.__args__ # type: ignore
48 params = wrapped_cls.__origin__.__parameters__ # type: ignore
49
50 # Make sure all passed_types are turned into StrawberryTypes
51 resolved_types = []
52 for passed_type in passed_types:
53 resolved_type = StrawberryAnnotation(passed_type).resolve()
54 resolved_types.append(resolved_type)
55
56 type_var_map = dict(zip(params, resolved_types))
57
58 return self.copy_with(type_var_map)
59
60 # TODO: Return a StrawberryObject
61 def copy_with(
62 self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
63 ) -> type:
64 fields = []
65 for field in self.fields:
66 # TODO: Logic unnecessary with StrawberryObject
67 field_type = field.type
68 if hasattr(field_type, "_type_definition"):
69 field_type = field_type._type_definition # type: ignore
70
71 # TODO: All types should end up being StrawberryTypes
72 # The first check is here as a symptom of strawberry.ID being a
73 # Scalar, but not a StrawberryType
74 if isinstance(field_type, StrawberryType) and field_type.is_generic:
75 field = field.copy_with(type_var_map)
76
77 fields.append(field)
78
79 new_type_definition = TypeDefinition(
80 name=self.name,
81 is_input=self.is_input,
82 origin=self.origin,
83 is_interface=self.is_interface,
84 directives=self.directives,
85 interfaces=self.interfaces,
86 description=self.description,
87 extend=self.extend,
88 _fields=fields,
89 concrete_of=self,
90 type_var_map=type_var_map,
91 )
92
93 new_type = type(
94 new_type_definition.name,
95 (self.origin,),
96 {"_type_definition": new_type_definition},
97 )
98
99 new_type_definition.origin = new_type
100
101 return new_type
102
103 def get_field(self, python_name: str) -> Optional["StrawberryField"]:
104 return next(
105 (field for field in self.fields if field.python_name == python_name), None
106 )
107
108 @property
109 def fields(self) -> List["StrawberryField"]:
110 # TODO: rename _fields to fields and remove this property
111 return self._fields
112
113 @property
114 def is_generic(self) -> bool:
115 return is_type_generic(self.origin)
116
117 @property
118 def type_params(self) -> List[TypeVar]:
119 type_params: List[TypeVar] = []
120 for field in self.fields:
121 type_params.extend(field.type_params)
122
123 return type_params
124
125 def is_implemented_by(self, root: Union[type, dict]) -> bool:
126 # TODO: Accept StrawberryObject instead
127 # TODO: Support dicts
128 if isinstance(root, dict):
129 raise NotImplementedError()
130
131 type_definition = root._type_definition # type: ignore
132
133 if type_definition is self:
134 # No generics involved. Exact type match
135 return True
136
137 if type_definition is not self.concrete_of:
138 # Either completely different type, or concrete type of a different generic
139 return False
140
141 # Check the mapping of all fields' TypeVars
142 for generic_field in type_definition.fields:
143 generic_field_type = generic_field.type
144 if not isinstance(generic_field_type, StrawberryTypeVar):
145 continue
146
147 # For each TypeVar found, get the expected type from the copy's type map
148 expected_concrete_type = self.type_var_map.get(generic_field_type.type_var)
149 if expected_concrete_type is None:
150 # TODO: Should this return False?
151 continue
152
153 # Check if the expected type matches the type found on the type_map
154 real_concrete_type = type(getattr(root, generic_field.name))
155 if real_concrete_type is not expected_concrete_type:
156 return False
157
158 # All field mappings succeeded. This is a match
159 return True
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/types/types.py b/strawberry/types/types.py
--- a/strawberry/types/types.py
+++ b/strawberry/types/types.py
@@ -152,6 +152,13 @@
# Check if the expected type matches the type found on the type_map
real_concrete_type = type(getattr(root, generic_field.name))
+
+ # TODO: uniform type var map, at the moment we map object types
+ # to their class (not to TypeDefinition) while we map enum to
+ # the EnumDefinition class. This is why we do this check here:
+ if hasattr(real_concrete_type, "_enum_definition"):
+ real_concrete_type = real_concrete_type._enum_definition
+
if real_concrete_type is not expected_concrete_type:
return False
| {"golden_diff": "diff --git a/strawberry/types/types.py b/strawberry/types/types.py\n--- a/strawberry/types/types.py\n+++ b/strawberry/types/types.py\n@@ -152,6 +152,13 @@\n \n # Check if the expected type matches the type found on the type_map\n real_concrete_type = type(getattr(root, generic_field.name))\n+\n+ # TODO: uniform type var map, at the moment we map object types\n+ # to their class (not to TypeDefinition) while we map enum to\n+ # the EnumDefinition class. This is why we do this check here:\n+ if hasattr(real_concrete_type, \"_enum_definition\"):\n+ real_concrete_type = real_concrete_type._enum_definition\n+\n if real_concrete_type is not expected_concrete_type:\n return False\n", "issue": "Error validating return types when using a Generic in Union\nThere seems to be an issue with types.\r\nGenerics worked fine and dandy until I've tried to use them in a Union.\r\n\r\n```python\r\[email protected]\r\nclass FaultyType:\r\n code_sent: bool\r\n result: Union[Pet, ErrorNode[Codes]]\r\n```\r\nHere returning an ErrorNode in \"result\" field fails type check.\r\nFails with strawberry.exceptions.UnallowedReturnTypeForUnion: The type \"<class 'strawberry_sample.ErrorNode'>\" of the field \"result\" is not in the list of the types of the union: \"['CodesErrorNode', 'Pet']\"\r\n\r\nReturning the same generic without unions in type declaration works fine.\r\n\r\nFull sample:\r\nhttps://gist.github.com/MeRuslan/5bd179f28fc5ae05e815429ee02ebdf6\n", "before_files": [{"content": "from __future__ import annotations\n\nimport dataclasses\nfrom typing import (\n TYPE_CHECKING,\n List,\n Mapping,\n Optional,\n Sequence,\n Type,\n TypeVar,\n Union,\n)\n\nfrom strawberry.type import StrawberryType, StrawberryTypeVar\nfrom strawberry.utils.typing import is_generic as is_type_generic\n\n\nif TYPE_CHECKING:\n from strawberry.field import StrawberryField\n from strawberry.schema_directive import StrawberrySchemaDirective\n\n\[email protected](eq=False)\nclass TypeDefinition(StrawberryType):\n name: str\n is_input: bool\n is_interface: bool\n origin: Type\n description: Optional[str]\n interfaces: List[\"TypeDefinition\"]\n extend: bool\n directives: Optional[Sequence[StrawberrySchemaDirective]]\n\n _fields: List[\"StrawberryField\"]\n\n concrete_of: Optional[\"TypeDefinition\"] = None\n \"\"\"Concrete implementations of Generic TypeDefinitions fill this in\"\"\"\n type_var_map: Mapping[TypeVar, Union[StrawberryType, type]] = dataclasses.field(\n default_factory=dict\n )\n\n # TODO: remove wrapped cls when we \"merge\" this with `StrawberryObject`\n def resolve_generic(self, wrapped_cls: type) -> type:\n from strawberry.annotation import StrawberryAnnotation\n\n passed_types = wrapped_cls.__args__ # type: ignore\n params = wrapped_cls.__origin__.__parameters__ # type: ignore\n\n # Make sure all passed_types are turned into StrawberryTypes\n resolved_types = []\n for passed_type in passed_types:\n resolved_type = StrawberryAnnotation(passed_type).resolve()\n resolved_types.append(resolved_type)\n\n type_var_map = dict(zip(params, resolved_types))\n\n return self.copy_with(type_var_map)\n\n # TODO: Return a StrawberryObject\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> type:\n fields = []\n for field in self.fields:\n # TODO: Logic unnecessary with StrawberryObject\n field_type = field.type\n if hasattr(field_type, \"_type_definition\"):\n field_type = field_type._type_definition # type: ignore\n\n # TODO: All types should end up being StrawberryTypes\n # 
The first check is here as a symptom of strawberry.ID being a\n # Scalar, but not a StrawberryType\n if isinstance(field_type, StrawberryType) and field_type.is_generic:\n field = field.copy_with(type_var_map)\n\n fields.append(field)\n\n new_type_definition = TypeDefinition(\n name=self.name,\n is_input=self.is_input,\n origin=self.origin,\n is_interface=self.is_interface,\n directives=self.directives,\n interfaces=self.interfaces,\n description=self.description,\n extend=self.extend,\n _fields=fields,\n concrete_of=self,\n type_var_map=type_var_map,\n )\n\n new_type = type(\n new_type_definition.name,\n (self.origin,),\n {\"_type_definition\": new_type_definition},\n )\n\n new_type_definition.origin = new_type\n\n return new_type\n\n def get_field(self, python_name: str) -> Optional[\"StrawberryField\"]:\n return next(\n (field for field in self.fields if field.python_name == python_name), None\n )\n\n @property\n def fields(self) -> List[\"StrawberryField\"]:\n # TODO: rename _fields to fields and remove this property\n return self._fields\n\n @property\n def is_generic(self) -> bool:\n return is_type_generic(self.origin)\n\n @property\n def type_params(self) -> List[TypeVar]:\n type_params: List[TypeVar] = []\n for field in self.fields:\n type_params.extend(field.type_params)\n\n return type_params\n\n def is_implemented_by(self, root: Union[type, dict]) -> bool:\n # TODO: Accept StrawberryObject instead\n # TODO: Support dicts\n if isinstance(root, dict):\n raise NotImplementedError()\n\n type_definition = root._type_definition # type: ignore\n\n if type_definition is self:\n # No generics involved. Exact type match\n return True\n\n if type_definition is not self.concrete_of:\n # Either completely different type, or concrete type of a different generic\n return False\n\n # Check the mapping of all fields' TypeVars\n for generic_field in type_definition.fields:\n generic_field_type = generic_field.type\n if not isinstance(generic_field_type, StrawberryTypeVar):\n continue\n\n # For each TypeVar found, get the expected type from the copy's type map\n expected_concrete_type = self.type_var_map.get(generic_field_type.type_var)\n if expected_concrete_type is None:\n # TODO: Should this return False?\n continue\n\n # Check if the expected type matches the type found on the type_map\n real_concrete_type = type(getattr(root, generic_field.name))\n if real_concrete_type is not expected_concrete_type:\n return False\n\n # All field mappings succeeded. 
This is a match\n return True\n", "path": "strawberry/types/types.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport dataclasses\nfrom typing import (\n TYPE_CHECKING,\n List,\n Mapping,\n Optional,\n Sequence,\n Type,\n TypeVar,\n Union,\n)\n\nfrom strawberry.type import StrawberryType, StrawberryTypeVar\nfrom strawberry.utils.typing import is_generic as is_type_generic\n\n\nif TYPE_CHECKING:\n from strawberry.field import StrawberryField\n from strawberry.schema_directive import StrawberrySchemaDirective\n\n\[email protected](eq=False)\nclass TypeDefinition(StrawberryType):\n name: str\n is_input: bool\n is_interface: bool\n origin: Type\n description: Optional[str]\n interfaces: List[\"TypeDefinition\"]\n extend: bool\n directives: Optional[Sequence[StrawberrySchemaDirective]]\n\n _fields: List[\"StrawberryField\"]\n\n concrete_of: Optional[\"TypeDefinition\"] = None\n \"\"\"Concrete implementations of Generic TypeDefinitions fill this in\"\"\"\n type_var_map: Mapping[TypeVar, Union[StrawberryType, type]] = dataclasses.field(\n default_factory=dict\n )\n\n # TODO: remove wrapped cls when we \"merge\" this with `StrawberryObject`\n def resolve_generic(self, wrapped_cls: type) -> type:\n from strawberry.annotation import StrawberryAnnotation\n\n passed_types = wrapped_cls.__args__ # type: ignore\n params = wrapped_cls.__origin__.__parameters__ # type: ignore\n\n # Make sure all passed_types are turned into StrawberryTypes\n resolved_types = []\n for passed_type in passed_types:\n resolved_type = StrawberryAnnotation(passed_type).resolve()\n resolved_types.append(resolved_type)\n\n type_var_map = dict(zip(params, resolved_types))\n\n return self.copy_with(type_var_map)\n\n # TODO: Return a StrawberryObject\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> type:\n fields = []\n for field in self.fields:\n # TODO: Logic unnecessary with StrawberryObject\n field_type = field.type\n if hasattr(field_type, \"_type_definition\"):\n field_type = field_type._type_definition # type: ignore\n\n # TODO: All types should end up being StrawberryTypes\n # The first check is here as a symptom of strawberry.ID being a\n # Scalar, but not a StrawberryType\n if isinstance(field_type, StrawberryType) and field_type.is_generic:\n field = field.copy_with(type_var_map)\n\n fields.append(field)\n\n new_type_definition = TypeDefinition(\n name=self.name,\n is_input=self.is_input,\n origin=self.origin,\n is_interface=self.is_interface,\n directives=self.directives,\n interfaces=self.interfaces,\n description=self.description,\n extend=self.extend,\n _fields=fields,\n concrete_of=self,\n type_var_map=type_var_map,\n )\n\n new_type = type(\n new_type_definition.name,\n (self.origin,),\n {\"_type_definition\": new_type_definition},\n )\n\n new_type_definition.origin = new_type\n\n return new_type\n\n def get_field(self, python_name: str) -> Optional[\"StrawberryField\"]:\n return next(\n (field for field in self.fields if field.python_name == python_name), None\n )\n\n @property\n def fields(self) -> List[\"StrawberryField\"]:\n # TODO: rename _fields to fields and remove this property\n return self._fields\n\n @property\n def is_generic(self) -> bool:\n return is_type_generic(self.origin)\n\n @property\n def type_params(self) -> List[TypeVar]:\n type_params: List[TypeVar] = []\n for field in self.fields:\n type_params.extend(field.type_params)\n\n return type_params\n\n def is_implemented_by(self, root: Union[type, dict]) -> bool:\n # TODO: 
Accept StrawberryObject instead\n # TODO: Support dicts\n if isinstance(root, dict):\n raise NotImplementedError()\n\n type_definition = root._type_definition # type: ignore\n\n if type_definition is self:\n # No generics involved. Exact type match\n return True\n\n if type_definition is not self.concrete_of:\n # Either completely different type, or concrete type of a different generic\n return False\n\n # Check the mapping of all fields' TypeVars\n for generic_field in type_definition.fields:\n generic_field_type = generic_field.type\n if not isinstance(generic_field_type, StrawberryTypeVar):\n continue\n\n # For each TypeVar found, get the expected type from the copy's type map\n expected_concrete_type = self.type_var_map.get(generic_field_type.type_var)\n if expected_concrete_type is None:\n # TODO: Should this return False?\n continue\n\n # Check if the expected type matches the type found on the type_map\n real_concrete_type = type(getattr(root, generic_field.name))\n\n # TODO: uniform type var map, at the moment we map object types\n # to their class (not to TypeDefinition) while we map enum to\n # the EnumDefinition class. This is why we do this check here:\n if hasattr(real_concrete_type, \"_enum_definition\"):\n real_concrete_type = real_concrete_type._enum_definition\n\n if real_concrete_type is not expected_concrete_type:\n return False\n\n # All field mappings succeeded. This is a match\n return True\n", "path": "strawberry/types/types.py"}]} | 1,930 | 187 |
gh_patches_debug_18543 | rasdani/github-patches | git_diff | mne-tools__mne-python-9055 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
use bibtex in multi_comp.py
convert references in `mne/stats/multi_comp.py` to use footcite / footbibliography
--- END ISSUE ---
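Concretely, the requested conversion swaps the numbered docstring reference for a `:footcite:` role plus a `.. footbibliography::` directive. A sketch of the target docstring, mirroring the golden diff below (the `GenoveseEtAl2002` bibtex key is taken from it):

```python
def fdr_correction(pvals, alpha=0.05, method='indep'):
    """P-value correction with False Discovery Rate (FDR).

    Correction for multiple comparison using FDR :footcite:`GenoveseEtAl2002`.

    References
    ----------
    .. footbibliography::
    """
```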
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mne/stats/multi_comp.py`
Content:
```
1 # Authors: Josef Pktd and example from H Raja and rewrite from Vincent Davis
2 # Alexandre Gramfort <[email protected]>
3 #
4 # Code borrowed from statsmodels
5 #
6 # License: BSD (3-clause)
7
8 import numpy as np
9
10
11 def _ecdf(x):
12 """No frills empirical cdf used in fdrcorrection."""
13 nobs = len(x)
14 return np.arange(1, nobs + 1) / float(nobs)
15
16
17 def fdr_correction(pvals, alpha=0.05, method='indep'):
18 """P-value correction with False Discovery Rate (FDR).
19
20 Correction for multiple comparison using FDR [1]_.
21
22 This covers Benjamini/Hochberg for independent or positively correlated and
23 Benjamini/Yekutieli for general or negatively correlated tests.
24
25 Parameters
26 ----------
27 pvals : array_like
28 Set of p-values of the individual tests.
29 alpha : float
30 Error rate.
31 method : 'indep' | 'negcorr'
32 If 'indep' it implements Benjamini/Hochberg for independent or if
33 'negcorr' it corresponds to Benjamini/Yekutieli.
34
35 Returns
36 -------
37 reject : array, bool
38 True if a hypothesis is rejected, False if not.
39 pval_corrected : array
40 P-values adjusted for multiple hypothesis testing to limit FDR.
41
42 References
43 ----------
44 .. [1] Genovese CR, Lazar NA, Nichols T. Thresholding of statistical maps
45 in functional neuroimaging using the false discovery rate.
46 Neuroimage. 2002 Apr;15(4):870-8.
47 """
48 pvals = np.asarray(pvals)
49 shape_init = pvals.shape
50 pvals = pvals.ravel()
51
52 pvals_sortind = np.argsort(pvals)
53 pvals_sorted = pvals[pvals_sortind]
54 sortrevind = pvals_sortind.argsort()
55
56 if method in ['i', 'indep', 'p', 'poscorr']:
57 ecdffactor = _ecdf(pvals_sorted)
58 elif method in ['n', 'negcorr']:
59 cm = np.sum(1. / np.arange(1, len(pvals_sorted) + 1))
60 ecdffactor = _ecdf(pvals_sorted) / cm
61 else:
62 raise ValueError("Method should be 'indep' and 'negcorr'")
63
64 reject = pvals_sorted < (ecdffactor * alpha)
65 if reject.any():
66 rejectmax = max(np.nonzero(reject)[0])
67 else:
68 rejectmax = 0
69 reject[:rejectmax] = True
70
71 pvals_corrected_raw = pvals_sorted / ecdffactor
72 pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
73 pvals_corrected[pvals_corrected > 1.0] = 1.0
74 pvals_corrected = pvals_corrected[sortrevind].reshape(shape_init)
75 reject = reject[sortrevind].reshape(shape_init)
76 return reject, pvals_corrected
77
78
79 def bonferroni_correction(pval, alpha=0.05):
80 """P-value correction with Bonferroni method.
81
82 Parameters
83 ----------
84 pval : array_like
85 Set of p-values of the individual tests.
86 alpha : float
87 Error rate.
88
89 Returns
90 -------
91 reject : array, bool
92 True if a hypothesis is rejected, False if not.
93 pval_corrected : array
94 P-values adjusted for multiple hypothesis testing to limit FDR.
95 """
96 pval = np.asarray(pval)
97 pval_corrected = pval * float(pval.size)
98 # p-values must not be larger than 1.
99 pval_corrected = pval_corrected.clip(max=1.)
100 reject = pval_corrected < alpha
101 return reject, pval_corrected
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mne/stats/multi_comp.py b/mne/stats/multi_comp.py
--- a/mne/stats/multi_comp.py
+++ b/mne/stats/multi_comp.py
@@ -17,7 +17,7 @@
def fdr_correction(pvals, alpha=0.05, method='indep'):
"""P-value correction with False Discovery Rate (FDR).
- Correction for multiple comparison using FDR [1]_.
+ Correction for multiple comparison using FDR :footcite:`GenoveseEtAl2002`.
This covers Benjamini/Hochberg for independent or positively correlated and
Benjamini/Yekutieli for general or negatively correlated tests.
@@ -41,9 +41,7 @@
References
----------
- .. [1] Genovese CR, Lazar NA, Nichols T. Thresholding of statistical maps
- in functional neuroimaging using the false discovery rate.
- Neuroimage. 2002 Apr;15(4):870-8.
+ .. footbibliography::
"""
pvals = np.asarray(pvals)
shape_init = pvals.shape
| {"golden_diff": "diff --git a/mne/stats/multi_comp.py b/mne/stats/multi_comp.py\n--- a/mne/stats/multi_comp.py\n+++ b/mne/stats/multi_comp.py\n@@ -17,7 +17,7 @@\n def fdr_correction(pvals, alpha=0.05, method='indep'):\n \"\"\"P-value correction with False Discovery Rate (FDR).\n \n- Correction for multiple comparison using FDR [1]_.\n+ Correction for multiple comparison using FDR :footcite:`GenoveseEtAl2002`.\n \n This covers Benjamini/Hochberg for independent or positively correlated and\n Benjamini/Yekutieli for general or negatively correlated tests.\n@@ -41,9 +41,7 @@\n \n References\n ----------\n- .. [1] Genovese CR, Lazar NA, Nichols T. Thresholding of statistical maps\n- in functional neuroimaging using the false discovery rate.\n- Neuroimage. 2002 Apr;15(4):870-8.\n+ .. footbibliography::\n \"\"\"\n pvals = np.asarray(pvals)\n shape_init = pvals.shape\n", "issue": "use bibtex in multi_comp.py\nconvert references in `mne/stats/multi_comp.py` to use footcite / footbibliography\r\n\n", "before_files": [{"content": "# Authors: Josef Pktd and example from H Raja and rewrite from Vincent Davis\n# Alexandre Gramfort <[email protected]>\n#\n# Code borrowed from statsmodels\n#\n# License: BSD (3-clause)\n\nimport numpy as np\n\n\ndef _ecdf(x):\n \"\"\"No frills empirical cdf used in fdrcorrection.\"\"\"\n nobs = len(x)\n return np.arange(1, nobs + 1) / float(nobs)\n\n\ndef fdr_correction(pvals, alpha=0.05, method='indep'):\n \"\"\"P-value correction with False Discovery Rate (FDR).\n\n Correction for multiple comparison using FDR [1]_.\n\n This covers Benjamini/Hochberg for independent or positively correlated and\n Benjamini/Yekutieli for general or negatively correlated tests.\n\n Parameters\n ----------\n pvals : array_like\n Set of p-values of the individual tests.\n alpha : float\n Error rate.\n method : 'indep' | 'negcorr'\n If 'indep' it implements Benjamini/Hochberg for independent or if\n 'negcorr' it corresponds to Benjamini/Yekutieli.\n\n Returns\n -------\n reject : array, bool\n True if a hypothesis is rejected, False if not.\n pval_corrected : array\n P-values adjusted for multiple hypothesis testing to limit FDR.\n\n References\n ----------\n .. [1] Genovese CR, Lazar NA, Nichols T. Thresholding of statistical maps\n in functional neuroimaging using the false discovery rate.\n Neuroimage. 2002 Apr;15(4):870-8.\n \"\"\"\n pvals = np.asarray(pvals)\n shape_init = pvals.shape\n pvals = pvals.ravel()\n\n pvals_sortind = np.argsort(pvals)\n pvals_sorted = pvals[pvals_sortind]\n sortrevind = pvals_sortind.argsort()\n\n if method in ['i', 'indep', 'p', 'poscorr']:\n ecdffactor = _ecdf(pvals_sorted)\n elif method in ['n', 'negcorr']:\n cm = np.sum(1. 
/ np.arange(1, len(pvals_sorted) + 1))\n ecdffactor = _ecdf(pvals_sorted) / cm\n else:\n raise ValueError(\"Method should be 'indep' and 'negcorr'\")\n\n reject = pvals_sorted < (ecdffactor * alpha)\n if reject.any():\n rejectmax = max(np.nonzero(reject)[0])\n else:\n rejectmax = 0\n reject[:rejectmax] = True\n\n pvals_corrected_raw = pvals_sorted / ecdffactor\n pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]\n pvals_corrected[pvals_corrected > 1.0] = 1.0\n pvals_corrected = pvals_corrected[sortrevind].reshape(shape_init)\n reject = reject[sortrevind].reshape(shape_init)\n return reject, pvals_corrected\n\n\ndef bonferroni_correction(pval, alpha=0.05):\n \"\"\"P-value correction with Bonferroni method.\n\n Parameters\n ----------\n pval : array_like\n Set of p-values of the individual tests.\n alpha : float\n Error rate.\n\n Returns\n -------\n reject : array, bool\n True if a hypothesis is rejected, False if not.\n pval_corrected : array\n P-values adjusted for multiple hypothesis testing to limit FDR.\n \"\"\"\n pval = np.asarray(pval)\n pval_corrected = pval * float(pval.size)\n # p-values must not be larger than 1.\n pval_corrected = pval_corrected.clip(max=1.)\n reject = pval_corrected < alpha\n return reject, pval_corrected\n", "path": "mne/stats/multi_comp.py"}], "after_files": [{"content": "# Authors: Josef Pktd and example from H Raja and rewrite from Vincent Davis\n# Alexandre Gramfort <[email protected]>\n#\n# Code borrowed from statsmodels\n#\n# License: BSD (3-clause)\n\nimport numpy as np\n\n\ndef _ecdf(x):\n \"\"\"No frills empirical cdf used in fdrcorrection.\"\"\"\n nobs = len(x)\n return np.arange(1, nobs + 1) / float(nobs)\n\n\ndef fdr_correction(pvals, alpha=0.05, method='indep'):\n \"\"\"P-value correction with False Discovery Rate (FDR).\n\n Correction for multiple comparison using FDR :footcite:`GenoveseEtAl2002`.\n\n This covers Benjamini/Hochberg for independent or positively correlated and\n Benjamini/Yekutieli for general or negatively correlated tests.\n\n Parameters\n ----------\n pvals : array_like\n Set of p-values of the individual tests.\n alpha : float\n Error rate.\n method : 'indep' | 'negcorr'\n If 'indep' it implements Benjamini/Hochberg for independent or if\n 'negcorr' it corresponds to Benjamini/Yekutieli.\n\n Returns\n -------\n reject : array, bool\n True if a hypothesis is rejected, False if not.\n pval_corrected : array\n P-values adjusted for multiple hypothesis testing to limit FDR.\n\n References\n ----------\n .. footbibliography::\n \"\"\"\n pvals = np.asarray(pvals)\n shape_init = pvals.shape\n pvals = pvals.ravel()\n\n pvals_sortind = np.argsort(pvals)\n pvals_sorted = pvals[pvals_sortind]\n sortrevind = pvals_sortind.argsort()\n\n if method in ['i', 'indep', 'p', 'poscorr']:\n ecdffactor = _ecdf(pvals_sorted)\n elif method in ['n', 'negcorr']:\n cm = np.sum(1. 
/ np.arange(1, len(pvals_sorted) + 1))\n ecdffactor = _ecdf(pvals_sorted) / cm\n else:\n raise ValueError(\"Method should be 'indep' and 'negcorr'\")\n\n reject = pvals_sorted < (ecdffactor * alpha)\n if reject.any():\n rejectmax = max(np.nonzero(reject)[0])\n else:\n rejectmax = 0\n reject[:rejectmax] = True\n\n pvals_corrected_raw = pvals_sorted / ecdffactor\n pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]\n pvals_corrected[pvals_corrected > 1.0] = 1.0\n pvals_corrected = pvals_corrected[sortrevind].reshape(shape_init)\n reject = reject[sortrevind].reshape(shape_init)\n return reject, pvals_corrected\n\n\ndef bonferroni_correction(pval, alpha=0.05):\n \"\"\"P-value correction with Bonferroni method.\n\n Parameters\n ----------\n pval : array_like\n Set of p-values of the individual tests.\n alpha : float\n Error rate.\n\n Returns\n -------\n reject : array, bool\n True if a hypothesis is rejected, False if not.\n pval_corrected : array\n P-values adjusted for multiple hypothesis testing to limit FDR.\n \"\"\"\n pval = np.asarray(pval)\n pval_corrected = pval * float(pval.size)\n # p-values must not be larger than 1.\n pval_corrected = pval_corrected.clip(max=1.)\n reject = pval_corrected < alpha\n return reject, pval_corrected\n", "path": "mne/stats/multi_comp.py"}]} | 1,359 | 258 |
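For readers skimming the record above: the fix swaps a hand-written reference for MNE's footcite markup. A minimal docstring sketch of the target style, assuming Sphinx with the sphinxcontrib-bibtex extension and a `GenoveseEtAl2002` entry in the project's BibTeX file:

```python
def fdr_correction(pvals, alpha=0.05, method='indep'):
    """P-value correction with False Discovery Rate (FDR).

    Correction for multiple comparison using FDR
    :footcite:`GenoveseEtAl2002`.

    References
    ----------
    .. footbibliography::
    """
    # body omitted; only the citation style is illustrated here
```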
gh_patches_debug_30754 | rasdani/github-patches | git_diff | cal-itp__benefits-441 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add logging for OAuth flows
We merged the basic implementation in #414, but neglected to include any additional logging around the new flows/logic.
Some ideas of what we should log:
- [x] The `OAUTH_CLIENT_NAME` used
- [x] The `redirect_uri` sent to the authorization server with the `authorize_redirect` request
- [x] If an access token fails to be authorized
--- END ISSUE ---
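Before the code segments, a minimal stand-alone sketch of the three log lines the checklist asks for. It uses only the standard library; `StubOAuthClient` and the client name are illustrative stand-ins, not the project's actual Authlib wiring:

```python
import logging

logger = logging.getLogger(__name__)

OAUTH_CLIENT_NAME = "example-client"  # hypothetical value, for illustration only


class StubOAuthClient:
    """Stand-in for the Authlib client so this sketch runs on its own."""

    def authorize_access_token(self, request):
        return None  # simulate an authorization failure


def login(client, redirect_uri):
    # Log the redirect_uri sent with the authorize_redirect request.
    logger.debug("OAuth authorize_redirect with redirect_uri: %s", redirect_uri)
    # (a real implementation would call client.authorize_redirect here)


def authorize(client, request):
    token = client.authorize_access_token(request)
    if token is None:
        # Log when an access token fails to be authorized.
        logger.warning("Could not authorize OAuth access token")
    return token


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    # Log the OAUTH_CLIENT_NAME used.
    logger.debug("Using OAuth client configuration: %s", OAUTH_CLIENT_NAME)
    login(StubOAuthClient(), "https://example.test/oauth/authorize")
    authorize(StubOAuthClient(), request=None)
```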
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/oauth/views.py`
Content:
```
1 from django.shortcuts import redirect
2 from django.urls import reverse
3
4 from authlib.integrations.django_client import OAuth
5
6 from benefits.core import session
7 from benefits.settings import OAUTH_CLIENT_NAME
8
9
10 if OAUTH_CLIENT_NAME:
11 _oauth = OAuth()
12 _oauth.register(OAUTH_CLIENT_NAME)
13 oauth_client = _oauth.create_client(OAUTH_CLIENT_NAME)
14
15
16 ROUTE_AUTH = "oauth:authorize"
17 ROUTE_START = "eligibility:start"
18 ROUTE_CONFIRM = "eligibility:confirm"
19
20
21 def login(request):
22 if not oauth_client:
23 raise Exception("No OAuth client")
24
25 route = reverse(ROUTE_AUTH)
26 redirect_uri = request.build_absolute_uri(route)
27
28 return oauth_client.authorize_redirect(request, redirect_uri)
29
30
31 def authorize(request):
32 if not oauth_client:
33 raise Exception("No OAuth client")
34
35 token = oauth_client.authorize_access_token(request)
36
37 if token is None:
38 return redirect(ROUTE_START)
39 else:
40 # we are intentionally not storing anything about the user, including their token
41 session.update(request, auth=True)
42 return redirect(ROUTE_CONFIRM)
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/benefits/oauth/views.py b/benefits/oauth/views.py
--- a/benefits/oauth/views.py
+++ b/benefits/oauth/views.py
@@ -1,3 +1,5 @@
+import logging
+
from django.shortcuts import redirect
from django.urls import reverse
@@ -7,7 +9,12 @@
from benefits.settings import OAUTH_CLIENT_NAME
+logger = logging.getLogger(__name__)
+
+
if OAUTH_CLIENT_NAME:
+ logger.debug(f"Using OAuth client configuration: {OAUTH_CLIENT_NAME}")
+
_oauth = OAuth()
_oauth.register(OAUTH_CLIENT_NAME)
oauth_client = _oauth.create_client(OAUTH_CLIENT_NAME)
@@ -25,6 +32,8 @@
route = reverse(ROUTE_AUTH)
redirect_uri = request.build_absolute_uri(route)
+ logger.debug(f"OAuth authorize_redirect with redirect_uri: {redirect_uri}")
+
return oauth_client.authorize_redirect(request, redirect_uri)
@@ -32,11 +41,14 @@
if not oauth_client:
raise Exception("No OAuth client")
+ logger.debug("Attempting to authorize OAuth access token")
token = oauth_client.authorize_access_token(request)
if token is None:
+ logger.warning("Could not authorize OAuth access token")
return redirect(ROUTE_START)
else:
# we are intentionally not storing anything about the user, including their token
+ logger.debug("OAuth access token authorized")
session.update(request, auth=True)
return redirect(ROUTE_CONFIRM)
| {"golden_diff": "diff --git a/benefits/oauth/views.py b/benefits/oauth/views.py\n--- a/benefits/oauth/views.py\n+++ b/benefits/oauth/views.py\n@@ -1,3 +1,5 @@\n+import logging\n+\n from django.shortcuts import redirect\n from django.urls import reverse\n \n@@ -7,7 +9,12 @@\n from benefits.settings import OAUTH_CLIENT_NAME\n \n \n+logger = logging.getLogger(__name__)\n+\n+\n if OAUTH_CLIENT_NAME:\n+ logger.debug(f\"Using OAuth client configuration: {OAUTH_CLIENT_NAME}\")\n+\n _oauth = OAuth()\n _oauth.register(OAUTH_CLIENT_NAME)\n oauth_client = _oauth.create_client(OAUTH_CLIENT_NAME)\n@@ -25,6 +32,8 @@\n route = reverse(ROUTE_AUTH)\n redirect_uri = request.build_absolute_uri(route)\n \n+ logger.debug(f\"OAuth authorize_redirect with redirect_uri: {redirect_uri}\")\n+\n return oauth_client.authorize_redirect(request, redirect_uri)\n \n \n@@ -32,11 +41,14 @@\n if not oauth_client:\n raise Exception(\"No OAuth client\")\n \n+ logger.debug(\"Attempting to authorize OAuth access token\")\n token = oauth_client.authorize_access_token(request)\n \n if token is None:\n+ logger.warning(\"Could not authorize OAuth access token\")\n return redirect(ROUTE_START)\n else:\n # we are intentionally not storing anything about the user, including their token\n+ logger.debug(\"OAuth access token authorized\")\n session.update(request, auth=True)\n return redirect(ROUTE_CONFIRM)\n", "issue": "Add logging for OAuth flows\nWe merged the basic implementation in #414, but neglected to include any additional logging around the new flows/logic.\r\n\r\nSome ideas of what we should log:\r\n\r\n- [x] The `OAUTH_CLIENT_NAME` used\r\n- [x] The `redirect_uri` sent to the authorization server with the `authorize_redirect` request\r\n- [x] If an access token fails to be authorized\n", "before_files": [{"content": "from django.shortcuts import redirect\nfrom django.urls import reverse\n\nfrom authlib.integrations.django_client import OAuth\n\nfrom benefits.core import session\nfrom benefits.settings import OAUTH_CLIENT_NAME\n\n\nif OAUTH_CLIENT_NAME:\n _oauth = OAuth()\n _oauth.register(OAUTH_CLIENT_NAME)\n oauth_client = _oauth.create_client(OAUTH_CLIENT_NAME)\n\n\nROUTE_AUTH = \"oauth:authorize\"\nROUTE_START = \"eligibility:start\"\nROUTE_CONFIRM = \"eligibility:confirm\"\n\n\ndef login(request):\n if not oauth_client:\n raise Exception(\"No OAuth client\")\n\n route = reverse(ROUTE_AUTH)\n redirect_uri = request.build_absolute_uri(route)\n\n return oauth_client.authorize_redirect(request, redirect_uri)\n\n\ndef authorize(request):\n if not oauth_client:\n raise Exception(\"No OAuth client\")\n\n token = oauth_client.authorize_access_token(request)\n\n if token is None:\n return redirect(ROUTE_START)\n else:\n # we are intentionally not storing anything about the user, including their token\n session.update(request, auth=True)\n return redirect(ROUTE_CONFIRM)\n", "path": "benefits/oauth/views.py"}], "after_files": [{"content": "import logging\n\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\n\nfrom authlib.integrations.django_client import OAuth\n\nfrom benefits.core import session\nfrom benefits.settings import OAUTH_CLIENT_NAME\n\n\nlogger = logging.getLogger(__name__)\n\n\nif OAUTH_CLIENT_NAME:\n logger.debug(f\"Using OAuth client configuration: {OAUTH_CLIENT_NAME}\")\n\n _oauth = OAuth()\n _oauth.register(OAUTH_CLIENT_NAME)\n oauth_client = _oauth.create_client(OAUTH_CLIENT_NAME)\n\n\nROUTE_AUTH = \"oauth:authorize\"\nROUTE_START = \"eligibility:start\"\nROUTE_CONFIRM = 
\"eligibility:confirm\"\n\n\ndef login(request):\n if not oauth_client:\n raise Exception(\"No OAuth client\")\n\n route = reverse(ROUTE_AUTH)\n redirect_uri = request.build_absolute_uri(route)\n\n logger.debug(f\"OAuth authorize_redirect with redirect_uri: {redirect_uri}\")\n\n return oauth_client.authorize_redirect(request, redirect_uri)\n\n\ndef authorize(request):\n if not oauth_client:\n raise Exception(\"No OAuth client\")\n\n logger.debug(\"Attempting to authorize OAuth access token\")\n token = oauth_client.authorize_access_token(request)\n\n if token is None:\n logger.warning(\"Could not authorize OAuth access token\")\n return redirect(ROUTE_START)\n else:\n # we are intentionally not storing anything about the user, including their token\n logger.debug(\"OAuth access token authorized\")\n session.update(request, auth=True)\n return redirect(ROUTE_CONFIRM)\n", "path": "benefits/oauth/views.py"}]} | 662 | 330 |
gh_patches_debug_13770 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1435 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RI: state legislator first names contain middle initials
For example, https://openstates.org/api/v1/legislators/RIL000179/ has first_name of "Moira J." and nothing in middle_name for Moira J. Walsh.
--- END ISSUE ---
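One self-contained way to recover the middle initial is to split only names that match a `First M. Last` shape; the helper below is an illustrative sketch, not the scraper's actual code:

```python
import re


def split_full_name(full_name):
    """Return (first, middle, last) for 'First M. Last' names;
    leave anything else unsplit so the caller can fall back to
    the existing behavior."""
    if re.match(r'^\S+\s[A-Z]\.\s\S+$', full_name):
        first, middle, last = full_name.split()
        return first, middle, last
    return full_name, '', ''


if __name__ == '__main__':
    assert split_full_name('Moira J. Walsh') == ('Moira', 'J.', 'Walsh')
    assert split_full_name('Moira Walsh') == ('Moira Walsh', '', '')
    print('ok')
```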
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/ri/legislators.py`
Content:
```
1 import re
2 import string
3
4 from billy.scrape import NoDataForPeriod
5 from billy.scrape.legislators import LegislatorScraper, Legislator
6 from openstates.utils import LXMLMixin
7
8 import lxml.html
9 import xlrd
10
11 excel_mapping = {
12 'district': 0,
13 'town_represented': 1,
14 'full_name': 2,
15 'party': 3,
16 'address': 4,
17 'email': 5,
18 }
19
20 class RILegislatorScraper(LegislatorScraper, LXMLMixin):
21 jurisdiction = 'ri'
22 latest_only = True
23
24 def scrape(self, chamber, term):
25 if chamber == 'upper':
26 url = ('http://webserver.rilin.state.ri.us/Documents/Senators.xls')
27 rep_type = 'Senator'
28 source_url = 'http://www.rilin.state.ri.us/senators/default.aspx'
29 source_url_title_replacement = rep_type
30 contact_url = 'http://webserver.rilin.state.ri.us/Email/SenEmailListDistrict.asp'
31 elif chamber == 'lower':
32 url = ('http://webserver.rilin.state.ri.us/Documents/Representatives.xls')
33 rep_type = 'Representative'
34 source_url = 'http://www.rilin.state.ri.us/representatives/default.aspx'
35 source_url_title_replacement = 'Rep. '
36 contact_url = 'http://webserver.rilin.state.ri.us/Email/RepEmailListDistrict.asp'
37
38 self.urlretrieve(url, 'ri_leg.xls')
39
40 wb = xlrd.open_workbook('ri_leg.xls')
41 sh = wb.sheet_by_index(0)
42
43 # This isn't perfect but it's cheap and better than using the
44 # XLS doc as the source URL for all legislators.
45 # 374: RI: legislator url
46 leg_source_url_map = {}
47 leg_page = self.lxmlize(source_url)
48
49 for link in leg_page.xpath('//td[@class="ms-vb2"]'):
50 leg_name = link.text_content().replace(source_url_title_replacement,'')
51 leg_url = link.xpath("..//a")[0].attrib['href']
52 leg_source_url_map[leg_name] = leg_url
53
54 for rownum in xrange(1, sh.nrows):
55 d = {}
56 for field, col_num in excel_mapping.iteritems():
57 d[field] = sh.cell(rownum, col_num).value
58
59 if d['full_name'].upper() == "VACANT":
60 self.warning(
61 "District {}'s seat is vacant".format(int(d['district'])))
62 continue
63
64 slug = re.match(
65 "(?P<class>sen|rep)-(?P<slug>.*)@(rilin\.state\.ri\.us|rilegislature\.gov)", d['email']
66 )
67
68 if 'asp' in d['email']:
69 d['email'] = None
70
71 if d['email'] is not None:
72 info = slug.groupdict()
73 info['chamber'] = "senators" if info['class'] == 'sen' else "representatives"
74
75 url = ("http://www.rilin.state.ri.us/{chamber}/"
76 "{slug}/Pages/Biography.aspx".format(**info))
77
78 dist = str(int(d['district']))
79 district_name = dist
80
81 assert d['full_name'].startswith(rep_type), "Improper name found"
82 full_name = re.sub(r"^{}(?=\s?[A-Z].*$)".format(rep_type), '', d['full_name']).strip()
83 translate = {
84 "Democrat" : "Democratic",
85 "Republican" : "Republican",
86 "Independent" : "Independent"
87 }
88
89 homepage_url = None
90 url_names = lxml.html.fromstring(self.get(source_url).text)
91 url_names = url_names.xpath('//td[@class="ms-vb2"]/a/@href')
92 modified_name = re.sub(r'[^\w\s]', '', full_name)
93 modified_name = modified_name.replace(' ', '').strip('').lower()
94
95 for el in url_names:
96 if 'default.aspx' in el:
97 el = el.replace('default.aspx', '')
98 el = el.strip('')
99 if el[-1] == '/':
100 el = el[:-1]
101 el = el.lower()
102 url_name_array = el.split('/')
103 if url_name_array[-1] in modified_name:
104 #remove '/default.aspx' and add last name
105 homepage_url = source_url[:-12] + url_name_array[-1]
106
107 kwargs = {
108 "town_represented": d['town_represented'],
109 }
110
111 contact = self.lxmlize(contact_url)
112 contact_phone = contact.xpath('//tr[@valign="TOP"]//td[@class="bodyCopy"]/text() | //td[@class="bodyCopy"]//center/text()')
113
114 phone = None
115 for el in contact_phone:
116 if len(el) <= 2 and dist == el:
117 number = contact_phone.index(el)
118 phone = contact_phone[number + 2]
119 phone = phone.strip()
120
121 email = None
122 if d['email'] is not None:
123 email = d['email']
124
125 if homepage_url is not None:
126 kwargs['url'] = homepage_url
127
128 if d['address'] is '':
129 d['address'] = 'No Address Found'
130
131 leg = Legislator(term, chamber, district_name, full_name,
132 '', '', '',
133 translate[d['party']],
134 **kwargs)
135
136 leg.add_office('district', 'Dictrict Office', address=d['address'], phone=phone, email=email)
137 leg.add_source(source_url)
138 leg.add_source(contact_url)
139 if homepage_url:
140 leg.add_source(homepage_url)
141 self.save_legislator(leg)
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openstates/ri/legislators.py b/openstates/ri/legislators.py
--- a/openstates/ri/legislators.py
+++ b/openstates/ri/legislators.py
@@ -128,8 +128,14 @@
if d['address'] is '':
d['address'] = 'No Address Found'
+ # RI is very fond of First M. Last name formats and
+ # they're being misparsed upstream, so fix here
+ (first, middle, last) = ('','','')
+ if re.match(r'^\S+\s[A-Z]\.\s\S+$', full_name):
+ (first, middle, last) = full_name.split()
+
leg = Legislator(term, chamber, district_name, full_name,
- '', '', '',
+ first, last, middle,
translate[d['party']],
**kwargs)
| {"golden_diff": "diff --git a/openstates/ri/legislators.py b/openstates/ri/legislators.py\n--- a/openstates/ri/legislators.py\n+++ b/openstates/ri/legislators.py\n@@ -128,8 +128,14 @@\n if d['address'] is '':\n d['address'] = 'No Address Found'\n \n+ # RI is very fond of First M. Last name formats and\n+ # they're being misparsed upstream, so fix here\n+ (first, middle, last) = ('','','')\n+ if re.match(r'^\\S+\\s[A-Z]\\.\\s\\S+$', full_name):\n+ (first, middle, last) = full_name.split()\n+ \n leg = Legislator(term, chamber, district_name, full_name,\n- '', '', '',\n+ first, last, middle,\n translate[d['party']],\n **kwargs)\n", "issue": "RI: state legislator first names contain middle initials\nFor example, https://openstates.org/api/v1/legislators/RIL000179/ has first_name of \"Moira J.\" and nothing in middle_name for Moira J. Walsh.\n", "before_files": [{"content": "import re\nimport string\n\nfrom billy.scrape import NoDataForPeriod\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nfrom openstates.utils import LXMLMixin\n\nimport lxml.html\nimport xlrd\n\nexcel_mapping = {\n 'district': 0,\n 'town_represented': 1,\n 'full_name': 2,\n 'party': 3,\n 'address': 4,\n 'email': 5,\n}\n\nclass RILegislatorScraper(LegislatorScraper, LXMLMixin):\n jurisdiction = 'ri'\n latest_only = True\n\n def scrape(self, chamber, term):\n if chamber == 'upper':\n url = ('http://webserver.rilin.state.ri.us/Documents/Senators.xls')\n rep_type = 'Senator'\n source_url = 'http://www.rilin.state.ri.us/senators/default.aspx'\n source_url_title_replacement = rep_type\n contact_url = 'http://webserver.rilin.state.ri.us/Email/SenEmailListDistrict.asp'\n elif chamber == 'lower':\n url = ('http://webserver.rilin.state.ri.us/Documents/Representatives.xls')\n rep_type = 'Representative'\n source_url = 'http://www.rilin.state.ri.us/representatives/default.aspx'\n source_url_title_replacement = 'Rep. 
'\n contact_url = 'http://webserver.rilin.state.ri.us/Email/RepEmailListDistrict.asp'\n\n self.urlretrieve(url, 'ri_leg.xls')\n\n wb = xlrd.open_workbook('ri_leg.xls')\n sh = wb.sheet_by_index(0)\n\n # This isn't perfect but it's cheap and better than using the\n # XLS doc as the source URL for all legislators.\n # 374: RI: legislator url\n leg_source_url_map = {}\n leg_page = self.lxmlize(source_url)\n\n for link in leg_page.xpath('//td[@class=\"ms-vb2\"]'):\n leg_name = link.text_content().replace(source_url_title_replacement,'')\n leg_url = link.xpath(\"..//a\")[0].attrib['href']\n leg_source_url_map[leg_name] = leg_url\n\n for rownum in xrange(1, sh.nrows):\n d = {}\n for field, col_num in excel_mapping.iteritems():\n d[field] = sh.cell(rownum, col_num).value\n\n if d['full_name'].upper() == \"VACANT\":\n self.warning(\n \"District {}'s seat is vacant\".format(int(d['district'])))\n continue\n\n slug = re.match(\n \"(?P<class>sen|rep)-(?P<slug>.*)@(rilin\\.state\\.ri\\.us|rilegislature\\.gov)\", d['email']\n )\n \n if 'asp' in d['email']:\n d['email'] = None\n\n if d['email'] is not None:\n info = slug.groupdict()\n info['chamber'] = \"senators\" if info['class'] == 'sen' else \"representatives\"\n\n url = (\"http://www.rilin.state.ri.us/{chamber}/\"\n \"{slug}/Pages/Biography.aspx\".format(**info))\n\n dist = str(int(d['district']))\n district_name = dist\n\n assert d['full_name'].startswith(rep_type), \"Improper name found\"\n full_name = re.sub(r\"^{}(?=\\s?[A-Z].*$)\".format(rep_type), '', d['full_name']).strip()\n translate = {\n \"Democrat\" : \"Democratic\",\n \"Republican\" : \"Republican\",\n \"Independent\" : \"Independent\"\n }\n\n homepage_url = None\n url_names = lxml.html.fromstring(self.get(source_url).text)\n url_names = url_names.xpath('//td[@class=\"ms-vb2\"]/a/@href')\n modified_name = re.sub(r'[^\\w\\s]', '', full_name)\n modified_name = modified_name.replace(' ', '').strip('').lower()\n\n for el in url_names:\n if 'default.aspx' in el:\n el = el.replace('default.aspx', '')\n el = el.strip('')\n if el[-1] == '/':\n el = el[:-1]\n el = el.lower()\n url_name_array = el.split('/')\n if url_name_array[-1] in modified_name:\n #remove '/default.aspx' and add last name\n homepage_url = source_url[:-12] + url_name_array[-1]\n\n kwargs = {\n \"town_represented\": d['town_represented'],\n }\n\n contact = self.lxmlize(contact_url)\n contact_phone = contact.xpath('//tr[@valign=\"TOP\"]//td[@class=\"bodyCopy\"]/text() | //td[@class=\"bodyCopy\"]//center/text()')\n\n phone = None\n for el in contact_phone:\n if len(el) <= 2 and dist == el:\n number = contact_phone.index(el)\n phone = contact_phone[number + 2]\n phone = phone.strip()\n\n email = None\n if d['email'] is not None:\n email = d['email']\n\n if homepage_url is not None:\n kwargs['url'] = homepage_url\n\n if d['address'] is '':\n d['address'] = 'No Address Found'\n\n leg = Legislator(term, chamber, district_name, full_name,\n '', '', '',\n translate[d['party']],\n **kwargs)\n\n leg.add_office('district', 'Dictrict Office', address=d['address'], phone=phone, email=email)\n leg.add_source(source_url)\n leg.add_source(contact_url)\n if homepage_url:\n leg.add_source(homepage_url)\n self.save_legislator(leg)\n", "path": "openstates/ri/legislators.py"}], "after_files": [{"content": "import re\nimport string\n\nfrom billy.scrape import NoDataForPeriod\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nfrom openstates.utils import LXMLMixin\n\nimport lxml.html\nimport xlrd\n\nexcel_mapping = {\n 'district': 
0,\n 'town_represented': 1,\n 'full_name': 2,\n 'party': 3,\n 'address': 4,\n 'email': 5,\n}\n\nclass RILegislatorScraper(LegislatorScraper, LXMLMixin):\n jurisdiction = 'ri'\n latest_only = True\n\n def scrape(self, chamber, term):\n if chamber == 'upper':\n url = ('http://webserver.rilin.state.ri.us/Documents/Senators.xls')\n rep_type = 'Senator'\n source_url = 'http://www.rilin.state.ri.us/senators/default.aspx'\n source_url_title_replacement = rep_type\n contact_url = 'http://webserver.rilin.state.ri.us/Email/SenEmailListDistrict.asp'\n elif chamber == 'lower':\n url = ('http://webserver.rilin.state.ri.us/Documents/Representatives.xls')\n rep_type = 'Representative'\n source_url = 'http://www.rilin.state.ri.us/representatives/default.aspx'\n source_url_title_replacement = 'Rep. '\n contact_url = 'http://webserver.rilin.state.ri.us/Email/RepEmailListDistrict.asp'\n\n self.urlretrieve(url, 'ri_leg.xls')\n\n wb = xlrd.open_workbook('ri_leg.xls')\n sh = wb.sheet_by_index(0)\n\n # This isn't perfect but it's cheap and better than using the\n # XLS doc as the source URL for all legislators.\n # 374: RI: legislator url\n leg_source_url_map = {}\n leg_page = self.lxmlize(source_url)\n\n for link in leg_page.xpath('//td[@class=\"ms-vb2\"]'):\n leg_name = link.text_content().replace(source_url_title_replacement,'')\n leg_url = link.xpath(\"..//a\")[0].attrib['href']\n leg_source_url_map[leg_name] = leg_url\n\n for rownum in xrange(1, sh.nrows):\n d = {}\n for field, col_num in excel_mapping.iteritems():\n d[field] = sh.cell(rownum, col_num).value\n\n if d['full_name'].upper() == \"VACANT\":\n self.warning(\n \"District {}'s seat is vacant\".format(int(d['district'])))\n continue\n\n slug = re.match(\n \"(?P<class>sen|rep)-(?P<slug>.*)@(rilin\\.state\\.ri\\.us|rilegislature\\.gov)\", d['email']\n )\n \n if 'asp' in d['email']:\n d['email'] = None\n\n if d['email'] is not None:\n info = slug.groupdict()\n info['chamber'] = \"senators\" if info['class'] == 'sen' else \"representatives\"\n\n url = (\"http://www.rilin.state.ri.us/{chamber}/\"\n \"{slug}/Pages/Biography.aspx\".format(**info))\n\n dist = str(int(d['district']))\n district_name = dist\n\n assert d['full_name'].startswith(rep_type), \"Improper name found\"\n full_name = re.sub(r\"^{}(?=\\s?[A-Z].*$)\".format(rep_type), '', d['full_name']).strip()\n translate = {\n \"Democrat\" : \"Democratic\",\n \"Republican\" : \"Republican\",\n \"Independent\" : \"Independent\"\n }\n\n homepage_url = None\n url_names = lxml.html.fromstring(self.get(source_url).text)\n url_names = url_names.xpath('//td[@class=\"ms-vb2\"]/a/@href')\n modified_name = re.sub(r'[^\\w\\s]', '', full_name)\n modified_name = modified_name.replace(' ', '').strip('').lower()\n\n for el in url_names:\n if 'default.aspx' in el:\n el = el.replace('default.aspx', '')\n el = el.strip('')\n if el[-1] == '/':\n el = el[:-1]\n el = el.lower()\n url_name_array = el.split('/')\n if url_name_array[-1] in modified_name:\n #remove '/default.aspx' and add last name\n homepage_url = source_url[:-12] + url_name_array[-1]\n\n kwargs = {\n \"town_represented\": d['town_represented'],\n }\n\n contact = self.lxmlize(contact_url)\n contact_phone = contact.xpath('//tr[@valign=\"TOP\"]//td[@class=\"bodyCopy\"]/text() | //td[@class=\"bodyCopy\"]//center/text()')\n\n phone = None\n for el in contact_phone:\n if len(el) <= 2 and dist == el:\n number = contact_phone.index(el)\n phone = contact_phone[number + 2]\n phone = phone.strip()\n\n email = None\n if d['email'] is not None:\n email = d['email']\n\n 
if homepage_url is not None:\n kwargs['url'] = homepage_url\n\n if d['address'] is '':\n d['address'] = 'No Address Found'\n\n # RI is very fond of First M. Last name formats and\n # they're being misparsed upstream, so fix here\n (first, middle, last) = ('','','')\n if re.match(r'^\\S+\\s[A-Z]\\.\\s\\S+$', full_name):\n (first, middle, last) = full_name.split()\n \n leg = Legislator(term, chamber, district_name, full_name,\n first, last, middle,\n translate[d['party']],\n **kwargs)\n\n leg.add_office('district', 'Dictrict Office', address=d['address'], phone=phone, email=email)\n leg.add_source(source_url)\n leg.add_source(contact_url)\n if homepage_url:\n leg.add_source(homepage_url)\n self.save_legislator(leg)\n", "path": "openstates/ri/legislators.py"}]} | 1,904 | 203 |
gh_patches_debug_1787 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-9068 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[CT-3377] [Regression] `dbt deps` fails on tarball dependencies
### Is this a regression in a recent version of dbt-core?
- [X] I believe this is a regression in dbt-core functionality
- [X] I have searched the existing issues, and I could not find an existing issue for this regression
### Current Behavior
When `dependencies.yml` includes a tarball dependency, I get an error message from `dbt deps`:
```
11:18:06 Running with dbt=1.7.1
11:18:06 Updating lock file in file path: /workspace/dbt-deps-tarball-failure/asdf/package-lock.yml
11:18:06 Encountered an error:
Runtime Error
The packages.yml file in this project is malformed. Please double check
the contents of this file and fix any errors before retrying.
You can find more information on the syntax for this file here:
https://docs.getdbt.com/docs/package-management
Validator Error:
dbt_utils was not found in the package index. Packages on the index require a namespace, e.g dbt-labs/dbt_utils
```
### Expected/Previous Behavior
Expected output:
```
11:27:03 Running with dbt=1.6.8
11:27:03 Installing dbt_utils
11:27:03 Installed from tarball (url: https://codeload.github.com/dbt-labs/dbt-utils/tar.gz/0.9.6)
```
The validator should
- not check the index for tarball dependencies
- not validate the `namespace/package-name` for tarball dependencies
- mention the correct filename (this is a minor thing)
### Steps To Reproduce
1. In a new dbt project
2. With the following `dependencies.yml`:
```yaml
packages:
- tarball: https://codeload.github.com/dbt-labs/dbt-utils/tar.gz/0.9.6
name: 'dbt_utils'
```
3. Run `dbt deps`
4. See error message above
### Relevant log output
_No response_
### Environment
```markdown
- OS: Ubuntu 22.04.3
- Python: 3.11.1
- dbt-core (latest working version): 1.6.8
- dbt-core (earliest regression version): 1.7.0
- dbt-core (latest version): 1.7.1
```
### Which database adapter are you using with dbt?
_No response_
### Additional Context
_No response_
--- END ISSUE ---
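A stand-alone sketch of the key mismatch behind this regression, assuming (as a simplification) that the lock-file reader recognizes a tarball entry by a `tarball` + `name` key pair; `looks_like_tarball_entry` is a hypothetical stand-in for dbt's contract dispatch, not its real API:

```python
def to_dict_buggy(tarball, package):
    # What the lock file was written with: no 'name' key, so the reader
    # falls through to registry-package validation and fails.
    return {"tarball": tarball, "version": "tarball", "package": package}


def to_dict_fixed(tarball, package):
    # Keys the package reader expects for a tarball dependency.
    return {"tarball": tarball, "name": package}


def looks_like_tarball_entry(entry):
    # Hypothetical stand-in for dbt's contract dispatch.
    return "tarball" in entry and "name" in entry


if __name__ == "__main__":
    url = "https://codeload.github.com/dbt-labs/dbt-utils/tar.gz/0.9.6"
    assert not looks_like_tarball_entry(to_dict_buggy(url, "dbt_utils"))
    assert looks_like_tarball_entry(to_dict_fixed(url, "dbt_utils"))
    print("ok")
```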
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/dbt/deps/tarball.py`
Content:
```
1 from typing import Dict
2
3 from dbt.contracts.project import RegistryPackageMetadata, TarballPackage
4 from dbt.deps.base import PinnedPackage, UnpinnedPackage
5
6
7 class TarballPackageMixin:
8 def __init__(self, tarball: str) -> None:
9 super().__init__()
10 self.tarball = tarball
11
12 @property
13 def name(self):
14 return self.tarball
15
16 def source_type(self) -> str:
17 return "tarball"
18
19
20 class TarballPinnedPackage(TarballPackageMixin, PinnedPackage):
21 def __init__(self, tarball: str, package: str) -> None:
22 super().__init__(tarball)
23 # setup to recycle RegistryPinnedPackage fns
24 self.package = package
25 self.version = "tarball"
26
27 @property
28 def name(self):
29 return self.package
30
31 def to_dict(self) -> Dict[str, str]:
32 return {
33 "tarball": self.tarball,
34 "version": self.version,
35 "package": self.package,
36 }
37
38 def get_version(self):
39 return self.version
40
41 def nice_version_name(self):
42 return f"tarball (url: {self.tarball})"
43
44 def _fetch_metadata(self, project, renderer):
45 """
46 recycle RegistryPackageMetadata so that we can use the install and
47 download_and_untar from RegistryPinnedPackage next.
48 build RegistryPackageMetadata from info passed via packages.yml since no
49 'metadata' service exists in this case.
50 """
51
52 dct = {
53 "name": self.package,
54 "packages": [], # note: required by RegistryPackageMetadata
55 "downloads": {"tarball": self.tarball},
56 }
57
58 return RegistryPackageMetadata.from_dict(dct)
59
60 def install(self, project, renderer):
61 self._install(project, renderer)
62
63
64 class TarballUnpinnedPackage(TarballPackageMixin, UnpinnedPackage[TarballPinnedPackage]):
65 def __init__(
66 self,
67 tarball: str,
68 package: str,
69 ) -> None:
70 super().__init__(tarball)
71 # setup to recycle RegistryPinnedPackage fns
72 self.package = package
73 self.version = "tarball"
74
75 @classmethod
76 def from_contract(cls, contract: TarballPackage) -> "TarballUnpinnedPackage":
77 return cls(tarball=contract.tarball, package=contract.name)
78
79 def incorporate(self, other: "TarballUnpinnedPackage") -> "TarballUnpinnedPackage":
80 return TarballUnpinnedPackage(tarball=self.tarball, package=self.package)
81
82 def resolved(self) -> TarballPinnedPackage:
83 return TarballPinnedPackage(tarball=self.tarball, package=self.package)
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/dbt/deps/tarball.py b/core/dbt/deps/tarball.py
--- a/core/dbt/deps/tarball.py
+++ b/core/dbt/deps/tarball.py
@@ -31,8 +31,7 @@
def to_dict(self) -> Dict[str, str]:
return {
"tarball": self.tarball,
- "version": self.version,
- "package": self.package,
+ "name": self.package,
}
def get_version(self):
| {"golden_diff": "diff --git a/core/dbt/deps/tarball.py b/core/dbt/deps/tarball.py\n--- a/core/dbt/deps/tarball.py\n+++ b/core/dbt/deps/tarball.py\n@@ -31,8 +31,7 @@\n def to_dict(self) -> Dict[str, str]:\n return {\n \"tarball\": self.tarball,\n- \"version\": self.version,\n- \"package\": self.package,\n+ \"name\": self.package,\n }\n \n def get_version(self):\n", "issue": "[CT-3377] [Regression] `dbt deps` fails on tarball dependencies\n### Is this a regression in a recent version of dbt-core?\n\n- [X] I believe this is a regression in dbt-core functionality\n- [X] I have searched the existing issues, and I could not find an existing issue for this regression\n\n### Current Behavior\n\nWhen `dependencies.yml` includes a tarball dependency, I get an error message from `dbt deps`:\r\n\r\n```\r\n11:18:06 Running with dbt=1.7.1\r\n11:18:06 Updating lock file in file path: /workspace/dbt-deps-tarball-failure/asdf/package-lock.yml\r\n11:18:06 Encountered an error:\r\nRuntime Error\r\n The packages.yml file in this project is malformed. Please double check\r\n the contents of this file and fix any errors before retrying.\r\n \r\n You can find more information on the syntax for this file here:\r\n https://docs.getdbt.com/docs/package-management\r\n \r\n Validator Error:\r\n dbt_utils was not found in the package index. Packages on the index require a namespace, e.g dbt-labs/dbt_utils\r\n```\n\n### Expected/Previous Behavior\n\nExpected output:\r\n```\r\n11:27:03 Running with dbt=1.6.8\r\n11:27:03 Installing dbt_utils\r\n11:27:03 Installed from tarball (url: https://codeload.github.com/dbt-labs/dbt-utils/tar.gz/0.9.6)\r\n```\r\n\r\nThe validator should \r\n- not check the index for tarball dependencies\r\n- not validate the `namespace/package-name` for tarball dependencies\r\n- mention the correct filename (this is a minor thing)\n\n### Steps To Reproduce\n\n1. In a new dbt project\r\n2. With the following `dependencies.yml`:\r\n```yaml\r\npackages:\r\n - tarball: https://codeload.github.com/dbt-labs/dbt-utils/tar.gz/0.9.6\r\n name: 'dbt_utils'\r\n```\r\n3. Run `dbt deps`\r\n4. 
See error message above\n\n### Relevant log output\n\n_No response_\n\n### Environment\n\n```markdown\n- OS: Ubuntu 22.04.3\r\n- Python: 3.11.1\r\n- dbt-core (latest working version): 1.6.8\r\n- dbt-core (earliest regression version): 1.7.0\r\n- dbt-core (latest version): 1.7.1\n```\n\n\n### Which database adapter are you using with dbt?\n\n_No response_\n\n### Additional Context\n\n_No response_\n", "before_files": [{"content": "from typing import Dict\n\nfrom dbt.contracts.project import RegistryPackageMetadata, TarballPackage\nfrom dbt.deps.base import PinnedPackage, UnpinnedPackage\n\n\nclass TarballPackageMixin:\n def __init__(self, tarball: str) -> None:\n super().__init__()\n self.tarball = tarball\n\n @property\n def name(self):\n return self.tarball\n\n def source_type(self) -> str:\n return \"tarball\"\n\n\nclass TarballPinnedPackage(TarballPackageMixin, PinnedPackage):\n def __init__(self, tarball: str, package: str) -> None:\n super().__init__(tarball)\n # setup to recycle RegistryPinnedPackage fns\n self.package = package\n self.version = \"tarball\"\n\n @property\n def name(self):\n return self.package\n\n def to_dict(self) -> Dict[str, str]:\n return {\n \"tarball\": self.tarball,\n \"version\": self.version,\n \"package\": self.package,\n }\n\n def get_version(self):\n return self.version\n\n def nice_version_name(self):\n return f\"tarball (url: {self.tarball})\"\n\n def _fetch_metadata(self, project, renderer):\n \"\"\"\n recycle RegistryPackageMetadata so that we can use the install and\n download_and_untar from RegistryPinnedPackage next.\n build RegistryPackageMetadata from info passed via packages.yml since no\n 'metadata' service exists in this case.\n \"\"\"\n\n dct = {\n \"name\": self.package,\n \"packages\": [], # note: required by RegistryPackageMetadata\n \"downloads\": {\"tarball\": self.tarball},\n }\n\n return RegistryPackageMetadata.from_dict(dct)\n\n def install(self, project, renderer):\n self._install(project, renderer)\n\n\nclass TarballUnpinnedPackage(TarballPackageMixin, UnpinnedPackage[TarballPinnedPackage]):\n def __init__(\n self,\n tarball: str,\n package: str,\n ) -> None:\n super().__init__(tarball)\n # setup to recycle RegistryPinnedPackage fns\n self.package = package\n self.version = \"tarball\"\n\n @classmethod\n def from_contract(cls, contract: TarballPackage) -> \"TarballUnpinnedPackage\":\n return cls(tarball=contract.tarball, package=contract.name)\n\n def incorporate(self, other: \"TarballUnpinnedPackage\") -> \"TarballUnpinnedPackage\":\n return TarballUnpinnedPackage(tarball=self.tarball, package=self.package)\n\n def resolved(self) -> TarballPinnedPackage:\n return TarballPinnedPackage(tarball=self.tarball, package=self.package)\n", "path": "core/dbt/deps/tarball.py"}], "after_files": [{"content": "from typing import Dict\n\nfrom dbt.contracts.project import RegistryPackageMetadata, TarballPackage\nfrom dbt.deps.base import PinnedPackage, UnpinnedPackage\n\n\nclass TarballPackageMixin:\n def __init__(self, tarball: str) -> None:\n super().__init__()\n self.tarball = tarball\n\n @property\n def name(self):\n return self.tarball\n\n def source_type(self) -> str:\n return \"tarball\"\n\n\nclass TarballPinnedPackage(TarballPackageMixin, PinnedPackage):\n def __init__(self, tarball: str, package: str) -> None:\n super().__init__(tarball)\n # setup to recycle RegistryPinnedPackage fns\n self.package = package\n self.version = \"tarball\"\n\n @property\n def name(self):\n return self.package\n\n def to_dict(self) -> Dict[str, str]:\n return 
{\n \"tarball\": self.tarball,\n \"name\": self.package,\n }\n\n def get_version(self):\n return self.version\n\n def nice_version_name(self):\n return f\"tarball (url: {self.tarball})\"\n\n def _fetch_metadata(self, project, renderer):\n \"\"\"\n recycle RegistryPackageMetadata so that we can use the install and\n download_and_untar from RegistryPinnedPackage next.\n build RegistryPackageMetadata from info passed via packages.yml since no\n 'metadata' service exists in this case.\n \"\"\"\n\n dct = {\n \"name\": self.package,\n \"packages\": [], # note: required by RegistryPackageMetadata\n \"downloads\": {\"tarball\": self.tarball},\n }\n\n return RegistryPackageMetadata.from_dict(dct)\n\n def install(self, project, renderer):\n self._install(project, renderer)\n\n\nclass TarballUnpinnedPackage(TarballPackageMixin, UnpinnedPackage[TarballPinnedPackage]):\n def __init__(\n self,\n tarball: str,\n package: str,\n ) -> None:\n super().__init__(tarball)\n # setup to recycle RegistryPinnedPackage fns\n self.package = package\n self.version = \"tarball\"\n\n @classmethod\n def from_contract(cls, contract: TarballPackage) -> \"TarballUnpinnedPackage\":\n return cls(tarball=contract.tarball, package=contract.name)\n\n def incorporate(self, other: \"TarballUnpinnedPackage\") -> \"TarballUnpinnedPackage\":\n return TarballUnpinnedPackage(tarball=self.tarball, package=self.package)\n\n def resolved(self) -> TarballPinnedPackage:\n return TarballPinnedPackage(tarball=self.tarball, package=self.package)\n", "path": "core/dbt/deps/tarball.py"}]} | 1,614 | 117 |
gh_patches_debug_38546 | rasdani/github-patches | git_diff | beetbox__beets-1129 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
zero: Permit stripping album art
It would be nice to have the option of automatically clearing embedded art when an item is imported. Whether or not a media item actually contains embedded art, beets should ensure the resulting media item has no embedded art after being imported. There are two plugins which would offer a good place of implementation for this feature: the EmbedArt and the Zero plugins.
The EmbedArt plugin already supports a command called `clearart` which allows for the manual stripping of embedded art from items which match a query. Since the `clearart` operation is not automatic and there is no option for automation, an extra step is required on the importation of media.
What probably makes more sense is implementing support for the art field in the Zero plugin. It can only be assumed that people who would use such a feature already have the Zero plugin deployed for clearing other fields. That said, it would require less configuration as all a user would need to do is drop the art field in their configuration for the Zero plugin. Moreover, with the EmbedArt plugin, it embeds art into media items by default. This feature would need to be disabled in the configuration as well.
--- END ISSUE ---
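A self-contained sketch of how the Zero plugin's matching could generalize to fields that may be absent from the incoming tags (the embedded-art case). The `True` convention ("always zero this field") and the field names are illustrative assumptions, not the plugin's documented API:

```python
import re


def zero_tags(tags, patterns):
    """Set matching values in `tags` to None. `patterns` maps a field
    either to a list of regexes or to True, meaning the field is zeroed
    unconditionally, even when absent from `tags`."""
    for field, pats in patterns.items():
        if field in tags:
            match = pats is True or any(
                re.search(p, str(tags[field]), flags=re.IGNORECASE)
                for p in pats)
        else:
            match = pats is True
        if match:
            tags[field] = None
    return tags


if __name__ == "__main__":
    tags = {"comments": "ripped with EAC", "images": b"...jpeg bytes..."}
    out = zero_tags(tags, {"comments": ["EAC"], "images": True})
    assert out == {"comments": None, "images": None}
    print("ok")
```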
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beetsplug/zero.py`
Content:
```
1 # This file is part of beets.
2 # Copyright 2013, Blemjhoo Tezoulbr <[email protected]>.
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining
5 # a copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish,
8 # distribute, sublicense, and/or sell copies of the Software, and to
9 # permit persons to whom the Software is furnished to do so, subject to
10 # the following conditions:
11 #
12 # The above copyright notice and this permission notice shall be
13 # included in all copies or substantial portions of the Software.
14
15 """ Clears tag fields in media files."""
16
17 import re
18 import logging
19 from beets.plugins import BeetsPlugin
20 from beets.library import Item
21 from beets.importer import action
22 from beets.util import confit
23
24 __author__ = '[email protected]'
25 __version__ = '0.10'
26
27 log = logging.getLogger('beets')
28
29
30 class ZeroPlugin(BeetsPlugin):
31
32 _instance = None
33
34 def __init__(self):
35 super(ZeroPlugin, self).__init__()
36
37 # Listeners.
38 self.register_listener('write', self.write_event)
39 self.register_listener('import_task_choice',
40 self.import_task_choice_event)
41
42 self.config.add({
43 'fields': [],
44 })
45
46 self.patterns = {}
47 self.warned = False
48
49 for field in self.config['fields'].as_str_seq():
50 if field in ('id', 'path', 'album_id'):
51 log.warn(u'[zero] field \'{0}\' ignored, zeroing '
52 u'it would be dangerous'.format(field))
53 continue
54 if field not in Item._fields.keys():
55 log.error(u'[zero] invalid field: {0}'.format(field))
56 continue
57
58 try:
59 self.patterns[field] = self.config[field].as_str_seq()
60 except confit.NotFoundError:
61 # Matches everything
62 self.patterns[field] = [u'']
63
64 def import_task_choice_event(self, session, task):
65 """Listen for import_task_choice event."""
66 if task.choice_flag == action.ASIS and not self.warned:
67 log.warn(u'[zero] cannot zero in \"as-is\" mode')
68 self.warned = True
69 # TODO request write in as-is mode
70
71 @classmethod
72 def match_patterns(cls, field, patterns):
73 """Check if field (as string) is matching any of the patterns in
74 the list.
75 """
76 for p in patterns:
77 if re.search(p, unicode(field), flags=re.IGNORECASE):
78 return True
79 return False
80
81 def write_event(self, item, path, tags):
82 """Listen for write event."""
83 if not self.patterns:
84 log.warn(u'[zero] no fields, nothing to do')
85 return
86
87 for field, patterns in self.patterns.items():
88 if field not in tags:
89 log.error(u'[zero] no such field: {0}'.format(field))
90 continue
91
92 value = tags[field]
93 if self.match_patterns(value, patterns):
94 log.debug(u'[zero] {0}: {1} -> None'.format(field, value))
95 tags[field] = None
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/beetsplug/zero.py b/beetsplug/zero.py
--- a/beetsplug/zero.py
+++ b/beetsplug/zero.py
@@ -17,7 +17,7 @@
import re
import logging
from beets.plugins import BeetsPlugin
-from beets.library import Item
+from beets.mediafile import MediaFile
from beets.importer import action
from beets.util import confit
@@ -51,7 +51,7 @@
log.warn(u'[zero] field \'{0}\' ignored, zeroing '
u'it would be dangerous'.format(field))
continue
- if field not in Item._fields.keys():
+ if field not in MediaFile.fields():
log.error(u'[zero] invalid field: {0}'.format(field))
continue
@@ -59,7 +59,7 @@
self.patterns[field] = self.config[field].as_str_seq()
except confit.NotFoundError:
# Matches everything
- self.patterns[field] = [u'']
+ self.patterns[field] = True
def import_task_choice_event(self, session, task):
"""Listen for import_task_choice event."""
@@ -73,23 +73,29 @@
"""Check if field (as string) is matching any of the patterns in
the list.
"""
+ if patterns is True:
+ return True
for p in patterns:
if re.search(p, unicode(field), flags=re.IGNORECASE):
return True
return False
def write_event(self, item, path, tags):
- """Listen for write event."""
+ """Set values in tags to `None` if the key and value are matched
+ by `self.patterns`.
+ """
if not self.patterns:
log.warn(u'[zero] no fields, nothing to do')
return
for field, patterns in self.patterns.items():
- if field not in tags:
- log.error(u'[zero] no such field: {0}'.format(field))
- continue
-
- value = tags[field]
- if self.match_patterns(value, patterns):
+ if field in tags:
+ value = tags[field]
+ match = self.match_patterns(tags[field], patterns)
+ else:
+ value = ''
+ match = patterns is True
+
+ if match:
log.debug(u'[zero] {0}: {1} -> None'.format(field, value))
tags[field] = None
| {"golden_diff": "diff --git a/beetsplug/zero.py b/beetsplug/zero.py\n--- a/beetsplug/zero.py\n+++ b/beetsplug/zero.py\n@@ -17,7 +17,7 @@\n import re\n import logging\n from beets.plugins import BeetsPlugin\n-from beets.library import Item\n+from beets.mediafile import MediaFile\n from beets.importer import action\n from beets.util import confit\n \n@@ -51,7 +51,7 @@\n log.warn(u'[zero] field \\'{0}\\' ignored, zeroing '\n u'it would be dangerous'.format(field))\n continue\n- if field not in Item._fields.keys():\n+ if field not in MediaFile.fields():\n log.error(u'[zero] invalid field: {0}'.format(field))\n continue\n \n@@ -59,7 +59,7 @@\n self.patterns[field] = self.config[field].as_str_seq()\n except confit.NotFoundError:\n # Matches everything\n- self.patterns[field] = [u'']\n+ self.patterns[field] = True\n \n def import_task_choice_event(self, session, task):\n \"\"\"Listen for import_task_choice event.\"\"\"\n@@ -73,23 +73,29 @@\n \"\"\"Check if field (as string) is matching any of the patterns in\n the list.\n \"\"\"\n+ if patterns is True:\n+ return True\n for p in patterns:\n if re.search(p, unicode(field), flags=re.IGNORECASE):\n return True\n return False\n \n def write_event(self, item, path, tags):\n- \"\"\"Listen for write event.\"\"\"\n+ \"\"\"Set values in tags to `None` if the key and value are matched\n+ by `self.patterns`.\n+ \"\"\"\n if not self.patterns:\n log.warn(u'[zero] no fields, nothing to do')\n return\n \n for field, patterns in self.patterns.items():\n- if field not in tags:\n- log.error(u'[zero] no such field: {0}'.format(field))\n- continue\n-\n- value = tags[field]\n- if self.match_patterns(value, patterns):\n+ if field in tags:\n+ value = tags[field]\n+ match = self.match_patterns(tags[field], patterns)\n+ else:\n+ value = ''\n+ match = patterns is True\n+\n+ if match:\n log.debug(u'[zero] {0}: {1} -> None'.format(field, value))\n tags[field] = None\n", "issue": "zero: Permit stripping album art\nIt would be nice to have the option of automatically clearing embedded art when an item is imported. Whether or not a media item actually contains embedded art, beets should ensure the resulting media item has no embedded art after being import. There are two plugins which would offer a good place of implementation for this feature: the EmbedArt and the Zero plugins.\n\nThe EmbedArt plugin already supports a command called `clearart` which allows for the manual stripping of embedded art from items which match a query. Since the the `clearart` operation is not automatic and there is no option for automation, an extra step is required on the importation of media.\n\nWhat probably makes more sense is implementing support for the art field in the Zero plugin. It can only be assumed that people who would use such a feature already have the Zero plugin deployed for clearing other fields. That said, it would require less configuration as all a user would need to do is drop the art field in their configuration for the Zero plugin. Moreover, with the EmbedArt plugin, it embeds art into media items by default. 
This feature would need to be disabled in the configuration as well.\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2013, Blemjhoo Tezoulbr <[email protected]>.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\" Clears tag fields in media files.\"\"\"\n\nimport re\nimport logging\nfrom beets.plugins import BeetsPlugin\nfrom beets.library import Item\nfrom beets.importer import action\nfrom beets.util import confit\n\n__author__ = '[email protected]'\n__version__ = '0.10'\n\nlog = logging.getLogger('beets')\n\n\nclass ZeroPlugin(BeetsPlugin):\n\n _instance = None\n\n def __init__(self):\n super(ZeroPlugin, self).__init__()\n\n # Listeners.\n self.register_listener('write', self.write_event)\n self.register_listener('import_task_choice',\n self.import_task_choice_event)\n\n self.config.add({\n 'fields': [],\n })\n\n self.patterns = {}\n self.warned = False\n\n for field in self.config['fields'].as_str_seq():\n if field in ('id', 'path', 'album_id'):\n log.warn(u'[zero] field \\'{0}\\' ignored, zeroing '\n u'it would be dangerous'.format(field))\n continue\n if field not in Item._fields.keys():\n log.error(u'[zero] invalid field: {0}'.format(field))\n continue\n\n try:\n self.patterns[field] = self.config[field].as_str_seq()\n except confit.NotFoundError:\n # Matches everything\n self.patterns[field] = [u'']\n\n def import_task_choice_event(self, session, task):\n \"\"\"Listen for import_task_choice event.\"\"\"\n if task.choice_flag == action.ASIS and not self.warned:\n log.warn(u'[zero] cannot zero in \\\"as-is\\\" mode')\n self.warned = True\n # TODO request write in as-is mode\n\n @classmethod\n def match_patterns(cls, field, patterns):\n \"\"\"Check if field (as string) is matching any of the patterns in\n the list.\n \"\"\"\n for p in patterns:\n if re.search(p, unicode(field), flags=re.IGNORECASE):\n return True\n return False\n\n def write_event(self, item, path, tags):\n \"\"\"Listen for write event.\"\"\"\n if not self.patterns:\n log.warn(u'[zero] no fields, nothing to do')\n return\n\n for field, patterns in self.patterns.items():\n if field not in tags:\n log.error(u'[zero] no such field: {0}'.format(field))\n continue\n\n value = tags[field]\n if self.match_patterns(value, patterns):\n log.debug(u'[zero] {0}: {1} -> None'.format(field, value))\n tags[field] = None\n", "path": "beetsplug/zero.py"}], "after_files": [{"content": "# This file is part of beets.\n# Copyright 2013, Blemjhoo Tezoulbr <[email protected]>.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this 
permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\" Clears tag fields in media files.\"\"\"\n\nimport re\nimport logging\nfrom beets.plugins import BeetsPlugin\nfrom beets.mediafile import MediaFile\nfrom beets.importer import action\nfrom beets.util import confit\n\n__author__ = '[email protected]'\n__version__ = '0.10'\n\nlog = logging.getLogger('beets')\n\n\nclass ZeroPlugin(BeetsPlugin):\n\n _instance = None\n\n def __init__(self):\n super(ZeroPlugin, self).__init__()\n\n # Listeners.\n self.register_listener('write', self.write_event)\n self.register_listener('import_task_choice',\n self.import_task_choice_event)\n\n self.config.add({\n 'fields': [],\n })\n\n self.patterns = {}\n self.warned = False\n\n for field in self.config['fields'].as_str_seq():\n if field in ('id', 'path', 'album_id'):\n log.warn(u'[zero] field \\'{0}\\' ignored, zeroing '\n u'it would be dangerous'.format(field))\n continue\n if field not in MediaFile.fields():\n log.error(u'[zero] invalid field: {0}'.format(field))\n continue\n\n try:\n self.patterns[field] = self.config[field].as_str_seq()\n except confit.NotFoundError:\n # Matches everything\n self.patterns[field] = True\n\n def import_task_choice_event(self, session, task):\n \"\"\"Listen for import_task_choice event.\"\"\"\n if task.choice_flag == action.ASIS and not self.warned:\n log.warn(u'[zero] cannot zero in \\\"as-is\\\" mode')\n self.warned = True\n # TODO request write in as-is mode\n\n @classmethod\n def match_patterns(cls, field, patterns):\n \"\"\"Check if field (as string) is matching any of the patterns in\n the list.\n \"\"\"\n if patterns is True:\n return True\n for p in patterns:\n if re.search(p, unicode(field), flags=re.IGNORECASE):\n return True\n return False\n\n def write_event(self, item, path, tags):\n \"\"\"Set values in tags to `None` if the key and value are matched\n by `self.patterns`.\n \"\"\"\n if not self.patterns:\n log.warn(u'[zero] no fields, nothing to do')\n return\n\n for field, patterns in self.patterns.items():\n if field in tags:\n value = tags[field]\n match = self.match_patterns(tags[field], patterns)\n else:\n value = ''\n match = patterns is True\n\n if match:\n log.debug(u'[zero] {0}: {1} -> None'.format(field, value))\n tags[field] = None\n", "path": "beetsplug/zero.py"}]} | 1,415 | 549 |
gh_patches_debug_32722 | rasdani/github-patches | git_diff | pypa__pip-3443 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Protect a few more requests imports
In Debian, we unbundle requests, and further, we unbundled all vendored packages from requests. This causes pip's vendoring algorithm to fail. I had to add this patch to the Debian packaging.
```
From 144ba146cde273b815a80859537b09c068fd47e6 Mon Sep 17 00:00:00 2001
From: Barry Warsaw <[email protected]>
Date: Fri, 29 Jan 2016 16:56:43 -0500
Subject: Debian already unbundles things from requests.
Patch-Name: handle-unbundled-requests.patch
---
pip/_vendor/__init__.py | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/pip/_vendor/__init__.py b/pip/_vendor/__init__.py
index 1cb35a1..c64896a 100644
--- a/pip/_vendor/__init__.py
+++ b/pip/_vendor/__init__.py
@@ -82,8 +82,12 @@ if DEBUNDLED:
vendored("requests.packages.urllib3.fields")
vendored("requests.packages.urllib3.filepost")
vendored("requests.packages.urllib3.packages")
- vendored("requests.packages.urllib3.packages.ordered_dict")
- vendored("requests.packages.urllib3.packages.six")
+ try:
+ vendored("requests.packages.urllib3.packages.ordered_dict")
+ vendored("requests.packages.urllib3.packages.six")
+ except ImportError:
+ # Debian already unbundles these from requests.
+ pass
vendored("requests.packages.urllib3.packages.ssl_match_hostname")
vendored("requests.packages.urllib3.packages.ssl_match_hostname."
"_implementation")
```
--- END ISSUE ---
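A minimal sketch of the guarded aliasing the Debian patch applies; `vendored` below is a simplified stand-in with the same contract as pip's helper, so the snippet runs on its own whether or not `requests` is installed:

```python
def vendored(modulename):
    # Simplified: pip's real helper also aliases the module into
    # pip._vendor; a plain import is enough to show the failure mode.
    __import__(modulename)


# Wrap only the submodules a distributor may have unbundled.
for optional in (
    "requests.packages.urllib3.packages.ordered_dict",
    "requests.packages.urllib3.packages.six",
):
    try:
        vendored(optional)
    except ImportError:
        # Debian already unbundles these from requests; skip the alias.
        pass
```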
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pip/_vendor/__init__.py`
Content:
```
1 """
2 pip._vendor is for vendoring dependencies of pip to prevent needing pip to
3 depend on something external.
4
5 Files inside of pip._vendor should be considered immutable and should only be
6 updated to versions from upstream.
7 """
8 from __future__ import absolute_import
9
10 import glob
11 import os.path
12 import sys
13
14 # Downstream redistributors which have debundled our dependencies should also
15 # patch this value to be true. This will trigger the additional patching
16 # to cause things like "six" to be available as pip.
17 DEBUNDLED = False
18
19 # By default, look in this directory for a bunch of .whl files which we will
20 # add to the beginning of sys.path before attempting to import anything. This
21 # is done to support downstream re-distributors like Debian and Fedora who
22 # wish to create their own Wheels for our dependencies to aid in debundling.
23 WHEEL_DIR = os.path.abspath(os.path.dirname(__file__))
24
25
26 # Define a small helper function to alias our vendored modules to the real ones
27 # if the vendored ones do not exist. This idea of this was taken from
28 # https://github.com/kennethreitz/requests/pull/2567.
29 def vendored(modulename):
30 vendored_name = "{0}.{1}".format(__name__, modulename)
31
32 try:
33 __import__(vendored_name, globals(), locals(), level=0)
34 except ImportError:
35 __import__(modulename, globals(), locals(), level=0)
36 sys.modules[vendored_name] = sys.modules[modulename]
37 base, head = vendored_name.rsplit(".", 1)
38 setattr(sys.modules[base], head, sys.modules[modulename])
39
40
41 # If we're operating in a debundled setup, then we want to go ahead and trigger
42 # the aliasing of our vendored libraries as well as looking for wheels to add
43 # to our sys.path. This will cause all of this code to be a no-op typically
44 # however downstream redistributors can enable it in a consistent way across
45 # all platforms.
46 if DEBUNDLED:
47 # Actually look inside of WHEEL_DIR to find .whl files and add them to the
48 # front of our sys.path.
49 sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path
50
51 # Actually alias all of our vendored dependencies.
52 vendored("cachecontrol")
53 vendored("colorama")
54 vendored("distlib")
55 vendored("html5lib")
56 vendored("lockfile")
57 vendored("six")
58 vendored("six.moves")
59 vendored("six.moves.urllib")
60 vendored("packaging")
61 vendored("packaging.version")
62 vendored("packaging.specifiers")
63 vendored("pkg_resources")
64 vendored("progress")
65 vendored("retrying")
66 vendored("requests")
67 vendored("requests.packages")
68 vendored("requests.packages.urllib3")
69 vendored("requests.packages.urllib3._collections")
70 vendored("requests.packages.urllib3.connection")
71 vendored("requests.packages.urllib3.connectionpool")
72 vendored("requests.packages.urllib3.contrib")
73 try:
74 vendored("requests.packages.urllib3.contrib.ntlmpool")
75 except ImportError:
76 pass
77 try:
78 vendored("requests.packages.urllib3.contrib.pyopenssl")
79 except ImportError:
80 pass
81 vendored("requests.packages.urllib3.exceptions")
82 vendored("requests.packages.urllib3.fields")
83 vendored("requests.packages.urllib3.filepost")
84 vendored("requests.packages.urllib3.packages")
85 vendored("requests.packages.urllib3.packages.ordered_dict")
86 vendored("requests.packages.urllib3.packages.six")
87 vendored("requests.packages.urllib3.packages.ssl_match_hostname")
88 vendored("requests.packages.urllib3.packages.ssl_match_hostname."
89 "_implementation")
90 vendored("requests.packages.urllib3.poolmanager")
91 vendored("requests.packages.urllib3.request")
92 vendored("requests.packages.urllib3.response")
93 vendored("requests.packages.urllib3.util")
94 vendored("requests.packages.urllib3.util.connection")
95 vendored("requests.packages.urllib3.util.request")
96 vendored("requests.packages.urllib3.util.response")
97 vendored("requests.packages.urllib3.util.retry")
98 vendored("requests.packages.urllib3.util.ssl_")
99 vendored("requests.packages.urllib3.util.timeout")
100 vendored("requests.packages.urllib3.util.url")
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pip/_vendor/__init__.py b/pip/_vendor/__init__.py
--- a/pip/_vendor/__init__.py
+++ b/pip/_vendor/__init__.py
@@ -32,10 +32,22 @@
try:
__import__(vendored_name, globals(), locals(), level=0)
except ImportError:
- __import__(modulename, globals(), locals(), level=0)
- sys.modules[vendored_name] = sys.modules[modulename]
- base, head = vendored_name.rsplit(".", 1)
- setattr(sys.modules[base], head, sys.modules[modulename])
+ try:
+ __import__(modulename, globals(), locals(), level=0)
+ except ImportError:
+ # We can just silently allow import failures to pass here. If we
+ # got to this point it means that ``import pip._vendor.whatever``
+ # failed and so did ``import whatever``. Since we're importing this
+ # upfront in an attempt to alias imports, not erroring here will
+ # just mean we get a regular import error whenever pip *actually*
+ # tries to import one of these modules to use it, which actually
+ # gives us a better error message than we would have otherwise
+ # gotten.
+ pass
+ else:
+ sys.modules[vendored_name] = sys.modules[modulename]
+ base, head = vendored_name.rsplit(".", 1)
+ setattr(sys.modules[base], head, sys.modules[modulename])
# If we're operating in a debundled setup, then we want to go ahead and trigger
@@ -70,14 +82,8 @@
vendored("requests.packages.urllib3.connection")
vendored("requests.packages.urllib3.connectionpool")
vendored("requests.packages.urllib3.contrib")
- try:
- vendored("requests.packages.urllib3.contrib.ntlmpool")
- except ImportError:
- pass
- try:
- vendored("requests.packages.urllib3.contrib.pyopenssl")
- except ImportError:
- pass
+ vendored("requests.packages.urllib3.contrib.ntlmpool")
+ vendored("requests.packages.urllib3.contrib.pyopenssl")
vendored("requests.packages.urllib3.exceptions")
vendored("requests.packages.urllib3.fields")
vendored("requests.packages.urllib3.filepost")
| {"golden_diff": "diff --git a/pip/_vendor/__init__.py b/pip/_vendor/__init__.py\n--- a/pip/_vendor/__init__.py\n+++ b/pip/_vendor/__init__.py\n@@ -32,10 +32,22 @@\n try:\n __import__(vendored_name, globals(), locals(), level=0)\n except ImportError:\n- __import__(modulename, globals(), locals(), level=0)\n- sys.modules[vendored_name] = sys.modules[modulename]\n- base, head = vendored_name.rsplit(\".\", 1)\n- setattr(sys.modules[base], head, sys.modules[modulename])\n+ try:\n+ __import__(modulename, globals(), locals(), level=0)\n+ except ImportError:\n+ # We can just silently allow import failures to pass here. If we\n+ # got to this point it means that ``import pip._vendor.whatever``\n+ # failed and so did ``import whatever``. Since we're importing this\n+ # upfront in an attempt to alias imports, not erroring here will\n+ # just mean we get a regular import error whenever pip *actually*\n+ # tries to import one of these modules to use it, which actually\n+ # gives us a better error message than we would have otherwise\n+ # gotten.\n+ pass\n+ else:\n+ sys.modules[vendored_name] = sys.modules[modulename]\n+ base, head = vendored_name.rsplit(\".\", 1)\n+ setattr(sys.modules[base], head, sys.modules[modulename])\n \n \n # If we're operating in a debundled setup, then we want to go ahead and trigger\n@@ -70,14 +82,8 @@\n vendored(\"requests.packages.urllib3.connection\")\n vendored(\"requests.packages.urllib3.connectionpool\")\n vendored(\"requests.packages.urllib3.contrib\")\n- try:\n- vendored(\"requests.packages.urllib3.contrib.ntlmpool\")\n- except ImportError:\n- pass\n- try:\n- vendored(\"requests.packages.urllib3.contrib.pyopenssl\")\n- except ImportError:\n- pass\n+ vendored(\"requests.packages.urllib3.contrib.ntlmpool\")\n+ vendored(\"requests.packages.urllib3.contrib.pyopenssl\")\n vendored(\"requests.packages.urllib3.exceptions\")\n vendored(\"requests.packages.urllib3.fields\")\n vendored(\"requests.packages.urllib3.filepost\")\n", "issue": "Protect a few more requests imports\nIn Debian, we unbundle requests, and further, we unbundled all vendored packages from requests. This causes pip's vendoring algorithm to fail. 
I had to add this patch to the Debian packaging.\n\n```\nFrom 144ba146cde273b815a80859537b09c068fd47e6 Mon Sep 17 00:00:00 2001\nFrom: Barry Warsaw <[email protected]>\nDate: Fri, 29 Jan 2016 16:56:43 -0500\nSubject: Debian already unbundles things from requests.\n\nPatch-Name: handle-unbundled-requests.patch\n\n---\n pip/_vendor/__init__.py | 8 ++++++--\n 1 file changed, 6 insertions(+), 2 deletions(-)\n\ndiff --git a/pip/_vendor/__init__.py b/pip/_vendor/__init__.py\nindex 1cb35a1..c64896a 100644\n--- a/pip/_vendor/__init__.py\n+++ b/pip/_vendor/__init__.py\n@@ -82,8 +82,12 @@ if DEBUNDLED:\n vendored(\"requests.packages.urllib3.fields\")\n vendored(\"requests.packages.urllib3.filepost\")\n vendored(\"requests.packages.urllib3.packages\")\n- vendored(\"requests.packages.urllib3.packages.ordered_dict\")\n- vendored(\"requests.packages.urllib3.packages.six\")\n+ try:\n+ vendored(\"requests.packages.urllib3.packages.ordered_dict\")\n+ vendored(\"requests.packages.urllib3.packages.six\")\n+ except ImportError:\n+ # Debian already unbundles these from requests.\n+ pass\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname\")\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname.\"\n \"_implementation\")\n```\n\n", "before_files": [{"content": "\"\"\"\npip._vendor is for vendoring dependencies of pip to prevent needing pip to\ndepend on something external.\n\nFiles inside of pip._vendor should be considered immutable and should only be\nupdated to versions from upstream.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport glob\nimport os.path\nimport sys\n\n# Downstream redistributors which have debundled our dependencies should also\n# patch this value to be true. This will trigger the additional patching\n# to cause things like \"six\" to be available as pip.\nDEBUNDLED = False\n\n# By default, look in this directory for a bunch of .whl files which we will\n# add to the beginning of sys.path before attempting to import anything. This\n# is done to support downstream re-distributors like Debian and Fedora who\n# wish to create their own Wheels for our dependencies to aid in debundling.\nWHEEL_DIR = os.path.abspath(os.path.dirname(__file__))\n\n\n# Define a small helper function to alias our vendored modules to the real ones\n# if the vendored ones do not exist. This idea of this was taken from\n# https://github.com/kennethreitz/requests/pull/2567.\ndef vendored(modulename):\n vendored_name = \"{0}.{1}\".format(__name__, modulename)\n\n try:\n __import__(vendored_name, globals(), locals(), level=0)\n except ImportError:\n __import__(modulename, globals(), locals(), level=0)\n sys.modules[vendored_name] = sys.modules[modulename]\n base, head = vendored_name.rsplit(\".\", 1)\n setattr(sys.modules[base], head, sys.modules[modulename])\n\n\n# If we're operating in a debundled setup, then we want to go ahead and trigger\n# the aliasing of our vendored libraries as well as looking for wheels to add\n# to our sys.path. 
This will cause all of this code to be a no-op typically\n# however downstream redistributors can enable it in a consistent way across\n# all platforms.\nif DEBUNDLED:\n # Actually look inside of WHEEL_DIR to find .whl files and add them to the\n # front of our sys.path.\n sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, \"*.whl\")) + sys.path\n\n # Actually alias all of our vendored dependencies.\n vendored(\"cachecontrol\")\n vendored(\"colorama\")\n vendored(\"distlib\")\n vendored(\"html5lib\")\n vendored(\"lockfile\")\n vendored(\"six\")\n vendored(\"six.moves\")\n vendored(\"six.moves.urllib\")\n vendored(\"packaging\")\n vendored(\"packaging.version\")\n vendored(\"packaging.specifiers\")\n vendored(\"pkg_resources\")\n vendored(\"progress\")\n vendored(\"retrying\")\n vendored(\"requests\")\n vendored(\"requests.packages\")\n vendored(\"requests.packages.urllib3\")\n vendored(\"requests.packages.urllib3._collections\")\n vendored(\"requests.packages.urllib3.connection\")\n vendored(\"requests.packages.urllib3.connectionpool\")\n vendored(\"requests.packages.urllib3.contrib\")\n try:\n vendored(\"requests.packages.urllib3.contrib.ntlmpool\")\n except ImportError:\n pass\n try:\n vendored(\"requests.packages.urllib3.contrib.pyopenssl\")\n except ImportError:\n pass\n vendored(\"requests.packages.urllib3.exceptions\")\n vendored(\"requests.packages.urllib3.fields\")\n vendored(\"requests.packages.urllib3.filepost\")\n vendored(\"requests.packages.urllib3.packages\")\n vendored(\"requests.packages.urllib3.packages.ordered_dict\")\n vendored(\"requests.packages.urllib3.packages.six\")\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname\")\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname.\"\n \"_implementation\")\n vendored(\"requests.packages.urllib3.poolmanager\")\n vendored(\"requests.packages.urllib3.request\")\n vendored(\"requests.packages.urllib3.response\")\n vendored(\"requests.packages.urllib3.util\")\n vendored(\"requests.packages.urllib3.util.connection\")\n vendored(\"requests.packages.urllib3.util.request\")\n vendored(\"requests.packages.urllib3.util.response\")\n vendored(\"requests.packages.urllib3.util.retry\")\n vendored(\"requests.packages.urllib3.util.ssl_\")\n vendored(\"requests.packages.urllib3.util.timeout\")\n vendored(\"requests.packages.urllib3.util.url\")\n", "path": "pip/_vendor/__init__.py"}], "after_files": [{"content": "\"\"\"\npip._vendor is for vendoring dependencies of pip to prevent needing pip to\ndepend on something external.\n\nFiles inside of pip._vendor should be considered immutable and should only be\nupdated to versions from upstream.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport glob\nimport os.path\nimport sys\n\n# Downstream redistributors which have debundled our dependencies should also\n# patch this value to be true. This will trigger the additional patching\n# to cause things like \"six\" to be available as pip.\nDEBUNDLED = False\n\n# By default, look in this directory for a bunch of .whl files which we will\n# add to the beginning of sys.path before attempting to import anything. This\n# is done to support downstream re-distributors like Debian and Fedora who\n# wish to create their own Wheels for our dependencies to aid in debundling.\nWHEEL_DIR = os.path.abspath(os.path.dirname(__file__))\n\n\n# Define a small helper function to alias our vendored modules to the real ones\n# if the vendored ones do not exist. 
This idea of this was taken from\n# https://github.com/kennethreitz/requests/pull/2567.\ndef vendored(modulename):\n vendored_name = \"{0}.{1}\".format(__name__, modulename)\n\n try:\n __import__(vendored_name, globals(), locals(), level=0)\n except ImportError:\n try:\n __import__(modulename, globals(), locals(), level=0)\n except ImportError:\n # We can just silently allow import failures to pass here. If we\n # got to this point it means that ``import pip._vendor.whatever``\n # failed and so did ``import whatever``. Since we're importing this\n # upfront in an attempt to alias imports, not erroring here will\n # just mean we get a regular import error whenever pip *actually*\n # tries to import one of these modules to use it, which actually\n # gives us a better error message than we would have otherwise\n # gotten.\n pass\n else:\n sys.modules[vendored_name] = sys.modules[modulename]\n base, head = vendored_name.rsplit(\".\", 1)\n setattr(sys.modules[base], head, sys.modules[modulename])\n\n\n# If we're operating in a debundled setup, then we want to go ahead and trigger\n# the aliasing of our vendored libraries as well as looking for wheels to add\n# to our sys.path. This will cause all of this code to be a no-op typically\n# however downstream redistributors can enable it in a consistent way across\n# all platforms.\nif DEBUNDLED:\n # Actually look inside of WHEEL_DIR to find .whl files and add them to the\n # front of our sys.path.\n sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, \"*.whl\")) + sys.path\n\n # Actually alias all of our vendored dependencies.\n vendored(\"cachecontrol\")\n vendored(\"colorama\")\n vendored(\"distlib\")\n vendored(\"html5lib\")\n vendored(\"lockfile\")\n vendored(\"six\")\n vendored(\"six.moves\")\n vendored(\"six.moves.urllib\")\n vendored(\"packaging\")\n vendored(\"packaging.version\")\n vendored(\"packaging.specifiers\")\n vendored(\"pkg_resources\")\n vendored(\"progress\")\n vendored(\"retrying\")\n vendored(\"requests\")\n vendored(\"requests.packages\")\n vendored(\"requests.packages.urllib3\")\n vendored(\"requests.packages.urllib3._collections\")\n vendored(\"requests.packages.urllib3.connection\")\n vendored(\"requests.packages.urllib3.connectionpool\")\n vendored(\"requests.packages.urllib3.contrib\")\n vendored(\"requests.packages.urllib3.contrib.ntlmpool\")\n vendored(\"requests.packages.urllib3.contrib.pyopenssl\")\n vendored(\"requests.packages.urllib3.exceptions\")\n vendored(\"requests.packages.urllib3.fields\")\n vendored(\"requests.packages.urllib3.filepost\")\n vendored(\"requests.packages.urllib3.packages\")\n vendored(\"requests.packages.urllib3.packages.ordered_dict\")\n vendored(\"requests.packages.urllib3.packages.six\")\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname\")\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname.\"\n \"_implementation\")\n vendored(\"requests.packages.urllib3.poolmanager\")\n vendored(\"requests.packages.urllib3.request\")\n vendored(\"requests.packages.urllib3.response\")\n vendored(\"requests.packages.urllib3.util\")\n vendored(\"requests.packages.urllib3.util.connection\")\n vendored(\"requests.packages.urllib3.util.request\")\n vendored(\"requests.packages.urllib3.util.response\")\n vendored(\"requests.packages.urllib3.util.retry\")\n vendored(\"requests.packages.urllib3.util.ssl_\")\n vendored(\"requests.packages.urllib3.util.timeout\")\n vendored(\"requests.packages.urllib3.util.url\")\n", "path": "pip/_vendor/__init__.py"}]} | 1,918 | 553 |
gh_patches_debug_20500 | rasdani/github-patches | git_diff | AlexsLemonade__refinebio-3299 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cache Docker Images by Branch
### Context
We want to be able to cache the Docker image layers that are created locally as testing artifacts, so they can be reused by GitHub Actions.
The current prepare_images.sh does this, but there was an issue with the definition of branch_name.
We also don't want to drop support for non-CCDL members developing locally.

### Solution or next step
- After #3285 is merged, we should set sensible defaults that can be overridden for external contributors.
- Get the current branch name or tag and set it when pushing images to the ccdl (staging) repo.
Determine:
- If they don't have access to the Docker repo, should we just build locally and not push?
- How long can Docker tags be, and are they compatible with our longer branch names? (See the note below.)
--- END ISSUE ---
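Note on the last question above: a Docker tag may contain at most 128 characters drawn from `[A-Za-z0-9_.-]` and may not start with a period or a dash, so branch names containing `/` (e.g. `feature/cache-images`) need sanitizing before being used as tags. A minimal sketch of such a mapping (the function name and fallback value are illustrative, not taken from the repository):

```python
import re

def branch_to_docker_tag(branch: str, limit: int = 128) -> str:
    """Map a git branch name onto a valid Docker tag."""
    # Replace characters Docker tags disallow (slashes in particular),
    # strip any leading "." or "-", then enforce the 128-character limit.
    tag = re.sub(r"[^A-Za-z0-9_.-]", "-", branch)
    tag = tag.lstrip(".-") or "latest"
    return tag[:limit]
```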
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `common/setup.py`
Content:
```
1 import os
2
3 from setuptools import find_packages, setup
4
5 # allow setup.py to be run from any path
6 os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
7
8 VERSION_FILE = "version"
9 try:
10 with open(VERSION_FILE, "rt") as version_file:
11 version_string = version_file.read().strip().split("-")[0]
12 except OSError:
13 print(
14 "Cannot read version to determine System Version."
15 " Please create a file common/version containing an up to date System Version."
16 )
17 raise
18
19 setup(
20 name="data-refinery-common",
21 version=version_string,
22 packages=find_packages(),
23 include_package_data=True,
24 # These values are based on what is in common/requirements.txt.
25 install_requires=[
26 "boto3>=1.9.16",
27 "coverage>=4.5.1",
28 "daiquiri>=1.5.0",
29 "django>=3.2,<4",
30 "raven>=6.9.0",
31 "requests>=2.10.1",
32 "retrying>=1.3.3",
33 "psycopg2-binary>=2.7.5",
34 ],
35 license="BSD License",
36 description="Common functionality to be shared between Data Refinery sub-projects.",
37 url="https://www.greenelab.com",
38 author="Kurt Wheeler",
39 author_email="[email protected]",
40 classifiers=[
41 "Environment :: Web Environment",
42 "Framework :: Django",
43 "Intended Audience :: Developers",
44 "License :: OSI Approved :: BSD License",
45 "Operating System :: Ubuntu",
46 "Programming Language :: Python",
47 "Programming Language :: Python :: 3.5",
48 "Programming Language :: Python :: 3.6",
49 "Topic :: Internet :: WWW/HTTP",
50 ],
51 )
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/common/setup.py b/common/setup.py
--- a/common/setup.py
+++ b/common/setup.py
@@ -1,4 +1,6 @@
import os
+import re
+from datetime import datetime
from setuptools import find_packages, setup
@@ -11,11 +13,21 @@
version_string = version_file.read().strip().split("-")[0]
except OSError:
print(
- "Cannot read version to determine System Version."
- " Please create a file common/version containing an up to date System Version."
+ "Cannot read version file to determine system version. "
+ "Please create a file common/version containing an up to date system version."
)
raise
+version_re = re.compile(
+ r"^([1-9][0-9]*!)?(0|[1-9][0-9]*)"
+ "(\.(0|[1-9][0-9]*))*((a|b|rc)(0|[1-9][0-9]*))"
+ "?(\.post(0|[1-9][0-9]*))?(\.dev(0|[1-9][0-9]*))?$"
+)
+if not version_re.match(version_string):
+ # Generate version based on the datetime.now(): e.g., 2023.5.17.dev1684352560.
+ now = datetime.now()
+ version_string = f"{now.strftime('%Y.%-m.%-d.dev')}{int(datetime.timestamp(now))}"
+
setup(
name="data-refinery-common",
version=version_string,
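Note: with this patch, a `version` file that does not hold a PEP 440-style release string no longer aborts the build; setup falls back to a timestamp-based dev version such as `2023.5.17.dev1684352560` (the example in the diff's comment). Two caveats, read directly from the diff text: the `\.` escapes sit in a plain (non-raw) string, which newer CPython releases warn about as invalid escape sequences, and `%-m`/`%-d` are glibc `strftime` extensions, so the fallback presumes a Linux build host. A quick illustration of the generated string (the exact suffix depends on the local timezone):

```python
from datetime import datetime

now = datetime(2023, 5, 17, 12, 0, 0)
version = f"{now.strftime('%Y.%-m.%-d.dev')}{int(datetime.timestamp(now))}"
# e.g. "2023.5.17.dev1684324800" on a Linux host
```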
| {"golden_diff": "diff --git a/common/setup.py b/common/setup.py\n--- a/common/setup.py\n+++ b/common/setup.py\n@@ -1,4 +1,6 @@\n import os\n+import re\n+from datetime import datetime\n \n from setuptools import find_packages, setup\n \n@@ -11,11 +13,21 @@\n version_string = version_file.read().strip().split(\"-\")[0]\n except OSError:\n print(\n- \"Cannot read version to determine System Version.\"\n- \" Please create a file common/version containing an up to date System Version.\"\n+ \"Cannot read version file to determine system version. \"\n+ \"Please create a file common/version containing an up to date system version.\"\n )\n raise\n \n+version_re = re.compile(\n+ r\"^([1-9][0-9]*!)?(0|[1-9][0-9]*)\"\n+ \"(\\.(0|[1-9][0-9]*))*((a|b|rc)(0|[1-9][0-9]*))\"\n+ \"?(\\.post(0|[1-9][0-9]*))?(\\.dev(0|[1-9][0-9]*))?$\"\n+)\n+if not version_re.match(version_string):\n+ # Generate version based on the datetime.now(): e.g., 2023.5.17.dev1684352560.\n+ now = datetime.now()\n+ version_string = f\"{now.strftime('%Y.%-m.%-d.dev')}{int(datetime.timestamp(now))}\"\n+\n setup(\n name=\"data-refinery-common\",\n version=version_string,\n", "issue": "Cache Docker Images by Branch\n### Context\r\n\r\nWe want to be able to cache docker image layers that are created locally as testing artfacts locally to be used by github actions.\r\nThe current prepare_images.sh does this but there was an issue with the definition for branch_name.\r\nWe also don't want to remove support non-ccdl members developing locally.\r\n\r\n\r\n\r\n\r\n\r\n### Solution or next step\r\n\r\n- After #3285 is merged, we should set sensible defaults that can be overridden for external contributors.\r\n- Get current branch name or tag to be set when pushing images to ccdl(staging) repo.\r\n\r\nDetermine:\r\n- If they don't have access to the docker repo should we just build locally and not push?\r\n- How long can docker tags be / are they compatible with our longer branch names.\r\n\n", "before_files": [{"content": "import os\n\nfrom setuptools import find_packages, setup\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nVERSION_FILE = \"version\"\ntry:\n with open(VERSION_FILE, \"rt\") as version_file:\n version_string = version_file.read().strip().split(\"-\")[0]\nexcept OSError:\n print(\n \"Cannot read version to determine System Version.\"\n \" Please create a file common/version containing an up to date System Version.\"\n )\n raise\n\nsetup(\n name=\"data-refinery-common\",\n version=version_string,\n packages=find_packages(),\n include_package_data=True,\n # These values are based on what is in common/requirements.txt.\n install_requires=[\n \"boto3>=1.9.16\",\n \"coverage>=4.5.1\",\n \"daiquiri>=1.5.0\",\n \"django>=3.2,<4\",\n \"raven>=6.9.0\",\n \"requests>=2.10.1\",\n \"retrying>=1.3.3\",\n \"psycopg2-binary>=2.7.5\",\n ],\n license=\"BSD License\",\n description=\"Common functionality to be shared between Data Refinery sub-projects.\",\n url=\"https://www.greenelab.com\",\n author=\"Kurt Wheeler\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Environment :: Web Environment\",\n \"Framework :: Django\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: Ubuntu\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "common/setup.py"}], "after_files": 
[{"content": "import os\nimport re\nfrom datetime import datetime\n\nfrom setuptools import find_packages, setup\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nVERSION_FILE = \"version\"\ntry:\n with open(VERSION_FILE, \"rt\") as version_file:\n version_string = version_file.read().strip().split(\"-\")[0]\nexcept OSError:\n print(\n \"Cannot read version file to determine system version. \"\n \"Please create a file common/version containing an up to date system version.\"\n )\n raise\n\nversion_re = re.compile(\n r\"^([1-9][0-9]*!)?(0|[1-9][0-9]*)\"\n \"(\\.(0|[1-9][0-9]*))*((a|b|rc)(0|[1-9][0-9]*))\"\n \"?(\\.post(0|[1-9][0-9]*))?(\\.dev(0|[1-9][0-9]*))?$\"\n)\nif not version_re.match(version_string):\n # Generate version based on the datetime.now(): e.g., 2023.5.17.dev1684352560.\n now = datetime.now()\n version_string = f\"{now.strftime('%Y.%-m.%-d.dev')}{int(datetime.timestamp(now))}\"\n\nsetup(\n name=\"data-refinery-common\",\n version=version_string,\n packages=find_packages(),\n include_package_data=True,\n # These values are based on what is in common/requirements.txt.\n install_requires=[\n \"boto3>=1.9.16\",\n \"coverage>=4.5.1\",\n \"daiquiri>=1.5.0\",\n \"django>=3.2,<4\",\n \"raven>=6.9.0\",\n \"requests>=2.10.1\",\n \"retrying>=1.3.3\",\n \"psycopg2-binary>=2.7.5\",\n ],\n license=\"BSD License\",\n description=\"Common functionality to be shared between Data Refinery sub-projects.\",\n url=\"https://www.greenelab.com\",\n author=\"Kurt Wheeler\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Environment :: Web Environment\",\n \"Framework :: Django\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: Ubuntu\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "common/setup.py"}]} | 972 | 353 |
gh_patches_debug_25112 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-668 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
capture_backtrace raises AttributeError on PEP-420 namespace packages
The new `capture_backtrace` function in `scout_apm.core.backtrace` raises an AttributeError when the stack includes a [PEP-420] namespace package.
This is caused by the [`module_filepath` function](https://github.com/scoutapp/scout_apm_python/blob/v2.21.0/src/scout_apm/core/backtrace.py#L26-L33), specifically line 32:
```python
module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]
```
If `sys.modules[root_module]` is a [PEP-420] namespace package, this will raise
```
AttributeError: 'NoneType' object has no attribute 'rsplit'
```
### Steps to reproduce
Create a namespace package, with some modules inside, e.g.:
```
namespace/
foo/
__init__.py
bar/
__init__.py
```
Then on an interactive Python shell:
```
>>> from scout_apm.core.backtrace import module_filepath
>>> from namespace import foo
>>> module_filepath("namespace.foo", "namespace")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/jack/venvs/tmp-a17ac7185189989/lib/python3.8/site-packages/scout_apm/core/backtrace.py", line 32, in module_filepath
module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]
AttributeError: 'NoneType' object has no attribute 'rsplit'
```
### Details
- Tested with version 2.21.0
- Current workaround is to pin version to 2.20.0
[PEP-420]: https://www.python.org/dev/peps/pep-0420/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scout_apm/core/backtrace.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import itertools
5 import os
6 import sys
7 import sysconfig
8 import traceback
9 import warnings
10
11 # Maximum non-Scout frames to target retrieving
12 LIMIT = 50
13 # How many upper frames from inside Scout to ignore
14 IGNORED = 1
15
16
17 def filter_frames(frames):
18 """Filter the stack trace frames down to non-library code."""
19 paths = sysconfig.get_paths()
20 library_paths = {paths["purelib"], paths["platlib"]}
21 for frame in frames:
22 if not any(frame["file"].startswith(exclusion) for exclusion in library_paths):
23 yield frame
24
25
26 def module_filepath(module, filepath):
27 """Get the filepath relative to the base module."""
28 root_module = module.split(".", 1)[0]
29 if root_module == module:
30 return os.path.basename(filepath)
31
32 module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]
33 return filepath.split(module_dir, 1)[-1].lstrip(os.sep)
34
35
36 def filepath(frame):
37 """Get the filepath for frame."""
38 module = frame.f_globals.get("__name__", None)
39 filepath = frame.f_code.co_filename
40
41 if filepath.endswith(".pyc"):
42 filepath = filepath[:-1]
43
44 if not module:
45 return filepath
46 return module_filepath(module, filepath)
47
48
49 if sys.version_info >= (3, 5):
50
51 def stacktrace_walker(tb):
52 """Iterate over each frame of the stack downards for exceptions."""
53 for frame, lineno in traceback.walk_tb(tb):
54 name = frame.f_code.co_name
55 yield {"file": filepath(frame), "line": lineno, "function": name}
56
57 def backtrace_walker():
58 """Iterate over each frame of the stack upwards.
59
60 Taken from python3/traceback.ExtractSummary.extract to support
61 iterating over the entire stack, but without creating a large
62 data structure.
63 """
64 start_frame = sys._getframe().f_back
65 for frame, lineno in traceback.walk_stack(start_frame):
66 name = frame.f_code.co_name
67 yield {"file": filepath(frame), "line": lineno, "function": name}
68
69
70 else:
71
72 def stacktrace_walker(tb):
73 """Iterate over each frame of the stack downards for exceptions."""
74 while tb is not None:
75 lineno = tb.tb_lineno
76 name = tb.tb_frame.f_code.co_name
77 yield {
78 "file": filepath(tb.tb_frame),
79 "line": lineno,
80 "function": name,
81 }
82 tb = tb.tb_next
83
84 def backtrace_walker():
85 """Iterate over each frame of the stack upwards.
86
87 Taken from python2.7/traceback.extract_stack to support iterating
88 over the entire stack, but without creating a large data structure.
89 """
90 try:
91 raise ZeroDivisionError
92 except ZeroDivisionError:
93 # Get the current frame
94 frame = sys.exc_info()[2].tb_frame.f_back
95
96 while frame is not None:
97 lineno = frame.f_lineno
98 name = frame.f_code.co_name
99 yield {"file": filepath(frame), "line": lineno, "function": name}
100 frame = frame.f_back
101
102
103 def capture_backtrace():
104 walker = filter_frames(backtrace_walker())
105 return list(itertools.islice(walker, LIMIT))
106
107
108 def capture_stacktrace(tb):
109 walker = stacktrace_walker(tb)
110 return list(reversed(list(itertools.islice(walker, LIMIT))))
111
112
113 def capture():
114 warnings.warn(
115 "capture is deprecated, instead use capture_backtrace instead.",
116 DeprecationWarning,
117 2,
118 )
119 return capture_backtrace()
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/scout_apm/core/backtrace.py b/src/scout_apm/core/backtrace.py
--- a/src/scout_apm/core/backtrace.py
+++ b/src/scout_apm/core/backtrace.py
@@ -7,6 +7,9 @@
import sysconfig
import traceback
import warnings
+from logging import getLogger
+
+logger = getLogger(__name__)
# Maximum non-Scout frames to target retrieving
LIMIT = 50
@@ -25,11 +28,25 @@
def module_filepath(module, filepath):
"""Get the filepath relative to the base module."""
- root_module = module.split(".", 1)[0]
- if root_module == module:
+ root_module_name = module.split(".", 1)[0]
+ if root_module_name == module:
return os.path.basename(filepath)
- module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]
+ root_module = sys.modules[root_module_name]
+ if root_module.__file__:
+ module_dir = root_module.__file__.rsplit(os.sep, 2)[0]
+ elif root_module.__path__:
+ # Default to using the first path specified for the module.
+ module_dir = root_module.__path__[0].rsplit(os.sep, 1)[0]
+ if len(root_module.__path__) > 1:
+ logger.debug(
+ "{} has {} paths. Use the first and ignore the rest.".format(
+ root_module, len(root_module.__path__)
+ )
+ )
+ else:
+ # If the file path don't exist, then return the full path.
+ return filepath
return filepath.split(module_dir, 1)[-1].lstrip(os.sep)
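Note: the patch leans on how CPython models PEP 420 namespace packages: on recent versions the module object carries `__file__` set to `None`, while `__path__` lists one entry per portion found on `sys.path`, so the code can branch on `__file__` and fall back to the first path. A quick interpreter check against the reproduction layout from the issue (assuming the directory containing `namespace/` is on `sys.path`):

```python
>>> import namespace
>>> print(namespace.__file__)
None
>>> list(namespace.__path__)  # one entry per portion found on sys.path
['/home/user/project/namespace']
```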
| {"golden_diff": "diff --git a/src/scout_apm/core/backtrace.py b/src/scout_apm/core/backtrace.py\n--- a/src/scout_apm/core/backtrace.py\n+++ b/src/scout_apm/core/backtrace.py\n@@ -7,6 +7,9 @@\n import sysconfig\n import traceback\n import warnings\n+from logging import getLogger\n+\n+logger = getLogger(__name__)\n \n # Maximum non-Scout frames to target retrieving\n LIMIT = 50\n@@ -25,11 +28,25 @@\n \n def module_filepath(module, filepath):\n \"\"\"Get the filepath relative to the base module.\"\"\"\n- root_module = module.split(\".\", 1)[0]\n- if root_module == module:\n+ root_module_name = module.split(\".\", 1)[0]\n+ if root_module_name == module:\n return os.path.basename(filepath)\n \n- module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]\n+ root_module = sys.modules[root_module_name]\n+ if root_module.__file__:\n+ module_dir = root_module.__file__.rsplit(os.sep, 2)[0]\n+ elif root_module.__path__:\n+ # Default to using the first path specified for the module.\n+ module_dir = root_module.__path__[0].rsplit(os.sep, 1)[0]\n+ if len(root_module.__path__) > 1:\n+ logger.debug(\n+ \"{} has {} paths. Use the first and ignore the rest.\".format(\n+ root_module, len(root_module.__path__)\n+ )\n+ )\n+ else:\n+ # If the file path don't exist, then return the full path.\n+ return filepath\n return filepath.split(module_dir, 1)[-1].lstrip(os.sep)\n", "issue": "capture_backtrace raises AttributeError on PEP-420 namespace packages\nThe new `capture_backtrace` function in `scout_apm.core.backtrace` raises an AttributeError when the stack includes a [PEP-420] namespace package.\r\n\r\nThis is caused by the [`module_filepath` function](https://github.com/scoutapp/scout_apm_python/blob/v2.21.0/src/scout_apm/core/backtrace.py#L26-L33), specifically line 32:\r\n\r\n```python\r\n module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]\r\n```\r\n\r\nIf `sys.modules[root_module]` is a [PEP-420] namespace package, this will raise\r\n```\r\nAttributeError: 'NoneType' object has no attribute 'rsplit'\r\n```\r\n\r\n### Steps to reproduce\r\n\r\nCreate a namespace package, with some modules inside, e.g.:\r\n```\r\nnamespace/\r\n foo/\r\n __init__.py\r\n bar/\r\n __init__.py\r\n```\r\n\r\nThen on an interactive Python shell:\r\n\r\n```\r\n>>> from scout_apm.core.backtrace import module_filepath\r\n>>> from namespace import foo\r\n>>> module_filepath(\"namespace.foo\", \"namespace\")\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/jack/venvs/tmp-a17ac7185189989/lib/python3.8/site-packages/scout_apm/core/backtrace.py\", line 32, in module_filepath\r\n module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]\r\nAttributeError: 'NoneType' object has no attribute 'rsplit'\r\n```\r\n\r\n### Details\r\n\r\n- Tested with version 2.21.0\r\n- Current workaround is to pin version to 2.20.0\r\n\r\n[PEP-420]: https://www.python.org/dev/peps/pep-0420/\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport itertools\nimport os\nimport sys\nimport sysconfig\nimport traceback\nimport warnings\n\n# Maximum non-Scout frames to target retrieving\nLIMIT = 50\n# How many upper frames from inside Scout to ignore\nIGNORED = 1\n\n\ndef filter_frames(frames):\n \"\"\"Filter the stack trace frames down to non-library code.\"\"\"\n paths = sysconfig.get_paths()\n library_paths = {paths[\"purelib\"], paths[\"platlib\"]}\n for frame in frames:\n if not 
any(frame[\"file\"].startswith(exclusion) for exclusion in library_paths):\n yield frame\n\n\ndef module_filepath(module, filepath):\n \"\"\"Get the filepath relative to the base module.\"\"\"\n root_module = module.split(\".\", 1)[0]\n if root_module == module:\n return os.path.basename(filepath)\n\n module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]\n return filepath.split(module_dir, 1)[-1].lstrip(os.sep)\n\n\ndef filepath(frame):\n \"\"\"Get the filepath for frame.\"\"\"\n module = frame.f_globals.get(\"__name__\", None)\n filepath = frame.f_code.co_filename\n\n if filepath.endswith(\".pyc\"):\n filepath = filepath[:-1]\n\n if not module:\n return filepath\n return module_filepath(module, filepath)\n\n\nif sys.version_info >= (3, 5):\n\n def stacktrace_walker(tb):\n \"\"\"Iterate over each frame of the stack downards for exceptions.\"\"\"\n for frame, lineno in traceback.walk_tb(tb):\n name = frame.f_code.co_name\n yield {\"file\": filepath(frame), \"line\": lineno, \"function\": name}\n\n def backtrace_walker():\n \"\"\"Iterate over each frame of the stack upwards.\n\n Taken from python3/traceback.ExtractSummary.extract to support\n iterating over the entire stack, but without creating a large\n data structure.\n \"\"\"\n start_frame = sys._getframe().f_back\n for frame, lineno in traceback.walk_stack(start_frame):\n name = frame.f_code.co_name\n yield {\"file\": filepath(frame), \"line\": lineno, \"function\": name}\n\n\nelse:\n\n def stacktrace_walker(tb):\n \"\"\"Iterate over each frame of the stack downards for exceptions.\"\"\"\n while tb is not None:\n lineno = tb.tb_lineno\n name = tb.tb_frame.f_code.co_name\n yield {\n \"file\": filepath(tb.tb_frame),\n \"line\": lineno,\n \"function\": name,\n }\n tb = tb.tb_next\n\n def backtrace_walker():\n \"\"\"Iterate over each frame of the stack upwards.\n\n Taken from python2.7/traceback.extract_stack to support iterating\n over the entire stack, but without creating a large data structure.\n \"\"\"\n try:\n raise ZeroDivisionError\n except ZeroDivisionError:\n # Get the current frame\n frame = sys.exc_info()[2].tb_frame.f_back\n\n while frame is not None:\n lineno = frame.f_lineno\n name = frame.f_code.co_name\n yield {\"file\": filepath(frame), \"line\": lineno, \"function\": name}\n frame = frame.f_back\n\n\ndef capture_backtrace():\n walker = filter_frames(backtrace_walker())\n return list(itertools.islice(walker, LIMIT))\n\n\ndef capture_stacktrace(tb):\n walker = stacktrace_walker(tb)\n return list(reversed(list(itertools.islice(walker, LIMIT))))\n\n\ndef capture():\n warnings.warn(\n \"capture is deprecated, instead use capture_backtrace instead.\",\n DeprecationWarning,\n 2,\n )\n return capture_backtrace()\n", "path": "src/scout_apm/core/backtrace.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport itertools\nimport os\nimport sys\nimport sysconfig\nimport traceback\nimport warnings\nfrom logging import getLogger\n\nlogger = getLogger(__name__)\n\n# Maximum non-Scout frames to target retrieving\nLIMIT = 50\n# How many upper frames from inside Scout to ignore\nIGNORED = 1\n\n\ndef filter_frames(frames):\n \"\"\"Filter the stack trace frames down to non-library code.\"\"\"\n paths = sysconfig.get_paths()\n library_paths = {paths[\"purelib\"], paths[\"platlib\"]}\n for frame in frames:\n if not any(frame[\"file\"].startswith(exclusion) for exclusion in library_paths):\n yield frame\n\n\ndef module_filepath(module, 
filepath):\n \"\"\"Get the filepath relative to the base module.\"\"\"\n root_module_name = module.split(\".\", 1)[0]\n if root_module_name == module:\n return os.path.basename(filepath)\n\n root_module = sys.modules[root_module_name]\n if root_module.__file__:\n module_dir = root_module.__file__.rsplit(os.sep, 2)[0]\n elif root_module.__path__:\n # Default to using the first path specified for the module.\n module_dir = root_module.__path__[0].rsplit(os.sep, 1)[0]\n if len(root_module.__path__) > 1:\n logger.debug(\n \"{} has {} paths. Use the first and ignore the rest.\".format(\n root_module, len(root_module.__path__)\n )\n )\n else:\n # If the file path don't exist, then return the full path.\n return filepath\n return filepath.split(module_dir, 1)[-1].lstrip(os.sep)\n\n\ndef filepath(frame):\n \"\"\"Get the filepath for frame.\"\"\"\n module = frame.f_globals.get(\"__name__\", None)\n filepath = frame.f_code.co_filename\n\n if filepath.endswith(\".pyc\"):\n filepath = filepath[:-1]\n\n if not module:\n return filepath\n return module_filepath(module, filepath)\n\n\nif sys.version_info >= (3, 5):\n\n def stacktrace_walker(tb):\n \"\"\"Iterate over each frame of the stack downards for exceptions.\"\"\"\n for frame, lineno in traceback.walk_tb(tb):\n name = frame.f_code.co_name\n yield {\"file\": filepath(frame), \"line\": lineno, \"function\": name}\n\n def backtrace_walker():\n \"\"\"Iterate over each frame of the stack upwards.\n\n Taken from python3/traceback.ExtractSummary.extract to support\n iterating over the entire stack, but without creating a large\n data structure.\n \"\"\"\n start_frame = sys._getframe().f_back\n for frame, lineno in traceback.walk_stack(start_frame):\n name = frame.f_code.co_name\n yield {\"file\": filepath(frame), \"line\": lineno, \"function\": name}\n\n\nelse:\n\n def stacktrace_walker(tb):\n \"\"\"Iterate over each frame of the stack downards for exceptions.\"\"\"\n while tb is not None:\n lineno = tb.tb_lineno\n name = tb.tb_frame.f_code.co_name\n yield {\n \"file\": filepath(tb.tb_frame),\n \"line\": lineno,\n \"function\": name,\n }\n tb = tb.tb_next\n\n def backtrace_walker():\n \"\"\"Iterate over each frame of the stack upwards.\n\n Taken from python2.7/traceback.extract_stack to support iterating\n over the entire stack, but without creating a large data structure.\n \"\"\"\n try:\n raise ZeroDivisionError\n except ZeroDivisionError:\n # Get the current frame\n frame = sys.exc_info()[2].tb_frame.f_back\n\n while frame is not None:\n lineno = frame.f_lineno\n name = frame.f_code.co_name\n yield {\"file\": filepath(frame), \"line\": lineno, \"function\": name}\n frame = frame.f_back\n\n\ndef capture_backtrace():\n walker = filter_frames(backtrace_walker())\n return list(itertools.islice(walker, LIMIT))\n\n\ndef capture_stacktrace(tb):\n walker = stacktrace_walker(tb)\n return list(reversed(list(itertools.islice(walker, LIMIT))))\n\n\ndef capture():\n warnings.warn(\n \"capture is deprecated, instead use capture_backtrace instead.\",\n DeprecationWarning,\n 2,\n )\n return capture_backtrace()\n", "path": "src/scout_apm/core/backtrace.py"}]} | 1,748 | 388 |
gh_patches_debug_14653 | rasdani/github-patches | git_diff | conda__conda-4327 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Channels in centrally installed .condarc file are being ignored in conda 4.3.4
Hi, I am testing a centrally installed Anaconda setup with Anaconda installed under `C:\Program Files\Anaconda3`. I have a condarc file under `C:\Program Files\Anaconda3\.condarc`.
When I run `conda info`, it tells me that my config file is in the correct location.
config file : C:\Program Files\Anaconda3\.condarc
I have configured a few custom channels in this `.condarc` file, e.g.:
channels:
- http://some.internal/url
I can also use `conda config --system --add channels http://some.internal/url` to set this value, and conda tells me that channels already contains this value.
But when I run `conda config --system --show`, the list of channels is always set to:
channels:
- defaults
It seems that the list of channels in the central `.condarc` file is completely ignored and always replaced by `defaults`. I have also tried to set the list of `default_channels` in the central `.condarc` file, but without success.
Using conda 4.3.4 on win-64.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/__init__.py`
Content:
```
1 # (c) 2012-2016 Continuum Analytics, Inc. / http://continuum.io
2 # All Rights Reserved
3 #
4 # conda is distributed under the terms of the BSD 3-clause license.
5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
6 """OS-agnostic, system-level binary package manager."""
7 from __future__ import absolute_import, division, print_function, unicode_literals
8
9 from os.path import dirname
10
11 from ._vendor.auxlib.packaging import get_version
12 from .common.compat import iteritems, text_type
13
14 __all__ = [
15 "__name__", "__version__", "__author__",
16 "__email__", "__license__", "__copyright__",
17 "__summary__", "__url__",
18 ]
19
20 __name__ = "conda"
21 __version__ = get_version(__file__)
22 __author__ = "Continuum Analytics, Inc."
23 __email__ = "[email protected]"
24 __license__ = "BSD"
25 __summary__ = __doc__
26 __url__ = "https://github.com/conda/conda"
27
28 CONDA_PACKAGE_ROOT = dirname(__file__)
29
30
31 class CondaError(Exception):
32 def __init__(self, message, **kwargs):
33 self.message = message
34 self._kwargs = kwargs
35 super(CondaError, self).__init__(message)
36
37 def __repr__(self):
38 return '%s: %s\n' % (self.__class__.__name__, text_type(self))
39
40 def __str__(self):
41 return text_type(self.message % self._kwargs)
42
43 def dump_map(self):
44 result = dict((k, v) for k, v in iteritems(vars(self)) if not k.startswith('_'))
45 result.update(exception_type=text_type(type(self)),
46 exception_name=self.__class__.__name__,
47 message=text_type(self),
48 error=repr(self),
49 **self._kwargs)
50 return result
51
52
53 class CondaMultiError(CondaError):
54
55 def __init__(self, errors):
56 self.errors = errors
57 super(CondaError, self).__init__(None)
58
59 def __repr__(self):
60 return '\n'.join(repr(e) for e in self.errors) + '\n'
61
62 def __str__(self):
63 return '\n'.join(text_type(e) for e in self.errors) + '\n'
64
65 def dump_map(self):
66 return dict(exception_type=text_type(type(self)),
67 exception_name=self.__class__.__name__,
68 errors=tuple(error.dump_map() for error in self.errors),
69 error="Multiple Errors Encountered.",
70 )
71
72
73 class CondaExitZero(CondaError):
74 pass
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda/__init__.py b/conda/__init__.py
--- a/conda/__init__.py
+++ b/conda/__init__.py
@@ -6,7 +6,9 @@
"""OS-agnostic, system-level binary package manager."""
from __future__ import absolute_import, division, print_function, unicode_literals
+import os
from os.path import dirname
+import sys
from ._vendor.auxlib.packaging import get_version
from .common.compat import iteritems, text_type
@@ -25,6 +27,10 @@
__summary__ = __doc__
__url__ = "https://github.com/conda/conda"
+
+if os.getenv('CONDA_ROOT') is None:
+ os.environ['CONDA_ROOT'] = sys.prefix
+
CONDA_PACKAGE_ROOT = dirname(__file__)
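Note: the three added lines are equivalent to `os.environ.setdefault('CONDA_ROOT', sys.prefix)` and run on the very first `import conda`, i.e. before any configuration files are located. The presumed effect (inferred from the issue rather than traced through conda's config loader) is that the search for the system-level `.condarc` is anchored at `CONDA_ROOT`, so a central install like the reporter's resolves it to `C:\Program Files\Anaconda3\.condarc` instead of silently falling back to `defaults`:

```python
import os
import sys

# One-line equivalent of the guarded assignment added by the patch;
# an externally exported CONDA_ROOT still takes precedence.
os.environ.setdefault('CONDA_ROOT', sys.prefix)
```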
| {"golden_diff": "diff --git a/conda/__init__.py b/conda/__init__.py\n--- a/conda/__init__.py\n+++ b/conda/__init__.py\n@@ -6,7 +6,9 @@\n \"\"\"OS-agnostic, system-level binary package manager.\"\"\"\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n+import os\n from os.path import dirname\n+import sys\n \n from ._vendor.auxlib.packaging import get_version\n from .common.compat import iteritems, text_type\n@@ -25,6 +27,10 @@\n __summary__ = __doc__\n __url__ = \"https://github.com/conda/conda\"\n \n+\n+if os.getenv('CONDA_ROOT') is None:\n+ os.environ['CONDA_ROOT'] = sys.prefix\n+\n CONDA_PACKAGE_ROOT = dirname(__file__)\n", "issue": "Channels in centrally installed .condarc file are being ignored in conda 4.3.4\nHi, I am testing a centrally installed Anaconda setup with Anaconda installed under `C:\\Program Files\\Anaconda3`. I have a condarc file under `C:\\Program Files\\Anaconda3\\.condarc`.\r\n\r\nWhen I run `conda info` it tells me that my config file is under the correct location.\r\n\r\n config file : C:\\Program Files\\Anaconda3\\.condarc\r\n\r\nI have configured a few custom channels in this `.condarc` file, e.g.:\r\n\r\n channels:\r\n - http://some.internal/url\r\n\r\nI can also use `conda config --system --add channels http://some.internal/url` to set this value and conda tells me that channels already contains this value.\r\n\r\nBut when I run `conda config --system --show`, the list of channels is always set to:\r\n\r\n channels:\r\n - defaults\r\n\r\nIt seems that the list of channels in the central `.condarc` file is completely ignored and always replaced by `defaults`. I have also tried to set the list of `default_channels` in the central `.condarc` file but without success.\r\n\r\nUsing conda 4.3.4 on win-64.\r\n\n", "before_files": [{"content": "# (c) 2012-2016 Continuum Analytics, Inc. 
/ http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\"\"\"OS-agnostic, system-level binary package manager.\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom os.path import dirname\n\nfrom ._vendor.auxlib.packaging import get_version\nfrom .common.compat import iteritems, text_type\n\n__all__ = [\n \"__name__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n \"__summary__\", \"__url__\",\n]\n\n__name__ = \"conda\"\n__version__ = get_version(__file__)\n__author__ = \"Continuum Analytics, Inc.\"\n__email__ = \"[email protected]\"\n__license__ = \"BSD\"\n__summary__ = __doc__\n__url__ = \"https://github.com/conda/conda\"\n\nCONDA_PACKAGE_ROOT = dirname(__file__)\n\n\nclass CondaError(Exception):\n def __init__(self, message, **kwargs):\n self.message = message\n self._kwargs = kwargs\n super(CondaError, self).__init__(message)\n\n def __repr__(self):\n return '%s: %s\\n' % (self.__class__.__name__, text_type(self))\n\n def __str__(self):\n return text_type(self.message % self._kwargs)\n\n def dump_map(self):\n result = dict((k, v) for k, v in iteritems(vars(self)) if not k.startswith('_'))\n result.update(exception_type=text_type(type(self)),\n exception_name=self.__class__.__name__,\n message=text_type(self),\n error=repr(self),\n **self._kwargs)\n return result\n\n\nclass CondaMultiError(CondaError):\n\n def __init__(self, errors):\n self.errors = errors\n super(CondaError, self).__init__(None)\n\n def __repr__(self):\n return '\\n'.join(repr(e) for e in self.errors) + '\\n'\n\n def __str__(self):\n return '\\n'.join(text_type(e) for e in self.errors) + '\\n'\n\n def dump_map(self):\n return dict(exception_type=text_type(type(self)),\n exception_name=self.__class__.__name__,\n errors=tuple(error.dump_map() for error in self.errors),\n error=\"Multiple Errors Encountered.\",\n )\n\n\nclass CondaExitZero(CondaError):\n pass\n", "path": "conda/__init__.py"}], "after_files": [{"content": "# (c) 2012-2016 Continuum Analytics, Inc. 
/ http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\"\"\"OS-agnostic, system-level binary package manager.\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom os.path import dirname\nimport sys\n\nfrom ._vendor.auxlib.packaging import get_version\nfrom .common.compat import iteritems, text_type\n\n__all__ = [\n \"__name__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n \"__summary__\", \"__url__\",\n]\n\n__name__ = \"conda\"\n__version__ = get_version(__file__)\n__author__ = \"Continuum Analytics, Inc.\"\n__email__ = \"[email protected]\"\n__license__ = \"BSD\"\n__summary__ = __doc__\n__url__ = \"https://github.com/conda/conda\"\n\n\nif os.getenv('CONDA_ROOT') is None:\n os.environ['CONDA_ROOT'] = sys.prefix\n\nCONDA_PACKAGE_ROOT = dirname(__file__)\n\n\nclass CondaError(Exception):\n def __init__(self, message, **kwargs):\n self.message = message\n self._kwargs = kwargs\n super(CondaError, self).__init__(message)\n\n def __repr__(self):\n return '%s: %s\\n' % (self.__class__.__name__, text_type(self))\n\n def __str__(self):\n return text_type(self.message % self._kwargs)\n\n def dump_map(self):\n result = dict((k, v) for k, v in iteritems(vars(self)) if not k.startswith('_'))\n result.update(exception_type=text_type(type(self)),\n exception_name=self.__class__.__name__,\n message=text_type(self),\n error=repr(self),\n **self._kwargs)\n return result\n\n\nclass CondaMultiError(CondaError):\n\n def __init__(self, errors):\n self.errors = errors\n super(CondaError, self).__init__(None)\n\n def __repr__(self):\n return '\\n'.join(repr(e) for e in self.errors) + '\\n'\n\n def __str__(self):\n return '\\n'.join(text_type(e) for e in self.errors) + '\\n'\n\n def dump_map(self):\n return dict(exception_type=text_type(type(self)),\n exception_name=self.__class__.__name__,\n errors=tuple(error.dump_map() for error in self.errors),\n error=\"Multiple Errors Encountered.\",\n )\n\n\nclass CondaExitZero(CondaError):\n pass\n", "path": "conda/__init__.py"}]} | 1,236 | 181 |
gh_patches_debug_19684 | rasdani/github-patches | git_diff | Azure__azure-cli-extensions-2985 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The parameter for --administration-members is incorrectly stated as optional
For the function 'az powerbi embedded-capacity create', the parameter for --administration-members is incorrectly stated as optional.
If you leave this parameter out, it will give this error:
**BadRequestError: At least one capacity administrator is required**
---
#### Document Details
⚠ *Do not edit this section. It is required for docs.microsoft.com ➟ GitHub issue linking.*
* ID: edf4a4a9-8ff1-c276-3e51-d5e83c180879
* Version Independent ID: de63a28e-4d16-2270-595f-1a67f5e682bd
* Content: [az powerbi embedded-capacity](https://docs.microsoft.com/en-us/cli/azure/ext/powerbidedicated/powerbi/embedded-capacity?view=azure-cli-latest)
* Content Source: [latest/docs-ref-autogen/ext/powerbidedicated/powerbi/embedded-capacity.yml](https://github.com/MicrosoftDocs/azure-docs-cli/blob/master/latest/docs-ref-autogen/ext/powerbidedicated/powerbi/embedded-capacity.yml)
* GitHub Login: @rloutlaw
* Microsoft Alias: **routlaw**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/powerbidedicated/azext_powerbidedicated/_params.py`
Content:
```
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5 # pylint: disable=line-too-long
6 # pylint: disable=too-many-lines
7 # pylint: disable=too-many-statements
8
9 from knack.arguments import CLIArgumentType
10
11 from azure.cli.core.commands.parameters import (
12 tags_type,
13 get_enum_type,
14 resource_group_name_type,
15 get_location_type
16 )
17
18
19 def load_arguments(self, _):
20 name_type = CLIArgumentType(
21 options_list=['--name', '-n'],
22 help='The name of the Dedicated capacity. It must be at least 3 characters in length, and no more than 63.')
23 sku_name_type = CLIArgumentType(
24 arg_type=get_enum_type(['A1', 'A2', 'A3', 'A4', 'A5', 'A6']),
25 help='Name of the SKU level. For more information, please refer to '
26 'https://azure.microsoft.com/en-us/pricing/details/power-bi-embedded/.'
27 )
28 sku_tier_type = CLIArgumentType(
29 arg_type=get_enum_type(['PBIE_Azure']),
30 help='The name of the Azure pricing tier to which the SKU applies.'
31 )
32 administration_type = CLIArgumentType(
33 help='An array of administrator user identities.', nargs='+'
34 )
35
36 with self.argument_context('powerbi embedded-capacity') as c:
37 c.argument('resource_group_name', resource_group_name_type)
38 c.argument('name', name_type)
39
40 with self.argument_context('powerbi embedded-capacity create') as c:
41 c.argument('sku_name', sku_name_type)
42 c.argument('sku_tier', sku_tier_type)
43 c.argument('tags', tags_type)
44 c.argument('administration_members', administration_type)
45 c.argument('location', get_location_type(self.cli_ctx))
46
47 with self.argument_context('powerbi embedded-capacity update') as c:
48 c.argument('sku_name', sku_name_type)
49 c.argument('sku_tier', sku_tier_type)
50 c.argument('tags', tags_type)
51 c.argument('administration_members', administration_type)
52
```
Path: `src/powerbidedicated/setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # --------------------------------------------------------------------------------------------
4 # Copyright (c) Microsoft Corporation. All rights reserved.
5 # Licensed under the MIT License. See License.txt in the project root for license information.
6 # --------------------------------------------------------------------------------------------
7
8
9 from codecs import open
10 from setuptools import setup, find_packages
11 try:
12 from azure_bdist_wheel import cmdclass
13 except ImportError:
14 from distutils import log as logger
15 logger.warn("Wheel is not available, disabling bdist_wheel hook")
16
17 # TODO: Confirm this is the right version number you want and it matches your
18 # HISTORY.rst entry.
19 VERSION = '0.1.1'
20
21 # The full list of classifiers is available at
22 # https://pypi.python.org/pypi?%3Aaction=list_classifiers
23 CLASSIFIERS = [
24 'Development Status :: 4 - Beta',
25 'Intended Audience :: Developers',
26 'Intended Audience :: System Administrators',
27 'Programming Language :: Python',
28 'Programming Language :: Python :: 2',
29 'Programming Language :: Python :: 2.7',
30 'Programming Language :: Python :: 3',
31 'Programming Language :: Python :: 3.4',
32 'Programming Language :: Python :: 3.5',
33 'Programming Language :: Python :: 3.6',
34 'License :: OSI Approved :: MIT License',
35 ]
36
37 # TODO: Add any additional SDK dependencies here
38 DEPENDENCIES = []
39
40 with open('README.md', 'r', encoding='utf-8') as f:
41 README = f.read()
42 with open('HISTORY.rst', 'r', encoding='utf-8') as f:
43 HISTORY = f.read()
44
45 setup(
46 name='powerbidedicated',
47 version=VERSION,
48 description='Microsoft Azure Command-Line Tools PowerBIDedicated Extension',
49 # TODO: Update author and email, if applicable
50 author='Microsoft Corporation',
51 author_email='[email protected]',
52 url='https://github.com/Azure/azure-cli-extensions/tree/master/src/powerbidedicated',
53 long_description=README + '\n\n' + HISTORY,
54 license='MIT',
55 classifiers=CLASSIFIERS,
56 packages=find_packages(),
57 install_requires=DEPENDENCIES,
58 package_data={'azext_powerbidedicated': ['azext_metadata.json']},
59 )
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/powerbidedicated/azext_powerbidedicated/_params.py b/src/powerbidedicated/azext_powerbidedicated/_params.py
--- a/src/powerbidedicated/azext_powerbidedicated/_params.py
+++ b/src/powerbidedicated/azext_powerbidedicated/_params.py
@@ -41,7 +41,7 @@
c.argument('sku_name', sku_name_type)
c.argument('sku_tier', sku_tier_type)
c.argument('tags', tags_type)
- c.argument('administration_members', administration_type)
+ c.argument('administration_members', administration_type, required=True)
c.argument('location', get_location_type(self.cli_ctx))
with self.argument_context('powerbi embedded-capacity update') as c:
diff --git a/src/powerbidedicated/setup.py b/src/powerbidedicated/setup.py
--- a/src/powerbidedicated/setup.py
+++ b/src/powerbidedicated/setup.py
@@ -16,7 +16,7 @@
# TODO: Confirm this is the right version number you want and it matches your
# HISTORY.rst entry.
-VERSION = '0.1.1'
+VERSION = '0.2.0'
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
| {"golden_diff": "diff --git a/src/powerbidedicated/azext_powerbidedicated/_params.py b/src/powerbidedicated/azext_powerbidedicated/_params.py\n--- a/src/powerbidedicated/azext_powerbidedicated/_params.py\n+++ b/src/powerbidedicated/azext_powerbidedicated/_params.py\n@@ -41,7 +41,7 @@\n c.argument('sku_name', sku_name_type)\n c.argument('sku_tier', sku_tier_type)\n c.argument('tags', tags_type)\n- c.argument('administration_members', administration_type)\n+ c.argument('administration_members', administration_type, required=True)\n c.argument('location', get_location_type(self.cli_ctx))\n \n with self.argument_context('powerbi embedded-capacity update') as c:\ndiff --git a/src/powerbidedicated/setup.py b/src/powerbidedicated/setup.py\n--- a/src/powerbidedicated/setup.py\n+++ b/src/powerbidedicated/setup.py\n@@ -16,7 +16,7 @@\n \n # TODO: Confirm this is the right version number you want and it matches your\n # HISTORY.rst entry.\n-VERSION = '0.1.1'\n+VERSION = '0.2.0'\n \n # The full list of classifiers is available at\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n", "issue": "The parameter for --administration-members is incorrectly stated as optional \nFor the function 'az powerbi embedded-capacity create', the parameter for --administration-members is incorrectly stated as optional.\r\nIf you leave this parameter out, it will give this error:\r\n**BadRequestError: At least one capacity administrator is required**\r\n\r\n---\r\n#### Document Details\r\n\r\n\u26a0 *Do not edit this section. It is required for docs.microsoft.com \u279f GitHub issue linking.*\r\n\r\n* ID: edf4a4a9-8ff1-c276-3e51-d5e83c180879\r\n* Version Independent ID: de63a28e-4d16-2270-595f-1a67f5e682bd\r\n* Content: [az powerbi embedded-capacity](https://docs.microsoft.com/en-us/cli/azure/ext/powerbidedicated/powerbi/embedded-capacity?view=azure-cli-latest)\r\n* Content Source: [latest/docs-ref-autogen/ext/powerbidedicated/powerbi/embedded-capacity.yml](https://github.com/MicrosoftDocs/azure-docs-cli/blob/master/latest/docs-ref-autogen/ext/powerbidedicated/powerbi/embedded-capacity.yml)\r\n* GitHub Login: @rloutlaw\r\n* Microsoft Alias: **routlaw**\n", "before_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# pylint: disable=line-too-long\n# pylint: disable=too-many-lines\n# pylint: disable=too-many-statements\n\nfrom knack.arguments import CLIArgumentType\n\nfrom azure.cli.core.commands.parameters import (\n tags_type,\n get_enum_type,\n resource_group_name_type,\n get_location_type\n)\n\n\ndef load_arguments(self, _):\n name_type = CLIArgumentType(\n options_list=['--name', '-n'],\n help='The name of the Dedicated capacity. It must be at least 3 characters in length, and no more than 63.')\n sku_name_type = CLIArgumentType(\n arg_type=get_enum_type(['A1', 'A2', 'A3', 'A4', 'A5', 'A6']),\n help='Name of the SKU level. 
For more information, please refer to '\n 'https://azure.microsoft.com/en-us/pricing/details/power-bi-embedded/.'\n )\n sku_tier_type = CLIArgumentType(\n arg_type=get_enum_type(['PBIE_Azure']),\n help='The name of the Azure pricing tier to which the SKU applies.'\n )\n administration_type = CLIArgumentType(\n help='An array of administrator user identities.', nargs='+'\n )\n\n with self.argument_context('powerbi embedded-capacity') as c:\n c.argument('resource_group_name', resource_group_name_type)\n c.argument('name', name_type)\n\n with self.argument_context('powerbi embedded-capacity create') as c:\n c.argument('sku_name', sku_name_type)\n c.argument('sku_tier', sku_tier_type)\n c.argument('tags', tags_type)\n c.argument('administration_members', administration_type)\n c.argument('location', get_location_type(self.cli_ctx))\n\n with self.argument_context('powerbi embedded-capacity update') as c:\n c.argument('sku_name', sku_name_type)\n c.argument('sku_tier', sku_tier_type)\n c.argument('tags', tags_type)\n c.argument('administration_members', administration_type)\n", "path": "src/powerbidedicated/azext_powerbidedicated/_params.py"}, {"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\ntry:\n from azure_bdist_wheel import cmdclass\nexcept ImportError:\n from distutils import log as logger\n logger.warn(\"Wheel is not available, disabling bdist_wheel hook\")\n\n# TODO: Confirm this is the right version number you want and it matches your\n# HISTORY.rst entry.\nVERSION = '0.1.1'\n\n# The full list of classifiers is available at\n# https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\n# TODO: Add any additional SDK dependencies here\nDEPENDENCIES = []\n\nwith open('README.md', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='powerbidedicated',\n version=VERSION,\n description='Microsoft Azure Command-Line Tools PowerBIDedicated Extension',\n # TODO: Update author and email, if applicable\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/powerbidedicated',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n classifiers=CLASSIFIERS,\n packages=find_packages(),\n install_requires=DEPENDENCIES,\n package_data={'azext_powerbidedicated': ['azext_metadata.json']},\n)\n", "path": "src/powerbidedicated/setup.py"}], "after_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# pylint: disable=line-too-long\n# pylint: disable=too-many-lines\n# pylint: disable=too-many-statements\n\nfrom knack.arguments import CLIArgumentType\n\nfrom azure.cli.core.commands.parameters import (\n tags_type,\n get_enum_type,\n resource_group_name_type,\n get_location_type\n)\n\n\ndef load_arguments(self, _):\n name_type = CLIArgumentType(\n options_list=['--name', '-n'],\n help='The name of the Dedicated capacity. It must be at least 3 characters in length, and no more than 63.')\n sku_name_type = CLIArgumentType(\n arg_type=get_enum_type(['A1', 'A2', 'A3', 'A4', 'A5', 'A6']),\n help='Name of the SKU level. For more information, please refer to '\n 'https://azure.microsoft.com/en-us/pricing/details/power-bi-embedded/.'\n )\n sku_tier_type = CLIArgumentType(\n arg_type=get_enum_type(['PBIE_Azure']),\n help='The name of the Azure pricing tier to which the SKU applies.'\n )\n administration_type = CLIArgumentType(\n help='An array of administrator user identities.', nargs='+'\n )\n\n with self.argument_context('powerbi embedded-capacity') as c:\n c.argument('resource_group_name', resource_group_name_type)\n c.argument('name', name_type)\n\n with self.argument_context('powerbi embedded-capacity create') as c:\n c.argument('sku_name', sku_name_type)\n c.argument('sku_tier', sku_tier_type)\n c.argument('tags', tags_type)\n c.argument('administration_members', administration_type, required=True)\n c.argument('location', get_location_type(self.cli_ctx))\n\n with self.argument_context('powerbi embedded-capacity update') as c:\n c.argument('sku_name', sku_name_type)\n c.argument('sku_tier', sku_tier_type)\n c.argument('tags', tags_type)\n c.argument('administration_members', administration_type)\n", "path": "src/powerbidedicated/azext_powerbidedicated/_params.py"}, {"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\ntry:\n from azure_bdist_wheel import cmdclass\nexcept ImportError:\n from distutils import log as logger\n logger.warn(\"Wheel is not available, disabling bdist_wheel hook\")\n\n# TODO: Confirm this is the right version number you want and it matches your\n# HISTORY.rst entry.\nVERSION = '0.2.0'\n\n# The full list of classifiers is available at\n# https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\n# TODO: Add any additional SDK dependencies here\nDEPENDENCIES = []\n\nwith open('README.md', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='powerbidedicated',\n version=VERSION,\n description='Microsoft Azure Command-Line Tools PowerBIDedicated Extension',\n # TODO: Update author and email, if applicable\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/powerbidedicated',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n classifiers=CLASSIFIERS,\n packages=find_packages(),\n install_requires=DEPENDENCIES,\n package_data={'azext_powerbidedicated': ['azext_metadata.json']},\n)\n", "path": "src/powerbidedicated/setup.py"}]} | 1,727 | 297 |
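For context on the fix above: knack-based Azure CLI commands treat arguments as optional unless they are registered with `required=True`, so the original registration let `az powerbi embedded-capacity create` reach the service with no administrators and fail server-side with `BadRequestError`. A sketch of the corrected registration, reusing the argument type defined in the record (the function runs inside an azure-cli command loader, as in the record itself):

```python
from knack.arguments import CLIArgumentType

administration_type = CLIArgumentType(
    help='An array of administrator user identities.', nargs='+'
)

def load_arguments(self, _):
    with self.argument_context('powerbi embedded-capacity create') as c:
        # required=True makes the CLI reject calls that omit
        # --administration-members instead of failing server-side.
        c.argument('administration_members', administration_type, required=True)
```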
gh_patches_debug_10979 | rasdani/github-patches | git_diff | bokeh__bokeh-10074 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[DOCS] Page wise display of documentation search
**Is your feature request related to a problem?**
Yes. I searched for a relatively simple query in the documentation search bar of https://docs.bokeh.org, and it took a very long time to load the results. On my second try, the results did not load at all. Results are displayed in an unordered list that fills up the entire page, and it gets frustrating to read through everything to find the answer to the input query.
**Describe the solution you'd like**
I would suggest displaying the fetched results in a page-wise format, the way most search engines do: answers sorted by relevance weight and shown page by page. Fill only the current page with about 20 to 30 query results, and load further pages only when the user asks to see them.
**Describe alternatives you've considered**
If not a page-wise result, a folder-wise view would also help, leaving the user the option to navigate wherever they want. A custom Google search may also help.
**Additional context**

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sphinx/docserver.py`
Content:
```
1 import os
2 import sys
3 import threading
4 import time
5 import webbrowser
6
7 import flask
8 import tornado
9 from tornado.httpserver import HTTPServer
10 from tornado.ioloop import IOLoop
11 from tornado.wsgi import WSGIContainer
12
13 _basedir = os.path.join("..", os.path.dirname(__file__))
14
15 app = flask.Flask(__name__, static_folder="/unused")
16 PORT=5009
17 http_server = HTTPServer(WSGIContainer(app))
18
19 @app.route('/')
20 def welcome():
21 return """
22 <h1>Welcome to the Bokeh documentation server</h1>
23 You probably want to go to <a href="/en/latest/index.html"> Index</a>
24 """
25
26 @app.route('/versions.json')
27 def send_versions():
28 return flask.send_from_directory(
29 os.path.join(_basedir, "sphinx"), "test_versions.json")
30
31 @app.route('/alert.html')
32 def send_alert():
33 return os.environ.get("BOKEH_DOCS_ALERT", "")
34
35 @app.route('/en/latest/<path:filename>')
36 def send_docs(filename):
37 return flask.send_from_directory(
38 os.path.join(_basedir, "sphinx/build/html/"), filename)
39
40 def open_browser():
41 # Child process
42 time.sleep(0.5)
43 webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new="tab")
44
45 data = {}
46
47 def serve_http():
48 data['ioloop'] = IOLoop()
49 http_server.listen(PORT)
50 IOLoop.current().start()
51
52 def shutdown_server():
53 ioloop = data['ioloop']
54 ioloop.add_callback(ioloop.stop)
55 print("Asked Server to shut down.")
56
57 def ui():
58 try:
59 time.sleep(0.5)
60 input("Press <ENTER> to exit...\n") # lgtm [py/use-of-input]
61 except KeyboardInterrupt:
62 pass
63
64 if __name__ == "__main__":
65
66 if tornado.version_info[0] == 4:
67 print('docserver.py script requires tornado 5 or higher')
68 sys.exit(1)
69
70 print("\nStarting Bokeh plot server on port %d..." % PORT)
71 print("Visit http://localhost:%d/en/latest/index.html to see plots\n" % PORT)
72
73 t_server = threading.Thread(target=serve_http)
74 t_server.start()
75 t_browser = threading.Thread(target=open_browser)
76 t_browser.start()
77
78 ui()
79
80 shutdown_server()
81 t_server.join()
82 t_browser.join()
83 print("Server shut down.")
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sphinx/docserver.py b/sphinx/docserver.py
--- a/sphinx/docserver.py
+++ b/sphinx/docserver.py
@@ -1,3 +1,4 @@
+import asyncio
import os
import sys
import threading
@@ -10,6 +11,11 @@
from tornado.ioloop import IOLoop
from tornado.wsgi import WSGIContainer
+# Needed for Windows + Python 3.8 config
+if sys.version_info.major==3 and sys.version_info.minor >= 8 and sys.platform.startswith('win'):
+ asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
+
+
_basedir = os.path.join("..", os.path.dirname(__file__))
app = flask.Flask(__name__, static_folder="/unused")
| {"golden_diff": "diff --git a/sphinx/docserver.py b/sphinx/docserver.py\n--- a/sphinx/docserver.py\n+++ b/sphinx/docserver.py\n@@ -1,3 +1,4 @@\n+import asyncio\n import os\n import sys\n import threading\n@@ -10,6 +11,11 @@\n from tornado.ioloop import IOLoop\n from tornado.wsgi import WSGIContainer\n \n+# Needed for Windows + Python 3.8 config\n+if sys.version_info.major==3 and sys.version_info.minor >= 8 and sys.platform.startswith('win'):\n+ asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n+\n+\n _basedir = os.path.join(\"..\", os.path.dirname(__file__))\n \n app = flask.Flask(__name__, static_folder=\"/unused\")\n", "issue": "[DOCS] Page wise display of documentation search \n**Is your feature request related to a problem?**\r\nYes. I searched for a relatively simple query in the documentation search bar of https://docs.bokeh.org, and it took very long to load the results. In my second try, the results weren't even loading, I'm afraid. These are displayed in an unordered list which fills the entire page up. It might get frustrating to read through everything to find the answer to the input query. \r\n\r\n**Describe the solution you'd like**\r\nI would suggest displaying the fetched results in a page wise format, the way most search engines do it. Relevance weighted sorted answer, shown page wise. Fill up only the current page of about 20 to 30 odd query results, and depending on whether the user wants to see the other pages, load them.\r\n\r\n**Describe alternatives you've considered**\r\nIf not a page wise result, a folder wise result would also benefit, which leaves the option to the user to navigate where he/she wants to. A custom google search may also help.\r\n\r\n**Additional context**\r\n\r\n\r\n\n", "before_files": [{"content": "import os\nimport sys\nimport threading\nimport time\nimport webbrowser\n\nimport flask\nimport tornado\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.wsgi import WSGIContainer\n\n_basedir = os.path.join(\"..\", os.path.dirname(__file__))\n\napp = flask.Flask(__name__, static_folder=\"/unused\")\nPORT=5009\nhttp_server = HTTPServer(WSGIContainer(app))\n\[email protected]('/')\ndef welcome():\n return \"\"\"\n <h1>Welcome to the Bokeh documentation server</h1>\n You probably want to go to <a href=\"/en/latest/index.html\"> Index</a>\n \"\"\"\n\[email protected]('/versions.json')\ndef send_versions():\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx\"), \"test_versions.json\")\n\[email protected]('/alert.html')\ndef send_alert():\n return os.environ.get(\"BOKEH_DOCS_ALERT\", \"\")\n\[email protected]('/en/latest/<path:filename>')\ndef send_docs(filename):\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx/build/html/\"), filename)\n\ndef open_browser():\n # Child process\n time.sleep(0.5)\n webbrowser.open(\"http://localhost:%d/en/latest/index.html\" % PORT, new=\"tab\")\n\ndata = {}\n\ndef serve_http():\n data['ioloop'] = IOLoop()\n http_server.listen(PORT)\n IOLoop.current().start()\n\ndef shutdown_server():\n ioloop = data['ioloop']\n ioloop.add_callback(ioloop.stop)\n print(\"Asked Server to shut down.\")\n\ndef ui():\n try:\n time.sleep(0.5)\n input(\"Press <ENTER> to exit...\\n\") # lgtm [py/use-of-input]\n except KeyboardInterrupt:\n pass\n\nif __name__ == \"__main__\":\n\n if tornado.version_info[0] == 4:\n print('docserver.py script requires tornado 5 or higher')\n sys.exit(1)\n\n print(\"\\nStarting Bokeh plot server on port %d...\" % 
PORT)\n print(\"Visit http://localhost:%d/en/latest/index.html to see plots\\n\" % PORT)\n\n t_server = threading.Thread(target=serve_http)\n t_server.start()\n t_browser = threading.Thread(target=open_browser)\n t_browser.start()\n\n ui()\n\n shutdown_server()\n t_server.join()\n t_browser.join()\n print(\"Server shut down.\")\n", "path": "sphinx/docserver.py"}], "after_files": [{"content": "import asyncio\nimport os\nimport sys\nimport threading\nimport time\nimport webbrowser\n\nimport flask\nimport tornado\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.wsgi import WSGIContainer\n\n# Needed for Windows + Python 3.8 config\nif sys.version_info.major==3 and sys.version_info.minor >= 8 and sys.platform.startswith('win'):\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n\n\n_basedir = os.path.join(\"..\", os.path.dirname(__file__))\n\napp = flask.Flask(__name__, static_folder=\"/unused\")\nPORT=5009\nhttp_server = HTTPServer(WSGIContainer(app))\n\[email protected]('/')\ndef welcome():\n return \"\"\"\n <h1>Welcome to the Bokeh documentation server</h1>\n You probably want to go to <a href=\"/en/latest/index.html\"> Index</a>\n \"\"\"\n\[email protected]('/versions.json')\ndef send_versions():\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx\"), \"test_versions.json\")\n\[email protected]('/alert.html')\ndef send_alert():\n return os.environ.get(\"BOKEH_DOCS_ALERT\", \"\")\n\[email protected]('/en/latest/<path:filename>')\ndef send_docs(filename):\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx/build/html/\"), filename)\n\ndef open_browser():\n # Child process\n time.sleep(0.5)\n webbrowser.open(\"http://localhost:%d/en/latest/index.html\" % PORT, new=\"tab\")\n\ndata = {}\n\ndef serve_http():\n data['ioloop'] = IOLoop()\n http_server.listen(PORT)\n IOLoop.current().start()\n\ndef shutdown_server():\n ioloop = data['ioloop']\n ioloop.add_callback(ioloop.stop)\n print(\"Asked Server to shut down.\")\n\ndef ui():\n try:\n time.sleep(0.5)\n input(\"Press <ENTER> to exit...\\n\") # lgtm [py/use-of-input]\n except KeyboardInterrupt:\n pass\n\nif __name__ == \"__main__\":\n\n if tornado.version_info[0] == 4:\n print('docserver.py script requires tornado 5 or higher')\n sys.exit(1)\n\n print(\"\\nStarting Bokeh plot server on port %d...\" % PORT)\n print(\"Visit http://localhost:%d/en/latest/index.html to see plots\\n\" % PORT)\n\n t_server = threading.Thread(target=serve_http)\n t_server.start()\n t_browser = threading.Thread(target=open_browser)\n t_browser.start()\n\n ui()\n\n shutdown_server()\n t_server.join()\n t_browser.join()\n print(\"Server shut down.\")\n", "path": "sphinx/docserver.py"}]} | 1,253 | 170 |
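One note on the golden diff above: it changes the event-loop setup of `sphinx/docserver.py`. Python 3.8 switched the default asyncio event loop on Windows to the Proactor loop, which Tornado's selector-based networking cannot use, so the patch pins the selector policy before the Tornado server starts. The guard, lifted from the patch as a standalone snippet:

```python
import asyncio
import sys

# Python 3.8 on Windows defaults to the Proactor event loop, which
# Tornado cannot use; force the selector policy before starting it.
if sys.version_info.major == 3 and sys.version_info.minor >= 8 \
        and sys.platform.startswith('win'):
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
```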
gh_patches_debug_31758 | rasdani/github-patches | git_diff | docker__docker-py-384 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Do not support sslv3 (poodle)
In Docker 1.3.1 (coming very soon), only TLS1.0+ will be supported.
Ping @shin-
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/tls.py`
Content:
```
1 import os
2
3 from . import errors
4 from .ssladapter import ssladapter
5
6
7 class TLSConfig(object):
8 cert = None
9 verify = None
10 ssl_version = None
11
12 def __init__(self, client_cert=None, ca_cert=None, verify=None,
13 ssl_version=None, assert_hostname=None):
14 # Argument compatibility/mapping with
15 # http://docs.docker.com/examples/https/
16 # This diverges from the Docker CLI in that users can specify 'tls'
17 # here, but also disable any public/default CA pool verification by
18 # leaving tls_verify=False
19
20 # urllib3 sets a default ssl_version if ssl_version is None
21 # http://tinyurl.com/kxga8hb
22 self.ssl_version = ssl_version
23 self.assert_hostname = assert_hostname
24
25 # "tls" and "tls_verify" must have both or neither cert/key files
26 # In either case, Alert the user when both are expected, but any are
27 # missing.
28
29 if client_cert:
30 try:
31 tls_cert, tls_key = client_cert
32 except ValueError:
33 raise errors.TLSParameterError(
34 'client_config must be a tuple of'
35 ' (client certificate, key file)'
36 )
37
38 if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
39 not os.path.isfile(tls_key)):
40 raise errors.TLSParameterError(
41 'Path to a certificate and key files must be provided'
42 ' through the client_config param'
43 )
44 self.cert = (tls_cert, tls_key)
45
46 # Either set verify to True (public/default CA checks) or to the
47 # path of a CA Cert file.
48 if verify is not None:
49 if not ca_cert:
50 self.verify = verify
51 elif os.path.isfile(ca_cert):
52 if not verify:
53 raise errors.TLSParameterError(
54 'verify can not be False when a CA cert is'
55 ' provided.'
56 )
57 self.verify = ca_cert
58 else:
59 raise errors.TLSParameterError(
60 'Invalid CA certificate provided for `tls_ca_cert`.'
61 )
62
63 def configure_client(self, client):
64 client.ssl_version = self.ssl_version
65 if self.verify is not None:
66 client.verify = self.verify
67 if self.cert:
68 client.cert = self.cert
69 client.mount('https://', ssladapter.SSLAdapter(
70 ssl_version=self.ssl_version,
71 assert_hostname=self.assert_hostname,
72 ))
73
```
Path: `docker/ssladapter/ssladapter.py`
Content:
```
1 """ Resolves OpenSSL issues in some servers:
2 https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
3 https://github.com/kennethreitz/requests/pull/799
4 """
5 from distutils.version import StrictVersion
6 from requests.adapters import HTTPAdapter
7 try:
8 import requests.packages.urllib3 as urllib3
9 except ImportError:
10 import urllib3
11
12
13 PoolManager = urllib3.poolmanager.PoolManager
14
15
16 class SSLAdapter(HTTPAdapter):
17 '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
18 def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):
19 self.ssl_version = ssl_version
20 self.assert_hostname = assert_hostname
21 super(SSLAdapter, self).__init__(**kwargs)
22
23 def init_poolmanager(self, connections, maxsize, block=False):
24 kwargs = {
25 'num_pools': connections,
26 'maxsize': maxsize,
27 'block': block,
28 'assert_hostname': self.assert_hostname,
29 }
30 if self.can_override_ssl_version():
31 kwargs['ssl_version'] = self.ssl_version
32
33 self.poolmanager = PoolManager(**kwargs)
34
35 def can_override_ssl_version(self):
36 urllib_ver = urllib3.__version__.split('-')[0]
37 if urllib_ver is None:
38 return False
39 if urllib_ver == 'dev':
40 return True
41 return StrictVersion(urllib_ver) > StrictVersion('1.5')
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/ssladapter/ssladapter.py b/docker/ssladapter/ssladapter.py
--- a/docker/ssladapter/ssladapter.py
+++ b/docker/ssladapter/ssladapter.py
@@ -4,6 +4,7 @@
"""
from distutils.version import StrictVersion
from requests.adapters import HTTPAdapter
+import ssl
try:
import requests.packages.urllib3 as urllib3
except ImportError:
@@ -13,9 +14,19 @@
PoolManager = urllib3.poolmanager.PoolManager
+def get_max_tls_protocol():
+ protocols = ('PROTOCOL_TLSv1_2',
+ 'PROTOCOL_TLSv1_1',
+ 'PROTOCOL_TLSv1')
+ for proto in protocols:
+ if hasattr(ssl, proto):
+ return proto
+
+
class SSLAdapter(HTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):
+ ssl_version = ssl_version or get_max_tls_protocol()
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
super(SSLAdapter, self).__init__(**kwargs)
diff --git a/docker/tls.py b/docker/tls.py
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -17,8 +17,11 @@
# here, but also disable any public/default CA pool verification by
# leaving tls_verify=False
- # urllib3 sets a default ssl_version if ssl_version is None
- # http://tinyurl.com/kxga8hb
+ # urllib3 sets a default ssl_version if ssl_version is None,
+ # but that default is the vulnerable PROTOCOL_SSLv23 selection,
+ # so we override the default with the maximum supported in the running
+ # Python interpeter up to TLS 1.2. (see: http://tinyurl.com/kxga8hb)
+ ssl_version = ssl_version or ssladapter.get_max_tls_protocol()
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
| {"golden_diff": "diff --git a/docker/ssladapter/ssladapter.py b/docker/ssladapter/ssladapter.py\n--- a/docker/ssladapter/ssladapter.py\n+++ b/docker/ssladapter/ssladapter.py\n@@ -4,6 +4,7 @@\n \"\"\"\n from distutils.version import StrictVersion\n from requests.adapters import HTTPAdapter\n+import ssl\n try:\n import requests.packages.urllib3 as urllib3\n except ImportError:\n@@ -13,9 +14,19 @@\n PoolManager = urllib3.poolmanager.PoolManager\n \n \n+def get_max_tls_protocol():\n+ protocols = ('PROTOCOL_TLSv1_2',\n+ 'PROTOCOL_TLSv1_1',\n+ 'PROTOCOL_TLSv1')\n+ for proto in protocols:\n+ if hasattr(ssl, proto):\n+ return proto\n+\n+\n class SSLAdapter(HTTPAdapter):\n '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''\n def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):\n+ ssl_version = ssl_version or get_max_tls_protocol()\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n super(SSLAdapter, self).__init__(**kwargs)\ndiff --git a/docker/tls.py b/docker/tls.py\n--- a/docker/tls.py\n+++ b/docker/tls.py\n@@ -17,8 +17,11 @@\n # here, but also disable any public/default CA pool verification by\n # leaving tls_verify=False\n \n- # urllib3 sets a default ssl_version if ssl_version is None\n- # http://tinyurl.com/kxga8hb\n+ # urllib3 sets a default ssl_version if ssl_version is None,\n+ # but that default is the vulnerable PROTOCOL_SSLv23 selection,\n+ # so we override the default with the maximum supported in the running\n+ # Python interpeter up to TLS 1.2. (see: http://tinyurl.com/kxga8hb)\n+ ssl_version = ssl_version or ssladapter.get_max_tls_protocol()\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n", "issue": "Do not support sslv3 (poodle)\nIn Docker 1.3.1 (coming very soon), only TLS1.0+ will be supported.\nPing @shin- \n\n", "before_files": [{"content": "import os\n\nfrom . 
import errors\nfrom .ssladapter import ssladapter\n\n\nclass TLSConfig(object):\n cert = None\n verify = None\n ssl_version = None\n\n def __init__(self, client_cert=None, ca_cert=None, verify=None,\n ssl_version=None, assert_hostname=None):\n # Argument compatibility/mapping with\n # http://docs.docker.com/examples/https/\n # This diverges from the Docker CLI in that users can specify 'tls'\n # here, but also disable any public/default CA pool verification by\n # leaving tls_verify=False\n\n # urllib3 sets a default ssl_version if ssl_version is None\n # http://tinyurl.com/kxga8hb\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n\n # \"tls\" and \"tls_verify\" must have both or neither cert/key files\n # In either case, Alert the user when both are expected, but any are\n # missing.\n\n if client_cert:\n try:\n tls_cert, tls_key = client_cert\n except ValueError:\n raise errors.TLSParameterError(\n 'client_config must be a tuple of'\n ' (client certificate, key file)'\n )\n\n if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or\n not os.path.isfile(tls_key)):\n raise errors.TLSParameterError(\n 'Path to a certificate and key files must be provided'\n ' through the client_config param'\n )\n self.cert = (tls_cert, tls_key)\n\n # Either set verify to True (public/default CA checks) or to the\n # path of a CA Cert file.\n if verify is not None:\n if not ca_cert:\n self.verify = verify\n elif os.path.isfile(ca_cert):\n if not verify:\n raise errors.TLSParameterError(\n 'verify can not be False when a CA cert is'\n ' provided.'\n )\n self.verify = ca_cert\n else:\n raise errors.TLSParameterError(\n 'Invalid CA certificate provided for `tls_ca_cert`.'\n )\n\n def configure_client(self, client):\n client.ssl_version = self.ssl_version\n if self.verify is not None:\n client.verify = self.verify\n if self.cert:\n client.cert = self.cert\n client.mount('https://', ssladapter.SSLAdapter(\n ssl_version=self.ssl_version,\n assert_hostname=self.assert_hostname,\n ))\n", "path": "docker/tls.py"}, {"content": "\"\"\" Resolves OpenSSL issues in some servers:\n https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/\n https://github.com/kennethreitz/requests/pull/799\n\"\"\"\nfrom distutils.version import StrictVersion\nfrom requests.adapters import HTTPAdapter\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n\n\nPoolManager = urllib3.poolmanager.PoolManager\n\n\nclass SSLAdapter(HTTPAdapter):\n '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''\n def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n super(SSLAdapter, self).__init__(**kwargs)\n\n def init_poolmanager(self, connections, maxsize, block=False):\n kwargs = {\n 'num_pools': connections,\n 'maxsize': maxsize,\n 'block': block,\n 'assert_hostname': self.assert_hostname,\n }\n if self.can_override_ssl_version():\n kwargs['ssl_version'] = self.ssl_version\n\n self.poolmanager = PoolManager(**kwargs)\n\n def can_override_ssl_version(self):\n urllib_ver = urllib3.__version__.split('-')[0]\n if urllib_ver is None:\n return False\n if urllib_ver == 'dev':\n return True\n return StrictVersion(urllib_ver) > StrictVersion('1.5')\n", "path": "docker/ssladapter/ssladapter.py"}], "after_files": [{"content": "import os\n\nfrom . 
import errors\nfrom .ssladapter import ssladapter\n\n\nclass TLSConfig(object):\n cert = None\n verify = None\n ssl_version = None\n\n def __init__(self, client_cert=None, ca_cert=None, verify=None,\n ssl_version=None, assert_hostname=None):\n # Argument compatibility/mapping with\n # http://docs.docker.com/examples/https/\n # This diverges from the Docker CLI in that users can specify 'tls'\n # here, but also disable any public/default CA pool verification by\n # leaving tls_verify=False\n\n # urllib3 sets a default ssl_version if ssl_version is None,\n # but that default is the vulnerable PROTOCOL_SSLv23 selection,\n # so we override the default with the maximum supported in the running\n # Python interpeter up to TLS 1.2. (see: http://tinyurl.com/kxga8hb)\n ssl_version = ssl_version or ssladapter.get_max_tls_protocol()\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n\n # \"tls\" and \"tls_verify\" must have both or neither cert/key files\n # In either case, Alert the user when both are expected, but any are\n # missing.\n\n if client_cert:\n try:\n tls_cert, tls_key = client_cert\n except ValueError:\n raise errors.TLSParameterError(\n 'client_config must be a tuple of'\n ' (client certificate, key file)'\n )\n\n if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or\n not os.path.isfile(tls_key)):\n raise errors.TLSParameterError(\n 'Path to a certificate and key files must be provided'\n ' through the client_config param'\n )\n self.cert = (tls_cert, tls_key)\n\n # Either set verify to True (public/default CA checks) or to the\n # path of a CA Cert file.\n if verify is not None:\n if not ca_cert:\n self.verify = verify\n elif os.path.isfile(ca_cert):\n if not verify:\n raise errors.TLSParameterError(\n 'verify can not be False when a CA cert is'\n ' provided.'\n )\n self.verify = ca_cert\n else:\n raise errors.TLSParameterError(\n 'Invalid CA certificate provided for `tls_ca_cert`.'\n )\n\n def configure_client(self, client):\n client.ssl_version = self.ssl_version\n if self.verify is not None:\n client.verify = self.verify\n if self.cert:\n client.cert = self.cert\n client.mount('https://', ssladapter.SSLAdapter(\n ssl_version=self.ssl_version,\n assert_hostname=self.assert_hostname,\n ))\n", "path": "docker/tls.py"}, {"content": "\"\"\" Resolves OpenSSL issues in some servers:\n https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/\n https://github.com/kennethreitz/requests/pull/799\n\"\"\"\nfrom distutils.version import StrictVersion\nfrom requests.adapters import HTTPAdapter\nimport ssl\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n\n\nPoolManager = urllib3.poolmanager.PoolManager\n\n\ndef get_max_tls_protocol():\n protocols = ('PROTOCOL_TLSv1_2',\n 'PROTOCOL_TLSv1_1',\n 'PROTOCOL_TLSv1')\n for proto in protocols:\n if hasattr(ssl, proto):\n return proto\n\n\nclass SSLAdapter(HTTPAdapter):\n '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''\n def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):\n ssl_version = ssl_version or get_max_tls_protocol()\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n super(SSLAdapter, self).__init__(**kwargs)\n\n def init_poolmanager(self, connections, maxsize, block=False):\n kwargs = {\n 'num_pools': connections,\n 'maxsize': maxsize,\n 'block': block,\n 'assert_hostname': self.assert_hostname,\n }\n if self.can_override_ssl_version():\n kwargs['ssl_version'] = self.ssl_version\n\n self.poolmanager 
= PoolManager(**kwargs)\n\n def can_override_ssl_version(self):\n urllib_ver = urllib3.__version__.split('-')[0]\n if urllib_ver is None:\n return False\n if urllib_ver == 'dev':\n return True\n return StrictVersion(urllib_ver) > StrictVersion('1.5')\n", "path": "docker/ssladapter/ssladapter.py"}]} | 1,376 | 458 |
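Background on the fix above: urllib3's default at the time was `PROTOCOL_SSLv23`, which can negotiate down to SSLv3 and is therefore exposed to POODLE. The patch instead probes the interpreter for the newest TLS constant available, up to TLS 1.2, and feeds it to the adapter. The helper from the diff, runnable on its own (note that it returns the constant's *name* as a string):

```python
import ssl

def get_max_tls_protocol():
    # Prefer the newest TLS version constant the running Python ships;
    # never fall back to an SSLv3-capable default.
    protocols = ('PROTOCOL_TLSv1_2',
                 'PROTOCOL_TLSv1_1',
                 'PROTOCOL_TLSv1')
    for proto in protocols:
        if hasattr(ssl, proto):
            return proto

print(get_max_tls_protocol())  # 'PROTOCOL_TLSv1_2' on any modern Python
```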
gh_patches_debug_36699 | rasdani/github-patches | git_diff | secdev__scapy-2078 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: module 'os' has no attribute 'popen2' when using the voip module
Hello, I have installed the latest scapy version. I want to use the voip module, but I get the error
AttributeError: module 'os' has no attribute 'popen2' when calling the voip_play function.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scapy/modules/voip.py`
Content:
```
1 # This file is part of Scapy
2 # See http://www.secdev.org/projects/scapy for more information
3 # Copyright (C) Philippe Biondi <[email protected]>
4 # This program is published under a GPLv2 license
5
6 """
7 VoIP (Voice over IP) related functions
8 """
9
10 from __future__ import absolute_import
11 import os
12 ###################
13 # Listen VoIP #
14 ###################
15
16 from scapy.sendrecv import sniff
17 from scapy.layers.inet import IP, UDP
18 from scapy.layers.rtp import RTP
19 from scapy.consts import WINDOWS
20 from scapy.config import conf
21 from scapy.modules.six.moves import range
22
23
24 sox_base = "sox -t .ul %s - -t ossdsp /dev/dsp"
25
26 if WINDOWS:
27 if conf.prog.sox is None:
28 raise OSError("Sox must be installed to play VoIP packets")
29 sox_base = "\"" + conf.prog.sox + "\" -t .ul %s - -t waveaudio"
30
31
32 def _merge_sound_bytes(x, y, sample_size=2):
33 # TODO: find a better way to merge sound bytes
34 # This will only add them one next to each other:
35 # \xff + \xff ==> \xff\xff
36 m = ""
37 ss = sample_size
38 min_ = 0
39 if len(x) >= len(y):
40 min_ = y
41 elif len(x) < len(y):
42 min_ = x
43 r_ = len(min_)
44 for i in range(r_ / ss):
45 m += x[ss * i:ss * (i + 1)] + y[ss * i:ss * (i + 1)]
46 return x[r_:], y[r_:], m
47
48
49 def voip_play(s1, lst=None, **kargs):
50 """Play VoIP packets with RAW data that
51 are either sniffed either from an IP, or
52 specified as a list.
53
54 It will play only the incoming packets !
55
56 :param s1: The IP of the src of all VoIP packets.
57 :param lst: (optional) A list of packets to load
58 :type s1: string
59 :type lst: list
60
61 :Example:
62
63 >>> voip_play("64.2.142.189")
64 while calling '[email protected]'
65
66 >>> voip_play("64.2.142.189", lst)
67 with list a list of packets with VoIP data
68 in their RAW layer
69
70 .. seealso:: voip_play2
71 to play both the outcoming and incoming packets
72 at the same time.
73
74 .. seealso:: voip_play3
75 to read RTP VoIP packets
76 """
77
78 dsp, rd = os.popen2(sox_base % "")
79
80 def play(pkt):
81 if not pkt:
82 return
83 if not pkt.haslayer(UDP) or not pkt.haslayer(IP):
84 return
85 ip = pkt.getlayer(IP)
86 if s1 == ip.src:
87 dsp.write(pkt.getlayer(conf.raw_layer).load[12:])
88 try:
89 if lst is None:
90 sniff(store=0, prn=play, **kargs)
91 else:
92 for p in lst:
93 play(p)
94 finally:
95 dsp.close()
96 rd.close()
97
98
99 def voip_play1(s1, lst=None, **kargs):
100 """Same than voip_play, backward compatibility
101 """
102 return voip_play(s1, lst, **kargs)
103
104
105 def voip_play2(s1, **kargs):
106 """
107 Same than voip_play, but will play
108 both incoming and outcoming packets.
109 The sound will surely suffer distortion.
110
111 Only supports sniffing.
112
113 .. seealso:: voip_play
114 to play only incoming packets.
115 """
116 dsp, rd = os.popen2(sox_base % "-c 2")
117 global x1, x2
118 x1 = ""
119 x2 = ""
120
121 def play(pkt):
122 global x1, x2
123 if not pkt:
124 return
125 if not pkt.haslayer(UDP) or not pkt.haslayer(IP):
126 return
127 ip = pkt.getlayer(IP)
128 if s1 in [ip.src, ip.dst]:
129 if ip.dst == s1:
130 x1 += pkt.getlayer(conf.raw_layer).load[12:]
131 else:
132 x2 += pkt.getlayer(conf.raw_layer).load[12:]
133 x1, x2, r = _merge_sound_bytes(x1, x2)
134 dsp.write(r)
135
136 sniff(store=0, prn=play, **kargs)
137
138
139 def voip_play3(lst=None, **kargs):
140 """Same than voip_play, but made to
141 read and play VoIP RTP packets, without
142 checking IP.
143
144 .. seealso:: voip_play
145 for basic VoIP packets
146 """
147 dsp, rd = os.popen2(sox_base % "")
148
149 def play(pkt, dsp=dsp):
150 if pkt and pkt.haslayer(UDP) and pkt.haslayer(RTP):
151 dsp.write(pkt.getlayer(RTP).load)
152 try:
153 if lst is None:
154 sniff(store=0, prn=play, **kargs)
155 else:
156 for p in lst:
157 play(p)
158 finally:
159 try:
160 dsp.close()
161 rd.close()
162 except Exception:
163 pass
164
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scapy/modules/voip.py b/scapy/modules/voip.py
--- a/scapy/modules/voip.py
+++ b/scapy/modules/voip.py
@@ -8,7 +8,7 @@
"""
from __future__ import absolute_import
-import os
+import subprocess
###################
# Listen VoIP #
###################
@@ -21,12 +21,12 @@
from scapy.modules.six.moves import range
-sox_base = "sox -t .ul %s - -t ossdsp /dev/dsp"
+sox_base = (["sox", "-t", ".ul"], ["-", "-t", "ossdsp", "/dev/dsp"])
if WINDOWS:
if conf.prog.sox is None:
raise OSError("Sox must be installed to play VoIP packets")
- sox_base = "\"" + conf.prog.sox + "\" -t .ul %s - -t waveaudio"
+ sox_base = ([conf.prog.sox, "-t", ".ul"], ["-", "-t", "waveaudio"])
def _merge_sound_bytes(x, y, sample_size=2):
@@ -75,7 +75,9 @@
to read RTP VoIP packets
"""
- dsp, rd = os.popen2(sox_base % "")
+ proc = subprocess.Popen(sox_base[0] + sox_base[1], stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ dsp, rd = proc.stdin, proc.stdout
def play(pkt):
if not pkt:
@@ -113,7 +115,9 @@
.. seealso:: voip_play
to play only incoming packets.
"""
- dsp, rd = os.popen2(sox_base % "-c 2")
+ proc = subprocess.Popen(sox_base[0] + ["-c", "2"] + sox_base[1],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ dsp, rd = proc.stdin, proc.stdout
global x1, x2
x1 = ""
x2 = ""
@@ -133,7 +137,14 @@
x1, x2, r = _merge_sound_bytes(x1, x2)
dsp.write(r)
- sniff(store=0, prn=play, **kargs)
+ try:
+ sniff(store=0, prn=play, **kargs)
+ finally:
+ try:
+ dsp.close()
+ rd.close()
+ except Exception:
+ pass
def voip_play3(lst=None, **kargs):
@@ -144,7 +155,9 @@
.. seealso:: voip_play
for basic VoIP packets
"""
- dsp, rd = os.popen2(sox_base % "")
+ proc = subprocess.Popen(sox_base[0] + sox_base[1], stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ dsp, rd = proc.stdin, proc.stdout
def play(pkt, dsp=dsp):
if pkt and pkt.haslayer(UDP) and pkt.haslayer(RTP):
| {"golden_diff": "diff --git a/scapy/modules/voip.py b/scapy/modules/voip.py\n--- a/scapy/modules/voip.py\n+++ b/scapy/modules/voip.py\n@@ -8,7 +8,7 @@\n \"\"\"\n \n from __future__ import absolute_import\n-import os\n+import subprocess\n ###################\n # Listen VoIP #\n ###################\n@@ -21,12 +21,12 @@\n from scapy.modules.six.moves import range\n \n \n-sox_base = \"sox -t .ul %s - -t ossdsp /dev/dsp\"\n+sox_base = ([\"sox\", \"-t\", \".ul\"], [\"-\", \"-t\", \"ossdsp\", \"/dev/dsp\"])\n \n if WINDOWS:\n if conf.prog.sox is None:\n raise OSError(\"Sox must be installed to play VoIP packets\")\n- sox_base = \"\\\"\" + conf.prog.sox + \"\\\" -t .ul %s - -t waveaudio\"\n+ sox_base = ([conf.prog.sox, \"-t\", \".ul\"], [\"-\", \"-t\", \"waveaudio\"])\n \n \n def _merge_sound_bytes(x, y, sample_size=2):\n@@ -75,7 +75,9 @@\n to read RTP VoIP packets\n \"\"\"\n \n- dsp, rd = os.popen2(sox_base % \"\")\n+ proc = subprocess.Popen(sox_base[0] + sox_base[1], stdin=subprocess.PIPE,\n+ stdout=subprocess.PIPE)\n+ dsp, rd = proc.stdin, proc.stdout\n \n def play(pkt):\n if not pkt:\n@@ -113,7 +115,9 @@\n .. seealso:: voip_play\n to play only incoming packets.\n \"\"\"\n- dsp, rd = os.popen2(sox_base % \"-c 2\")\n+ proc = subprocess.Popen(sox_base[0] + [\"-c\", \"2\"] + sox_base[1],\n+ stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n+ dsp, rd = proc.stdin, proc.stdout\n global x1, x2\n x1 = \"\"\n x2 = \"\"\n@@ -133,7 +137,14 @@\n x1, x2, r = _merge_sound_bytes(x1, x2)\n dsp.write(r)\n \n- sniff(store=0, prn=play, **kargs)\n+ try:\n+ sniff(store=0, prn=play, **kargs)\n+ finally:\n+ try:\n+ dsp.close()\n+ rd.close()\n+ except Exception:\n+ pass\n \n \n def voip_play3(lst=None, **kargs):\n@@ -144,7 +155,9 @@\n .. seealso:: voip_play\n for basic VoIP packets\n \"\"\"\n- dsp, rd = os.popen2(sox_base % \"\")\n+ proc = subprocess.Popen(sox_base[0] + sox_base[1], stdin=subprocess.PIPE,\n+ stdout=subprocess.PIPE)\n+ dsp, rd = proc.stdin, proc.stdout\n \n def play(pkt, dsp=dsp):\n if pkt and pkt.haslayer(UDP) and pkt.haslayer(RTP):\n", "issue": "AttributeErrror: module 'os' has no attribute 'popen2' when using voip module\nHello I have installed the last scapy version, I want to use the module voip but i has the issue \r\nAttributeErrror: module 'os' has no attribute 'popen2' when using voip_play function. 
\n", "before_files": [{"content": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more information\n# Copyright (C) Philippe Biondi <[email protected]>\n# This program is published under a GPLv2 license\n\n\"\"\"\nVoIP (Voice over IP) related functions\n\"\"\"\n\nfrom __future__ import absolute_import\nimport os\n###################\n# Listen VoIP #\n###################\n\nfrom scapy.sendrecv import sniff\nfrom scapy.layers.inet import IP, UDP\nfrom scapy.layers.rtp import RTP\nfrom scapy.consts import WINDOWS\nfrom scapy.config import conf\nfrom scapy.modules.six.moves import range\n\n\nsox_base = \"sox -t .ul %s - -t ossdsp /dev/dsp\"\n\nif WINDOWS:\n if conf.prog.sox is None:\n raise OSError(\"Sox must be installed to play VoIP packets\")\n sox_base = \"\\\"\" + conf.prog.sox + \"\\\" -t .ul %s - -t waveaudio\"\n\n\ndef _merge_sound_bytes(x, y, sample_size=2):\n # TODO: find a better way to merge sound bytes\n # This will only add them one next to each other:\n # \\xff + \\xff ==> \\xff\\xff\n m = \"\"\n ss = sample_size\n min_ = 0\n if len(x) >= len(y):\n min_ = y\n elif len(x) < len(y):\n min_ = x\n r_ = len(min_)\n for i in range(r_ / ss):\n m += x[ss * i:ss * (i + 1)] + y[ss * i:ss * (i + 1)]\n return x[r_:], y[r_:], m\n\n\ndef voip_play(s1, lst=None, **kargs):\n \"\"\"Play VoIP packets with RAW data that\n are either sniffed either from an IP, or\n specified as a list.\n\n It will play only the incoming packets !\n\n :param s1: The IP of the src of all VoIP packets.\n :param lst: (optional) A list of packets to load\n :type s1: string\n :type lst: list\n\n :Example:\n\n >>> voip_play(\"64.2.142.189\")\n while calling '[email protected]'\n\n >>> voip_play(\"64.2.142.189\", lst)\n with list a list of packets with VoIP data\n in their RAW layer\n\n .. seealso:: voip_play2\n to play both the outcoming and incoming packets\n at the same time.\n\n .. seealso:: voip_play3\n to read RTP VoIP packets\n \"\"\"\n\n dsp, rd = os.popen2(sox_base % \"\")\n\n def play(pkt):\n if not pkt:\n return\n if not pkt.haslayer(UDP) or not pkt.haslayer(IP):\n return\n ip = pkt.getlayer(IP)\n if s1 == ip.src:\n dsp.write(pkt.getlayer(conf.raw_layer).load[12:])\n try:\n if lst is None:\n sniff(store=0, prn=play, **kargs)\n else:\n for p in lst:\n play(p)\n finally:\n dsp.close()\n rd.close()\n\n\ndef voip_play1(s1, lst=None, **kargs):\n \"\"\"Same than voip_play, backward compatibility\n \"\"\"\n return voip_play(s1, lst, **kargs)\n\n\ndef voip_play2(s1, **kargs):\n \"\"\"\n Same than voip_play, but will play\n both incoming and outcoming packets.\n The sound will surely suffer distortion.\n\n Only supports sniffing.\n\n .. seealso:: voip_play\n to play only incoming packets.\n \"\"\"\n dsp, rd = os.popen2(sox_base % \"-c 2\")\n global x1, x2\n x1 = \"\"\n x2 = \"\"\n\n def play(pkt):\n global x1, x2\n if not pkt:\n return\n if not pkt.haslayer(UDP) or not pkt.haslayer(IP):\n return\n ip = pkt.getlayer(IP)\n if s1 in [ip.src, ip.dst]:\n if ip.dst == s1:\n x1 += pkt.getlayer(conf.raw_layer).load[12:]\n else:\n x2 += pkt.getlayer(conf.raw_layer).load[12:]\n x1, x2, r = _merge_sound_bytes(x1, x2)\n dsp.write(r)\n\n sniff(store=0, prn=play, **kargs)\n\n\ndef voip_play3(lst=None, **kargs):\n \"\"\"Same than voip_play, but made to\n read and play VoIP RTP packets, without\n checking IP.\n\n .. 
seealso:: voip_play\n for basic VoIP packets\n \"\"\"\n dsp, rd = os.popen2(sox_base % \"\")\n\n def play(pkt, dsp=dsp):\n if pkt and pkt.haslayer(UDP) and pkt.haslayer(RTP):\n dsp.write(pkt.getlayer(RTP).load)\n try:\n if lst is None:\n sniff(store=0, prn=play, **kargs)\n else:\n for p in lst:\n play(p)\n finally:\n try:\n dsp.close()\n rd.close()\n except Exception:\n pass\n", "path": "scapy/modules/voip.py"}], "after_files": [{"content": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more information\n# Copyright (C) Philippe Biondi <[email protected]>\n# This program is published under a GPLv2 license\n\n\"\"\"\nVoIP (Voice over IP) related functions\n\"\"\"\n\nfrom __future__ import absolute_import\nimport subprocess\n###################\n# Listen VoIP #\n###################\n\nfrom scapy.sendrecv import sniff\nfrom scapy.layers.inet import IP, UDP\nfrom scapy.layers.rtp import RTP\nfrom scapy.consts import WINDOWS\nfrom scapy.config import conf\nfrom scapy.modules.six.moves import range\n\n\nsox_base = ([\"sox\", \"-t\", \".ul\"], [\"-\", \"-t\", \"ossdsp\", \"/dev/dsp\"])\n\nif WINDOWS:\n if conf.prog.sox is None:\n raise OSError(\"Sox must be installed to play VoIP packets\")\n sox_base = ([conf.prog.sox, \"-t\", \".ul\"], [\"-\", \"-t\", \"waveaudio\"])\n\n\ndef _merge_sound_bytes(x, y, sample_size=2):\n # TODO: find a better way to merge sound bytes\n # This will only add them one next to each other:\n # \\xff + \\xff ==> \\xff\\xff\n m = \"\"\n ss = sample_size\n min_ = 0\n if len(x) >= len(y):\n min_ = y\n elif len(x) < len(y):\n min_ = x\n r_ = len(min_)\n for i in range(r_ / ss):\n m += x[ss * i:ss * (i + 1)] + y[ss * i:ss * (i + 1)]\n return x[r_:], y[r_:], m\n\n\ndef voip_play(s1, lst=None, **kargs):\n \"\"\"Play VoIP packets with RAW data that\n are either sniffed either from an IP, or\n specified as a list.\n\n It will play only the incoming packets !\n\n :param s1: The IP of the src of all VoIP packets.\n :param lst: (optional) A list of packets to load\n :type s1: string\n :type lst: list\n\n :Example:\n\n >>> voip_play(\"64.2.142.189\")\n while calling '[email protected]'\n\n >>> voip_play(\"64.2.142.189\", lst)\n with list a list of packets with VoIP data\n in their RAW layer\n\n .. seealso:: voip_play2\n to play both the outcoming and incoming packets\n at the same time.\n\n .. seealso:: voip_play3\n to read RTP VoIP packets\n \"\"\"\n\n proc = subprocess.Popen(sox_base[0] + sox_base[1], stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n dsp, rd = proc.stdin, proc.stdout\n\n def play(pkt):\n if not pkt:\n return\n if not pkt.haslayer(UDP) or not pkt.haslayer(IP):\n return\n ip = pkt.getlayer(IP)\n if s1 == ip.src:\n dsp.write(pkt.getlayer(conf.raw_layer).load[12:])\n try:\n if lst is None:\n sniff(store=0, prn=play, **kargs)\n else:\n for p in lst:\n play(p)\n finally:\n dsp.close()\n rd.close()\n\n\ndef voip_play1(s1, lst=None, **kargs):\n \"\"\"Same than voip_play, backward compatibility\n \"\"\"\n return voip_play(s1, lst, **kargs)\n\n\ndef voip_play2(s1, **kargs):\n \"\"\"\n Same than voip_play, but will play\n both incoming and outcoming packets.\n The sound will surely suffer distortion.\n\n Only supports sniffing.\n\n .. 
seealso:: voip_play\n to play only incoming packets.\n \"\"\"\n proc = subprocess.Popen(sox_base[0] + [\"-c\", \"2\"] + sox_base[1],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n dsp, rd = proc.stdin, proc.stdout\n global x1, x2\n x1 = \"\"\n x2 = \"\"\n\n def play(pkt):\n global x1, x2\n if not pkt:\n return\n if not pkt.haslayer(UDP) or not pkt.haslayer(IP):\n return\n ip = pkt.getlayer(IP)\n if s1 in [ip.src, ip.dst]:\n if ip.dst == s1:\n x1 += pkt.getlayer(conf.raw_layer).load[12:]\n else:\n x2 += pkt.getlayer(conf.raw_layer).load[12:]\n x1, x2, r = _merge_sound_bytes(x1, x2)\n dsp.write(r)\n\n try:\n sniff(store=0, prn=play, **kargs)\n finally:\n try:\n dsp.close()\n rd.close()\n except Exception:\n pass\n\n\ndef voip_play3(lst=None, **kargs):\n \"\"\"Same than voip_play, but made to\n read and play VoIP RTP packets, without\n checking IP.\n\n .. seealso:: voip_play\n for basic VoIP packets\n \"\"\"\n proc = subprocess.Popen(sox_base[0] + sox_base[1], stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n dsp, rd = proc.stdin, proc.stdout\n\n def play(pkt, dsp=dsp):\n if pkt and pkt.haslayer(UDP) and pkt.haslayer(RTP):\n dsp.write(pkt.getlayer(RTP).load)\n try:\n if lst is None:\n sniff(store=0, prn=play, **kargs)\n else:\n for p in lst:\n play(p)\n finally:\n try:\n dsp.close()\n rd.close()\n except Exception:\n pass\n", "path": "scapy/modules/voip.py"}]} | 1,915 | 692 |
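
The golden diff above works around the removal of `os.popen2` in Python 3 by switching to `subprocess.Popen` with both streams piped. A minimal sketch of that migration pattern, lifted from the patched module (it assumes a `sox` binary on PATH and an OSS audio device, so treat it as illustrative rather than portable):

```python
import subprocess

# Python 2: dsp, rd = os.popen2("sox -t .ul - -t ossdsp /dev/dsp")
# Python 3 equivalent: pipe stdin/stdout explicitly and keep the same
# (write-end, read-end) pair of file objects.
proc = subprocess.Popen(
    ["sox", "-t", ".ul", "-", "-t", "ossdsp", "/dev/dsp"],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
)
dsp, rd = proc.stdin, proc.stdout
```
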
gh_patches_debug_27244 | rasdani/github-patches | git_diff | falconry__falcon-1182 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop support for Python 3.3
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import glob
2 import imp
3 import io
4 import os
5 from os import path
6 import re
7 import sys
8
9 from setuptools import Extension, find_packages, setup
10
11 MYDIR = path.abspath(os.path.dirname(__file__))
12
13 VERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))
14 VERSION = VERSION.__version__
15
16 # NOTE(kgriffs): python-mimeparse is better-maintained fork of mimeparse
17 REQUIRES = ['six>=1.4.0', 'python-mimeparse>=1.5.2']
18
19 try:
20 sys.pypy_version_info
21 PYPY = True
22 except AttributeError:
23 PYPY = False
24
25 if PYPY:
26 CYTHON = False
27 else:
28 try:
29 from Cython.Distutils import build_ext
30 CYTHON = True
31 except ImportError:
32 # TODO(kgriffs): pip now ignores all output, so the user
33 # may not see this message. See also:
34 #
35 # https://github.com/pypa/pip/issues/2732
36 #
37 print('\nNOTE: Cython not installed. '
38 'Falcon will still work fine, but may run '
39 'a bit slower.\n')
40 CYTHON = False
41
42 if CYTHON:
43 def list_modules(dirname):
44 filenames = glob.glob(path.join(dirname, '*.py'))
45
46 module_names = []
47 for name in filenames:
48 module, ext = path.splitext(path.basename(name))
49 if module != '__init__':
50 module_names.append(module)
51
52 return module_names
53
54 package_names = ['falcon', 'falcon.util', 'falcon.routing', 'falcon.media']
55 ext_modules = [
56 Extension(
57 package + '.' + module,
58 [path.join(*(package.split('.') + [module + '.py']))]
59 )
60 for package in package_names
61 for module in list_modules(path.join(MYDIR, *package.split('.')))
62 ]
63
64 cmdclass = {'build_ext': build_ext}
65
66 else:
67 cmdclass = {}
68 ext_modules = []
69
70
71 def load_description():
72 in_raw = False
73
74 description_lines = []
75
76 # NOTE(kgriffs): PyPI does not support the raw directive
77 for readme_line in io.open('README.rst', 'r', encoding='utf-8'):
78 if readme_line.startswith('.. raw::'):
79 in_raw = True
80 elif in_raw:
81 if readme_line and not re.match('\s', readme_line):
82 in_raw = False
83
84 if not in_raw:
85 description_lines.append(readme_line)
86
87 return ''.join(description_lines)
88
89
90 setup(
91 name='falcon',
92 version=VERSION,
93 description='An unladen web framework for building APIs and app backends.',
94 long_description=load_description(),
95 classifiers=[
96 'Development Status :: 5 - Production/Stable',
97 'Environment :: Web Environment',
98 'Natural Language :: English',
99 'Intended Audience :: Developers',
100 'Intended Audience :: System Administrators',
101 'License :: OSI Approved :: Apache Software License',
102 'Operating System :: MacOS :: MacOS X',
103 'Operating System :: Microsoft :: Windows',
104 'Operating System :: POSIX',
105 'Topic :: Internet :: WWW/HTTP :: WSGI',
106 'Topic :: Software Development :: Libraries :: Application Frameworks',
107 'Programming Language :: Python',
108 'Programming Language :: Python :: Implementation :: CPython',
109 'Programming Language :: Python :: Implementation :: PyPy',
110 'Programming Language :: Python :: 2.7',
111 'Programming Language :: Python :: 3.3',
112 'Programming Language :: Python :: 3.4',
113 'Programming Language :: Python :: 3.5',
114 'Programming Language :: Python :: 3.6',
115 ],
116 keywords='wsgi web api framework rest http cloud',
117 author='Kurt Griffiths',
118 author_email='[email protected]',
119 url='http://falconframework.org',
120 license='Apache 2.0',
121 packages=find_packages(exclude=['tests']),
122 include_package_data=True,
123 zip_safe=False,
124 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*',
125 install_requires=REQUIRES,
126 cmdclass=cmdclass,
127 ext_modules=ext_modules,
128 tests_require=['testtools', 'requests', 'pyyaml', 'pytest', 'pytest-runner'],
129 entry_points={
130 'console_scripts': [
131 'falcon-bench = falcon.cmd.bench:main',
132 'falcon-print-routes = falcon.cmd.print_routes:main'
133 ]
134 }
135 )
136
```
Path: `falcon/media/msgpack.py`
Content:
```
1 from __future__ import absolute_import
2
3 from falcon import errors
4 from falcon.media import BaseHandler
5
6
7 class MessagePackHandler(BaseHandler):
8 """Handler built using the :py:mod:`msgpack` module.
9
10 This handler uses ``msgpack.unpackb()`` and ``msgpack.packb()``. The
11 MessagePack ``bin`` type is used to distinguish between Unicode strings
12 (``str`` on Python 3, ``unicode`` on Python 2) and byte strings
13 (``bytes`` on Python 2/3, or ``str`` on Python 2).
14
15 Note:
16 This handler requires the extra ``msgpack`` package, which must be
17 installed in addition to ``falcon`` from PyPI:
18
19 .. code::
20
21 $ pip install msgpack
22
23 Python 2.6 users will need to use the deprecated ``msgpack-python``
24 package instead, pinned to version 0.4.8.
25 """
26
27 def __init__(self):
28 import msgpack
29
30 self.msgpack = msgpack
31 self.packer = msgpack.Packer(
32 encoding='utf-8',
33 autoreset=True,
34 use_bin_type=True,
35 )
36
37 def deserialize(self, raw):
38 try:
39 # NOTE(jmvrbanac): Using unpackb since we would need to manage
40 # a buffer for Unpacker() which wouldn't gain us much.
41 return self.msgpack.unpackb(raw, encoding='utf-8')
42 except ValueError as err:
43 raise errors.HTTPBadRequest(
44 'Invalid MessagePack',
45 'Could not parse MessagePack body - {0}'.format(err)
46 )
47
48 def serialize(self, media):
49 return self.packer.pack(media)
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/falcon/media/msgpack.py b/falcon/media/msgpack.py
--- a/falcon/media/msgpack.py
+++ b/falcon/media/msgpack.py
@@ -19,9 +19,6 @@
.. code::
$ pip install msgpack
-
- Python 2.6 users will need to use the deprecated ``msgpack-python``
- package instead, pinned to version 0.4.8.
"""
def __init__(self):
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -107,8 +107,9 @@
'Programming Language :: Python',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
+ 'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: 3.3',
+ 'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
@@ -121,7 +122,7 @@
packages=find_packages(exclude=['tests']),
include_package_data=True,
zip_safe=False,
- python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*',
+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
install_requires=REQUIRES,
cmdclass=cmdclass,
ext_modules=ext_modules,
| {"golden_diff": "diff --git a/falcon/media/msgpack.py b/falcon/media/msgpack.py\n--- a/falcon/media/msgpack.py\n+++ b/falcon/media/msgpack.py\n@@ -19,9 +19,6 @@\n .. code::\n \n $ pip install msgpack\n-\n- Python 2.6 users will need to use the deprecated ``msgpack-python``\n- package instead, pinned to version 0.4.8.\n \"\"\"\n \n def __init__(self):\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -107,8 +107,9 @@\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n+ 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n- 'Programming Language :: Python :: 3.3',\n+ 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n@@ -121,7 +122,7 @@\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n- python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*',\n+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n install_requires=REQUIRES,\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n", "issue": "Drop support for Python 3.3\n\n", "before_files": [{"content": "import glob\nimport imp\nimport io\nimport os\nfrom os import path\nimport re\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\n\nMYDIR = path.abspath(os.path.dirname(__file__))\n\nVERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))\nVERSION = VERSION.__version__\n\n# NOTE(kgriffs): python-mimeparse is better-maintained fork of mimeparse\nREQUIRES = ['six>=1.4.0', 'python-mimeparse>=1.5.2']\n\ntry:\n sys.pypy_version_info\n PYPY = True\nexcept AttributeError:\n PYPY = False\n\nif PYPY:\n CYTHON = False\nelse:\n try:\n from Cython.Distutils import build_ext\n CYTHON = True\n except ImportError:\n # TODO(kgriffs): pip now ignores all output, so the user\n # may not see this message. See also:\n #\n # https://github.com/pypa/pip/issues/2732\n #\n print('\\nNOTE: Cython not installed. '\n 'Falcon will still work fine, but may run '\n 'a bit slower.\\n')\n CYTHON = False\n\nif CYTHON:\n def list_modules(dirname):\n filenames = glob.glob(path.join(dirname, '*.py'))\n\n module_names = []\n for name in filenames:\n module, ext = path.splitext(path.basename(name))\n if module != '__init__':\n module_names.append(module)\n\n return module_names\n\n package_names = ['falcon', 'falcon.util', 'falcon.routing', 'falcon.media']\n ext_modules = [\n Extension(\n package + '.' + module,\n [path.join(*(package.split('.') + [module + '.py']))]\n )\n for package in package_names\n for module in list_modules(path.join(MYDIR, *package.split('.')))\n ]\n\n cmdclass = {'build_ext': build_ext}\n\nelse:\n cmdclass = {}\n ext_modules = []\n\n\ndef load_description():\n in_raw = False\n\n description_lines = []\n\n # NOTE(kgriffs): PyPI does not support the raw directive\n for readme_line in io.open('README.rst', 'r', encoding='utf-8'):\n if readme_line.startswith('.. 
raw::'):\n in_raw = True\n elif in_raw:\n if readme_line and not re.match('\\s', readme_line):\n in_raw = False\n\n if not in_raw:\n description_lines.append(readme_line)\n\n return ''.join(description_lines)\n\n\nsetup(\n name='falcon',\n version=VERSION,\n description='An unladen web framework for building APIs and app backends.',\n long_description=load_description(),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Natural Language :: English',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='wsgi web api framework rest http cloud',\n author='Kurt Griffiths',\n author_email='[email protected]',\n url='http://falconframework.org',\n license='Apache 2.0',\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*',\n install_requires=REQUIRES,\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n tests_require=['testtools', 'requests', 'pyyaml', 'pytest', 'pytest-runner'],\n entry_points={\n 'console_scripts': [\n 'falcon-bench = falcon.cmd.bench:main',\n 'falcon-print-routes = falcon.cmd.print_routes:main'\n ]\n }\n)\n", "path": "setup.py"}, {"content": "from __future__ import absolute_import\n\nfrom falcon import errors\nfrom falcon.media import BaseHandler\n\n\nclass MessagePackHandler(BaseHandler):\n \"\"\"Handler built using the :py:mod:`msgpack` module.\n\n This handler uses ``msgpack.unpackb()`` and ``msgpack.packb()``. The\n MessagePack ``bin`` type is used to distinguish between Unicode strings\n (``str`` on Python 3, ``unicode`` on Python 2) and byte strings\n (``bytes`` on Python 2/3, or ``str`` on Python 2).\n\n Note:\n This handler requires the extra ``msgpack`` package, which must be\n installed in addition to ``falcon`` from PyPI:\n\n .. 
code::\n\n $ pip install msgpack\n\n Python 2.6 users will need to use the deprecated ``msgpack-python``\n package instead, pinned to version 0.4.8.\n \"\"\"\n\n def __init__(self):\n import msgpack\n\n self.msgpack = msgpack\n self.packer = msgpack.Packer(\n encoding='utf-8',\n autoreset=True,\n use_bin_type=True,\n )\n\n def deserialize(self, raw):\n try:\n # NOTE(jmvrbanac): Using unpackb since we would need to manage\n # a buffer for Unpacker() which wouldn't gain us much.\n return self.msgpack.unpackb(raw, encoding='utf-8')\n except ValueError as err:\n raise errors.HTTPBadRequest(\n 'Invalid MessagePack',\n 'Could not parse MessagePack body - {0}'.format(err)\n )\n\n def serialize(self, media):\n return self.packer.pack(media)\n", "path": "falcon/media/msgpack.py"}], "after_files": [{"content": "import glob\nimport imp\nimport io\nimport os\nfrom os import path\nimport re\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\n\nMYDIR = path.abspath(os.path.dirname(__file__))\n\nVERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))\nVERSION = VERSION.__version__\n\n# NOTE(kgriffs): python-mimeparse is better-maintained fork of mimeparse\nREQUIRES = ['six>=1.4.0', 'python-mimeparse>=1.5.2']\n\ntry:\n sys.pypy_version_info\n PYPY = True\nexcept AttributeError:\n PYPY = False\n\nif PYPY:\n CYTHON = False\nelse:\n try:\n from Cython.Distutils import build_ext\n CYTHON = True\n except ImportError:\n # TODO(kgriffs): pip now ignores all output, so the user\n # may not see this message. See also:\n #\n # https://github.com/pypa/pip/issues/2732\n #\n print('\\nNOTE: Cython not installed. '\n 'Falcon will still work fine, but may run '\n 'a bit slower.\\n')\n CYTHON = False\n\nif CYTHON:\n def list_modules(dirname):\n filenames = glob.glob(path.join(dirname, '*.py'))\n\n module_names = []\n for name in filenames:\n module, ext = path.splitext(path.basename(name))\n if module != '__init__':\n module_names.append(module)\n\n return module_names\n\n package_names = ['falcon', 'falcon.util', 'falcon.routing', 'falcon.media']\n ext_modules = [\n Extension(\n package + '.' + module,\n [path.join(*(package.split('.') + [module + '.py']))]\n )\n for package in package_names\n for module in list_modules(path.join(MYDIR, *package.split('.')))\n ]\n\n cmdclass = {'build_ext': build_ext}\n\nelse:\n cmdclass = {}\n ext_modules = []\n\n\ndef load_description():\n in_raw = False\n\n description_lines = []\n\n # NOTE(kgriffs): PyPI does not support the raw directive\n for readme_line in io.open('README.rst', 'r', encoding='utf-8'):\n if readme_line.startswith('.. 
raw::'):\n in_raw = True\n elif in_raw:\n if readme_line and not re.match('\\s', readme_line):\n in_raw = False\n\n if not in_raw:\n description_lines.append(readme_line)\n\n return ''.join(description_lines)\n\n\nsetup(\n name='falcon',\n version=VERSION,\n description='An unladen web framework for building APIs and app backends.',\n long_description=load_description(),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Natural Language :: English',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='wsgi web api framework rest http cloud',\n author='Kurt Griffiths',\n author_email='[email protected]',\n url='http://falconframework.org',\n license='Apache 2.0',\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n install_requires=REQUIRES,\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n tests_require=['testtools', 'requests', 'pyyaml', 'pytest', 'pytest-runner'],\n entry_points={\n 'console_scripts': [\n 'falcon-bench = falcon.cmd.bench:main',\n 'falcon-print-routes = falcon.cmd.print_routes:main'\n ]\n }\n)\n", "path": "setup.py"}, {"content": "from __future__ import absolute_import\n\nfrom falcon import errors\nfrom falcon.media import BaseHandler\n\n\nclass MessagePackHandler(BaseHandler):\n \"\"\"Handler built using the :py:mod:`msgpack` module.\n\n This handler uses ``msgpack.unpackb()`` and ``msgpack.packb()``. The\n MessagePack ``bin`` type is used to distinguish between Unicode strings\n (``str`` on Python 3, ``unicode`` on Python 2) and byte strings\n (``bytes`` on Python 2/3, or ``str`` on Python 2).\n\n Note:\n This handler requires the extra ``msgpack`` package, which must be\n installed in addition to ``falcon`` from PyPI:\n\n .. code::\n\n $ pip install msgpack\n \"\"\"\n\n def __init__(self):\n import msgpack\n\n self.msgpack = msgpack\n self.packer = msgpack.Packer(\n encoding='utf-8',\n autoreset=True,\n use_bin_type=True,\n )\n\n def deserialize(self, raw):\n try:\n # NOTE(jmvrbanac): Using unpackb since we would need to manage\n # a buffer for Unpacker() which wouldn't gain us much.\n return self.msgpack.unpackb(raw, encoding='utf-8')\n except ValueError as err:\n raise errors.HTTPBadRequest(\n 'Invalid MessagePack',\n 'Could not parse MessagePack body - {0}'.format(err)\n )\n\n def serialize(self, media):\n return self.packer.pack(media)\n", "path": "falcon/media/msgpack.py"}]} | 2,037 | 365 |
gh_patches_debug_11025 | rasdani/github-patches | git_diff | Qiskit__qiskit-3555 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't invert gate created from QuantumCircuit.to_gate
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues -->
### Information
- **Qiskit Terra version**:
- **Python version**:
- **Operating system**:
### What is the current behavior?
When inverting a gate created from QuantumCircuit.to_gate, the following exception is raised:
`ValueError: not enough values to unpack (expected 3, got 2)`
### Steps to reproduce the problem
```
qc = QuantumCircuit(1)
qc.x(0)
gate = qc.to_gate()
gate.inverse()
```
### What is the expected behavior?
### Suggested solutions
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/converters/circuit_to_gate.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2017, 2019.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 """Helper function for converting a circuit to a gate"""
16
17 from qiskit.circuit.gate import Gate
18 from qiskit.circuit.quantumregister import QuantumRegister, Qubit
19 from qiskit.exceptions import QiskitError
20
21
22 def circuit_to_gate(circuit, parameter_map=None):
23 """Build a ``Gate`` object from a ``QuantumCircuit``.
24
25 The gate is anonymous (not tied to a named quantum register),
26 and so can be inserted into another circuit. The gate will
27 have the same string name as the circuit.
28
29 Args:
30 circuit (QuantumCircuit): the input circuit.
31 parameter_map (dict): For parameterized circuits, a mapping from
32 parameters in the circuit to parameters to be used in the gate.
33 If None, existing circuit parameters will also parameterize the
34 Gate.
35
36 Raises:
37 QiskitError: if circuit is non-unitary or if
38 parameter_map is not compatible with circuit
39
40 Return:
41 Gate: a Gate equivalent to the action of the
42 input circuit. Upon decomposition, this gate will
43 yield the components comprising the original circuit.
44 """
45 for inst, _, _ in circuit.data:
46 if not isinstance(inst, Gate):
47 raise QiskitError('One or more instructions in this instruction '
48 'cannot be converted to a gate')
49
50 if parameter_map is None:
51 parameter_dict = {p: p for p in circuit.parameters}
52 else:
53 parameter_dict = circuit._unroll_param_dict(parameter_map)
54
55 if parameter_dict.keys() != circuit.parameters:
56 raise QiskitError(('parameter_map should map all circuit parameters. '
57 'Circuit parameters: {}, parameter_map: {}').format(
58 circuit.parameters, parameter_dict))
59
60 gate = Gate(name=circuit.name,
61 num_qubits=sum([qreg.size for qreg in circuit.qregs]),
62 params=sorted(parameter_dict.values(), key=lambda p: p.name))
63 gate.condition = None
64
65 def find_bit_position(bit):
66 """find the index of a given bit (Register, int) within
67 a flat ordered list of bits of the circuit
68 """
69 if isinstance(bit, Qubit):
70 ordered_regs = circuit.qregs
71 else:
72 ordered_regs = circuit.cregs
73 reg_index = ordered_regs.index(bit.register)
74 return sum([reg.size for reg in ordered_regs[:reg_index]]) + bit.index
75
76 target = circuit.copy()
77 target._substitute_parameters(parameter_dict)
78
79 definition = target.data
80
81 if gate.num_qubits > 0:
82 q = QuantumRegister(gate.num_qubits, 'q')
83
84 definition = list(map(lambda x:
85 (x[0], list(map(lambda y: q[find_bit_position(y)], x[1]))),
86 definition))
87 gate.definition = definition
88
89 return gate
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qiskit/converters/circuit_to_gate.py b/qiskit/converters/circuit_to_gate.py
--- a/qiskit/converters/circuit_to_gate.py
+++ b/qiskit/converters/circuit_to_gate.py
@@ -81,9 +81,14 @@
if gate.num_qubits > 0:
q = QuantumRegister(gate.num_qubits, 'q')
- definition = list(map(lambda x:
- (x[0], list(map(lambda y: q[find_bit_position(y)], x[1]))),
- definition))
+ # The 3rd parameter in the output tuple) is hard coded to [] because
+ # Gate objects do not have cregs set and we've verified that all
+ # instructions are gates
+ definition = list(map(
+ lambda x: (x[0],
+ list(map(lambda y: q[find_bit_position(y)], x[1])),
+ []),
+ definition))
gate.definition = definition
return gate
| {"golden_diff": "diff --git a/qiskit/converters/circuit_to_gate.py b/qiskit/converters/circuit_to_gate.py\n--- a/qiskit/converters/circuit_to_gate.py\n+++ b/qiskit/converters/circuit_to_gate.py\n@@ -81,9 +81,14 @@\n if gate.num_qubits > 0:\n q = QuantumRegister(gate.num_qubits, 'q')\n \n- definition = list(map(lambda x:\n- (x[0], list(map(lambda y: q[find_bit_position(y)], x[1]))),\n- definition))\n+ # The 3rd parameter in the output tuple) is hard coded to [] because\n+ # Gate objects do not have cregs set and we've verified that all\n+ # instructions are gates\n+ definition = list(map(\n+ lambda x: (x[0],\n+ list(map(lambda y: q[find_bit_position(y)], x[1])),\n+ []),\n+ definition))\n gate.definition = definition\n \n return gate\n", "issue": "Can't invert gate created from QuantumCircuit.to_gate\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Information\r\n\r\n- **Qiskit Terra version**:\r\n- **Python version**:\r\n- **Operating system**:\r\n\r\n### What is the current behavior?\r\nWhen inverting a gate created from QuantumCircuit.to_gate the following exception is raised:\r\n\r\n`ValueError: not enough values to unpack (expected 3, got 2)`\r\n\r\n\r\n### Steps to reproduce the problem\r\n```\r\nqc = QuantumCircuit(1)\r\nqc.x(0)\r\ngate = qc.to_gate()\r\ngate.inverse()\r\n```\r\n\r\n### What is the expected behavior?\r\n\r\n\r\n\r\n### Suggested solutions\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Helper function for converting a circuit to a gate\"\"\"\n\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.circuit.quantumregister import QuantumRegister, Qubit\nfrom qiskit.exceptions import QiskitError\n\n\ndef circuit_to_gate(circuit, parameter_map=None):\n \"\"\"Build a ``Gate`` object from a ``QuantumCircuit``.\n\n The gate is anonymous (not tied to a named quantum register),\n and so can be inserted into another circuit. The gate will\n have the same string name as the circuit.\n\n Args:\n circuit (QuantumCircuit): the input circuit.\n parameter_map (dict): For parameterized circuits, a mapping from\n parameters in the circuit to parameters to be used in the gate.\n If None, existing circuit parameters will also parameterize the\n Gate.\n\n Raises:\n QiskitError: if circuit is non-unitary or if\n parameter_map is not compatible with circuit\n\n Return:\n Gate: a Gate equivalent to the action of the\n input circuit. 
Upon decomposition, this gate will\n yield the components comprising the original circuit.\n \"\"\"\n for inst, _, _ in circuit.data:\n if not isinstance(inst, Gate):\n raise QiskitError('One or more instructions in this instruction '\n 'cannot be converted to a gate')\n\n if parameter_map is None:\n parameter_dict = {p: p for p in circuit.parameters}\n else:\n parameter_dict = circuit._unroll_param_dict(parameter_map)\n\n if parameter_dict.keys() != circuit.parameters:\n raise QiskitError(('parameter_map should map all circuit parameters. '\n 'Circuit parameters: {}, parameter_map: {}').format(\n circuit.parameters, parameter_dict))\n\n gate = Gate(name=circuit.name,\n num_qubits=sum([qreg.size for qreg in circuit.qregs]),\n params=sorted(parameter_dict.values(), key=lambda p: p.name))\n gate.condition = None\n\n def find_bit_position(bit):\n \"\"\"find the index of a given bit (Register, int) within\n a flat ordered list of bits of the circuit\n \"\"\"\n if isinstance(bit, Qubit):\n ordered_regs = circuit.qregs\n else:\n ordered_regs = circuit.cregs\n reg_index = ordered_regs.index(bit.register)\n return sum([reg.size for reg in ordered_regs[:reg_index]]) + bit.index\n\n target = circuit.copy()\n target._substitute_parameters(parameter_dict)\n\n definition = target.data\n\n if gate.num_qubits > 0:\n q = QuantumRegister(gate.num_qubits, 'q')\n\n definition = list(map(lambda x:\n (x[0], list(map(lambda y: q[find_bit_position(y)], x[1]))),\n definition))\n gate.definition = definition\n\n return gate\n", "path": "qiskit/converters/circuit_to_gate.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Helper function for converting a circuit to a gate\"\"\"\n\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.circuit.quantumregister import QuantumRegister, Qubit\nfrom qiskit.exceptions import QiskitError\n\n\ndef circuit_to_gate(circuit, parameter_map=None):\n \"\"\"Build a ``Gate`` object from a ``QuantumCircuit``.\n\n The gate is anonymous (not tied to a named quantum register),\n and so can be inserted into another circuit. The gate will\n have the same string name as the circuit.\n\n Args:\n circuit (QuantumCircuit): the input circuit.\n parameter_map (dict): For parameterized circuits, a mapping from\n parameters in the circuit to parameters to be used in the gate.\n If None, existing circuit parameters will also parameterize the\n Gate.\n\n Raises:\n QiskitError: if circuit is non-unitary or if\n parameter_map is not compatible with circuit\n\n Return:\n Gate: a Gate equivalent to the action of the\n input circuit. 
Upon decomposition, this gate will\n yield the components comprising the original circuit.\n \"\"\"\n for inst, _, _ in circuit.data:\n if not isinstance(inst, Gate):\n raise QiskitError('One or more instructions in this instruction '\n 'cannot be converted to a gate')\n\n if parameter_map is None:\n parameter_dict = {p: p for p in circuit.parameters}\n else:\n parameter_dict = circuit._unroll_param_dict(parameter_map)\n\n if parameter_dict.keys() != circuit.parameters:\n raise QiskitError(('parameter_map should map all circuit parameters. '\n 'Circuit parameters: {}, parameter_map: {}').format(\n circuit.parameters, parameter_dict))\n\n gate = Gate(name=circuit.name,\n num_qubits=sum([qreg.size for qreg in circuit.qregs]),\n params=sorted(parameter_dict.values(), key=lambda p: p.name))\n gate.condition = None\n\n def find_bit_position(bit):\n \"\"\"find the index of a given bit (Register, int) within\n a flat ordered list of bits of the circuit\n \"\"\"\n if isinstance(bit, Qubit):\n ordered_regs = circuit.qregs\n else:\n ordered_regs = circuit.cregs\n reg_index = ordered_regs.index(bit.register)\n return sum([reg.size for reg in ordered_regs[:reg_index]]) + bit.index\n\n target = circuit.copy()\n target._substitute_parameters(parameter_dict)\n\n definition = target.data\n\n if gate.num_qubits > 0:\n q = QuantumRegister(gate.num_qubits, 'q')\n\n # The 3rd parameter in the output tuple) is hard coded to [] because\n # Gate objects do not have cregs set and we've verified that all\n # instructions are gates\n definition = list(map(\n lambda x: (x[0],\n list(map(lambda y: q[find_bit_position(y)], x[1])),\n []),\n definition))\n gate.definition = definition\n\n return gate\n", "path": "qiskit/converters/circuit_to_gate.py"}]} | 1,340 | 228 |
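
The `ValueError: not enough values to unpack (expected 3, got 2)` in the issue is a plain tuple-shape mismatch: consumers of `gate.definition` unpack `(instruction, qargs, cargs)` triples, while `circuit_to_gate` was emitting pairs. A stripped-down illustration of the contract, with strings standing in for real Qiskit objects:

```python
definition = [("x", ["q0"])]        # two-element entries, as before the fix
try:
    for inst, qargs, cargs in definition:
        pass
except ValueError as err:
    print(err)                      # not enough values to unpack (expected 3, got 2)

definition = [("x", ["q0"], [])]    # after the fix: cargs is hard-coded to []
for inst, qargs, cargs in definition:
    print(inst, qargs, cargs)       # x ['q0'] []
```
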
gh_patches_debug_62393 | rasdani/github-patches | git_diff | AUTOMATIC1111__stable-diffusion-webui-6772 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: New SHA256 hash takes an extremely long time, to the point of model load being unusable
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What happened?
Newly added SHA-256 hashing takes an extremely long time to calculate on model load, to the point where loading appears to hang (I restarted the server twice before I even let it run to completion).
Previously, switching to a new model took under 10 seconds; now, switching to a model that does not already have a stored hash takes 100-150 seconds (and this is on a high-end system)!
To make it worse, the hash-calculation message is only printed **after** the hash has been computed, so there is no progress indicator or any other sign that the system is doing anything for two minutes!
### Steps to reproduce the problem
1. Switch to a new model and wait for completion - it takes forever
### What should have happened?
Model load should **never** take over 2 minutes to complete.
### Commit where the problem happens
f8c512478568293155539f616dce26c5e4495055
### What platforms do you use to access UI ?
Windows, Linux
### What browsers do you use to access the UI ?
Google Chrome, Microsoft Edge
### Command Line Arguments
```Shell
--api --xformers
```
### Additional information, context and logs
Console log showing model load taking 142 seconds!
```text
Calculating sha256 for /home/vlado/dev/automatic/models/Stable-diffusion/mood-beautyreal-v01.ckpt: bcc0afd3b264ea028928187f56f70840f8d87ccf283b020982beba35d9c7e4ef
Loading weights [bcc0afd3b2] from /home/vlado/dev/automatic/models/Stable-diffusion/mood-beautyreal-v01.ckpt
Couldn't find VAE named vae-ft-mse-840000-ema-pruned; using None instead
Applying xformers cross attention optimization.
Weights loaded in 142.6s.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modules/hashes.py`
Content:
```
1 import hashlib
2 import json
3 import os.path
4
5 import filelock
6
7
8 cache_filename = "cache.json"
9 cache_data = None
10
11
12 def dump_cache():
13 with filelock.FileLock(cache_filename+".lock"):
14 with open(cache_filename, "w", encoding="utf8") as file:
15 json.dump(cache_data, file, indent=4)
16
17
18 def cache(subsection):
19 global cache_data
20
21 if cache_data is None:
22 with filelock.FileLock(cache_filename+".lock"):
23 if not os.path.isfile(cache_filename):
24 cache_data = {}
25 else:
26 with open(cache_filename, "r", encoding="utf8") as file:
27 cache_data = json.load(file)
28
29 s = cache_data.get(subsection, {})
30 cache_data[subsection] = s
31
32 return s
33
34
35 def calculate_sha256(filename):
36 hash_sha256 = hashlib.sha256()
37
38 with open(filename, "rb") as f:
39 for chunk in iter(lambda: f.read(4096), b""):
40 hash_sha256.update(chunk)
41
42 return hash_sha256.hexdigest()
43
44
45 def sha256_from_cache(filename, title):
46 hashes = cache("hashes")
47 ondisk_mtime = os.path.getmtime(filename)
48
49 if title not in hashes:
50 return None
51
52 cached_sha256 = hashes[title].get("sha256", None)
53 cached_mtime = hashes[title].get("mtime", 0)
54
55 if ondisk_mtime > cached_mtime or cached_sha256 is None:
56 return None
57
58 return cached_sha256
59
60
61 def sha256(filename, title):
62 hashes = cache("hashes")
63
64 sha256_value = sha256_from_cache(filename, title)
65 if sha256_value is not None:
66 return sha256_value
67
68 print(f"Calculating sha256 for {filename}: ", end='')
69 sha256_value = calculate_sha256(filename)
70 print(f"{sha256_value}")
71
72 hashes[title] = {
73 "mtime": os.path.getmtime(filename),
74 "sha256": sha256_value,
75 }
76
77 dump_cache()
78
79 return sha256_value
80
81
82
83
84
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modules/hashes.py b/modules/hashes.py
--- a/modules/hashes.py
+++ b/modules/hashes.py
@@ -34,9 +34,10 @@
def calculate_sha256(filename):
hash_sha256 = hashlib.sha256()
+ blksize = 1024 * 1024
with open(filename, "rb") as f:
- for chunk in iter(lambda: f.read(4096), b""):
+ for chunk in iter(lambda: f.read(blksize), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest()
| {"golden_diff": "diff --git a/modules/hashes.py b/modules/hashes.py\n--- a/modules/hashes.py\n+++ b/modules/hashes.py\n@@ -34,9 +34,10 @@\n \r\n def calculate_sha256(filename):\r\n hash_sha256 = hashlib.sha256()\r\n+ blksize = 1024 * 1024\r\n \r\n with open(filename, \"rb\") as f:\r\n- for chunk in iter(lambda: f.read(4096), b\"\"):\r\n+ for chunk in iter(lambda: f.read(blksize), b\"\"):\r\n hash_sha256.update(chunk)\r\n \r\n return hash_sha256.hexdigest()\n", "issue": "[Bug]: New SHA256 hash takes extremely long time up to a point of of model load being unusable\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues and checked the recent builds/commits\r\n\r\n### What happened?\r\n\r\nNewly added sha-256 hash takes extremely long time to calculate on model load up to a point where loading appears to hang (i've restarted server twice before i even let it run until completion) \r\n\r\nPreviously switching to a new model was sub 10 sec, now switching to a new model (that does not have hash stored already) takes 100-150 sec (and this is a high end system)!\r\n\r\nAnd to make it worse, messages about hash calculation are only printed **after** it has been calculated, there is no progress info or anything to indicate system is actually doing anything for 2 min!\r\n\r\n\r\n### Steps to reproduce the problem\r\n\r\n1. Switch to a new model and wait for completion - it takes forever\r\n\r\n\r\n### What should have happened?\r\n\r\nModel load should **never** take over 2 minutes to complete.\r\n\r\n### Commit where the problem happens\r\n\r\nf8c512478568293155539f616dce26c5e4495055\r\n\r\n### What platforms do you use to access UI ?\r\n\r\nWindows, Linux\r\n\r\n### What browsers do you use to access the UI ?\r\n\r\nGoogle Chrome, Microsoft Edge\r\n\r\n### Command Line Arguments\r\n\r\n```Shell\r\n--api --xformers\r\n```\r\n\r\n\r\n### Additional information, context and logs\r\n\r\nConsole log showing model load taking 142 seconds!\r\n\r\n```text\r\nCalculating sha256 for /home/vlado/dev/automatic/models/Stable-diffusion/mood-beautyreal-v01.ckpt: bcc0afd3b264ea028928187f56f70840f8d87ccf283b020982beba35d9c7e4ef\r\nLoading weights [bcc0afd3b2] from /home/vlado/dev/automatic/models/Stable-diffusion/mood-beautyreal-v01.ckpt\r\nCouldn't find VAE named vae-ft-mse-840000-ema-pruned; using None instead\r\nApplying xformers cross attention optimization.\r\nWeights loaded in 142.6s.\r\n```\r\n\n", "before_files": [{"content": "import hashlib\r\nimport json\r\nimport os.path\r\n\r\nimport filelock\r\n\r\n\r\ncache_filename = \"cache.json\"\r\ncache_data = None\r\n\r\n\r\ndef dump_cache():\r\n with filelock.FileLock(cache_filename+\".lock\"):\r\n with open(cache_filename, \"w\", encoding=\"utf8\") as file:\r\n json.dump(cache_data, file, indent=4)\r\n\r\n\r\ndef cache(subsection):\r\n global cache_data\r\n\r\n if cache_data is None:\r\n with filelock.FileLock(cache_filename+\".lock\"):\r\n if not os.path.isfile(cache_filename):\r\n cache_data = {}\r\n else:\r\n with open(cache_filename, \"r\", encoding=\"utf8\") as file:\r\n cache_data = json.load(file)\r\n\r\n s = cache_data.get(subsection, {})\r\n cache_data[subsection] = s\r\n\r\n return s\r\n\r\n\r\ndef calculate_sha256(filename):\r\n hash_sha256 = hashlib.sha256()\r\n\r\n with open(filename, \"rb\") as f:\r\n for chunk in iter(lambda: f.read(4096), b\"\"):\r\n hash_sha256.update(chunk)\r\n\r\n return hash_sha256.hexdigest()\r\n\r\n\r\ndef sha256_from_cache(filename, title):\r\n hashes = cache(\"hashes\")\r\n 
ondisk_mtime = os.path.getmtime(filename)\r\n\r\n if title not in hashes:\r\n return None\r\n\r\n cached_sha256 = hashes[title].get(\"sha256\", None)\r\n cached_mtime = hashes[title].get(\"mtime\", 0)\r\n\r\n if ondisk_mtime > cached_mtime or cached_sha256 is None:\r\n return None\r\n\r\n return cached_sha256\r\n\r\n\r\ndef sha256(filename, title):\r\n hashes = cache(\"hashes\")\r\n\r\n sha256_value = sha256_from_cache(filename, title)\r\n if sha256_value is not None:\r\n return sha256_value\r\n\r\n print(f\"Calculating sha256 for {filename}: \", end='')\r\n sha256_value = calculate_sha256(filename)\r\n print(f\"{sha256_value}\")\r\n\r\n hashes[title] = {\r\n \"mtime\": os.path.getmtime(filename),\r\n \"sha256\": sha256_value,\r\n }\r\n\r\n dump_cache()\r\n\r\n return sha256_value\r\n\r\n\r\n\r\n\r\n\r\n", "path": "modules/hashes.py"}], "after_files": [{"content": "import hashlib\r\nimport json\r\nimport os.path\r\n\r\nimport filelock\r\n\r\n\r\ncache_filename = \"cache.json\"\r\ncache_data = None\r\n\r\n\r\ndef dump_cache():\r\n with filelock.FileLock(cache_filename+\".lock\"):\r\n with open(cache_filename, \"w\", encoding=\"utf8\") as file:\r\n json.dump(cache_data, file, indent=4)\r\n\r\n\r\ndef cache(subsection):\r\n global cache_data\r\n\r\n if cache_data is None:\r\n with filelock.FileLock(cache_filename+\".lock\"):\r\n if not os.path.isfile(cache_filename):\r\n cache_data = {}\r\n else:\r\n with open(cache_filename, \"r\", encoding=\"utf8\") as file:\r\n cache_data = json.load(file)\r\n\r\n s = cache_data.get(subsection, {})\r\n cache_data[subsection] = s\r\n\r\n return s\r\n\r\n\r\ndef calculate_sha256(filename):\r\n hash_sha256 = hashlib.sha256()\r\n blksize = 1024 * 1024\r\n\r\n with open(filename, \"rb\") as f:\r\n for chunk in iter(lambda: f.read(blksize), b\"\"):\r\n hash_sha256.update(chunk)\r\n\r\n return hash_sha256.hexdigest()\r\n\r\n\r\ndef sha256_from_cache(filename, title):\r\n hashes = cache(\"hashes\")\r\n ondisk_mtime = os.path.getmtime(filename)\r\n\r\n if title not in hashes:\r\n return None\r\n\r\n cached_sha256 = hashes[title].get(\"sha256\", None)\r\n cached_mtime = hashes[title].get(\"mtime\", 0)\r\n\r\n if ondisk_mtime > cached_mtime or cached_sha256 is None:\r\n return None\r\n\r\n return cached_sha256\r\n\r\n\r\ndef sha256(filename, title):\r\n hashes = cache(\"hashes\")\r\n\r\n sha256_value = sha256_from_cache(filename, title)\r\n if sha256_value is not None:\r\n return sha256_value\r\n\r\n print(f\"Calculating sha256 for {filename}: \", end='')\r\n sha256_value = calculate_sha256(filename)\r\n print(f\"{sha256_value}\")\r\n\r\n hashes[title] = {\r\n \"mtime\": os.path.getmtime(filename),\r\n \"sha256\": sha256_value,\r\n }\r\n\r\n dump_cache()\r\n\r\n return sha256_value\r\n\r\n\r\n\r\n\r\n\r\n", "path": "modules/hashes.py"}]} | 1,459 | 148 |
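
The one-line fix above matters because of call overhead, not hash speed: reading a multi-gigabyte checkpoint 4096 bytes at a time costs 256 Python-level `read`/`update` round-trips per megabyte. A standalone version of the patched loop (block size as in the diff; pass it the path of whatever model file you want to hash):

```python
import hashlib

def sha256_file(path, blksize=1024 * 1024):
    """Hash a file in 1 MiB chunks; 4 KiB chunks make per-call overhead
    dominate on files in the multi-gigabyte range."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(blksize), b""):
            h.update(chunk)
    return h.hexdigest()
```
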
gh_patches_debug_605 | rasdani/github-patches | git_diff | pex-tool__pex-1664 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.71
On the docket:
+ [x] Secure Pex against sha1 collision attacks. #1662
+ [x] Problems building venvs from certain distributions. #1656
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.70"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.70"
+__version__ = "2.1.71"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.70\"\n+__version__ = \"2.1.71\"\n", "issue": "Release 2.1.71\nOn the docket:\r\n+ [x] Secure Pex against sha1 collision attacks. #1662 \r\n+ [x] Problems building venvs from certain distributions. #1656\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.70\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.71\"\n", "path": "pex/version.py"}]} | 359 | 96 |
gh_patches_debug_881 | rasdani/github-patches | git_diff | python__peps-3263 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Infra: Check Sphinx warnings on CI
This is similar to what we have in the CPython repo, most recently: https://github.com/python/cpython/pull/106460, and will help us gradually remove Sphinx warnings and avoid new ones being introduced.
It checks three things:
1. If a file previously had no warnings (not listed in `.nitignore`), and new ones are introduced, it fails
* -> To prevent regressions
2. If a file previously had warnings (it's listed in `.nitignore`), but now has none, it fails and tells us to remove it from `.nitignore`
* To help us incrementally improve over time
3. If a file previously had warnings (it's listed in `.nitignore`), and still has warnings, it doesn't fail, but it will annotate the PR to show the warning
* To make them more visible, and give us the opportunity to fix them
I've intentionally kept the code and layout as close as possible to the CPython version (see https://github.com/python/cpython/tree/main/Doc/tools) for easier future maintenance.
<!-- readthedocs-preview pep-previews start -->
----
:books: Documentation preview :books:: https://pep-previews--3213.org.readthedocs.build/
<!-- readthedocs-preview pep-previews end -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conf.py`
Content:
```
1 # This file is placed in the public domain or under the
2 # CC0-1.0-Universal license, whichever is more permissive.
3
4 """Configuration for building PEPs using Sphinx."""
5
6 from pathlib import Path
7 import sys
8
9 sys.path.append(str(Path(".").absolute()))
10
11 # -- Project information -----------------------------------------------------
12
13 project = "PEPs"
14 master_doc = "contents"
15
16 # -- General configuration ---------------------------------------------------
17
18 # Add any Sphinx extension module names here, as strings.
19 extensions = [
20 "pep_sphinx_extensions",
21 "sphinx.ext.intersphinx",
22 "sphinx.ext.githubpages",
23 ]
24
25 # The file extensions of source files. Sphinx uses these suffixes as sources.
26 source_suffix = {
27 ".rst": "pep",
28 ".txt": "pep",
29 }
30
31 # List of patterns (relative to source dir) to ignore when looking for source files.
32 include_patterns = [
33 # Required for Sphinx
34 "contents.rst",
35 # PEP files
36 "pep-????.rst",
37 "pep-????.txt",
38 # PEP ancillary files
39 "pep-????/*.rst",
40 # Documentation
41 "docs/*.rst",
42 ]
43 exclude_patterns = [
44 # PEP Template
45 "pep-0012/pep-NNNN.rst",
46 ]
47
48 # Intersphinx configuration
49 intersphinx_mapping = {
50 'python': ('https://docs.python.org/3/', None),
51 'packaging': ('https://packaging.python.org/en/latest/', None),
52 'devguide': ('https://devguide.python.org/', None),
53 'py3.11': ('https://docs.python.org/3.11/', None),
54 'py3.12': ('https://docs.python.org/3.12/', None),
55 }
56 intersphinx_disabled_reftypes = []
57
58 # -- Options for HTML output -------------------------------------------------
59
60 # HTML output settings
61 html_math_renderer = "maths_to_html" # Maths rendering
62
63 # Theme settings
64 html_theme_path = ["pep_sphinx_extensions"]
65 html_theme = "pep_theme" # The actual theme directory (child of html_theme_path)
66 html_use_index = False # Disable index (we use PEP 0)
67 html_style = "" # must be defined here or in theme.conf, but is unused
68 html_permalinks = False # handled in the PEPContents transform
69 html_baseurl = "https://peps.python.org" # to create the CNAME file
70 gettext_auto_build = False # speed-ups
71
72 templates_path = ["pep_sphinx_extensions/pep_theme/templates"] # Theme template relative paths from `confdir`
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conf.py b/conf.py
--- a/conf.py
+++ b/conf.py
@@ -45,6 +45,9 @@
"pep-0012/pep-NNNN.rst",
]
+# Warn on missing references
+nitpicky = True
+
# Intersphinx configuration
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
| {"golden_diff": "diff --git a/conf.py b/conf.py\n--- a/conf.py\n+++ b/conf.py\n@@ -45,6 +45,9 @@\n \"pep-0012/pep-NNNN.rst\",\n ]\n \n+# Warn on missing references\n+nitpicky = True\n+\n # Intersphinx configuration\n intersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n", "issue": "Infra: Check Sphinx warnings on CI\nThis is similar to what we have in the CPython repo, most recently: https://github.com/python/cpython/pull/106460, and will help us gradually remove Sphinx warnings, and avoid new ones being introduces.\r\n\r\nIt checks three things:\r\n\r\n1. If a file previously had no warnings (not listed in `.nitignore`), and new ones are introduced, it fails\r\n * -> To prevent regressions\r\n\r\n2. If a file previously had warnings (it's lsited in `.nitignore`), but now has none, it fails and tells us to remove it from `.nitignore`\r\n * To help us incrementally improve over time\r\n\r\n3. If a file previously had warnings (it's listed in `.nitignore`), and still has warnings, it doesn't fail, but it will annotate the PR to show the warning\r\n * To make them more visible, and give us the opportunity to fix them\r\n\r\nI've intentionally kept the code and layout as close as possible to the CPython version (see https://github.com/python/cpython/tree/main/Doc/tools) for easier future maintenance.\r\n\r\n\r\n\r\n<!-- readthedocs-preview pep-previews start -->\r\n----\n:books: Documentation preview :books:: https://pep-previews--3213.org.readthedocs.build/\n\r\n<!-- readthedocs-preview pep-previews end -->\n", "before_files": [{"content": "# This file is placed in the public domain or under the\n# CC0-1.0-Universal license, whichever is more permissive.\n\n\"\"\"Configuration for building PEPs using Sphinx.\"\"\"\n\nfrom pathlib import Path\nimport sys\n\nsys.path.append(str(Path(\".\").absolute()))\n\n# -- Project information -----------------------------------------------------\n\nproject = \"PEPs\"\nmaster_doc = \"contents\"\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings.\nextensions = [\n \"pep_sphinx_extensions\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.githubpages\",\n]\n\n# The file extensions of source files. 
Sphinx uses these suffixes as sources.\nsource_suffix = {\n \".rst\": \"pep\",\n \".txt\": \"pep\",\n}\n\n# List of patterns (relative to source dir) to ignore when looking for source files.\ninclude_patterns = [\n # Required for Sphinx\n \"contents.rst\",\n # PEP files\n \"pep-????.rst\",\n \"pep-????.txt\",\n # PEP ancillary files\n \"pep-????/*.rst\",\n # Documentation\n \"docs/*.rst\",\n]\nexclude_patterns = [\n # PEP Template\n \"pep-0012/pep-NNNN.rst\",\n]\n\n# Intersphinx configuration\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n 'packaging': ('https://packaging.python.org/en/latest/', None),\n 'devguide': ('https://devguide.python.org/', None),\n 'py3.11': ('https://docs.python.org/3.11/', None),\n 'py3.12': ('https://docs.python.org/3.12/', None),\n}\nintersphinx_disabled_reftypes = []\n\n# -- Options for HTML output -------------------------------------------------\n\n# HTML output settings\nhtml_math_renderer = \"maths_to_html\" # Maths rendering\n\n# Theme settings\nhtml_theme_path = [\"pep_sphinx_extensions\"]\nhtml_theme = \"pep_theme\" # The actual theme directory (child of html_theme_path)\nhtml_use_index = False # Disable index (we use PEP 0)\nhtml_style = \"\" # must be defined here or in theme.conf, but is unused\nhtml_permalinks = False # handled in the PEPContents transform\nhtml_baseurl = \"https://peps.python.org\" # to create the CNAME file\ngettext_auto_build = False # speed-ups\n\ntemplates_path = [\"pep_sphinx_extensions/pep_theme/templates\"] # Theme template relative paths from `confdir`\n", "path": "conf.py"}], "after_files": [{"content": "# This file is placed in the public domain or under the\n# CC0-1.0-Universal license, whichever is more permissive.\n\n\"\"\"Configuration for building PEPs using Sphinx.\"\"\"\n\nfrom pathlib import Path\nimport sys\n\nsys.path.append(str(Path(\".\").absolute()))\n\n# -- Project information -----------------------------------------------------\n\nproject = \"PEPs\"\nmaster_doc = \"contents\"\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings.\nextensions = [\n \"pep_sphinx_extensions\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.githubpages\",\n]\n\n# The file extensions of source files. 
Sphinx uses these suffixes as sources.\nsource_suffix = {\n \".rst\": \"pep\",\n \".txt\": \"pep\",\n}\n\n# List of patterns (relative to source dir) to ignore when looking for source files.\ninclude_patterns = [\n # Required for Sphinx\n \"contents.rst\",\n # PEP files\n \"pep-????.rst\",\n \"pep-????.txt\",\n # PEP ancillary files\n \"pep-????/*.rst\",\n # Documentation\n \"docs/*.rst\",\n]\nexclude_patterns = [\n # PEP Template\n \"pep-0012/pep-NNNN.rst\",\n]\n\n# Warn on missing references\nnitpicky = True\n\n# Intersphinx configuration\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n 'packaging': ('https://packaging.python.org/en/latest/', None),\n 'devguide': ('https://devguide.python.org/', None),\n 'py3.11': ('https://docs.python.org/3.11/', None),\n 'py3.12': ('https://docs.python.org/3.12/', None),\n}\nintersphinx_disabled_reftypes = []\n\n# -- Options for HTML output -------------------------------------------------\n\n# HTML output settings\nhtml_math_renderer = \"maths_to_html\" # Maths rendering\n\n# Theme settings\nhtml_theme_path = [\"pep_sphinx_extensions\"]\nhtml_theme = \"pep_theme\" # The actual theme directory (child of html_theme_path)\nhtml_use_index = False # Disable index (we use PEP 0)\nhtml_style = \"\" # must be defined here or in theme.conf, but is unused\nhtml_permalinks = False # handled in the PEPContents transform\nhtml_baseurl = \"https://peps.python.org\" # to create the CNAME file\ngettext_auto_build = False # speed-ups\n\ntemplates_path = [\"pep_sphinx_extensions/pep_theme/templates\"] # Theme template relative paths from `confdir`\n", "path": "conf.py"}]} | 1,252 | 92 |
gh_patches_debug_32502 | rasdani/github-patches | git_diff | TileDB-Inc__TileDB-Py-263 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
multi_index not accepting tuples
The [UDF apply docs](https://docs.tiledb.com/cloud/client-api/serverless-udfs#multi-index-usage) imply that tuples and slices are interchangeable, but the standard API throws an exception on tuples (for both `tiledb:` and `s3:` URIs):
```
>>> import tiledb, tiledb.cloud
>>> A = tiledb.DenseArray("tiledb://TileDB-Inc/quickstart_dense", ctx=tiledb.cloud.Ctx())
>>> A.multi_index[[(1,2), 4], [slice(1,4)]]['a']
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py", line 121, in __getitem__
ranges = self.getitem_ranges(idx)
File "/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py", line 109, in getitem_ranges
subranges = sel_to_subranges(sel)
File "/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py", line 60, in sel_to_subranges
raise TypeError("Unsupported selection ")
TypeError: Unsupported selection
```
It would be great if multi_index and UDF apply accepted the same index types.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tiledb/multirange_indexing.py`
Content:
```
1 import tiledb
2 from tiledb import Array, ArraySchema
3 import os, numpy as np
4 import sys, weakref
5
6 try:
7 from tiledb.libtiledb import multi_index
8 except:
9 from tiledb.indexing import multi_index
10
11 def _index_as_tuple(idx):
12 """Forces scalar index objects to a tuple representation"""
13 if isinstance(idx, tuple):
14 return idx
15 return (idx,)
16
17 def mr_dense_result_shape(ranges, base_shape = None):
18 # assumptions: len(ranges) matches number of dims
19 if base_shape is not None:
20 assert len(ranges) == len(base_shape), "internal error: mismatched shapes"
21
22 new_shape = list()
23 for i,rr in enumerate(ranges):
24 if rr != ():
25 m = list(map(lambda y: abs(y[1] - y[0]) + 1, rr))
26 new_shape.append(np.sum(m))
27 else:
28 if base_shape is None:
29 raise ValueError("Missing required base_shape for whole-dimension slices")
30 # empty range covers dimension
31 new_shape.append(base_shape[i])
32
33 return tuple(new_shape)
34
35 def mr_dense_result_numel(ranges):
36 return np.prod(mr_dense_result_shape(ranges))
37
38 def sel_to_subranges(dim_sel):
39 if isinstance(dim_sel, list):
40 dim_sel = tuple(dim_sel)
41 elif not isinstance(dim_sel, tuple):
42 dim_sel = (dim_sel,)
43
44 subranges = list()
45 for range in dim_sel:
46 if np.isscalar(range):
47 subranges.append( (range, range) )
48 elif isinstance(range, slice):
49 if range.step is not None:
50 raise ValueError("Stepped slice ranges are not supported")
51 elif range.start is None and range.stop is None:
52 # ':' full slice
53 pass
54 else:
55 subranges.append( (range.start, range.stop) )
56 elif isinstance(range, list):
57 for el in range:
58 subranges.append( (el, el) )
59 else:
60 raise TypeError("Unsupported selection ")
61
62 return tuple(subranges)
63
64
65 class MultiRangeIndexer(object):
66 """
67 Implements multi-range / outer / orthogonal indexing.
68
69 """
70 # for cython
71 # comment out for Python 2 :/
72 #array: Array
73 #schema: ArraySchema
74 #def __init__(self, array: Array, query = None):
75
76 def __init__(self, array, query = None):
77 if not issubclass(type(array), tiledb.Array):
78 raise ValueError("Internal error: MultiRangeIndexer expected tiledb.Array")
79 self.array_ref = weakref.ref(array)
80 self.schema = array.schema
81 self.query = query
82
83 @property
84 def array(self):
85 assert self.array_ref() is not None, \
86 "Internal error: invariant violation (indexing call w/ dead array_ref)"
87 return self.array_ref()
88
89 @classmethod
90 def __test_init__(cls, array):
91 """
92 Internal helper method for testing getitem range calculation.
93 :param array:
94 :return:
95 """
96 m = cls.__new__(cls)
97 m.array_ref = weakref.ref(array)
98 m.schema = array.schema
99 m.query = None
100 return m
101
102 def getitem_ranges(self, idx):
103 dom = self.schema.domain
104 ndim = dom.ndim
105 idx = _index_as_tuple(idx)
106
107 ranges = list()
108 for i,sel in enumerate(idx):
109 subranges = sel_to_subranges(sel)
110 ranges.append(subranges)
111
112 # extend the list to ndim
113 if len(ranges) < ndim:
114 ranges.extend([ tuple() for _ in range(ndim-len(ranges))])
115
116 rval = tuple(ranges)
117 return rval
118
119 def __getitem__(self, idx):
120 # implements multi-range / outer / orthogonal indexing
121 ranges = self.getitem_ranges(idx)
122
123 dom = self.schema.domain
124 attr_names = tuple(self.schema.attr(i).name for i in range(self.schema.nattr))
125
126 coords = None
127 if self.query is not None:
128 # if we are called via Query object, then we need to respect Query semantics
129 attr_names = tuple(self.query.attrs) if self.query.attrs else attr_names # query.attrs might be None -> all
130 coords = self.query.coords
131
132 # TODO order
133 result_dict = multi_index(
134 self.array,
135 attr_names,
136 ranges,
137 coords=coords
138 )
139
140 if self.schema.sparse:
141 return result_dict
142 else:
143 result_shape = mr_dense_result_shape(ranges, self.schema.shape)
144 for arr in result_dict.values():
145 # TODO check/test layout
146 arr.shape = result_shape
147 return result_dict
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tiledb/multirange_indexing.py b/tiledb/multirange_indexing.py
--- a/tiledb/multirange_indexing.py
+++ b/tiledb/multirange_indexing.py
@@ -8,11 +8,6 @@
except:
from tiledb.indexing import multi_index
-def _index_as_tuple(idx):
- """Forces scalar index objects to a tuple representation"""
- if isinstance(idx, tuple):
- return idx
- return (idx,)
def mr_dense_result_shape(ranges, base_shape = None):
# assumptions: len(ranges) matches number of dims
@@ -36,11 +31,6 @@
return np.prod(mr_dense_result_shape(ranges))
def sel_to_subranges(dim_sel):
- if isinstance(dim_sel, list):
- dim_sel = tuple(dim_sel)
- elif not isinstance(dim_sel, tuple):
- dim_sel = (dim_sel,)
-
subranges = list()
for range in dim_sel:
if np.isscalar(range):
@@ -53,6 +43,8 @@
pass
else:
subranges.append( (range.start, range.stop) )
+ elif isinstance(range, tuple):
+ subranges.extend((range,))
elif isinstance(range, list):
for el in range:
subranges.append( (el, el) )
@@ -102,10 +94,16 @@
def getitem_ranges(self, idx):
dom = self.schema.domain
ndim = dom.ndim
- idx = _index_as_tuple(idx)
+
+ if isinstance(idx, tuple):
+ idx = list(idx)
+ else:
+ idx = [idx]
ranges = list()
for i,sel in enumerate(idx):
+ if not isinstance(sel, list):
+ sel = [sel]
subranges = sel_to_subranges(sel)
ranges.append(subranges)
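To see concretely what the patch changes, here is a hedged, self-contained re-implementation of the patched `sel_to_subranges` (mirroring the diff above rather than importing from TileDB-Py) applied to the selection from the issue:
```python
# Standalone sketch of the patched sel_to_subranges from the diff above;
# run directly to check how each selection type is normalized.
import numpy as np

def sel_to_subranges(dim_sel):
    subranges = list()
    for rng in dim_sel:
        if np.isscalar(rng):
            subranges.append((rng, rng))           # scalar -> degenerate range
        elif isinstance(rng, slice):
            if rng.step is not None:
                raise ValueError("Stepped slice ranges are not supported")
            elif rng.start is None and rng.stop is None:
                pass                               # ':' covers the whole dimension
            else:
                subranges.append((rng.start, rng.stop))
        elif isinstance(rng, tuple):
            subranges.extend((rng,))               # new branch: (lo, hi) tuples
        elif isinstance(rng, list):
            for el in rng:
                subranges.append((el, el))
        else:
            raise TypeError("Unsupported selection ")
    return tuple(subranges)

# The selection that used to raise TypeError in the issue:
print(sel_to_subranges([(1, 2), 4]))    # ((1, 2), (4, 4))
print(sel_to_subranges([slice(1, 4)]))  # ((1, 4),)
```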
| {"golden_diff": "diff --git a/tiledb/multirange_indexing.py b/tiledb/multirange_indexing.py\n--- a/tiledb/multirange_indexing.py\n+++ b/tiledb/multirange_indexing.py\n@@ -8,11 +8,6 @@\n except:\n from tiledb.indexing import multi_index\n \n-def _index_as_tuple(idx):\n- \"\"\"Forces scalar index objects to a tuple representation\"\"\"\n- if isinstance(idx, tuple):\n- return idx\n- return (idx,)\n \n def mr_dense_result_shape(ranges, base_shape = None):\n # assumptions: len(ranges) matches number of dims\n@@ -36,11 +31,6 @@\n return np.prod(mr_dense_result_shape(ranges))\n \n def sel_to_subranges(dim_sel):\n- if isinstance(dim_sel, list):\n- dim_sel = tuple(dim_sel)\n- elif not isinstance(dim_sel, tuple):\n- dim_sel = (dim_sel,)\n-\n subranges = list()\n for range in dim_sel:\n if np.isscalar(range):\n@@ -53,6 +43,8 @@\n pass\n else:\n subranges.append( (range.start, range.stop) )\n+ elif isinstance(range, tuple):\n+ subranges.extend((range,))\n elif isinstance(range, list):\n for el in range:\n subranges.append( (el, el) )\n@@ -102,10 +94,16 @@\n def getitem_ranges(self, idx):\n dom = self.schema.domain\n ndim = dom.ndim\n- idx = _index_as_tuple(idx)\n+\n+ if isinstance(idx, tuple):\n+ idx = list(idx)\n+ else:\n+ idx = [idx]\n \n ranges = list()\n for i,sel in enumerate(idx):\n+ if not isinstance(sel, list):\n+ sel = [sel]\n subranges = sel_to_subranges(sel)\n ranges.append(subranges)\n", "issue": "multi_index not accepting tuples\nThe [UDF apply docs](https://docs.tiledb.com/cloud/client-api/serverless-udfs#multi-index-usage) imply the tuples and slices are interchangeable. The standard API throws an exception on tuples (for both tiledb: and s3:)\r\n\r\n\r\n```\r\n>>> import tiledb, tiledb.cloud\r\n>>> A = tiledb.DenseArray(\"tiledb://TileDB-Inc/quickstart_dense\", ctx=tiledb.cloud.Ctx())\r\n>>> A.multi_index[[(1,2), 4], [slice(1,4)]]['a']\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py\", line 121, in __getitem__\r\n ranges = self.getitem_ranges(idx)\r\n File \"/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py\", line 109, in getitem_ranges\r\n subranges = sel_to_subranges(sel)\r\n File \"/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py\", line 60, in sel_to_subranges\r\n raise TypeError(\"Unsupported selection \")\r\nTypeError: Unsupported selection \r\n```\r\n\r\nIt would be great if multi_index and UDF apply accepted the same index types.\n", "before_files": [{"content": "import tiledb\nfrom tiledb import Array, ArraySchema\nimport os, numpy as np\nimport sys, weakref\n\ntry:\n from tiledb.libtiledb import multi_index\nexcept:\n from tiledb.indexing import multi_index\n\ndef _index_as_tuple(idx):\n \"\"\"Forces scalar index objects to a tuple representation\"\"\"\n if isinstance(idx, tuple):\n return idx\n return (idx,)\n\ndef mr_dense_result_shape(ranges, base_shape = None):\n # assumptions: len(ranges) matches number of dims\n if base_shape is not None:\n assert len(ranges) == len(base_shape), \"internal error: mismatched shapes\"\n\n new_shape = list()\n for i,rr in enumerate(ranges):\n if rr != ():\n m = list(map(lambda y: abs(y[1] - y[0]) + 1, rr))\n new_shape.append(np.sum(m))\n else:\n if base_shape is None:\n raise ValueError(\"Missing required base_shape for whole-dimension slices\")\n # empty range covers dimension\n new_shape.append(base_shape[i])\n\n return tuple(new_shape)\n\ndef 
mr_dense_result_numel(ranges):\n return np.prod(mr_dense_result_shape(ranges))\n\ndef sel_to_subranges(dim_sel):\n if isinstance(dim_sel, list):\n dim_sel = tuple(dim_sel)\n elif not isinstance(dim_sel, tuple):\n dim_sel = (dim_sel,)\n\n subranges = list()\n for range in dim_sel:\n if np.isscalar(range):\n subranges.append( (range, range) )\n elif isinstance(range, slice):\n if range.step is not None:\n raise ValueError(\"Stepped slice ranges are not supported\")\n elif range.start is None and range.stop is None:\n # ':' full slice\n pass\n else:\n subranges.append( (range.start, range.stop) )\n elif isinstance(range, list):\n for el in range:\n subranges.append( (el, el) )\n else:\n raise TypeError(\"Unsupported selection \")\n\n return tuple(subranges)\n\n\nclass MultiRangeIndexer(object):\n \"\"\"\n Implements multi-range / outer / orthogonal indexing.\n\n \"\"\"\n # for cython\n # comment out for Python 2 :/\n #array: Array\n #schema: ArraySchema\n #def __init__(self, array: Array, query = None):\n\n def __init__(self, array, query = None):\n if not issubclass(type(array), tiledb.Array):\n raise ValueError(\"Internal error: MultiRangeIndexer expected tiledb.Array\")\n self.array_ref = weakref.ref(array)\n self.schema = array.schema\n self.query = query\n\n @property\n def array(self):\n assert self.array_ref() is not None, \\\n \"Internal error: invariant violation (indexing call w/ dead array_ref)\"\n return self.array_ref()\n\n @classmethod\n def __test_init__(cls, array):\n \"\"\"\n Internal helper method for testing getitem range calculation.\n :param array:\n :return:\n \"\"\"\n m = cls.__new__(cls)\n m.array_ref = weakref.ref(array)\n m.schema = array.schema\n m.query = None\n return m\n\n def getitem_ranges(self, idx):\n dom = self.schema.domain\n ndim = dom.ndim\n idx = _index_as_tuple(idx)\n\n ranges = list()\n for i,sel in enumerate(idx):\n subranges = sel_to_subranges(sel)\n ranges.append(subranges)\n\n # extend the list to ndim\n if len(ranges) < ndim:\n ranges.extend([ tuple() for _ in range(ndim-len(ranges))])\n\n rval = tuple(ranges)\n return rval\n\n def __getitem__(self, idx):\n # implements multi-range / outer / orthogonal indexing\n ranges = self.getitem_ranges(idx)\n\n dom = self.schema.domain\n attr_names = tuple(self.schema.attr(i).name for i in range(self.schema.nattr))\n\n coords = None\n if self.query is not None:\n # if we are called via Query object, then we need to respect Query semantics\n attr_names = tuple(self.query.attrs) if self.query.attrs else attr_names # query.attrs might be None -> all\n coords = self.query.coords\n\n # TODO order\n result_dict = multi_index(\n self.array,\n attr_names,\n ranges,\n coords=coords\n )\n\n if self.schema.sparse:\n return result_dict\n else:\n result_shape = mr_dense_result_shape(ranges, self.schema.shape)\n for arr in result_dict.values():\n # TODO check/test layout\n arr.shape = result_shape\n return result_dict", "path": "tiledb/multirange_indexing.py"}], "after_files": [{"content": "import tiledb\nfrom tiledb import Array, ArraySchema\nimport os, numpy as np\nimport sys, weakref\n\ntry:\n from tiledb.libtiledb import multi_index\nexcept:\n from tiledb.indexing import multi_index\n\n\ndef mr_dense_result_shape(ranges, base_shape = None):\n # assumptions: len(ranges) matches number of dims\n if base_shape is not None:\n assert len(ranges) == len(base_shape), \"internal error: mismatched shapes\"\n\n new_shape = list()\n for i,rr in enumerate(ranges):\n if rr != ():\n m = list(map(lambda y: abs(y[1] - y[0]) + 1, 
rr))\n new_shape.append(np.sum(m))\n else:\n if base_shape is None:\n raise ValueError(\"Missing required base_shape for whole-dimension slices\")\n # empty range covers dimension\n new_shape.append(base_shape[i])\n\n return tuple(new_shape)\n\ndef mr_dense_result_numel(ranges):\n return np.prod(mr_dense_result_shape(ranges))\n\ndef sel_to_subranges(dim_sel):\n subranges = list()\n for range in dim_sel:\n if np.isscalar(range):\n subranges.append( (range, range) )\n elif isinstance(range, slice):\n if range.step is not None:\n raise ValueError(\"Stepped slice ranges are not supported\")\n elif range.start is None and range.stop is None:\n # ':' full slice\n pass\n else:\n subranges.append( (range.start, range.stop) )\n elif isinstance(range, tuple):\n subranges.extend((range,))\n elif isinstance(range, list):\n for el in range:\n subranges.append( (el, el) )\n else:\n raise TypeError(\"Unsupported selection \")\n\n return tuple(subranges)\n\n\nclass MultiRangeIndexer(object):\n \"\"\"\n Implements multi-range / outer / orthogonal indexing.\n\n \"\"\"\n # for cython\n # comment out for Python 2 :/\n #array: Array\n #schema: ArraySchema\n #def __init__(self, array: Array, query = None):\n\n def __init__(self, array, query = None):\n if not issubclass(type(array), tiledb.Array):\n raise ValueError(\"Internal error: MultiRangeIndexer expected tiledb.Array\")\n self.array_ref = weakref.ref(array)\n self.schema = array.schema\n self.query = query\n\n @property\n def array(self):\n assert self.array_ref() is not None, \\\n \"Internal error: invariant violation (indexing call w/ dead array_ref)\"\n return self.array_ref()\n\n @classmethod\n def __test_init__(cls, array):\n \"\"\"\n Internal helper method for testing getitem range calculation.\n :param array:\n :return:\n \"\"\"\n m = cls.__new__(cls)\n m.array_ref = weakref.ref(array)\n m.schema = array.schema\n m.query = None\n return m\n\n def getitem_ranges(self, idx):\n dom = self.schema.domain\n ndim = dom.ndim\n\n if isinstance(idx, tuple):\n idx = list(idx)\n else:\n idx = [idx]\n\n ranges = list()\n for i,sel in enumerate(idx):\n if not isinstance(sel, list):\n sel = [sel]\n subranges = sel_to_subranges(sel)\n ranges.append(subranges)\n\n # extend the list to ndim\n if len(ranges) < ndim:\n ranges.extend([ tuple() for _ in range(ndim-len(ranges))])\n\n rval = tuple(ranges)\n return rval\n\n def __getitem__(self, idx):\n # implements multi-range / outer / orthogonal indexing\n ranges = self.getitem_ranges(idx)\n\n dom = self.schema.domain\n attr_names = tuple(self.schema.attr(i).name for i in range(self.schema.nattr))\n\n coords = None\n if self.query is not None:\n # if we are called via Query object, then we need to respect Query semantics\n attr_names = tuple(self.query.attrs) if self.query.attrs else attr_names # query.attrs might be None -> all\n coords = self.query.coords\n\n # TODO order\n result_dict = multi_index(\n self.array,\n attr_names,\n ranges,\n coords=coords\n )\n\n if self.schema.sparse:\n return result_dict\n else:\n result_shape = mr_dense_result_shape(ranges, self.schema.shape)\n for arr in result_dict.values():\n # TODO check/test layout\n arr.shape = result_shape\n return result_dict", "path": "tiledb/multirange_indexing.py"}]} | 1,933 | 427 |
gh_patches_debug_3260 | rasdani/github-patches | git_diff | getredash__redash-5623 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Loading schema for Sqlite DB with "Order" column name fails
### Issue Summary
I added a Sqlite Database which has a column with the name `Order`.
When I try to create a query, the error `Schema refresh failed.` comes up.
### Steps to Reproduce
1. Add an Sqlite Database which has a column with the name `Order`
2. Try to create a query
3. Get the error `Schema refresh failed.`
### Technical details:
* Redash Version: cloned from master
* Browser/OS: Brave Browser & Ubuntu 18.1
* How did you install Redash: built from source
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/query_runner/sqlite.py`
Content:
```
1 import logging
2 import sqlite3
3
4 from redash.query_runner import BaseSQLQueryRunner, register, JobTimeoutException
5 from redash.utils import json_dumps, json_loads
6
7 logger = logging.getLogger(__name__)
8
9
10 class Sqlite(BaseSQLQueryRunner):
11 noop_query = "pragma quick_check"
12
13 @classmethod
14 def configuration_schema(cls):
15 return {
16 "type": "object",
17 "properties": {"dbpath": {"type": "string", "title": "Database Path"}},
18 "required": ["dbpath"],
19 }
20
21 @classmethod
22 def type(cls):
23 return "sqlite"
24
25 def __init__(self, configuration):
26 super(Sqlite, self).__init__(configuration)
27
28 self._dbpath = self.configuration["dbpath"]
29
30 def _get_tables(self, schema):
31 query_table = "select tbl_name from sqlite_master where type='table'"
32 query_columns = "PRAGMA table_info(%s)"
33
34 results, error = self.run_query(query_table, None)
35
36 if error is not None:
37 raise Exception("Failed getting schema.")
38
39 results = json_loads(results)
40
41 for row in results["rows"]:
42 table_name = row["tbl_name"]
43 schema[table_name] = {"name": table_name, "columns": []}
44 results_table, error = self.run_query(query_columns % (table_name,), None)
45 if error is not None:
46 raise Exception("Failed getting schema.")
47
48 results_table = json_loads(results_table)
49 for row_column in results_table["rows"]:
50 schema[table_name]["columns"].append(row_column["name"])
51
52 return list(schema.values())
53
54 def run_query(self, query, user):
55 connection = sqlite3.connect(self._dbpath)
56
57 cursor = connection.cursor()
58
59 try:
60 cursor.execute(query)
61
62 if cursor.description is not None:
63 columns = self.fetch_columns([(i[0], None) for i in cursor.description])
64 rows = [
65 dict(zip((column["name"] for column in columns), row))
66 for row in cursor
67 ]
68
69 data = {"columns": columns, "rows": rows}
70 error = None
71 json_data = json_dumps(data)
72 else:
73 error = "Query completed but it returned no data."
74 json_data = None
75 except (KeyboardInterrupt, JobTimeoutException):
76 connection.cancel()
77 raise
78 finally:
79 connection.close()
80 return json_data, error
81
82
83 register(Sqlite)
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/query_runner/sqlite.py b/redash/query_runner/sqlite.py
--- a/redash/query_runner/sqlite.py
+++ b/redash/query_runner/sqlite.py
@@ -29,7 +29,7 @@
def _get_tables(self, schema):
query_table = "select tbl_name from sqlite_master where type='table'"
- query_columns = "PRAGMA table_info(%s)"
+ query_columns = "PRAGMA table_info(\"%s\")"
results, error = self.run_query(query_table, None)
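The underlying cause is that `ORDER` is a reserved keyword in SQLite, so a table name interpolated unquoted into `PRAGMA table_info(%s)` fails to parse; double-quoting the identifier, as the one-line patch does, makes it a plain name. A minimal standalone repro, independent of redash (stdlib only):
```python
import sqlite3

con = sqlite3.connect(":memory:")
con.execute('CREATE TABLE "Order" (id INTEGER, name TEXT)')

try:
    con.execute("PRAGMA table_info(Order)")   # unquoted keyword -> parse error
except sqlite3.OperationalError as exc:
    print(exc)                                # near "Order": syntax error

# Quoting the identifier, as the patched query_columns string does, works:
for cid, name, col_type, *rest in con.execute('PRAGMA table_info("Order")'):
    print(cid, name, col_type)                # 0 id INTEGER / 1 name TEXT
```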
| {"golden_diff": "diff --git a/redash/query_runner/sqlite.py b/redash/query_runner/sqlite.py\n--- a/redash/query_runner/sqlite.py\n+++ b/redash/query_runner/sqlite.py\n@@ -29,7 +29,7 @@\n \n def _get_tables(self, schema):\n query_table = \"select tbl_name from sqlite_master where type='table'\"\n- query_columns = \"PRAGMA table_info(%s)\"\n+ query_columns = \"PRAGMA table_info(\\\"%s\\\")\"\n \n results, error = self.run_query(query_table, None)\n", "issue": "Loading schema for Sqlite DB with \"Order\" column name fails\n### Issue Summary\r\n\r\nI added a Sqlite Database which has an column with the name `Order`.\r\nWhen I try to create a query, the error `Schema refresh failed.` comes up.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Add an Sqlite Database which has a column with the name `Order`\r\n2. Try to create a query\r\n3. Get the error `Schema refresh failed.`\r\n\r\n\r\n### Technical details:\r\n\r\n* Redash Version: cloned from master\r\n* Browser/OS: Brave Browser & Ubuntu 18.1\r\n* How did you install Redash: built from source\r\n\n", "before_files": [{"content": "import logging\nimport sqlite3\n\nfrom redash.query_runner import BaseSQLQueryRunner, register, JobTimeoutException\nfrom redash.utils import json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\n\n\nclass Sqlite(BaseSQLQueryRunner):\n noop_query = \"pragma quick_check\"\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\"dbpath\": {\"type\": \"string\", \"title\": \"Database Path\"}},\n \"required\": [\"dbpath\"],\n }\n\n @classmethod\n def type(cls):\n return \"sqlite\"\n\n def __init__(self, configuration):\n super(Sqlite, self).__init__(configuration)\n\n self._dbpath = self.configuration[\"dbpath\"]\n\n def _get_tables(self, schema):\n query_table = \"select tbl_name from sqlite_master where type='table'\"\n query_columns = \"PRAGMA table_info(%s)\"\n\n results, error = self.run_query(query_table, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n\n for row in results[\"rows\"]:\n table_name = row[\"tbl_name\"]\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n results_table, error = self.run_query(query_columns % (table_name,), None)\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results_table = json_loads(results_table)\n for row_column in results_table[\"rows\"]:\n schema[table_name][\"columns\"].append(row_column[\"name\"])\n\n return list(schema.values())\n\n def run_query(self, query, user):\n connection = sqlite3.connect(self._dbpath)\n\n cursor = connection.cursor()\n\n try:\n cursor.execute(query)\n\n if cursor.description is not None:\n columns = self.fetch_columns([(i[0], None) for i in cursor.description])\n rows = [\n dict(zip((column[\"name\"] for column in columns), row))\n for row in cursor\n ]\n\n data = {\"columns\": columns, \"rows\": rows}\n error = None\n json_data = json_dumps(data)\n else:\n error = \"Query completed but it returned no data.\"\n json_data = None\n except (KeyboardInterrupt, JobTimeoutException):\n connection.cancel()\n raise\n finally:\n connection.close()\n return json_data, error\n\n\nregister(Sqlite)\n", "path": "redash/query_runner/sqlite.py"}], "after_files": [{"content": "import logging\nimport sqlite3\n\nfrom redash.query_runner import BaseSQLQueryRunner, register, JobTimeoutException\nfrom redash.utils import json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\n\n\nclass 
Sqlite(BaseSQLQueryRunner):\n noop_query = \"pragma quick_check\"\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\"dbpath\": {\"type\": \"string\", \"title\": \"Database Path\"}},\n \"required\": [\"dbpath\"],\n }\n\n @classmethod\n def type(cls):\n return \"sqlite\"\n\n def __init__(self, configuration):\n super(Sqlite, self).__init__(configuration)\n\n self._dbpath = self.configuration[\"dbpath\"]\n\n def _get_tables(self, schema):\n query_table = \"select tbl_name from sqlite_master where type='table'\"\n query_columns = \"PRAGMA table_info(\\\"%s\\\")\"\n\n results, error = self.run_query(query_table, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n\n for row in results[\"rows\"]:\n table_name = row[\"tbl_name\"]\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n results_table, error = self.run_query(query_columns % (table_name,), None)\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results_table = json_loads(results_table)\n for row_column in results_table[\"rows\"]:\n schema[table_name][\"columns\"].append(row_column[\"name\"])\n\n return list(schema.values())\n\n def run_query(self, query, user):\n connection = sqlite3.connect(self._dbpath)\n\n cursor = connection.cursor()\n\n try:\n cursor.execute(query)\n\n if cursor.description is not None:\n columns = self.fetch_columns([(i[0], None) for i in cursor.description])\n rows = [\n dict(zip((column[\"name\"] for column in columns), row))\n for row in cursor\n ]\n\n data = {\"columns\": columns, \"rows\": rows}\n error = None\n json_data = json_dumps(data)\n else:\n error = \"Query completed but it returned no data.\"\n json_data = None\n except (KeyboardInterrupt, JobTimeoutException):\n connection.cancel()\n raise\n finally:\n connection.close()\n return json_data, error\n\n\nregister(Sqlite)\n", "path": "redash/query_runner/sqlite.py"}]} | 1,087 | 120 |
gh_patches_debug_35067 | rasdani/github-patches | git_diff | fedora-infra__bodhi-3276 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RSS feed gives no information about packages build
Since commit 60dc56c the RSS feed doesn't give information about the packages affected in a build.
Example from [Fedora30 Stable - RSS](https://bodhi.fedoraproject.org/rss/updates/?releases=F30&status=stable) :
```xml
<item>
<title>FEDORA-2019-59d394e0fd</title>
<link>https://bodhi.fedoraproject.org/updates/createrepo_c-0.14.1-1.fc30</link>
<description>- Update to 0.14.1
- Add --pkgorigins mode for Koji
- Correct pkg count in headers if there were invalid pkgs (RhBug:1596211)
- Prevent exiting with 0 if errors occur while finalizing repodata.
</description>
<pubDate>Fri, 24 May 2019 12:20:49 +0000</pubDate>
</item>
```
Also the link is invalid as in #3248
Looking to the [Fedora30 Stable - Web UI](https://bodhi.fedoraproject.org/updates/?releases=F30&status=stable) it seems that it should be:
```
<title>createrepo_c-0.14.1-1.fc30</title>
<link>https://bodhi.fedoraproject.org/updates/FEDORA-2019-59d394e0fd</link>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/server/renderers.py`
Content:
```
1 # Copyright © 2014-2019 Red Hat, Inc.
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 """Define special view renderers, such as RSS."""
19 import logging
20 import operator
21
22 from pytz import utc
23 from feedgen.feed import FeedGenerator
24 from pyramid.exceptions import HTTPBadRequest
25
26
27 log = logging.getLogger(__name__)
28
29
30 def rss(info):
31 """
32 Return a RSS renderer.
33
34 Args:
35 info (pyramid.renderers.RendererHelper): Unused.
36 Returns:
37 function: A function that can be used to render a RSS view.
38 """
39 def render(data, system):
40 """
41 Render the given data as an RSS view.
42
43 If the request's content type is set to the default, this function will change it to
44 application/rss+xml.
45
46 Args:
47 data (dict): A dictionary describing the information to be rendered. The information can
48 be different types of objects, such as updates, users, comments, or overrides.
49 system (pyramid.events.BeforeRender): Used to get the current request.
50 Returns:
51 str: An RSS document representing the given data.
52 """
53 request = system.get('request')
54 if request is not None:
55 response = request.response
56 ct = response.content_type
57 if ct == response.default_content_type:
58 response.content_type = 'application/rss+xml'
59
60 if 'updates' in data:
61 key = 'updates'
62 feed_title = 'Released updates'
63 elif 'users' in data:
64 key = 'users'
65 feed_title = 'Bodhi users'
66 elif 'comments' in data:
67 key = 'comments'
68 feed_title = 'User comments'
69 elif 'overrides' in data:
70 key = 'overrides'
71 feed_title = 'Update overrides'
72 else:
73 # This is a request we don't know how to render. Let's return BadRequest and log.
74 log.debug('Unable to render RSS feed for data: %s', data)
75 # See if we have a request so we can set a code without raising an Exception
76 if request is not None:
77 response.status = HTTPBadRequest.code
78 return 'Invalid RSS feed request'
79 else:
80 raise HTTPBadRequest('Invalid RSS feed request')
81
82 feed_description_list = []
83 for k in request.GET.keys():
84 feed_description_list.append('%s(%s)' % (k, request.GET[k]))
85 if feed_description_list:
86 feed_description = 'Filtered on: ' + ', '.join(feed_description_list)
87 else:
88 feed_description = "All %s" % (key)
89
90 feed = FeedGenerator()
91 feed.title(feed_title)
92 feed.link(href=request.url, rel='self')
93 feed.description(feed_description)
94 feed.language('en')
95
96 def linker(route, param, key):
97 def link_dict(obj):
98 return dict(href=request.route_url(route, **{param: obj[key]}))
99 return link_dict
100
101 getters = {
102 'updates': {
103 'title': operator.itemgetter('alias'),
104 'link': linker('update', 'id', 'alias'),
105 'description': operator.itemgetter('notes'),
106 'pubDate': lambda obj: utc.localize(obj['date_submitted']),
107 },
108 'users': {
109 'title': operator.itemgetter('name'),
110 'link': linker('user', 'name', 'name'),
111 'description': operator.itemgetter('name'),
112 },
113 'comments': {
114 'title': operator.itemgetter('rss_title'),
115 'link': linker('comment', 'id', 'id'),
116 'description': operator.itemgetter('text'),
117 'pubDate': lambda obj: utc.localize(obj['timestamp']),
118 },
119 'overrides': {
120 'title': operator.itemgetter('nvr'),
121 'link': linker('override', 'nvr', 'nvr'),
122 'description': operator.itemgetter('notes'),
123 'pubDate': lambda obj: utc.localize(obj['submission_date']),
124 },
125 }
126
127 for value in data[key]:
128 feed_item = feed.add_item()
129 for name, getter in getters[key].items():
130 # Because we have to use methods to fill feed entry attributes,
131 # it's done by getting methods by name and calling them
132 # on the same line.
133 getattr(feed_item, name)(getter(value))
134
135 return feed.rss_str()
136
137 return render
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bodhi/server/renderers.py b/bodhi/server/renderers.py
--- a/bodhi/server/renderers.py
+++ b/bodhi/server/renderers.py
@@ -23,6 +23,8 @@
from feedgen.feed import FeedGenerator
from pyramid.exceptions import HTTPBadRequest
+from bodhi.server.util import markup
+
log = logging.getLogger(__name__)
@@ -98,11 +100,44 @@
return dict(href=request.route_url(route, **{param: obj[key]}))
return link_dict
+ def describe_update(alias, notes, builds):
+ """
+ Wrap calls to operator.itemgetter to retrieve notes and builds list.
+
+ Methods are used to fill feed entry values, so we must use a wrapper
+ to get an HTML formatted description from the `notes` and the `builds`
+ properties of the update.
+
+ For example:
+ getter = describe_update(operator.itemgetter('notes'),operator.itemgetter('builds'))
+ description_value = getter(update_data)
+
+ Args:
+ alias (operator.itemgetter): A callable object which returns update alias
+ as string.
+ notes (operator.itemgetter): A callable object which returns update notes
+ as string.
+ builds (operator.itemgetter): A callable object which returns a list of builds
+ associated to the update.
+ Returns:
+ function: A function which accepts a dict representing an update as parameter.
+ """
+ def describe(*args, **kwargs):
+ text = f'# {alias(*args, **kwargs)}\n'
+ text += f'## Packages in this update:\n'
+ for p in builds(*args, **kwargs):
+ text += f'* {p.nvr}\n'
+ text += f'## Update description:\n{notes(*args, **kwargs)}'
+ return markup(None, text)
+ return describe
+
getters = {
'updates': {
- 'title': operator.itemgetter('alias'),
+ 'title': operator.itemgetter('title'),
'link': linker('update', 'id', 'alias'),
- 'description': operator.itemgetter('notes'),
+ 'description': describe_update(operator.itemgetter('alias'),
+ operator.itemgetter('notes'),
+ operator.itemgetter('builds')),
'pubDate': lambda obj: utc.localize(obj['date_submitted']),
},
'users': {
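The core of the patch is the `describe_update` closure: it captures three `operator.itemgetter` callables and returns a function that formats one feed entry from an update. A hedged standalone sketch (plain strings stand in for bodhi's build objects, which expose `.nvr`, and the final `markup()` rendering step is omitted):
```python
# Illustrative sketch of the describe_update pattern from the diff above;
# the update dict below is fabricated sample data.
import operator

def describe_update(alias, notes, builds):
    # Returns a callable suitable for filling a feed item's description.
    def describe(obj):
        text = f"# {alias(obj)}\n## Packages in this update:\n"
        for nvr in builds(obj):          # real code iterates Build objects (p.nvr)
            text += f"* {nvr}\n"
        text += f"## Update description:\n{notes(obj)}"
        return text                      # bodhi additionally renders this via markup()
    return describe

getter = describe_update(
    operator.itemgetter("alias"),
    operator.itemgetter("notes"),
    operator.itemgetter("builds"),
)
update = {
    "alias": "FEDORA-2019-59d394e0fd",
    "notes": "- Update to 0.14.1",
    "builds": ["createrepo_c-0.14.1-1.fc30"],
}
print(getter(update))
```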
| {"golden_diff": "diff --git a/bodhi/server/renderers.py b/bodhi/server/renderers.py\n--- a/bodhi/server/renderers.py\n+++ b/bodhi/server/renderers.py\n@@ -23,6 +23,8 @@\n from feedgen.feed import FeedGenerator\n from pyramid.exceptions import HTTPBadRequest\n \n+from bodhi.server.util import markup\n+\n \n log = logging.getLogger(__name__)\n \n@@ -98,11 +100,44 @@\n return dict(href=request.route_url(route, **{param: obj[key]}))\n return link_dict\n \n+ def describe_update(alias, notes, builds):\n+ \"\"\"\n+ Wrap calls to operator.itemgetter to retrieve notes and builds list.\n+\n+ Methods are used to fill feed entry values, so we must use a wrapper\n+ to get an HTML formatted description from the `notes` and the `builds`\n+ properties of the update.\n+\n+ For example:\n+ getter = describe_update(operator.itemgetter('notes'),operator.itemgetter('builds'))\n+ description_value = getter(update_data)\n+\n+ Args:\n+ alias (operator.itemgetter): A callable object which returns update alias\n+ as string.\n+ notes (operator.itemgetter): A callable object which returns update notes\n+ as string.\n+ builds (operator.itemgetter): A callable object which returns a list of builds\n+ associated to the update.\n+ Returns:\n+ function: A function which accepts a dict representing an update as parameter.\n+ \"\"\"\n+ def describe(*args, **kwargs):\n+ text = f'# {alias(*args, **kwargs)}\\n'\n+ text += f'## Packages in this update:\\n'\n+ for p in builds(*args, **kwargs):\n+ text += f'* {p.nvr}\\n'\n+ text += f'## Update description:\\n{notes(*args, **kwargs)}'\n+ return markup(None, text)\n+ return describe\n+\n getters = {\n 'updates': {\n- 'title': operator.itemgetter('alias'),\n+ 'title': operator.itemgetter('title'),\n 'link': linker('update', 'id', 'alias'),\n- 'description': operator.itemgetter('notes'),\n+ 'description': describe_update(operator.itemgetter('alias'),\n+ operator.itemgetter('notes'),\n+ operator.itemgetter('builds')),\n 'pubDate': lambda obj: utc.localize(obj['date_submitted']),\n },\n 'users': {\n", "issue": "RSS feed gives no information about packages build\nSince commit 60dc56c that RSS feed doesn't give information about the packages affected in a build.\r\nExample from [Fedora30 Stable - RSS](https://bodhi.fedoraproject.org/rss/updates/?releases=F30&status=stable) :\r\n```xml\r\n<item>\r\n <title>FEDORA-2019-59d394e0fd</title>\r\n <link>https://bodhi.fedoraproject.org/updates/createrepo_c-0.14.1-1.fc30</link>\r\n <description>- Update to 0.14.1\r\n- Add --pkgorigins mode for Koji\r\n- Correct pkg count in headers if there were invalid pkgs (RhBug:1596211)\r\n- Prevent exiting with 0 if errors occur while finalizing repodata.\r\n </description>\r\n <pubDate>Fri, 24 May 2019 12:20:49 +0000</pubDate>\r\n</item>\r\n```\r\n\r\nAlso the link is invalid as in #3248\r\n\r\nLooking to the [Fedora30 Stable - Web UI](https://bodhi.fedoraproject.org/updates/?releases=F30&status=stable) it seems that it should be:\r\n```\r\n <title>createrepo_c-0.14.1-1.fc30</title>\r\n <link>https://bodhi.fedoraproject.org/updates/FEDORA-2019-59d394e0fd</link>\r\n```\n", "before_files": [{"content": "# Copyright \u00a9 2014-2019 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but 
WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"Define special view renderers, such as RSS.\"\"\"\nimport logging\nimport operator\n\nfrom pytz import utc\nfrom feedgen.feed import FeedGenerator\nfrom pyramid.exceptions import HTTPBadRequest\n\n\nlog = logging.getLogger(__name__)\n\n\ndef rss(info):\n \"\"\"\n Return a RSS renderer.\n\n Args:\n info (pyramid.renderers.RendererHelper): Unused.\n Returns:\n function: A function that can be used to render a RSS view.\n \"\"\"\n def render(data, system):\n \"\"\"\n Render the given data as an RSS view.\n\n If the request's content type is set to the default, this function will change it to\n application/rss+xml.\n\n Args:\n data (dict): A dictionary describing the information to be rendered. The information can\n be different types of objects, such as updates, users, comments, or overrides.\n system (pyramid.events.BeforeRender): Used to get the current request.\n Returns:\n str: An RSS document representing the given data.\n \"\"\"\n request = system.get('request')\n if request is not None:\n response = request.response\n ct = response.content_type\n if ct == response.default_content_type:\n response.content_type = 'application/rss+xml'\n\n if 'updates' in data:\n key = 'updates'\n feed_title = 'Released updates'\n elif 'users' in data:\n key = 'users'\n feed_title = 'Bodhi users'\n elif 'comments' in data:\n key = 'comments'\n feed_title = 'User comments'\n elif 'overrides' in data:\n key = 'overrides'\n feed_title = 'Update overrides'\n else:\n # This is a request we don't know how to render. 
Let's return BadRequest and log.\n log.debug('Unable to render RSS feed for data: %s', data)\n # See if we have a request so we can set a code without raising an Exception\n if request is not None:\n response.status = HTTPBadRequest.code\n return 'Invalid RSS feed request'\n else:\n raise HTTPBadRequest('Invalid RSS feed request')\n\n feed_description_list = []\n for k in request.GET.keys():\n feed_description_list.append('%s(%s)' % (k, request.GET[k]))\n if feed_description_list:\n feed_description = 'Filtered on: ' + ', '.join(feed_description_list)\n else:\n feed_description = \"All %s\" % (key)\n\n feed = FeedGenerator()\n feed.title(feed_title)\n feed.link(href=request.url, rel='self')\n feed.description(feed_description)\n feed.language('en')\n\n def linker(route, param, key):\n def link_dict(obj):\n return dict(href=request.route_url(route, **{param: obj[key]}))\n return link_dict\n\n getters = {\n 'updates': {\n 'title': operator.itemgetter('alias'),\n 'link': linker('update', 'id', 'alias'),\n 'description': operator.itemgetter('notes'),\n 'pubDate': lambda obj: utc.localize(obj['date_submitted']),\n },\n 'users': {\n 'title': operator.itemgetter('name'),\n 'link': linker('user', 'name', 'name'),\n 'description': operator.itemgetter('name'),\n },\n 'comments': {\n 'title': operator.itemgetter('rss_title'),\n 'link': linker('comment', 'id', 'id'),\n 'description': operator.itemgetter('text'),\n 'pubDate': lambda obj: utc.localize(obj['timestamp']),\n },\n 'overrides': {\n 'title': operator.itemgetter('nvr'),\n 'link': linker('override', 'nvr', 'nvr'),\n 'description': operator.itemgetter('notes'),\n 'pubDate': lambda obj: utc.localize(obj['submission_date']),\n },\n }\n\n for value in data[key]:\n feed_item = feed.add_item()\n for name, getter in getters[key].items():\n # Because we have to use methods to fill feed entry attributes,\n # it's done by getting methods by name and calling them\n # on the same line.\n getattr(feed_item, name)(getter(value))\n\n return feed.rss_str()\n\n return render\n", "path": "bodhi/server/renderers.py"}], "after_files": [{"content": "# Copyright \u00a9 2014-2019 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"Define special view renderers, such as RSS.\"\"\"\nimport logging\nimport operator\n\nfrom pytz import utc\nfrom feedgen.feed import FeedGenerator\nfrom pyramid.exceptions import HTTPBadRequest\n\nfrom bodhi.server.util import markup\n\n\nlog = logging.getLogger(__name__)\n\n\ndef rss(info):\n \"\"\"\n Return a RSS renderer.\n\n Args:\n info (pyramid.renderers.RendererHelper): Unused.\n Returns:\n function: A function that can be used to render a RSS view.\n \"\"\"\n def render(data, system):\n \"\"\"\n Render the given data as an RSS view.\n\n If the request's content type is set to the default, this function will change it to\n application/rss+xml.\n\n Args:\n data (dict): A dictionary describing the information to be rendered. The information can\n be different types of objects, such as updates, users, comments, or overrides.\n system (pyramid.events.BeforeRender): Used to get the current request.\n Returns:\n str: An RSS document representing the given data.\n \"\"\"\n request = system.get('request')\n if request is not None:\n response = request.response\n ct = response.content_type\n if ct == response.default_content_type:\n response.content_type = 'application/rss+xml'\n\n if 'updates' in data:\n key = 'updates'\n feed_title = 'Released updates'\n elif 'users' in data:\n key = 'users'\n feed_title = 'Bodhi users'\n elif 'comments' in data:\n key = 'comments'\n feed_title = 'User comments'\n elif 'overrides' in data:\n key = 'overrides'\n feed_title = 'Update overrides'\n else:\n # This is a request we don't know how to render. 
Let's return BadRequest and log.\n log.debug('Unable to render RSS feed for data: %s', data)\n # See if we have a request so we can set a code without raising an Exception\n if request is not None:\n response.status = HTTPBadRequest.code\n return 'Invalid RSS feed request'\n else:\n raise HTTPBadRequest('Invalid RSS feed request')\n\n feed_description_list = []\n for k in request.GET.keys():\n feed_description_list.append('%s(%s)' % (k, request.GET[k]))\n if feed_description_list:\n feed_description = 'Filtered on: ' + ', '.join(feed_description_list)\n else:\n feed_description = \"All %s\" % (key)\n\n feed = FeedGenerator()\n feed.title(feed_title)\n feed.link(href=request.url, rel='self')\n feed.description(feed_description)\n feed.language('en')\n\n def linker(route, param, key):\n def link_dict(obj):\n return dict(href=request.route_url(route, **{param: obj[key]}))\n return link_dict\n\n def describe_update(alias, notes, builds):\n \"\"\"\n Wrap calls to operator.itemgetter to retrieve notes and builds list.\n\n Methods are used to fill feed entry values, so we must use a wrapper\n to get an HTML formatted description from the `notes` and the `builds`\n properties of the update.\n\n For example:\n getter = describe_update(operator.itemgetter('notes'),operator.itemgetter('builds'))\n description_value = getter(update_data)\n\n Args:\n alias (operator.itemgetter): A callable object which returns update alias\n as string.\n notes (operator.itemgetter): A callable object which returns update notes\n as string.\n builds (operator.itemgetter): A callable object which returns a list of builds\n associated to the update.\n Returns:\n function: A function which accepts a dict representing an update as parameter.\n \"\"\"\n def describe(*args, **kwargs):\n text = f'# {alias(*args, **kwargs)}\\n'\n text += f'## Packages in this update:\\n'\n for p in builds(*args, **kwargs):\n text += f'* {p.nvr}\\n'\n text += f'## Update description:\\n{notes(*args, **kwargs)}'\n return markup(None, text)\n return describe\n\n getters = {\n 'updates': {\n 'title': operator.itemgetter('title'),\n 'link': linker('update', 'id', 'alias'),\n 'description': describe_update(operator.itemgetter('alias'),\n operator.itemgetter('notes'),\n operator.itemgetter('builds')),\n 'pubDate': lambda obj: utc.localize(obj['date_submitted']),\n },\n 'users': {\n 'title': operator.itemgetter('name'),\n 'link': linker('user', 'name', 'name'),\n 'description': operator.itemgetter('name'),\n },\n 'comments': {\n 'title': operator.itemgetter('rss_title'),\n 'link': linker('comment', 'id', 'id'),\n 'description': operator.itemgetter('text'),\n 'pubDate': lambda obj: utc.localize(obj['timestamp']),\n },\n 'overrides': {\n 'title': operator.itemgetter('nvr'),\n 'link': linker('override', 'nvr', 'nvr'),\n 'description': operator.itemgetter('notes'),\n 'pubDate': lambda obj: utc.localize(obj['submission_date']),\n },\n }\n\n for value in data[key]:\n feed_item = feed.add_item()\n for name, getter in getters[key].items():\n # Because we have to use methods to fill feed entry attributes,\n # it's done by getting methods by name and calling them\n # on the same line.\n getattr(feed_item, name)(getter(value))\n\n return feed.rss_str()\n\n return render\n", "path": "bodhi/server/renderers.py"}]} | 2,030 | 536 |
gh_patches_debug_36153 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1556 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove Python 2 syntax from events examples in comments
# Description
In `pyhf.events` there are two examples of Python 2 syntax being used for
https://github.com/scikit-hep/pyhf/blob/29c3df0e23a428004a065aed61cefb6a526a7332/src/pyhf/events.py#L46-L53
and
https://github.com/scikit-hep/pyhf/blob/29c3df0e23a428004a065aed61cefb6a526a7332/src/pyhf/events.py#L69-L87
These examples should be updated to use Python 3 syntax.
The examples themselves are also wrong. For example, the first one should be
```python
>>> import pyhf
>>> @pyhf.events.subscribe('myevent')
... def test(a,b):
... print(a+b)
...
>>> pyhf.events.trigger("myevent")(1,2)
3
```
--- END ISSUE ---
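For completeness, the corrected example from the issue also works as a plain script rather than a doctest (assuming a pyhf installation; per the `Callables` implementation shown below, the module-level `def` keeps the weakly-referenced callback alive, and `trigger` returns a no-op when nothing is subscribed):
```python
# Script form of the corrected doctest above; assumes pyhf is installed.
import pyhf

@pyhf.events.subscribe("myevent")
def handler(a, b):
    print(a + b)

pyhf.events.trigger("myevent")(1, 2)  # prints 3
```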
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyhf/events.py`
Content:
```
1 import weakref
2 from functools import wraps
3
4 __events = {}
5 __disabled_events = set()
6
7 __all__ = [
8 "Callables",
9 "disable",
10 "enable",
11 "noop",
12 "register",
13 "subscribe",
14 "trigger",
15 ]
16
17
18 def __dir__():
19 return __all__
20
21
22 def noop(*args, **kwargs):
23 pass
24
25
26 class Callables:
27 def __init__(self):
28 self._callbacks = []
29
30 @property
31 def callbacks(self):
32 """
33 Get the current list of living callbacks.
34 """
35 self._flush()
36 return self._callbacks
37
38 def append(self, callback):
39 """
40 Append a new bound method as a callback to the list of callables.
41 """
42 try:
43 # methods
44 callback_ref = weakref.ref(callback.__func__), weakref.ref(
45 callback.__self__
46 )
47 except AttributeError:
48 callback_ref = weakref.ref(callback), None
49 self._callbacks.append(callback_ref)
50
51 def _flush(self):
52 """
53 Flush the list of callbacks with those who are weakly-referencing deleted objects.
54
55 Note: must interact with the self._callbacks directly, and not
56 self.callbacks, to avoid infinite recursion.
57 """
58 _callbacks = []
59 for func, arg in self._callbacks:
60 if arg is not None:
61 arg_ref = arg()
62 if arg_ref is None:
63 continue
64 _callbacks.append((func, arg))
65 self._callbacks = _callbacks
66
67 def __call__(self, *args, **kwargs):
68 for func, arg in self.callbacks:
69 # weakref: needs to be de-ref'd first before calling
70 if arg is not None:
71 func()(arg(), *args, **kwargs)
72 else:
73 func()(*args, **kwargs)
74
75 def __iter__(self):
76 return iter(self.callbacks)
77
78 def __getitem__(self, index):
79 return self.callbacks[index]
80
81 def __len__(self):
82 return len(self.callbacks)
83
84 def __repr__(self):
85 return f"Callables({self.callbacks})"
86
87
88 def subscribe(event):
89 """
90 Subscribe a function or object method as a callback to an event.
91
92 Note: this is meant to be used as a decorator.
93
94 Args:
95 event (:obj:`str`): The name of the event to subscribe to.
96
97 Returns:
98 :obj:`function`: Decorated function.
99 """
100 # Example:
101 #
102 # >>> @pyhf.events.subscribe('myevent')
103 # ... def test(a,b):
104 # ... print a+b
105 # ...
106 # >>> pyhf.events.trigger_myevent(1,2)
107 # 3
108 global __events
109
110 def __decorator(func):
111 __events.setdefault(event, Callables()).append(func)
112 return func
113
114 return __decorator
115
116
117 def register(event):
118 """
119 Register a function or object method to trigger an event. This creates two
120 events: ``{event_name}::before`` and ``{event_name}::after``.
121
122 Note: this is meant to be used as a decorator.
123
124 Args:
125 event (:obj:`str`): The name of the event to subscribe to.
126
127 Returns:
128 :obj:`function`: Decorated function.
129
130 """
131 # Examples:
132 #
133 # >>> @pyhf.events.register('test_func')
134 # ... def test(a,b):
135 # ... print a+b
136 # ...
137 # >>> @pyhf.events.subscribe('test_func::before')
138 # ... def precall():
139 # ... print 'before call'
140 # ...
141 # >>> @pyhf.events.subscribe('test_func::after')
142 # ... def postcall():
143 # ... print 'after call'
144 # ...
145 # >>> test(1,2)
146 # "before call"
147 # 3
148 # "after call"
149 # >>>
150
151 def _register(func):
152 @wraps(func)
153 def register_wrapper(*args, **kwargs):
154 trigger(f"{event:s}::before")()
155 result = func(*args, **kwargs)
156 trigger(f"{event:s}::after")()
157 return result
158
159 return register_wrapper
160
161 return _register
162
163
164 def trigger(event):
165 """
166 Trigger an event if not disabled.
167 """
168 global __events, __disabled_events, noop
169 is_noop = bool(event in __disabled_events or event not in __events)
170 return noop if is_noop else __events.get(event)
171
172
173 def disable(event):
174 """
175 Disable an event from firing.
176 """
177 global __disabled_events
178 __disabled_events.add(event)
179
180
181 def enable(event):
182 """
183 Enable an event to be fired if disabled.
184 """
185 global __disabled_events
186 __disabled_events.remove(event)
187
```
--- END FILES ---
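As background for the `Callables.append` logic above: a bound method cannot be weak-referenced directly, because the bound-method object is created on attribute access and dies immediately. A minimal, hypothetical demo (class `A` and the assertions are illustrative, not from the source):
```python
import weakref

class A:
    def cb(self):
        print("called")

a = A()
dead = weakref.ref(a.cb)   # the temporary bound method is collected at once
assert dead() is None
# storing (ref(__func__), ref(__self__)) as Callables.append does keeps it callable
pair = (weakref.ref(A.cb), weakref.ref(a))
pair[0]()(pair[1]())       # prints "called"
del a                      # once the instance dies, _flush() can drop the pair
assert pair[1]() is None
```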
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pyhf/events.py b/src/pyhf/events.py
--- a/src/pyhf/events.py
+++ b/src/pyhf/events.py
@@ -89,22 +89,27 @@
"""
Subscribe a function or object method as a callback to an event.
- Note: this is meant to be used as a decorator.
+ .. note::
+
+ This is meant to be used as a decorator.
Args:
event (:obj:`str`): The name of the event to subscribe to.
Returns:
:obj:`function`: Decorated function.
+
+ Example:
+ >>> import pyhf
+ >>> @pyhf.events.subscribe("myevent")
+ ... def test(a, b):
+ ... print(a + b)
+ ...
+ >>> pyhf.events.trigger("myevent")(1, 2)
+ 3
+
"""
- # Example:
- #
- # >>> @pyhf.events.subscribe('myevent')
- # ... def test(a,b):
- # ... print a+b
- # ...
- # >>> pyhf.events.trigger_myevent(1,2)
- # 3
+
global __events
def __decorator(func):
@@ -119,7 +124,9 @@
Register a function or object method to trigger an event. This creates two
events: ``{event_name}::before`` and ``{event_name}::after``.
- Note: this is meant to be used as a decorator.
+ .. note::
+
+ This is meant to be used as a decorator.
Args:
event (:obj:`str`): The name of the event to subscribe to.
@@ -127,26 +134,26 @@
Returns:
:obj:`function`: Decorated function.
+ Example:
+ >>> import pyhf
+ >>> @pyhf.events.register("test_func")
+ ... def test(a, b):
+ ... print(a + b)
+ ...
+ >>> @pyhf.events.subscribe("test_func::before")
+ ... def precall():
+ ... print("before call")
+ ...
+ >>> @pyhf.events.subscribe("test_func::after")
+ ... def postcall():
+ ... print("after call")
+ ...
+ >>> test(1, 2)
+ before call
+ 3
+ after call
+
"""
- # Examples:
- #
- # >>> @pyhf.events.register('test_func')
- # ... def test(a,b):
- # ... print a+b
- # ...
- # >>> @pyhf.events.subscribe('test_func::before')
- # ... def precall():
- # ... print 'before call'
- # ...
- # >>> @pyhf.events.subscribe('test_func::after')
- # ... def postcall():
- # ... print 'after call'
- # ...
- # >>> test(1,2)
- # "before call"
- # 3
- # "after call"
- # >>>
def _register(func):
@wraps(func)
| {"golden_diff": "diff --git a/src/pyhf/events.py b/src/pyhf/events.py\n--- a/src/pyhf/events.py\n+++ b/src/pyhf/events.py\n@@ -89,22 +89,27 @@\n \"\"\"\n Subscribe a function or object method as a callback to an event.\n \n- Note: this is meant to be used as a decorator.\n+ .. note::\n+\n+ This is meant to be used as a decorator.\n \n Args:\n event (:obj:`str`): The name of the event to subscribe to.\n \n Returns:\n :obj:`function`: Decorated function.\n+\n+ Example:\n+ >>> import pyhf\n+ >>> @pyhf.events.subscribe(\"myevent\")\n+ ... def test(a, b):\n+ ... print(a + b)\n+ ...\n+ >>> pyhf.events.trigger(\"myevent\")(1, 2)\n+ 3\n+\n \"\"\"\n- # Example:\n- #\n- # >>> @pyhf.events.subscribe('myevent')\n- # ... def test(a,b):\n- # ... print a+b\n- # ...\n- # >>> pyhf.events.trigger_myevent(1,2)\n- # 3\n+\n global __events\n \n def __decorator(func):\n@@ -119,7 +124,9 @@\n Register a function or object method to trigger an event. This creates two\n events: ``{event_name}::before`` and ``{event_name}::after``.\n \n- Note: this is meant to be used as a decorator.\n+ .. note::\n+\n+ This is meant to be used as a decorator.\n \n Args:\n event (:obj:`str`): The name of the event to subscribe to.\n@@ -127,26 +134,26 @@\n Returns:\n :obj:`function`: Decorated function.\n \n+ Example:\n+ >>> import pyhf\n+ >>> @pyhf.events.register(\"test_func\")\n+ ... def test(a, b):\n+ ... print(a + b)\n+ ...\n+ >>> @pyhf.events.subscribe(\"test_func::before\")\n+ ... def precall():\n+ ... print(\"before call\")\n+ ...\n+ >>> @pyhf.events.subscribe(\"test_func::after\")\n+ ... def postcall():\n+ ... print(\"after call\")\n+ ...\n+ >>> test(1, 2)\n+ before call\n+ 3\n+ after call\n+\n \"\"\"\n- # Examples:\n- #\n- # >>> @pyhf.events.register('test_func')\n- # ... def test(a,b):\n- # ... print a+b\n- # ...\n- # >>> @pyhf.events.subscribe('test_func::before')\n- # ... def precall():\n- # ... print 'before call'\n- # ...\n- # >>> @pyhf.events.subscribe('test_func::after')\n- # ... def postcall():\n- # ... print 'after call'\n- # ...\n- # >>> test(1,2)\n- # \"before call\"\n- # 3\n- # \"after call\"\n- # >>>\n \n def _register(func):\n @wraps(func)\n", "issue": "Remove Python 2 syntax from events examples in comments\n# Description\r\n\r\nIn `pyhf.events` there are a two examples of Python 2 syntax being used for \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/29c3df0e23a428004a065aed61cefb6a526a7332/src/pyhf/events.py#L46-L53\r\n\r\nand \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/29c3df0e23a428004a065aed61cefb6a526a7332/src/pyhf/events.py#L69-L87\r\n\r\nThese examples should be updated to use Python 3 syntax.\r\n\r\nAlso the examples are wrong themselves. For example, the first example should be\r\n\r\n```python\r\n>>> import pyhf\r\n>>> @pyhf.events.subscribe('myevent')\r\n... def test(a,b):\r\n... print(a+b)\r\n... 
\r\n>>> pyhf.events.trigger(\"myevent\")(1,2)\r\n3\r\n```\n", "before_files": [{"content": "import weakref\nfrom functools import wraps\n\n__events = {}\n__disabled_events = set()\n\n__all__ = [\n \"Callables\",\n \"disable\",\n \"enable\",\n \"noop\",\n \"register\",\n \"subscribe\",\n \"trigger\",\n]\n\n\ndef __dir__():\n return __all__\n\n\ndef noop(*args, **kwargs):\n pass\n\n\nclass Callables:\n def __init__(self):\n self._callbacks = []\n\n @property\n def callbacks(self):\n \"\"\"\n Get the current list of living callbacks.\n \"\"\"\n self._flush()\n return self._callbacks\n\n def append(self, callback):\n \"\"\"\n Append a new bound method as a callback to the list of callables.\n \"\"\"\n try:\n # methods\n callback_ref = weakref.ref(callback.__func__), weakref.ref(\n callback.__self__\n )\n except AttributeError:\n callback_ref = weakref.ref(callback), None\n self._callbacks.append(callback_ref)\n\n def _flush(self):\n \"\"\"\n Flush the list of callbacks with those who are weakly-referencing deleted objects.\n\n Note: must interact with the self._callbacks directly, and not\n self.callbacks, to avoid infinite recursion.\n \"\"\"\n _callbacks = []\n for func, arg in self._callbacks:\n if arg is not None:\n arg_ref = arg()\n if arg_ref is None:\n continue\n _callbacks.append((func, arg))\n self._callbacks = _callbacks\n\n def __call__(self, *args, **kwargs):\n for func, arg in self.callbacks:\n # weakref: needs to be de-ref'd first before calling\n if arg is not None:\n func()(arg(), *args, **kwargs)\n else:\n func()(*args, **kwargs)\n\n def __iter__(self):\n return iter(self.callbacks)\n\n def __getitem__(self, index):\n return self.callbacks[index]\n\n def __len__(self):\n return len(self.callbacks)\n\n def __repr__(self):\n return f\"Callables({self.callbacks})\"\n\n\ndef subscribe(event):\n \"\"\"\n Subscribe a function or object method as a callback to an event.\n\n Note: this is meant to be used as a decorator.\n\n Args:\n event (:obj:`str`): The name of the event to subscribe to.\n\n Returns:\n :obj:`function`: Decorated function.\n \"\"\"\n # Example:\n #\n # >>> @pyhf.events.subscribe('myevent')\n # ... def test(a,b):\n # ... print a+b\n # ...\n # >>> pyhf.events.trigger_myevent(1,2)\n # 3\n global __events\n\n def __decorator(func):\n __events.setdefault(event, Callables()).append(func)\n return func\n\n return __decorator\n\n\ndef register(event):\n \"\"\"\n Register a function or object method to trigger an event. This creates two\n events: ``{event_name}::before`` and ``{event_name}::after``.\n\n Note: this is meant to be used as a decorator.\n\n Args:\n event (:obj:`str`): The name of the event to subscribe to.\n\n Returns:\n :obj:`function`: Decorated function.\n\n \"\"\"\n # Examples:\n #\n # >>> @pyhf.events.register('test_func')\n # ... def test(a,b):\n # ... print a+b\n # ...\n # >>> @pyhf.events.subscribe('test_func::before')\n # ... def precall():\n # ... print 'before call'\n # ...\n # >>> @pyhf.events.subscribe('test_func::after')\n # ... def postcall():\n # ... 
print 'after call'\n # ...\n # >>> test(1,2)\n # \"before call\"\n # 3\n # \"after call\"\n # >>>\n\n def _register(func):\n @wraps(func)\n def register_wrapper(*args, **kwargs):\n trigger(f\"{event:s}::before\")()\n result = func(*args, **kwargs)\n trigger(f\"{event:s}::after\")()\n return result\n\n return register_wrapper\n\n return _register\n\n\ndef trigger(event):\n \"\"\"\n Trigger an event if not disabled.\n \"\"\"\n global __events, __disabled_events, noop\n is_noop = bool(event in __disabled_events or event not in __events)\n return noop if is_noop else __events.get(event)\n\n\ndef disable(event):\n \"\"\"\n Disable an event from firing.\n \"\"\"\n global __disabled_events\n __disabled_events.add(event)\n\n\ndef enable(event):\n \"\"\"\n Enable an event to be fired if disabled.\n \"\"\"\n global __disabled_events\n __disabled_events.remove(event)\n", "path": "src/pyhf/events.py"}], "after_files": [{"content": "import weakref\nfrom functools import wraps\n\n__events = {}\n__disabled_events = set()\n\n__all__ = [\n \"Callables\",\n \"disable\",\n \"enable\",\n \"noop\",\n \"register\",\n \"subscribe\",\n \"trigger\",\n]\n\n\ndef __dir__():\n return __all__\n\n\ndef noop(*args, **kwargs):\n pass\n\n\nclass Callables:\n def __init__(self):\n self._callbacks = []\n\n @property\n def callbacks(self):\n \"\"\"\n Get the current list of living callbacks.\n \"\"\"\n self._flush()\n return self._callbacks\n\n def append(self, callback):\n \"\"\"\n Append a new bound method as a callback to the list of callables.\n \"\"\"\n try:\n # methods\n callback_ref = weakref.ref(callback.__func__), weakref.ref(\n callback.__self__\n )\n except AttributeError:\n callback_ref = weakref.ref(callback), None\n self._callbacks.append(callback_ref)\n\n def _flush(self):\n \"\"\"\n Flush the list of callbacks with those who are weakly-referencing deleted objects.\n\n Note: must interact with the self._callbacks directly, and not\n self.callbacks, to avoid infinite recursion.\n \"\"\"\n _callbacks = []\n for func, arg in self._callbacks:\n if arg is not None:\n arg_ref = arg()\n if arg_ref is None:\n continue\n _callbacks.append((func, arg))\n self._callbacks = _callbacks\n\n def __call__(self, *args, **kwargs):\n for func, arg in self.callbacks:\n # weakref: needs to be de-ref'd first before calling\n if arg is not None:\n func()(arg(), *args, **kwargs)\n else:\n func()(*args, **kwargs)\n\n def __iter__(self):\n return iter(self.callbacks)\n\n def __getitem__(self, index):\n return self.callbacks[index]\n\n def __len__(self):\n return len(self.callbacks)\n\n def __repr__(self):\n return f\"Callables({self.callbacks})\"\n\n\ndef subscribe(event):\n \"\"\"\n Subscribe a function or object method as a callback to an event.\n\n .. note::\n\n This is meant to be used as a decorator.\n\n Args:\n event (:obj:`str`): The name of the event to subscribe to.\n\n Returns:\n :obj:`function`: Decorated function.\n\n Example:\n >>> import pyhf\n >>> @pyhf.events.subscribe(\"myevent\")\n ... def test(a, b):\n ... print(a + b)\n ...\n >>> pyhf.events.trigger(\"myevent\")(1, 2)\n 3\n\n \"\"\"\n\n global __events\n\n def __decorator(func):\n __events.setdefault(event, Callables()).append(func)\n return func\n\n return __decorator\n\n\ndef register(event):\n \"\"\"\n Register a function or object method to trigger an event. This creates two\n events: ``{event_name}::before`` and ``{event_name}::after``.\n\n .. 
note::\n\n This is meant to be used as a decorator.\n\n Args:\n event (:obj:`str`): The name of the event to subscribe to.\n\n Returns:\n :obj:`function`: Decorated function.\n\n Example:\n >>> import pyhf\n >>> @pyhf.events.register(\"test_func\")\n ... def test(a, b):\n ... print(a + b)\n ...\n >>> @pyhf.events.subscribe(\"test_func::before\")\n ... def precall():\n ... print(\"before call\")\n ...\n >>> @pyhf.events.subscribe(\"test_func::after\")\n ... def postcall():\n ... print(\"after call\")\n ...\n >>> test(1, 2)\n before call\n 3\n after call\n\n \"\"\"\n\n def _register(func):\n @wraps(func)\n def register_wrapper(*args, **kwargs):\n trigger(f\"{event:s}::before\")()\n result = func(*args, **kwargs)\n trigger(f\"{event:s}::after\")()\n return result\n\n return register_wrapper\n\n return _register\n\n\ndef trigger(event):\n \"\"\"\n Trigger an event if not disabled.\n \"\"\"\n global __events, __disabled_events, noop\n is_noop = bool(event in __disabled_events or event not in __events)\n return noop if is_noop else __events.get(event)\n\n\ndef disable(event):\n \"\"\"\n Disable an event from firing.\n \"\"\"\n global __disabled_events\n __disabled_events.add(event)\n\n\ndef enable(event):\n \"\"\"\n Enable an event to be fired if disabled.\n \"\"\"\n global __disabled_events\n __disabled_events.remove(event)\n", "path": "src/pyhf/events.py"}]} | 1,988 | 715 |
gh_patches_debug_11482 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-228 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Capture details of Celery Chains and Chords
Celery has some more advanced features to join multiple jobs into one. The agent needs testing and investigation into how these can best be instrumented.
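For reference, a minimal sketch of the two Celery primitives in question; the task names and the in-memory broker are illustrative, and actually running a chord needs a result backend plus a worker. In Celery 4+, each task in such a workflow exposes the upstream task's id as `task.request.parent_id`, which is the kind of detail an agent could record:
```python
from celery import Celery, chain, chord

app = Celery("demo", broker="memory://", backend="cache+memory://")

@app.task(bind=True)
def add(self, x, y):
    # an APM agent could tag self.request.id and self.request.parent_id here
    return x + y

@app.task
def tsum(numbers):
    return sum(numbers)

# chain: add(2, 2) -> add(<result>, 4) -> add(<result>, 8)
workflow = chain(add.s(2, 2), add.s(4), add.s(8))

# chord: run the header group in parallel, then feed all results to tsum
result = chord(add.s(i, i) for i in range(10))(tsum.s())
```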
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scout_apm/celery.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import datetime as dt
5
6 from celery.signals import before_task_publish, task_postrun, task_prerun
7
8 import scout_apm.core
9 from scout_apm.compat import datetime_to_timestamp
10 from scout_apm.core.tracked_request import TrackedRequest
11
12
13 def before_publish_callback(headers=None, properties=None, **kwargs):
14 if "scout_task_start" not in headers:
15 headers["scout_task_start"] = datetime_to_timestamp(dt.datetime.utcnow())
16
17
18 def prerun_callback(task=None, **kwargs):
19 tracked_request = TrackedRequest.instance()
20 tracked_request.mark_real_request()
21
22 start = getattr(task.request, "scout_task_start", None)
23 if start is not None:
24 now = datetime_to_timestamp(dt.datetime.utcnow())
25 try:
26 queue_time = now - start
27 except TypeError:
28 pass
29 else:
30 tracked_request.tag("queue_time", queue_time)
31
32 delivery_info = task.request.delivery_info
33 tracked_request.tag("is_eager", delivery_info.get("is_eager", False))
34 tracked_request.tag("exchange", delivery_info.get("exchange", "unknown"))
35 tracked_request.tag("routing_key", delivery_info.get("routing_key", "unknown"))
36 tracked_request.tag("queue", delivery_info.get("queue", "unknown"))
37
38 tracked_request.start_span(operation=("Job/" + task.name))
39
40
41 def postrun_callback(task=None, **kwargs):
42 tracked_request = TrackedRequest.instance()
43 tracked_request.stop_span()
44
45
46 def install():
47 installed = scout_apm.core.install()
48 if not installed:
49 return
50
51 before_task_publish.connect(before_publish_callback)
52 task_prerun.connect(prerun_callback)
53 task_postrun.connect(postrun_callback)
54
55
56 def uninstall():
57 before_task_publish.disconnect(before_publish_callback)
58 task_prerun.disconnect(prerun_callback)
59 task_postrun.disconnect(postrun_callback)
60
```
--- END FILES ---
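A small, self-contained illustration of the queue-time bookkeeping the file above implements; the body of `datetime_to_timestamp` is an assumption about what `scout_apm.compat` does (epoch seconds as a float):
```python
import datetime as dt

def datetime_to_timestamp(datetime_obj):
    # assumed equivalent of scout_apm.compat.datetime_to_timestamp
    return (datetime_obj - dt.datetime(1970, 1, 1)).total_seconds()

headers = {}
# before_task_publish: stamp the message as it enters the queue
headers["scout_task_start"] = datetime_to_timestamp(dt.datetime.utcnow())

# task_prerun: measure how long the job sat in the queue
queue_time = datetime_to_timestamp(dt.datetime.utcnow()) - headers["scout_task_start"]
print(f"queue_time={queue_time:.6f}s")
```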
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/scout_apm/celery.py b/src/scout_apm/celery.py
--- a/src/scout_apm/celery.py
+++ b/src/scout_apm/celery.py
@@ -29,6 +29,13 @@
else:
tracked_request.tag("queue_time", queue_time)
+ task_id = getattr(task.request, "id", None)
+ if task_id:
+ tracked_request.tag("task_id", task_id)
+ parent_task_id = getattr(task.request, "parent_id", None)
+ if parent_task_id:
+ tracked_request.tag("parent_task_id", parent_task_id)
+
delivery_info = task.request.delivery_info
tracked_request.tag("is_eager", delivery_info.get("is_eager", False))
tracked_request.tag("exchange", delivery_info.get("exchange", "unknown"))
| {"golden_diff": "diff --git a/src/scout_apm/celery.py b/src/scout_apm/celery.py\n--- a/src/scout_apm/celery.py\n+++ b/src/scout_apm/celery.py\n@@ -29,6 +29,13 @@\n else:\n tracked_request.tag(\"queue_time\", queue_time)\n \n+ task_id = getattr(task.request, \"id\", None)\n+ if task_id:\n+ tracked_request.tag(\"task_id\", task_id)\n+ parent_task_id = getattr(task.request, \"parent_id\", None)\n+ if parent_task_id:\n+ tracked_request.tag(\"parent_task_id\", parent_task_id)\n+\n delivery_info = task.request.delivery_info\n tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\n tracked_request.tag(\"exchange\", delivery_info.get(\"exchange\", \"unknown\"))\n", "issue": "Capture details of Celery Chains and Chords\nCelery has some more advanced features to join multiple jobs into one. The agent needs testing and investigation into how they can be best instrumented.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\n\nfrom celery.signals import before_task_publish, task_postrun, task_prerun\n\nimport scout_apm.core\nfrom scout_apm.compat import datetime_to_timestamp\nfrom scout_apm.core.tracked_request import TrackedRequest\n\n\ndef before_publish_callback(headers=None, properties=None, **kwargs):\n if \"scout_task_start\" not in headers:\n headers[\"scout_task_start\"] = datetime_to_timestamp(dt.datetime.utcnow())\n\n\ndef prerun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.mark_real_request()\n\n start = getattr(task.request, \"scout_task_start\", None)\n if start is not None:\n now = datetime_to_timestamp(dt.datetime.utcnow())\n try:\n queue_time = now - start\n except TypeError:\n pass\n else:\n tracked_request.tag(\"queue_time\", queue_time)\n\n delivery_info = task.request.delivery_info\n tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\n tracked_request.tag(\"exchange\", delivery_info.get(\"exchange\", \"unknown\"))\n tracked_request.tag(\"routing_key\", delivery_info.get(\"routing_key\", \"unknown\"))\n tracked_request.tag(\"queue\", delivery_info.get(\"queue\", \"unknown\"))\n\n tracked_request.start_span(operation=(\"Job/\" + task.name))\n\n\ndef postrun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.stop_span()\n\n\ndef install():\n installed = scout_apm.core.install()\n if not installed:\n return\n\n before_task_publish.connect(before_publish_callback)\n task_prerun.connect(prerun_callback)\n task_postrun.connect(postrun_callback)\n\n\ndef uninstall():\n before_task_publish.disconnect(before_publish_callback)\n task_prerun.disconnect(prerun_callback)\n task_postrun.disconnect(postrun_callback)\n", "path": "src/scout_apm/celery.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\n\nfrom celery.signals import before_task_publish, task_postrun, task_prerun\n\nimport scout_apm.core\nfrom scout_apm.compat import datetime_to_timestamp\nfrom scout_apm.core.tracked_request import TrackedRequest\n\n\ndef before_publish_callback(headers=None, properties=None, **kwargs):\n if \"scout_task_start\" not in headers:\n headers[\"scout_task_start\"] = datetime_to_timestamp(dt.datetime.utcnow())\n\n\ndef prerun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.mark_real_request()\n\n start = 
getattr(task.request, \"scout_task_start\", None)\n if start is not None:\n now = datetime_to_timestamp(dt.datetime.utcnow())\n try:\n queue_time = now - start\n except TypeError:\n pass\n else:\n tracked_request.tag(\"queue_time\", queue_time)\n\n task_id = getattr(task.request, \"id\", None)\n if task_id:\n tracked_request.tag(\"task_id\", task_id)\n parent_task_id = getattr(task.request, \"parent_id\", None)\n if parent_task_id:\n tracked_request.tag(\"parent_task_id\", parent_task_id)\n\n delivery_info = task.request.delivery_info\n tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\n tracked_request.tag(\"exchange\", delivery_info.get(\"exchange\", \"unknown\"))\n tracked_request.tag(\"routing_key\", delivery_info.get(\"routing_key\", \"unknown\"))\n tracked_request.tag(\"queue\", delivery_info.get(\"queue\", \"unknown\"))\n\n tracked_request.start_span(operation=(\"Job/\" + task.name))\n\n\ndef postrun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.stop_span()\n\n\ndef install():\n installed = scout_apm.core.install()\n if not installed:\n return\n\n before_task_publish.connect(before_publish_callback)\n task_prerun.connect(prerun_callback)\n task_postrun.connect(postrun_callback)\n\n\ndef uninstall():\n before_task_publish.disconnect(before_publish_callback)\n task_prerun.disconnect(prerun_callback)\n task_postrun.disconnect(postrun_callback)\n", "path": "src/scout_apm/celery.py"}]} | 837 | 190 |
gh_patches_debug_20353 | rasdani/github-patches | git_diff | WeblateOrg__weblate-10604 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Some languages don't have all strings available for translation
### Describe the issue
My project is here: https://hosted.weblate.org/projects/feeder/android-strings
A few languages Polish, French and Chinese (Simplified), are missing a dozen strings.
One example is the string `other_minutes` which is not available for translation in these languages.
I have tried re-scanning strings and similar steps, with no change.
### I already tried
- [X] I've read and searched [the documentation](https://docs.weblate.org/).
- [X] I've searched for similar issues in this repository.
### Steps to reproduce the behavior
Not sure how to reproduce it, but it is happening here: https://hosted.weblate.org/projects/feeder/android-strings
Look at the string `other_minutes`; it is missing from Polish, French, and Chinese (Simplified).
### Expected behavior
All strings should be available for translation in all languages.
### Screenshots
_No response_
### Exception traceback
_No response_
### How do you run Weblate?
weblate.org service
### Weblate versions
_No response_
### Weblate deploy checks
_No response_
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `weblate/addons/cleanup.py`
Content:
```
1 # Copyright © Michal Čihař <[email protected]>
2 #
3 # SPDX-License-Identifier: GPL-3.0-or-later
4
5 from django.utils.translation import gettext_lazy
6
7 from weblate.addons.base import UpdateBaseAddon
8 from weblate.addons.events import EVENT_POST_COMMIT, EVENT_POST_UPDATE, EVENT_PRE_COMMIT
9 from weblate.trans.exceptions import FileParseError
10
11
12 class BaseCleanupAddon(UpdateBaseAddon):
13 @classmethod
14 def can_install(cls, component, user):
15 if not component.has_template():
16 return False
17 return super().can_install(component, user)
18
19
20 class CleanupAddon(BaseCleanupAddon):
21 name = "weblate.cleanup.generic"
22 verbose = gettext_lazy("Cleanup translation files")
23 description = gettext_lazy(
24 "Update all translation files to match the monolingual base file. "
25 "For most file formats, this means removing stale translation keys "
26 "no longer present in the base file."
27 )
28 icon = "eraser.svg"
29 events = (EVENT_PRE_COMMIT, EVENT_POST_UPDATE)
30
31 def update_translations(self, component, previous_head):
32 for translation in self.iterate_translations(component):
33 filenames = translation.store.cleanup_unused()
34 if filenames is None:
35 continue
36 self.extra_files.extend(filenames)
37 translation.store_hash()
38
39 def pre_commit(self, translation, author):
40 if translation.is_source and not translation.component.intermediate:
41 return
42 try:
43 filenames = translation.store.cleanup_unused()
44 except FileParseError:
45 return
46 if filenames is not None:
47 self.extra_files.extend(filenames)
48 translation.store_hash()
49
50
51 class RemoveBlankAddon(BaseCleanupAddon):
52 name = "weblate.cleanup.blank"
53 verbose = gettext_lazy("Remove blank strings")
54 description = gettext_lazy(
55 "Removes strings without a translation from translation files."
56 )
57 events = (EVENT_POST_COMMIT, EVENT_POST_UPDATE)
58 icon = "eraser.svg"
59
60 def update_translations(self, component, previous_head):
61 for translation in self.iterate_translations(component):
62 filenames = translation.store.cleanup_blank()
63 if filenames is None:
64 continue
65 self.extra_files.extend(filenames)
66 translation.store_hash()
67
68 def post_commit(self, component):
69 self.post_update(component, None, skip_push=True)
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/weblate/addons/cleanup.py b/weblate/addons/cleanup.py
--- a/weblate/addons/cleanup.py
+++ b/weblate/addons/cleanup.py
@@ -34,7 +34,7 @@
if filenames is None:
continue
self.extra_files.extend(filenames)
- translation.store_hash()
+ # Do not update hash here as this is just before parsing updated files
def pre_commit(self, translation, author):
if translation.is_source and not translation.component.intermediate:
@@ -63,7 +63,9 @@
if filenames is None:
continue
self.extra_files.extend(filenames)
- translation.store_hash()
+ # Do not update hash in post_update, only in post_commit
+ if previous_head == "weblate:post-commit":
+ translation.store_hash()
def post_commit(self, component):
- self.post_update(component, None, skip_push=True)
+ self.post_update(component, "weblate:post-commit", skip_push=True)
| {"golden_diff": "diff --git a/weblate/addons/cleanup.py b/weblate/addons/cleanup.py\n--- a/weblate/addons/cleanup.py\n+++ b/weblate/addons/cleanup.py\n@@ -34,7 +34,7 @@\n if filenames is None:\n continue\n self.extra_files.extend(filenames)\n- translation.store_hash()\n+ # Do not update hash here as this is just before parsing updated files\n \n def pre_commit(self, translation, author):\n if translation.is_source and not translation.component.intermediate:\n@@ -63,7 +63,9 @@\n if filenames is None:\n continue\n self.extra_files.extend(filenames)\n- translation.store_hash()\n+ # Do not update hash in post_update, only in post_commit\n+ if previous_head == \"weblate:post-commit\":\n+ translation.store_hash()\n \n def post_commit(self, component):\n- self.post_update(component, None, skip_push=True)\n+ self.post_update(component, \"weblate:post-commit\", skip_push=True)\n", "issue": "Some languages don't have all strings available for translation\n### Describe the issue\n\nMy project is here: https://hosted.weblate.org/projects/feeder/android-strings\r\n\r\nA few languages Polish, French and Chinese (Simplified), are missing a dozen strings.\r\n\r\nOne example is the string `other_minutes` which is not available for translation in these languages.\r\n\r\nI have tried re-scanning strings and similar with no change.\n\n### I already tried\n\n- [X] I've read and searched [the documentation](https://docs.weblate.org/).\n- [X] I've searched for similar issues in this repository.\n\n### Steps to reproduce the behavior\n\nNot sure how to reproduce it but it is happening here :https://hosted.weblate.org/projects/feeder/android-strings\r\n\r\nlook at string `other_minutes`, it is missing from Polish, French, and Chinese (Simplified)\n\n### Expected behavior\n\nAll strings should be available for translation in all languages.\n\n### Screenshots\n\n_No response_\n\n### Exception traceback\n\n_No response_\n\n### How do you run Weblate?\n\nweblate.org service\n\n### Weblate versions\n\n_No response_\n\n### Weblate deploy checks\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom django.utils.translation import gettext_lazy\n\nfrom weblate.addons.base import UpdateBaseAddon\nfrom weblate.addons.events import EVENT_POST_COMMIT, EVENT_POST_UPDATE, EVENT_PRE_COMMIT\nfrom weblate.trans.exceptions import FileParseError\n\n\nclass BaseCleanupAddon(UpdateBaseAddon):\n @classmethod\n def can_install(cls, component, user):\n if not component.has_template():\n return False\n return super().can_install(component, user)\n\n\nclass CleanupAddon(BaseCleanupAddon):\n name = \"weblate.cleanup.generic\"\n verbose = gettext_lazy(\"Cleanup translation files\")\n description = gettext_lazy(\n \"Update all translation files to match the monolingual base file. 
\"\n \"For most file formats, this means removing stale translation keys \"\n \"no longer present in the base file.\"\n )\n icon = \"eraser.svg\"\n events = (EVENT_PRE_COMMIT, EVENT_POST_UPDATE)\n\n def update_translations(self, component, previous_head):\n for translation in self.iterate_translations(component):\n filenames = translation.store.cleanup_unused()\n if filenames is None:\n continue\n self.extra_files.extend(filenames)\n translation.store_hash()\n\n def pre_commit(self, translation, author):\n if translation.is_source and not translation.component.intermediate:\n return\n try:\n filenames = translation.store.cleanup_unused()\n except FileParseError:\n return\n if filenames is not None:\n self.extra_files.extend(filenames)\n translation.store_hash()\n\n\nclass RemoveBlankAddon(BaseCleanupAddon):\n name = \"weblate.cleanup.blank\"\n verbose = gettext_lazy(\"Remove blank strings\")\n description = gettext_lazy(\n \"Removes strings without a translation from translation files.\"\n )\n events = (EVENT_POST_COMMIT, EVENT_POST_UPDATE)\n icon = \"eraser.svg\"\n\n def update_translations(self, component, previous_head):\n for translation in self.iterate_translations(component):\n filenames = translation.store.cleanup_blank()\n if filenames is None:\n continue\n self.extra_files.extend(filenames)\n translation.store_hash()\n\n def post_commit(self, component):\n self.post_update(component, None, skip_push=True)\n", "path": "weblate/addons/cleanup.py"}], "after_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom django.utils.translation import gettext_lazy\n\nfrom weblate.addons.base import UpdateBaseAddon\nfrom weblate.addons.events import EVENT_POST_COMMIT, EVENT_POST_UPDATE, EVENT_PRE_COMMIT\nfrom weblate.trans.exceptions import FileParseError\n\n\nclass BaseCleanupAddon(UpdateBaseAddon):\n @classmethod\n def can_install(cls, component, user):\n if not component.has_template():\n return False\n return super().can_install(component, user)\n\n\nclass CleanupAddon(BaseCleanupAddon):\n name = \"weblate.cleanup.generic\"\n verbose = gettext_lazy(\"Cleanup translation files\")\n description = gettext_lazy(\n \"Update all translation files to match the monolingual base file. 
\"\n \"For most file formats, this means removing stale translation keys \"\n \"no longer present in the base file.\"\n )\n icon = \"eraser.svg\"\n events = (EVENT_PRE_COMMIT, EVENT_POST_UPDATE)\n\n def update_translations(self, component, previous_head):\n for translation in self.iterate_translations(component):\n filenames = translation.store.cleanup_unused()\n if filenames is None:\n continue\n self.extra_files.extend(filenames)\n # Do not update hash here as this is just before parsing updated files\n\n def pre_commit(self, translation, author):\n if translation.is_source and not translation.component.intermediate:\n return\n try:\n filenames = translation.store.cleanup_unused()\n except FileParseError:\n return\n if filenames is not None:\n self.extra_files.extend(filenames)\n translation.store_hash()\n\n\nclass RemoveBlankAddon(BaseCleanupAddon):\n name = \"weblate.cleanup.blank\"\n verbose = gettext_lazy(\"Remove blank strings\")\n description = gettext_lazy(\n \"Removes strings without a translation from translation files.\"\n )\n events = (EVENT_POST_COMMIT, EVENT_POST_UPDATE)\n icon = \"eraser.svg\"\n\n def update_translations(self, component, previous_head):\n for translation in self.iterate_translations(component):\n filenames = translation.store.cleanup_blank()\n if filenames is None:\n continue\n self.extra_files.extend(filenames)\n # Do not update hash in post_update, only in post_commit\n if previous_head == \"weblate:post-commit\":\n translation.store_hash()\n\n def post_commit(self, component):\n self.post_update(component, \"weblate:post-commit\", skip_push=True)\n", "path": "weblate/addons/cleanup.py"}]} | 1,143 | 230 |
gh_patches_debug_3202 | rasdani/github-patches | git_diff | hylang__hy-2190 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add `project_urls` to `setup.py`
This would allow us to provide links to our GitHub repository etc. in a sidebar on PyPI.
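For context, `project_urls` is a standard `setuptools.setup()` keyword; a minimal sketch of the addition (the URLs match the ones the golden diff below settles on):
```python
from setuptools import setup

setup(
    name="hy",
    # ...existing arguments...
    project_urls={
        "Documentation": "https://docs.hylang.org/",
        "Source": "https://github.com/hylang/hy",
    },
)
```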
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from setuptools import find_packages, setup
4 import fastentrypoints # Monkey-patches setuptools.
5
6 from get_version import __version__
7
8 os.chdir(os.path.split(os.path.abspath(__file__))[0])
9
10 PKG = "hy"
11
12 long_description = """Hy is a Python <--> Lisp layer. It helps
13 make things work nicer, and lets Python and the Hy lisp variant play
14 nice together. """
15
16 setup(
17 name=PKG,
18 version=__version__,
19 install_requires=[
20 'rply>=0.7.7',
21 'funcparserlib>=1.0.0a0',
22 'colorama',
23 'astor>=0.8 ; python_version < "3.9"',
24 ],
25 python_requires = '>= 3.7, <= 3.10',
26 entry_points={
27 'console_scripts': [
28 'hy = hy.cmdline:hy_main',
29 'hy3 = hy.cmdline:hy_main',
30 'hyc = hy.cmdline:hyc_main',
31 'hyc3 = hy.cmdline:hyc_main',
32 'hy2py = hy.cmdline:hy2py_main',
33 'hy2py3 = hy.cmdline:hy2py_main',
34 ]
35 },
36 packages=find_packages(exclude=['tests*']),
37 package_data={
38 'hy': ['*.hy', '__pycache__/*'],
39 'hy.contrib': ['*.hy', '__pycache__/*'],
40 'hy.core': ['*.hy', '__pycache__/*'],
41 'hy.extra': ['*.hy', '__pycache__/*'],
42 },
43 data_files=[
44 ('get_version', ['get_version.py'])
45 ],
46 author="Paul Tagliamonte",
47 author_email="[email protected]",
48 long_description=long_description,
49 description='Lisp and Python love each other.',
50 license="Expat",
51 url="http://hylang.org/",
52 platforms=['any'],
53 classifiers=[
54 "Development Status :: 4 - Beta",
55 "Intended Audience :: Developers",
56 "License :: DFSG approved",
57 "License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
58 "Operating System :: OS Independent",
59 "Programming Language :: Lisp",
60 "Programming Language :: Python",
61 "Programming Language :: Python :: 3",
62 "Programming Language :: Python :: 3.7",
63 "Programming Language :: Python :: 3.8",
64 "Programming Language :: Python :: 3.9",
65 "Programming Language :: Python :: 3.10",
66 "Topic :: Software Development :: Code Generators",
67 "Topic :: Software Development :: Compilers",
68 "Topic :: Software Development :: Libraries",
69 ]
70 )
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -66,5 +66,9 @@
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: Compilers",
"Topic :: Software Development :: Libraries",
- ]
+ ],
+ project_urls={
+ "Documentation": "https://docs.hylang.org/",
+ "Source": "https://github.com/hylang/hy",
+ }
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,5 +66,9 @@\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n- ]\n+ ],\n+ project_urls={\n+ \"Documentation\": \"https://docs.hylang.org/\",\n+ \"Source\": \"https://github.com/hylang/hy\",\n+ }\n )\n", "issue": "Add `project_urls` to `setup.py`\nThis would allow us to provide links to our GitHub repository etc. in a sidebar on PyPI.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import find_packages, setup\nimport fastentrypoints # Monkey-patches setuptools.\n\nfrom get_version import __version__\n\nos.chdir(os.path.split(os.path.abspath(__file__))[0])\n\nPKG = \"hy\"\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. \"\"\"\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=[\n 'rply>=0.7.7',\n 'funcparserlib>=1.0.0a0',\n 'colorama',\n 'astor>=0.8 ; python_version < \"3.9\"',\n ],\n python_requires = '>= 3.7, <= 3.10',\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hy3 = hy.cmdline:hy_main',\n 'hyc = hy.cmdline:hyc_main',\n 'hyc3 = hy.cmdline:hyc_main',\n 'hy2py = hy.cmdline:hy2py_main',\n 'hy2py3 = hy.cmdline:hy2py_main',\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy': ['*.hy', '__pycache__/*'],\n 'hy.contrib': ['*.hy', '__pycache__/*'],\n 'hy.core': ['*.hy', '__pycache__/*'],\n 'hy.extra': ['*.hy', '__pycache__/*'],\n },\n data_files=[\n ('get_version', ['get_version.py'])\n ],\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import find_packages, setup\nimport fastentrypoints # Monkey-patches setuptools.\n\nfrom get_version import __version__\n\nos.chdir(os.path.split(os.path.abspath(__file__))[0])\n\nPKG = \"hy\"\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. 
\"\"\"\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=[\n 'rply>=0.7.7',\n 'funcparserlib>=1.0.0a0',\n 'colorama',\n 'astor>=0.8 ; python_version < \"3.9\"',\n ],\n python_requires = '>= 3.7, <= 3.10',\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hy3 = hy.cmdline:hy_main',\n 'hyc = hy.cmdline:hyc_main',\n 'hyc3 = hy.cmdline:hyc_main',\n 'hy2py = hy.cmdline:hy2py_main',\n 'hy2py3 = hy.cmdline:hy2py_main',\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy': ['*.hy', '__pycache__/*'],\n 'hy.contrib': ['*.hy', '__pycache__/*'],\n 'hy.core': ['*.hy', '__pycache__/*'],\n 'hy.extra': ['*.hy', '__pycache__/*'],\n },\n data_files=[\n ('get_version', ['get_version.py'])\n ],\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ],\n project_urls={\n \"Documentation\": \"https://docs.hylang.org/\",\n \"Source\": \"https://github.com/hylang/hy\",\n }\n)\n", "path": "setup.py"}]} | 1,002 | 107 |
gh_patches_debug_39739 | rasdani/github-patches | git_diff | streamlink__streamlink-1878 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Problem with live.russia.tv
I have a problem with the plugin live.russia.tv:
```
#SERVICE 4097:0:1:0:0:0:224:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/76:Москва 24 HD
#DESCRIPTION Москва 24 HD
#SERVICE 4097:0:1:0:0:0:449:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/1:Rossija 1 HD
#DESCRIPTION Rossija 1 HD
#SERVICE 4097:0:1:0:0:0:445:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/82:Rossija RTR HD
#DESCRIPTION Rossija RTR HD
#SERVICE 4097:0:1:0:0:0:447:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/3:Rossija 24 HD
#DESCRIPTION Rossija 24 HD
```
The channels are not working through streamlink; played directly on a PC, the channels work fine.
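A quick way to check a channel outside the Enigma2 proxy setup is streamlink's Python API; this probe is a sketch (channel id 1 taken from the bouquet above), and an empty dict would reproduce the failure:
```python
import streamlink

streams = streamlink.streams("https://live.russia.tv/index/index/channel_id/1")
print(streams)  # expected: a dict of quality names to streams once the plugin resolves
```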
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/live_russia_tv.py`
Content:
```
1 import re
2 from streamlink.plugin import Plugin
3 from streamlink.plugin.api import http
4 from streamlink.stream import HLSStream
5
6 class LiveRussia(Plugin):
7 url_re = re.compile(r"https?://(?:www.)?live.russia.tv/index/index/channel_id/")
8 iframe_re = re.compile(r"""<iframe[^>]*src=["']([^'"]+)["'][^>]*>""")
9 stream_re = re.compile(r"""window.pl.data.*m3u8":"(.*)"}.*};""")
10
11 @classmethod
12 def can_handle_url(cls, url):
13 return cls.url_re.match(url) is not None
14
15 def _get_streams(self):
16 res = http.get(self.url)
17 iframe_result = re.search(self.iframe_re, res.text)
18
19 if not iframe_result:
20 self.logger.error("The requested content is unavailable.")
21 return
22
23 res = http.get(iframe_result.group(1))
24 stream_url_result = re.search(self.stream_re, res.text)
25
26 if not stream_url_result:
27 self.logger.error("The requested content is unavailable.")
28 return
29
30 return HLSStream.parse_variant_playlist(self.session, stream_url_result.group(1))
31
32
33 __plugin__ = LiveRussia
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/live_russia_tv.py b/src/streamlink/plugins/live_russia_tv.py
--- a/src/streamlink/plugins/live_russia_tv.py
+++ b/src/streamlink/plugins/live_russia_tv.py
@@ -1,33 +1,81 @@
+import logging
import re
+
from streamlink.plugin import Plugin
-from streamlink.plugin.api import http
-from streamlink.stream import HLSStream
+from streamlink.plugin.api import http, validate
+from streamlink.plugin.api.utils import itertags
+from streamlink.stream import HLSStream, HTTPStream
+
+log = logging.getLogger(__name__)
+
class LiveRussia(Plugin):
- url_re = re.compile(r"https?://(?:www.)?live.russia.tv/index/index/channel_id/")
- iframe_re = re.compile(r"""<iframe[^>]*src=["']([^'"]+)["'][^>]*>""")
- stream_re = re.compile(r"""window.pl.data.*m3u8":"(.*)"}.*};""")
+ url_re = re.compile(r"https?://(?:www\.|live\.)?russia.tv")
+ _data_re = re.compile(r"""window\.pl\.data\.([\w_]+)\s*=\s*['"]?(.*?)['"]?;""")
@classmethod
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
+ def _get_iframe_url(self, url):
+ res = http.get(url)
+ for iframe in itertags(res.text, 'iframe'):
+ src = iframe.attributes.get("src")
+ if src:
+ return src
+
+ def _get_stream_info_url(self, url):
+ data = {}
+ res = http.get(url)
+ for m in self._data_re.finditer(res.text):
+ data[m.group(1)] = m.group(2)
+
+ log.debug("Got pl_data={0}".format(data))
+
+ if data:
+ if data["isVod"] == '0':
+ return "https:{domain}/iframe/datalive/id/{id}/sid/{sid}".format(**data)
+ else:
+ return "https:{domain}/iframe/datavideo/id/{id}/sid/{sid}".format(**data)
+
def _get_streams(self):
- res = http.get(self.url)
- iframe_result = re.search(self.iframe_re, res.text)
+ iframe_url = self._get_iframe_url(self.url)
+
+ if iframe_url:
+ log.debug("Found iframe URL={0}".format(iframe_url))
+ info_url = self._get_stream_info_url(iframe_url)
+
+ if info_url:
+ log.debug("Getting info from URL: {0}".format(info_url))
+ res = http.get(info_url, headers={"Referer": iframe_url})
+ data = http.json(res)
+
+ if data['status'] == 200:
+ for media in data['data']['playlist']['medialist']:
+ if media['errors']:
+ log.error(media['errors'].replace('\n', '').replace('\r', ''))
+
+ for media_type in media.get('sources', []):
+
+ if media_type == "m3u8":
+ hls_url = media['sources'][media_type]['auto']
+ for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():
+ yield s
+
+ if media_type == "http":
+ for pix, url in media['sources'][media_type].items():
+ yield "{0}p".format(pix), HTTPStream(self.session, url)
+ else:
+ log.error("An error occurred: {0}".format(data['errors'].replace('\n', '').replace('\r', '')))
+ else:
+ log.error("Unable to get stream info URL")
+ else:
+ log.error("Could not find video iframe")
+
- if not iframe_result:
- self.logger.error("The requested content is unavailable.")
- return
- res = http.get(iframe_result.group(1))
- stream_url_result = re.search(self.stream_re, res.text)
- if not stream_url_result:
- self.logger.error("The requested content is unavailable.")
- return
- return HLSStream.parse_variant_playlist(self.session, stream_url_result.group(1))
-__plugin__ = LiveRussia
\ No newline at end of file
+__plugin__ = LiveRussia
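The rewritten plugin scrapes `window.pl.data.*` assignments out of the player iframe; in isolation the `_data_re` pattern behaves like this (the sample JS is invented for illustration):
```python
import re

_data_re = re.compile(r"""window\.pl\.data\.([\w_]+)\s*=\s*['"]?(.*?)['"]?;""")

sample = """
window.pl.data.id = '12345';
window.pl.data.sid = 'russiatv';
window.pl.data.isVod = '0';
window.pl.data.domain = '//player.example.invalid';
"""

data = {m.group(1): m.group(2) for m in _data_re.finditer(sample)}
print(data)  # {'id': '12345', 'sid': 'russiatv', 'isVod': '0', 'domain': '//player.example.invalid'}
```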
| {"golden_diff": "diff --git a/src/streamlink/plugins/live_russia_tv.py b/src/streamlink/plugins/live_russia_tv.py\n--- a/src/streamlink/plugins/live_russia_tv.py\n+++ b/src/streamlink/plugins/live_russia_tv.py\n@@ -1,33 +1,81 @@\n+import logging\n import re\n+\n from streamlink.plugin import Plugin\n-from streamlink.plugin.api import http\n-from streamlink.stream import HLSStream\n+from streamlink.plugin.api import http, validate\n+from streamlink.plugin.api.utils import itertags\n+from streamlink.stream import HLSStream, HTTPStream\n+\n+log = logging.getLogger(__name__)\n+\n \n class LiveRussia(Plugin):\n- url_re = re.compile(r\"https?://(?:www.)?live.russia.tv/index/index/channel_id/\")\n- iframe_re = re.compile(r\"\"\"<iframe[^>]*src=[\"']([^'\"]+)[\"'][^>]*>\"\"\")\n- stream_re = re.compile(r\"\"\"window.pl.data.*m3u8\":\"(.*)\"}.*};\"\"\")\n+ url_re = re.compile(r\"https?://(?:www\\.|live\\.)?russia.tv\")\n+ _data_re = re.compile(r\"\"\"window\\.pl\\.data\\.([\\w_]+)\\s*=\\s*['\"]?(.*?)['\"]?;\"\"\")\n \n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n \n+ def _get_iframe_url(self, url):\n+ res = http.get(url)\n+ for iframe in itertags(res.text, 'iframe'):\n+ src = iframe.attributes.get(\"src\")\n+ if src:\n+ return src\n+\n+ def _get_stream_info_url(self, url):\n+ data = {}\n+ res = http.get(url)\n+ for m in self._data_re.finditer(res.text):\n+ data[m.group(1)] = m.group(2)\n+\n+ log.debug(\"Got pl_data={0}\".format(data))\n+\n+ if data:\n+ if data[\"isVod\"] == '0':\n+ return \"https:{domain}/iframe/datalive/id/{id}/sid/{sid}\".format(**data)\n+ else:\n+ return \"https:{domain}/iframe/datavideo/id/{id}/sid/{sid}\".format(**data)\n+\n def _get_streams(self):\n- res = http.get(self.url)\n- iframe_result = re.search(self.iframe_re, res.text)\n+ iframe_url = self._get_iframe_url(self.url)\n+\n+ if iframe_url:\n+ log.debug(\"Found iframe URL={0}\".format(iframe_url))\n+ info_url = self._get_stream_info_url(iframe_url)\n+\n+ if info_url:\n+ log.debug(\"Getting info from URL: {0}\".format(info_url))\n+ res = http.get(info_url, headers={\"Referer\": iframe_url})\n+ data = http.json(res)\n+\n+ if data['status'] == 200:\n+ for media in data['data']['playlist']['medialist']:\n+ if media['errors']:\n+ log.error(media['errors'].replace('\\n', '').replace('\\r', ''))\n+\n+ for media_type in media.get('sources', []):\n+\n+ if media_type == \"m3u8\":\n+ hls_url = media['sources'][media_type]['auto']\n+ for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():\n+ yield s\n+\n+ if media_type == \"http\":\n+ for pix, url in media['sources'][media_type].items():\n+ yield \"{0}p\".format(pix), HTTPStream(self.session, url)\n+ else:\n+ log.error(\"An error occurred: {0}\".format(data['errors'].replace('\\n', '').replace('\\r', '')))\n+ else:\n+ log.error(\"Unable to get stream info URL\")\n+ else:\n+ log.error(\"Could not find video iframe\")\n+\n \n- if not iframe_result:\n- self.logger.error(\"The requested content is unavailable.\")\n- return\n \n- res = http.get(iframe_result.group(1))\n- stream_url_result = re.search(self.stream_re, res.text)\n \n- if not stream_url_result:\n- self.logger.error(\"The requested content is unavailable.\")\n- return\n \n- return HLSStream.parse_variant_playlist(self.session, stream_url_result.group(1))\n \n \n-__plugin__ = LiveRussia\n\\ No newline at end of file\n+__plugin__ = LiveRussia\n", "issue": "Problem with live.russia.tv\nI have Problem with the Plugin live.russia.tv : \r\n```\r\n#SERVICE 
4097:0:1:0:0:0:224:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/76:\u041c\u043e\u0441\u043a\u0432\u0430 24 HD\r\n#DESCRIPTION \u041c\u043e\u0441\u043a\u0432\u0430 24 HD\r\n#SERVICE 4097:0:1:0:0:0:449:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/1:Rossija 1 HD\r\n#DESCRIPTION Rossija 1 HD\r\n#SERVICE 4097:0:1:0:0:0:445:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/82:Rossija RTR HD\r\n#DESCRIPTION Rossija RTR HD\r\n#SERVICE 4097:0:1:0:0:0:447:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/3:Rossija 24 HD\r\n#DESCRIPTION Rossija 24 HD\r\n```\r\nThe Channels not working on streamlink - from PC work the channels ok.\n", "before_files": [{"content": "import re\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.stream import HLSStream\n\nclass LiveRussia(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?live.russia.tv/index/index/channel_id/\")\n iframe_re = re.compile(r\"\"\"<iframe[^>]*src=[\"']([^'\"]+)[\"'][^>]*>\"\"\")\n stream_re = re.compile(r\"\"\"window.pl.data.*m3u8\":\"(.*)\"}.*};\"\"\")\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n res = http.get(self.url)\n iframe_result = re.search(self.iframe_re, res.text)\n\n if not iframe_result:\n self.logger.error(\"The requested content is unavailable.\")\n return\n\n res = http.get(iframe_result.group(1))\n stream_url_result = re.search(self.stream_re, res.text)\n\n if not stream_url_result:\n self.logger.error(\"The requested content is unavailable.\")\n return\n\n return HLSStream.parse_variant_playlist(self.session, stream_url_result.group(1))\n\n\n__plugin__ = LiveRussia", "path": "src/streamlink/plugins/live_russia_tv.py"}], "after_files": [{"content": "import logging\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, validate\nfrom streamlink.plugin.api.utils import itertags\nfrom streamlink.stream import HLSStream, HTTPStream\n\nlog = logging.getLogger(__name__)\n\n\nclass LiveRussia(Plugin):\n url_re = re.compile(r\"https?://(?:www\\.|live\\.)?russia.tv\")\n _data_re = re.compile(r\"\"\"window\\.pl\\.data\\.([\\w_]+)\\s*=\\s*['\"]?(.*?)['\"]?;\"\"\")\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_iframe_url(self, url):\n res = http.get(url)\n for iframe in itertags(res.text, 'iframe'):\n src = iframe.attributes.get(\"src\")\n if src:\n return src\n\n def _get_stream_info_url(self, url):\n data = {}\n res = http.get(url)\n for m in self._data_re.finditer(res.text):\n data[m.group(1)] = m.group(2)\n\n log.debug(\"Got pl_data={0}\".format(data))\n\n if data:\n if data[\"isVod\"] == '0':\n return \"https:{domain}/iframe/datalive/id/{id}/sid/{sid}\".format(**data)\n else:\n return \"https:{domain}/iframe/datavideo/id/{id}/sid/{sid}\".format(**data)\n\n def _get_streams(self):\n iframe_url = self._get_iframe_url(self.url)\n\n if iframe_url:\n log.debug(\"Found iframe URL={0}\".format(iframe_url))\n info_url = self._get_stream_info_url(iframe_url)\n\n if info_url:\n log.debug(\"Getting info from URL: {0}\".format(info_url))\n res = http.get(info_url, headers={\"Referer\": iframe_url})\n data = http.json(res)\n\n if data['status'] == 200:\n for media in data['data']['playlist']['medialist']:\n if media['errors']:\n log.error(media['errors'].replace('\\n', '').replace('\\r', ''))\n\n for 
media_type in media.get('sources', []):\n\n if media_type == \"m3u8\":\n hls_url = media['sources'][media_type]['auto']\n for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():\n yield s\n\n if media_type == \"http\":\n for pix, url in media['sources'][media_type].items():\n yield \"{0}p\".format(pix), HTTPStream(self.session, url)\n else:\n log.error(\"An error occurred: {0}\".format(data['errors'].replace('\\n', '').replace('\\r', '')))\n else:\n log.error(\"Unable to get stream info URL\")\n else:\n log.error(\"Could not find video iframe\")\n\n\n\n\n\n\n\n__plugin__ = LiveRussia\n", "path": "src/streamlink/plugins/live_russia_tv.py"}]} | 939 | 980 |
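A note on the streamlink fix above: instead of matching one hard-coded `m3u8` URL, the rewritten plugin scrapes every `window.pl.data.*` assignment out of the page with a single regex and rebuilds the player's info URL from the collected keys. A minimal standalone sketch of that parsing step — the sample script snippet and the domain value are invented for illustration; only the regex and the URL templates come from the diff:

```python
import re

# Invented stand-in for the inline <script> block the plugin scrapes;
# the real page carries similar window.pl.data assignments.
PAGE = """
window.pl.data.id = '76';
window.pl.data.sid = 'live';
window.pl.data.isVod = '0';
window.pl.data.domain = '//player.vgtrk.com';
"""

_data_re = re.compile(r"""window\.pl\.data\.([\w_]+)\s*=\s*['"]?(.*?)['"]?;""")

# Collect every key/value pair in one pass.
data = {m.group(1): m.group(2) for m in _data_re.finditer(PAGE)}

# Live streams and VODs use different endpoints, selected by the isVod flag.
if data.get("isVod") == "0":
    info_url = "https:{domain}/iframe/datalive/id/{id}/sid/{sid}".format(**data)
else:
    info_url = "https:{domain}/iframe/datavideo/id/{id}/sid/{sid}".format(**data)

print(info_url)  # https://player.vgtrk.com/iframe/datalive/id/76/sid/live
```

This is also why the patched plugin can yield both HLS variants and plain HTTP streams: the JSON behind that info URL lists every source type per media entry, rather than a single playlist.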
gh_patches_debug_11877 | rasdani/github-patches | git_diff | CTFd__CTFd-1048 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
import will crash ctfd
**Environment**:
- CTFd Version/Commit: 2.1.3
- Operating System: ubuntu 18.04
- Web Browser and Version: Opera 60.0.3255.170
**What happened?**
trying to import db (zip file)
**What did you expect to happen?**
it would import db (zip file)
**How to reproduce your issue**
**Any associated stack traces or error logs**
Failed to disable foreign key checks. Continuing.
Error: No support for ALTER of constraints in SQLite dialect
I believe it's Alembic's fault.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `migrations/versions/b5551cd26764_add_captain_column_to_teams.py`
Content:
```
1 """Add captain column to Teams
2
3 Revision ID: b5551cd26764
4 Revises: 4e4d5a9ea000
5 Create Date: 2019-04-12 00:29:08.021141
6
7 """
8 from CTFd.models import db
9 from alembic import op
10 import sqlalchemy as sa
11 from sqlalchemy.sql import text, table, column, and_
12
13 # revision identifiers, used by Alembic.
14 revision = 'b5551cd26764'
15 down_revision = '4e4d5a9ea000'
16 branch_labels = None
17 depends_on = None
18
19 teams_table = table('teams',
20 column('id', db.Integer),
21 column('captain_id', db.Integer),
22 )
23
24 users_table = table('users',
25 column('id', db.Integer),
26 column('team_id', db.Integer),
27 )
28
29
30 def upgrade():
31 # ### commands auto generated by Alembic - please adjust! ###
32 op.add_column('teams', sa.Column('captain_id', sa.Integer(), nullable=True))
33 op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id'])
34
35 connection = op.get_bind()
36 for team in connection.execute(teams_table.select()):
37 users = connection.execute(
38 users_table.select().where(users_table.c.team_id == team.id).order_by(users_table.c.id).limit(1)
39 )
40 for user in users:
41 connection.execute(
42 teams_table.update().where(
43 teams_table.c.id == team.id
44 ).values(
45 captain_id=user.id
46 )
47 )
48 # ### end Alembic commands ###
49
50
51 def downgrade():
52 # ### commands auto generated by Alembic - please adjust! ###
53 op.drop_constraint('team_captain_id', 'teams', type_='foreignkey')
54 op.drop_column('teams', 'captain_id')
55 # ### end Alembic commands ###
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/migrations/versions/b5551cd26764_add_captain_column_to_teams.py b/migrations/versions/b5551cd26764_add_captain_column_to_teams.py
--- a/migrations/versions/b5551cd26764_add_captain_column_to_teams.py
+++ b/migrations/versions/b5551cd26764_add_captain_column_to_teams.py
@@ -30,7 +30,11 @@
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('teams', sa.Column('captain_id', sa.Integer(), nullable=True))
- op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id'])
+
+ bind = op.get_bind()
+ url = str(bind.engine.url)
+ if url.startswith('sqlite') is False:
+ op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id'])
connection = op.get_bind()
for team in connection.execute(teams_table.select()):
| {"golden_diff": "diff --git a/migrations/versions/b5551cd26764_add_captain_column_to_teams.py b/migrations/versions/b5551cd26764_add_captain_column_to_teams.py\n--- a/migrations/versions/b5551cd26764_add_captain_column_to_teams.py\n+++ b/migrations/versions/b5551cd26764_add_captain_column_to_teams.py\n@@ -30,7 +30,11 @@\n def upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('teams', sa.Column('captain_id', sa.Integer(), nullable=True))\n- op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id'])\n+\n+ bind = op.get_bind()\n+ url = str(bind.engine.url)\n+ if url.startswith('sqlite') is False:\n+ op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id'])\n \n connection = op.get_bind()\n for team in connection.execute(teams_table.select()):\n", "issue": "import will crash ctfd\n<!--\r\nIf this is a bug report please fill out the template below.\r\n\r\nIf this is a feature request please describe the behavior that you'd like to see.\r\n-->\r\n\r\n**Environment**:\r\n\r\n - CTFd Version/Commit: 2.1.3\r\n - Operating System: ubuntu 18.04\r\n - Web Browser and Version: Opera 60.0.3255.170\r\n\r\n**What happened?**\r\ntrying to import db (zip file)\r\n**What did you expect to happen?**\r\nit would import db (zip file)\r\n**How to reproduce your issue**\r\n\r\n**Any associated stack traces or error logs**\r\nFailed to disable foreign key checks. Continuing.\r\nError: No support for ALTER of constraints in SQLite dialect\r\n\r\nI believe it's Alembic fault \n", "before_files": [{"content": "\"\"\"Add captain column to Teams\n\nRevision ID: b5551cd26764\nRevises: 4e4d5a9ea000\nCreate Date: 2019-04-12 00:29:08.021141\n\n\"\"\"\nfrom CTFd.models import db\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.sql import text, table, column, and_\n\n# revision identifiers, used by Alembic.\nrevision = 'b5551cd26764'\ndown_revision = '4e4d5a9ea000'\nbranch_labels = None\ndepends_on = None\n\nteams_table = table('teams',\n column('id', db.Integer),\n column('captain_id', db.Integer),\n)\n\nusers_table = table('users',\n column('id', db.Integer),\n column('team_id', db.Integer),\n)\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('teams', sa.Column('captain_id', sa.Integer(), nullable=True))\n op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id'])\n\n connection = op.get_bind()\n for team in connection.execute(teams_table.select()):\n users = connection.execute(\n users_table.select().where(users_table.c.team_id == team.id).order_by(users_table.c.id).limit(1)\n )\n for user in users:\n connection.execute(\n teams_table.update().where(\n teams_table.c.id == team.id\n ).values(\n captain_id=user.id\n )\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint('team_captain_id', 'teams', type_='foreignkey')\n op.drop_column('teams', 'captain_id')\n # ### end Alembic commands ###\n", "path": "migrations/versions/b5551cd26764_add_captain_column_to_teams.py"}], "after_files": [{"content": "\"\"\"Add captain column to Teams\n\nRevision ID: b5551cd26764\nRevises: 4e4d5a9ea000\nCreate Date: 2019-04-12 00:29:08.021141\n\n\"\"\"\nfrom CTFd.models import db\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.sql import text, table, column, and_\n\n# revision identifiers, used by Alembic.\nrevision = 'b5551cd26764'\ndown_revision = '4e4d5a9ea000'\nbranch_labels = None\ndepends_on = None\n\nteams_table = table('teams',\n column('id', db.Integer),\n column('captain_id', db.Integer),\n)\n\nusers_table = table('users',\n column('id', db.Integer),\n column('team_id', db.Integer),\n)\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('teams', sa.Column('captain_id', sa.Integer(), nullable=True))\n\n bind = op.get_bind()\n url = str(bind.engine.url)\n if url.startswith('sqlite') is False:\n op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id'])\n\n connection = op.get_bind()\n for team in connection.execute(teams_table.select()):\n users = connection.execute(\n users_table.select().where(users_table.c.team_id == team.id).order_by(users_table.c.id).limit(1)\n )\n for user in users:\n connection.execute(\n teams_table.update().where(\n teams_table.c.id == team.id\n ).values(\n captain_id=user.id\n )\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('team_captain_id', 'teams', type_='foreignkey')\n op.drop_column('teams', 'captain_id')\n # ### end Alembic commands ###\n", "path": "migrations/versions/b5551cd26764_add_captain_column_to_teams.py"}]} | 991 | 257 |
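The traceback in this record ("No support for ALTER of constraints in SQLite dialect") is a hard SQLite limitation: the dialect has no `ALTER TABLE ... ADD CONSTRAINT`, so `op.create_foreign_key` can only run on other backends. Below is a sketch of the guard — using the backend name rather than the URL string match the merged diff uses — plus, commented out, Alembic's batch mode, which recreates the table and so could keep the constraint on SQLite as well. Untested against CTFd itself:

```python
from alembic import op
import sqlalchemy as sa


def upgrade():
    op.add_column('teams', sa.Column('captain_id', sa.Integer(), nullable=True))

    bind = op.get_bind()
    if bind.engine.url.get_backend_name() != 'sqlite':
        # Adding a named FK needs ALTER TABLE, which SQLite cannot do in place.
        op.create_foreign_key('team_captain_id', 'teams', 'users',
                              ['captain_id'], ['id'])
    # Alternative that works on SQLite too: batch mode copies the table,
    # applies the change, and swaps it back in.
    # with op.batch_alter_table('teams') as batch_op:
    #     batch_op.create_foreign_key('team_captain_id', 'users',
    #                                 ['captain_id'], ['id'])
```

The trade-off: skipping the constraint keeps SQLite imports working but leaves that database without referential integrity on `captain_id`; batch mode preserves it at the cost of a table rewrite.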
gh_patches_debug_7915 | rasdani/github-patches | git_diff | psychopy__psychopy-3457 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Windows seg fault when defaultView set to Runner
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `psychopy/app/pavlovia_ui/menu.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 # Part of the PsychoPy library
5 # Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
6 # Distributed under the terms of the GNU General Public License (GPL).
7
8 import wx
9 import requests
10
11 from psychopy import logging
12 from .. import dialogs
13 from .functions import logInPavlovia
14 from psychopy.app.pavlovia_ui.project import syncProject
15 from .search import SearchFrame
16 from .project import ProjectEditor
17 from psychopy.localization import _translate
18 from psychopy.projects import pavlovia
19
20
21 class PavloviaMenu(wx.Menu):
22 app = None
23 appData = None
24 currentUser = None
25 knownUsers = None
26 searchDlg = None
27
28 def __init__(self, parent):
29 wx.Menu.__init__(self)
30 self.parent = parent # type: BuilderFrame
31 PavloviaMenu.app = parent.app
32 keys = self.app.keys
33 # from prefs fetch info about prev usernames and projects
34 PavloviaMenu.appData = self.app.prefs.appData['projects']
35
36 # item = self.Append(wx.ID_ANY, _translate("Tell me more..."))
37 # parent.Bind(wx.EVT_MENU, self.onAbout, id=item.GetId())
38
39 PavloviaMenu.knownUsers = pavlovia.knownUsers
40
41 # sub-menu for usernames and login
42 self.userMenu = wx.Menu()
43 # if a user was previously logged in then set them as current
44 lastPavUser = PavloviaMenu.appData['pavloviaUser']
45 if pavlovia.knownUsers and (lastPavUser not in pavlovia.knownUsers):
46 lastPavUser = None
47 # if lastPavUser and not PavloviaMenu.currentUser:
48 # self.setUser(PavloviaMenu.appData['pavloviaUser'])
49 for name in self.knownUsers:
50 self.addToSubMenu(name, self.userMenu, self.onSetUser)
51 self.userMenu.AppendSeparator()
52 self.loginBtn = self.userMenu.Append(wx.ID_ANY,
53 _translate("Log in to Pavlovia...\t{}")
54 .format(keys['pavlovia_logIn']))
55 parent.Bind(wx.EVT_MENU, self.onLogInPavlovia, id=self.loginBtn.GetId())
56 self.AppendSubMenu(self.userMenu, _translate("User"))
57
58 # search
59 self.searchBtn = self.Append(wx.ID_ANY,
60 _translate("Search Pavlovia\t{}")
61 .format(keys['projectsFind']))
62 parent.Bind(wx.EVT_MENU, self.onSearch, id=self.searchBtn.GetId())
63
64 # new
65 self.newBtn = self.Append(wx.ID_ANY,
66 _translate("New...\t{}").format(keys['projectsNew']))
67 parent.Bind(wx.EVT_MENU, self.onNew, id=self.newBtn.GetId())
68
69 self.syncBtn = self.Append(wx.ID_ANY,
70 _translate("Sync\t{}").format(keys['projectsSync']))
71 parent.Bind(wx.EVT_MENU, self.onSync, id=self.syncBtn.GetId())
72
73 def addToSubMenu(self, name, menu, function):
74 item = menu.Append(wx.ID_ANY, name)
75 self.parent.Bind(wx.EVT_MENU, function, id=item.GetId())
76
77 def onAbout(self, event):
78 wx.GetApp().followLink(event)
79
80 def onSetUser(self, event):
81 user = self.userMenu.GetLabelText(event.GetId())
82 self.setUser(user)
83
84 def setUser(self, user=None):
85
86 if user is None and PavloviaMenu.appData['pavloviaUser']:
87 user = PavloviaMenu.appData['pavloviaUser']
88
89 if user in [PavloviaMenu.currentUser, None]:
90 return # nothing to do here. Move along please.
91
92 PavloviaMenu.currentUser = user
93 PavloviaMenu.appData['pavloviaUser'] = user
94 if user in pavlovia.knownUsers:
95 token = pavlovia.knownUsers[user]['token']
96 try:
97 pavlovia.getCurrentSession().setToken(token)
98 except requests.exceptions.ConnectionError:
99 logging.warning("Tried to log in to Pavlovia but no network "
100 "connection")
101 return
102 else:
103 if hasattr(self, 'onLogInPavlovia'):
104 self.onLogInPavlovia()
105
106 if PavloviaMenu.searchDlg:
107 PavloviaMenu.searchDlg.updateUserProjs()
108
109 def onSync(self, event):
110 retVal = syncProject(parent=self.parent, project=self.parent.project)
111 if hasattr(self.parent, 'gitFeedback'):
112 self.parent.gitFeedback(retVal)
113
114 def onSearch(self, event):
115 PavloviaMenu.searchDlg = SearchFrame(app=self.parent.app)
116 PavloviaMenu.searchDlg.Show()
117
118 def onLogInPavlovia(self, event=None):
119 logInPavlovia(parent=self.parent)
120
121 def onNew(self, event):
122 """Create a new project
123 """
124 if pavlovia.getCurrentSession().user.username:
125 projEditor = ProjectEditor()
126 if projEditor.ShowModal() == wx.ID_OK:
127 self.parent.project = projEditor.project
128 # do a first sync as well
129 retVal = syncProject(parent=self.parent, project=projEditor.project)
130 self.parent.gitFeedback(retVal)
131 else:
132 infoDlg = dialogs.MessageDialog(parent=None, type='Info',
133 message=_translate(
134 "You need to log in"
135 " to create a project"))
136 infoDlg.Show()
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/psychopy/app/pavlovia_ui/menu.py b/psychopy/app/pavlovia_ui/menu.py
--- a/psychopy/app/pavlovia_ui/menu.py
+++ b/psychopy/app/pavlovia_ui/menu.py
@@ -82,9 +82,9 @@
self.setUser(user)
def setUser(self, user=None):
-
- if user is None and PavloviaMenu.appData['pavloviaUser']:
- user = PavloviaMenu.appData['pavloviaUser']
+ if PavloviaMenu.appData:
+ if user is None and PavloviaMenu.appData['pavloviaUser']:
+ user = PavloviaMenu.appData['pavloviaUser']
if user in [PavloviaMenu.currentUser, None]:
return # nothing to do here. Move along please.
| {"golden_diff": "diff --git a/psychopy/app/pavlovia_ui/menu.py b/psychopy/app/pavlovia_ui/menu.py\n--- a/psychopy/app/pavlovia_ui/menu.py\n+++ b/psychopy/app/pavlovia_ui/menu.py\n@@ -82,9 +82,9 @@\n self.setUser(user)\n \n def setUser(self, user=None):\n-\n- if user is None and PavloviaMenu.appData['pavloviaUser']:\n- user = PavloviaMenu.appData['pavloviaUser']\n+ if PavloviaMenu.appData:\n+ if user is None and PavloviaMenu.appData['pavloviaUser']:\n+ user = PavloviaMenu.appData['pavloviaUser']\n \n if user in [PavloviaMenu.currentUser, None]:\n return # nothing to do here. Move along please.\n", "issue": "Windows seg fault when defaultView set to Runner\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Part of the PsychoPy library\n# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.\n# Distributed under the terms of the GNU General Public License (GPL).\n\nimport wx\nimport requests\n\nfrom psychopy import logging\nfrom .. import dialogs\nfrom .functions import logInPavlovia\nfrom psychopy.app.pavlovia_ui.project import syncProject\nfrom .search import SearchFrame\nfrom .project import ProjectEditor\nfrom psychopy.localization import _translate\nfrom psychopy.projects import pavlovia\n\n\nclass PavloviaMenu(wx.Menu):\n app = None\n appData = None\n currentUser = None\n knownUsers = None\n searchDlg = None\n\n def __init__(self, parent):\n wx.Menu.__init__(self)\n self.parent = parent # type: BuilderFrame\n PavloviaMenu.app = parent.app\n keys = self.app.keys\n # from prefs fetch info about prev usernames and projects\n PavloviaMenu.appData = self.app.prefs.appData['projects']\n\n # item = self.Append(wx.ID_ANY, _translate(\"Tell me more...\"))\n # parent.Bind(wx.EVT_MENU, self.onAbout, id=item.GetId())\n\n PavloviaMenu.knownUsers = pavlovia.knownUsers\n\n # sub-menu for usernames and login\n self.userMenu = wx.Menu()\n # if a user was previously logged in then set them as current\n lastPavUser = PavloviaMenu.appData['pavloviaUser']\n if pavlovia.knownUsers and (lastPavUser not in pavlovia.knownUsers):\n lastPavUser = None\n # if lastPavUser and not PavloviaMenu.currentUser:\n # self.setUser(PavloviaMenu.appData['pavloviaUser'])\n for name in self.knownUsers:\n self.addToSubMenu(name, self.userMenu, self.onSetUser)\n self.userMenu.AppendSeparator()\n self.loginBtn = self.userMenu.Append(wx.ID_ANY,\n _translate(\"Log in to Pavlovia...\\t{}\")\n .format(keys['pavlovia_logIn']))\n parent.Bind(wx.EVT_MENU, self.onLogInPavlovia, id=self.loginBtn.GetId())\n self.AppendSubMenu(self.userMenu, _translate(\"User\"))\n\n # search\n self.searchBtn = self.Append(wx.ID_ANY,\n _translate(\"Search Pavlovia\\t{}\")\n .format(keys['projectsFind']))\n parent.Bind(wx.EVT_MENU, self.onSearch, id=self.searchBtn.GetId())\n\n # new\n self.newBtn = self.Append(wx.ID_ANY,\n _translate(\"New...\\t{}\").format(keys['projectsNew']))\n parent.Bind(wx.EVT_MENU, self.onNew, id=self.newBtn.GetId())\n\n self.syncBtn = self.Append(wx.ID_ANY,\n _translate(\"Sync\\t{}\").format(keys['projectsSync']))\n parent.Bind(wx.EVT_MENU, self.onSync, id=self.syncBtn.GetId())\n\n def addToSubMenu(self, name, menu, function):\n item = menu.Append(wx.ID_ANY, name)\n self.parent.Bind(wx.EVT_MENU, function, id=item.GetId())\n\n def onAbout(self, event):\n wx.GetApp().followLink(event)\n\n def onSetUser(self, event):\n user = self.userMenu.GetLabelText(event.GetId())\n self.setUser(user)\n\n def setUser(self, user=None):\n\n if user is None and 
PavloviaMenu.appData['pavloviaUser']:\n user = PavloviaMenu.appData['pavloviaUser']\n\n if user in [PavloviaMenu.currentUser, None]:\n return # nothing to do here. Move along please.\n\n PavloviaMenu.currentUser = user\n PavloviaMenu.appData['pavloviaUser'] = user\n if user in pavlovia.knownUsers:\n token = pavlovia.knownUsers[user]['token']\n try:\n pavlovia.getCurrentSession().setToken(token)\n except requests.exceptions.ConnectionError:\n logging.warning(\"Tried to log in to Pavlovia but no network \"\n \"connection\")\n return\n else:\n if hasattr(self, 'onLogInPavlovia'):\n self.onLogInPavlovia()\n\n if PavloviaMenu.searchDlg:\n PavloviaMenu.searchDlg.updateUserProjs()\n\n def onSync(self, event):\n retVal = syncProject(parent=self.parent, project=self.parent.project)\n if hasattr(self.parent, 'gitFeedback'):\n self.parent.gitFeedback(retVal)\n\n def onSearch(self, event):\n PavloviaMenu.searchDlg = SearchFrame(app=self.parent.app)\n PavloviaMenu.searchDlg.Show()\n\n def onLogInPavlovia(self, event=None):\n logInPavlovia(parent=self.parent)\n\n def onNew(self, event):\n \"\"\"Create a new project\n \"\"\"\n if pavlovia.getCurrentSession().user.username:\n projEditor = ProjectEditor()\n if projEditor.ShowModal() == wx.ID_OK:\n self.parent.project = projEditor.project\n # do a first sync as well\n retVal = syncProject(parent=self.parent, project=projEditor.project)\n self.parent.gitFeedback(retVal)\n else:\n infoDlg = dialogs.MessageDialog(parent=None, type='Info',\n message=_translate(\n \"You need to log in\"\n \" to create a project\"))\n infoDlg.Show()\n", "path": "psychopy/app/pavlovia_ui/menu.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Part of the PsychoPy library\n# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.\n# Distributed under the terms of the GNU General Public License (GPL).\n\nimport wx\nimport requests\n\nfrom psychopy import logging\nfrom .. 
import dialogs\nfrom .functions import logInPavlovia\nfrom psychopy.app.pavlovia_ui.project import syncProject\nfrom .search import SearchFrame\nfrom .project import ProjectEditor\nfrom psychopy.localization import _translate\nfrom psychopy.projects import pavlovia\n\n\nclass PavloviaMenu(wx.Menu):\n app = None\n appData = None\n currentUser = None\n knownUsers = None\n searchDlg = None\n\n def __init__(self, parent):\n wx.Menu.__init__(self)\n self.parent = parent # type: BuilderFrame\n PavloviaMenu.app = parent.app\n keys = self.app.keys\n # from prefs fetch info about prev usernames and projects\n PavloviaMenu.appData = self.app.prefs.appData['projects']\n\n # item = self.Append(wx.ID_ANY, _translate(\"Tell me more...\"))\n # parent.Bind(wx.EVT_MENU, self.onAbout, id=item.GetId())\n\n PavloviaMenu.knownUsers = pavlovia.knownUsers\n\n # sub-menu for usernames and login\n self.userMenu = wx.Menu()\n # if a user was previously logged in then set them as current\n lastPavUser = PavloviaMenu.appData['pavloviaUser']\n if pavlovia.knownUsers and (lastPavUser not in pavlovia.knownUsers):\n lastPavUser = None\n # if lastPavUser and not PavloviaMenu.currentUser:\n # self.setUser(PavloviaMenu.appData['pavloviaUser'])\n for name in self.knownUsers:\n self.addToSubMenu(name, self.userMenu, self.onSetUser)\n self.userMenu.AppendSeparator()\n self.loginBtn = self.userMenu.Append(wx.ID_ANY,\n _translate(\"Log in to Pavlovia...\\t{}\")\n .format(keys['pavlovia_logIn']))\n parent.Bind(wx.EVT_MENU, self.onLogInPavlovia, id=self.loginBtn.GetId())\n self.AppendSubMenu(self.userMenu, _translate(\"User\"))\n\n # search\n self.searchBtn = self.Append(wx.ID_ANY,\n _translate(\"Search Pavlovia\\t{}\")\n .format(keys['projectsFind']))\n parent.Bind(wx.EVT_MENU, self.onSearch, id=self.searchBtn.GetId())\n\n # new\n self.newBtn = self.Append(wx.ID_ANY,\n _translate(\"New...\\t{}\").format(keys['projectsNew']))\n parent.Bind(wx.EVT_MENU, self.onNew, id=self.newBtn.GetId())\n\n self.syncBtn = self.Append(wx.ID_ANY,\n _translate(\"Sync\\t{}\").format(keys['projectsSync']))\n parent.Bind(wx.EVT_MENU, self.onSync, id=self.syncBtn.GetId())\n\n def addToSubMenu(self, name, menu, function):\n item = menu.Append(wx.ID_ANY, name)\n self.parent.Bind(wx.EVT_MENU, function, id=item.GetId())\n\n def onAbout(self, event):\n wx.GetApp().followLink(event)\n\n def onSetUser(self, event):\n user = self.userMenu.GetLabelText(event.GetId())\n self.setUser(user)\n\n def setUser(self, user=None):\n if PavloviaMenu.appData:\n if user is None and PavloviaMenu.appData['pavloviaUser']:\n user = PavloviaMenu.appData['pavloviaUser']\n\n if user in [PavloviaMenu.currentUser, None]:\n return # nothing to do here. 
Move along please.\n\n PavloviaMenu.currentUser = user\n PavloviaMenu.appData['pavloviaUser'] = user\n if user in pavlovia.knownUsers:\n token = pavlovia.knownUsers[user]['token']\n try:\n pavlovia.getCurrentSession().setToken(token)\n except requests.exceptions.ConnectionError:\n logging.warning(\"Tried to log in to Pavlovia but no network \"\n \"connection\")\n return\n else:\n if hasattr(self, 'onLogInPavlovia'):\n self.onLogInPavlovia()\n\n if PavloviaMenu.searchDlg:\n PavloviaMenu.searchDlg.updateUserProjs()\n\n def onSync(self, event):\n retVal = syncProject(parent=self.parent, project=self.parent.project)\n if hasattr(self.parent, 'gitFeedback'):\n self.parent.gitFeedback(retVal)\n\n def onSearch(self, event):\n PavloviaMenu.searchDlg = SearchFrame(app=self.parent.app)\n PavloviaMenu.searchDlg.Show()\n\n def onLogInPavlovia(self, event=None):\n logInPavlovia(parent=self.parent)\n\n def onNew(self, event):\n \"\"\"Create a new project\n \"\"\"\n if pavlovia.getCurrentSession().user.username:\n projEditor = ProjectEditor()\n if projEditor.ShowModal() == wx.ID_OK:\n self.parent.project = projEditor.project\n # do a first sync as well\n retVal = syncProject(parent=self.parent, project=projEditor.project)\n self.parent.gitFeedback(retVal)\n else:\n infoDlg = dialogs.MessageDialog(parent=None, type='Info',\n message=_translate(\n \"You need to log in\"\n \" to create a project\"))\n infoDlg.Show()\n", "path": "psychopy/app/pavlovia_ui/menu.py"}]} | 1,796 | 195 |
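What actually goes wrong in this record: when PsychoPy opens straight into the Runner, `PavloviaMenu.appData` may still be unset by the time the menu fires, and indexing `None` inside a wx event handler is what takes Windows down. A stripped-down, runnable sketch of the guard the patch adds — a plain dict stands in for the real prefs object, and `.get()` is my substitution for its indexing:

```python
class PavloviaMenu:
    appData = None       # not populated yet when the Runner frame builds the menu
    currentUser = None

    def set_user(self, user=None):
        # Guard first: indexing a None appData is the crash the issue reports.
        if PavloviaMenu.appData:
            if user is None and PavloviaMenu.appData.get('pavloviaUser'):
                user = PavloviaMenu.appData['pavloviaUser']
        if user in (PavloviaMenu.currentUser, None):
            return  # nothing to do here
        PavloviaMenu.currentUser = user


menu = PavloviaMenu()
menu.set_user()                                   # safe no-op while appData is None
PavloviaMenu.appData = {'pavloviaUser': 'alice'}  # prefs arrive later
menu.set_user()
assert PavloviaMenu.currentUser == 'alice'
```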
gh_patches_debug_11710 | rasdani/github-patches | git_diff | Textualize__textual-2317 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scrolling containers should be focusable by default
`HorizontalScroll` and `VerticalScroll` should have `can_focus=True`.
Check this doesn't break any of the example apps.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/containers.py`
Content:
```
1 """
2 Container widgets for quick styling.
3
4 """
5
6
7 from .widget import Widget
8
9
10 class Container(Widget):
11 """Simple container widget, with vertical layout."""
12
13 DEFAULT_CSS = """
14 Container {
15 height: 1fr;
16 layout: vertical;
17 overflow: auto;
18 }
19 """
20
21
22 class Vertical(Widget):
23 """A container which arranges children vertically."""
24
25 DEFAULT_CSS = """
26 Vertical {
27 width: 1fr;
28 layout: vertical;
29 overflow: hidden hidden;
30 }
31 """
32
33
34 class VerticalScroll(Widget):
35 """A container which arranges children vertically, with an automatic vertical scrollbar."""
36
37 DEFAULT_CSS = """
38 VerticalScroll {
39 width: 1fr;
40 layout: vertical;
41 overflow-y: auto;
42 }
43 """
44
45
46 class Horizontal(Widget):
47 """A container which arranges children horizontally."""
48
49 DEFAULT_CSS = """
50 Horizontal {
51 height: 1fr;
52 layout: horizontal;
53 overflow: hidden hidden;
54 }
55 """
56
57
58 class HorizontalScroll(Widget):
59 """A container which arranges children horizontally, with an automatic horizontal scrollbar."""
60
61 DEFAULT_CSS = """
62 HorizontalScroll {
63 height: 1fr;
64 layout: horizontal;
65 overflow-x: auto;
66 }
67 """
68
69
70 class Center(Widget):
71 """A container which centers children horizontally."""
72
73 DEFAULT_CSS = """
74 Center {
75 align-horizontal: center;
76 height: auto;
77 width: 1fr;
78 }
79 """
80
81
82 class Middle(Widget):
83 """A container which aligns children vertically in the middle."""
84
85 DEFAULT_CSS = """
86 Middle {
87 align-vertical: middle;
88 width: auto;
89 height: 1fr;
90 }
91 """
92
93
94 class Grid(Widget):
95 """A container with grid alignment."""
96
97 DEFAULT_CSS = """
98 Grid {
99 height: 1fr;
100 layout: grid;
101 }
102 """
103
104
105 class Content(Widget, can_focus=True, can_focus_children=False):
106 """A container for content such as text."""
107
108 DEFAULT_CSS = """
109 VerticalScroll {
110 height: 1fr;
111 layout: vertical;
112 overflow-y: auto;
113 }
114 """
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/textual/containers.py b/src/textual/containers.py
--- a/src/textual/containers.py
+++ b/src/textual/containers.py
@@ -31,7 +31,7 @@
"""
-class VerticalScroll(Widget):
+class VerticalScroll(Widget, can_focus=True):
"""A container which arranges children vertically, with an automatic vertical scrollbar."""
DEFAULT_CSS = """
@@ -55,7 +55,7 @@
"""
-class HorizontalScroll(Widget):
+class HorizontalScroll(Widget, can_focus=True):
"""A container which arranges children horizontally, with an automatic horizontal scrollbar."""
DEFAULT_CSS = """
| {"golden_diff": "diff --git a/src/textual/containers.py b/src/textual/containers.py\n--- a/src/textual/containers.py\n+++ b/src/textual/containers.py\n@@ -31,7 +31,7 @@\n \"\"\"\n \n \n-class VerticalScroll(Widget):\n+class VerticalScroll(Widget, can_focus=True):\n \"\"\"A container which arranges children vertically, with an automatic vertical scrollbar.\"\"\"\n \n DEFAULT_CSS = \"\"\"\n@@ -55,7 +55,7 @@\n \"\"\"\n \n \n-class HorizontalScroll(Widget):\n+class HorizontalScroll(Widget, can_focus=True):\n \"\"\"A container which arranges children horizontally, with an automatic horizontal scrollbar.\"\"\"\n \n DEFAULT_CSS = \"\"\"\n", "issue": "Scrolling containers should be focusable by default\n`ScrollHorizontal` and `ScrollVertical` should have `can_focus=True`.\n\nCheck this doesn't break any of the example apps.\n", "before_files": [{"content": "\"\"\"\nContainer widgets for quick styling.\n\n\"\"\"\n\n\nfrom .widget import Widget\n\n\nclass Container(Widget):\n \"\"\"Simple container widget, with vertical layout.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Container {\n height: 1fr;\n layout: vertical;\n overflow: auto;\n }\n \"\"\"\n\n\nclass Vertical(Widget):\n \"\"\"A container which arranges children vertically.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Vertical {\n width: 1fr;\n layout: vertical;\n overflow: hidden hidden;\n }\n \"\"\"\n\n\nclass VerticalScroll(Widget):\n \"\"\"A container which arranges children vertically, with an automatic vertical scrollbar.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n VerticalScroll {\n width: 1fr;\n layout: vertical;\n overflow-y: auto;\n }\n \"\"\"\n\n\nclass Horizontal(Widget):\n \"\"\"A container which arranges children horizontally.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Horizontal {\n height: 1fr;\n layout: horizontal;\n overflow: hidden hidden;\n }\n \"\"\"\n\n\nclass HorizontalScroll(Widget):\n \"\"\"A container which arranges children horizontally, with an automatic horizontal scrollbar.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HorizontalScroll {\n height: 1fr;\n layout: horizontal;\n overflow-x: auto;\n }\n \"\"\"\n\n\nclass Center(Widget):\n \"\"\"A container which centers children horizontally.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Center {\n align-horizontal: center;\n height: auto;\n width: 1fr;\n }\n \"\"\"\n\n\nclass Middle(Widget):\n \"\"\"A container which aligns children vertically in the middle.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Middle {\n align-vertical: middle;\n width: auto;\n height: 1fr;\n }\n \"\"\"\n\n\nclass Grid(Widget):\n \"\"\"A container with grid alignment.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Grid {\n height: 1fr;\n layout: grid;\n }\n \"\"\"\n\n\nclass Content(Widget, can_focus=True, can_focus_children=False):\n \"\"\"A container for content such as text.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n VerticalScroll {\n height: 1fr;\n layout: vertical;\n overflow-y: auto;\n }\n \"\"\"\n", "path": "src/textual/containers.py"}], "after_files": [{"content": "\"\"\"\nContainer widgets for quick styling.\n\n\"\"\"\n\n\nfrom .widget import Widget\n\n\nclass Container(Widget):\n \"\"\"Simple container widget, with vertical layout.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Container {\n height: 1fr;\n layout: vertical;\n overflow: auto;\n }\n \"\"\"\n\n\nclass Vertical(Widget):\n \"\"\"A container which arranges children vertically.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Vertical {\n width: 1fr;\n layout: vertical;\n overflow: hidden hidden;\n }\n \"\"\"\n\n\nclass VerticalScroll(Widget, can_focus=True):\n \"\"\"A container which arranges children vertically, with an automatic vertical 
scrollbar.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n VerticalScroll {\n width: 1fr;\n layout: vertical;\n overflow-y: auto;\n }\n \"\"\"\n\n\nclass Horizontal(Widget):\n \"\"\"A container which arranges children horizontally.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Horizontal {\n height: 1fr;\n layout: horizontal;\n overflow: hidden hidden;\n }\n \"\"\"\n\n\nclass HorizontalScroll(Widget, can_focus=True):\n \"\"\"A container which arranges children horizontally, with an automatic horizontal scrollbar.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HorizontalScroll {\n height: 1fr;\n layout: horizontal;\n overflow-x: auto;\n }\n \"\"\"\n\n\nclass Center(Widget):\n \"\"\"A container which centers children horizontally.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Center {\n align-horizontal: center;\n height: auto;\n width: 1fr;\n }\n \"\"\"\n\n\nclass Middle(Widget):\n \"\"\"A container which aligns children vertically in the middle.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Middle {\n align-vertical: middle;\n width: auto;\n height: 1fr;\n }\n \"\"\"\n\n\nclass Grid(Widget):\n \"\"\"A container with grid alignment.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Grid {\n height: 1fr;\n layout: grid;\n }\n \"\"\"\n\n\nclass Content(Widget, can_focus=True, can_focus_children=False):\n \"\"\"A container for content such as text.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n VerticalScroll {\n height: 1fr;\n layout: vertical;\n overflow-y: auto;\n }\n \"\"\"\n", "path": "src/textual/containers.py"}]} | 1,002 | 144 |
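For context on why the fix above is a one-keyword change: Textual widgets accept `can_focus` as a class-definition keyword (the same mechanism `Content` already uses at the bottom of `containers.py`), so flipping it makes the scroll containers themselves tab stops. A small app to see the difference, assuming a reasonably recent Textual release:

```python
from textual.app import App, ComposeResult
from textual.containers import VerticalScroll
from textual.widgets import Static


class ScrollDemo(App):
    def compose(self) -> ComposeResult:
        # With can_focus=True on VerticalScroll, Tab lands on the container
        # itself and the arrow keys scroll it without focusing any child.
        yield VerticalScroll(*(Static(f"line {n}") for n in range(50)))


if __name__ == "__main__":
    ScrollDemo().run()
```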
gh_patches_debug_14494 | rasdani/github-patches | git_diff | quantumlib__Cirq-3163 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update code for when Engine metric qubits are formatted differently
There is currently code in calibration.py that looks at qubit ids that start with `q` and strips that `q` before building the grid qubit. If the API no longer returns `q`-prefixed ids, this code should be removed; otherwise, if we are going to keep supporting the prefix, the handling should be moved into `grid_qubit_from_proto`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq/google/engine/calibration.py`
Content:
```
1 # Copyright 2019 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Calibration wrapper for calibrations returned from the Quantum Engine."""
15
16 from collections import abc, defaultdict
17 import datetime
18
19 from typing import Any, Dict, Iterator, Optional, Tuple, TYPE_CHECKING
20
21 from cirq import devices, vis
22 from cirq.google.api import v2
23
24 if TYPE_CHECKING:
25 import cirq
26
27
28 class Calibration(abc.Mapping):
29 """A convenience wrapper for calibrations that acts like a dictionary.
30
31 Calibrations act as dictionaries whose keys are the names of the metric,
32 and whose values are the metric values. The metric values themselves are
33 represented as a dictionary. These metric value dictionaries have
34 keys that are tuples of `cirq.GridQubit`s and values that are lists of the
35 metric values for those qubits. If a metric acts globally and is attached
36 to no specified number of qubits, the map will be from the empty tuple
37 to the metrics values.
38
39 Calibrations act just like a python dictionary. For example you can get
40 a list of all of the metric names using
41
42 `calibration.keys()`
43
44 and query a single value by looking up the name by index:
45
46 `calibration['t1']`
47
48 Attributes:
49 timestamp: The time that this calibration was run, in milliseconds since
50 the epoch.
51 """
52
53 def __init__(self, calibration: v2.metrics_pb2.MetricsSnapshot) -> None:
54 self.timestamp = calibration.timestamp_ms
55 self._metric_dict = self._compute_metric_dict(calibration.metrics)
56
57 def _compute_metric_dict(
58 self, metrics: v2.metrics_pb2.MetricsSnapshot
59 ) -> Dict[str, Dict[Tuple['cirq.GridQubit', ...], Any]]:
60 results: Dict[str, Dict[Tuple[devices.
61 GridQubit, ...], Any]] = defaultdict(dict)
62 for metric in metrics:
63 name = metric.name
64 # Flatten the values to a list, removing keys containing type names
65 # (e.g. proto version of each value is {<type>: value}).
66 flat_values = [
67 getattr(v, v.WhichOneof('val')) for v in metric.values
68 ]
69 if metric.targets:
70 targets = [
71 t[1:] if t.startswith('q') else t for t in metric.targets
72 ]
73 # TODO: Remove when calibrations don't prepend this.
74 # Github issue: https://github.com/quantumlib/Cirq/issues/2963
75 qubits = tuple(v2.grid_qubit_from_proto_id(t) for t in targets)
76 results[name][qubits] = flat_values
77 else:
78 assert len(results[name]) == 0, (
79 'Only one metric of a given name can have no targets. '
80 'Found multiple for key {}'.format(name))
81 results[name][()] = flat_values
82 return results
83
84 def __getitem__(self, key: str) -> Dict[Tuple['cirq.GridQubit', ...], Any]:
85 """Supports getting calibrations by index.
86
87 Calibration may be accessed by key:
88
89 `calibration['t1']`.
90
91 This returns a map from tuples of `cirq.GridQubit`s to a list of the
92 values of the metric. If there are no targets, the only key will only
93 be an empty tuple.
94 """
95 if not isinstance(key, str):
96 raise TypeError(
97 'Calibration metrics only have string keys. Key was {}'.format(
98 key))
99 if key not in self._metric_dict:
100 raise KeyError('Metric named {} not in calibration'.format(key))
101 return self._metric_dict[key]
102
103 def __iter__(self) -> Iterator:
104 return iter(self._metric_dict)
105
106 def __len__(self) -> int:
107 return len(self._metric_dict)
108
109 def __str__(self) -> str:
110
111 return 'Calibration(keys={})'.format(list(sorted(self.keys())))
112
113 def timestamp_str(self,
114 tz: Optional[datetime.tzinfo] = None,
115 timespec: str = 'auto') -> str:
116 """Return a string for the calibration timestamp.
117
118 Args:
119 tz: The timezone for the string. If None, the method uses the
120 platform's local date and time.
121 timespec: See datetime.isoformat for valid values.
122
123 Returns:
124 The string in ISO 8601 format YYYY-MM-DDTHH:MM:SS.ffffff.
125 """
126 dt = datetime.datetime.fromtimestamp(self.timestamp / 1000, tz)
127 dt += datetime.timedelta(microseconds=self.timestamp % 1000000)
128 return dt.isoformat(sep=' ', timespec=timespec)
129
130 def heatmap(self, key: str) -> vis.Heatmap:
131 """Return a heatmap for metrics that target single qubits.
132
133 Args:
134 key: The metric key to return a heatmap for.
135
136 Returns:
137 A `cirq.Heatmap` for the metric.
138
139 Raises:
140 AssertionError if the heatmap is not for single qubits or the metric
141 values are not single floats.
142 """
143 metrics = self[key]
144 assert all(len(k) == 1 for k in metrics.keys()), (
145 'Heatmaps are only supported if all the targets in a metric'
146 ' are single qubits.')
147 assert all(len(k) == 1 for k in metrics.values()), (
148 'Heatmaps are only supported if all the values in a metric'
149 ' are single metric values.')
150 value_map = {qubit: value for (qubit,), (value,) in metrics.items()}
151 return vis.Heatmap(value_map)
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq/google/engine/calibration.py b/cirq/google/engine/calibration.py
--- a/cirq/google/engine/calibration.py
+++ b/cirq/google/engine/calibration.py
@@ -67,12 +67,8 @@
getattr(v, v.WhichOneof('val')) for v in metric.values
]
if metric.targets:
- targets = [
- t[1:] if t.startswith('q') else t for t in metric.targets
- ]
- # TODO: Remove when calibrations don't prepend this.
- # Github issue: https://github.com/quantumlib/Cirq/issues/2963
- qubits = tuple(v2.grid_qubit_from_proto_id(t) for t in targets)
+ qubits = tuple(
+ v2.grid_qubit_from_proto_id(t) for t in metric.targets)
results[name][qubits] = flat_values
else:
assert len(results[name]) == 0, (
| {"golden_diff": "diff --git a/cirq/google/engine/calibration.py b/cirq/google/engine/calibration.py\n--- a/cirq/google/engine/calibration.py\n+++ b/cirq/google/engine/calibration.py\n@@ -67,12 +67,8 @@\n getattr(v, v.WhichOneof('val')) for v in metric.values\n ]\n if metric.targets:\n- targets = [\n- t[1:] if t.startswith('q') else t for t in metric.targets\n- ]\n- # TODO: Remove when calibrations don't prepend this.\n- # Github issue: https://github.com/quantumlib/Cirq/issues/2963\n- qubits = tuple(v2.grid_qubit_from_proto_id(t) for t in targets)\n+ qubits = tuple(\n+ v2.grid_qubit_from_proto_id(t) for t in metric.targets)\n results[name][qubits] = flat_values\n else:\n assert len(results[name]) == 0, (\n", "issue": "Update code for when Engine metric qubits are formatted differently\nThere is currently code in calibration.py that looks at qubit ids that start with `q` and removes this `q` before getting the grid qubit. If the API no longer returns `q` prefixed code this should be removed, otherwise if we are going to continue to support this we should add it to `grid_qubit_from_proto`.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Calibration wrapper for calibrations returned from the Quantum Engine.\"\"\"\n\nfrom collections import abc, defaultdict\nimport datetime\n\nfrom typing import Any, Dict, Iterator, Optional, Tuple, TYPE_CHECKING\n\nfrom cirq import devices, vis\nfrom cirq.google.api import v2\n\nif TYPE_CHECKING:\n import cirq\n\n\nclass Calibration(abc.Mapping):\n \"\"\"A convenience wrapper for calibrations that acts like a dictionary.\n\n Calibrations act as dictionaries whose keys are the names of the metric,\n and whose values are the metric values. The metric values themselves are\n represented as a dictionary. These metric value dictionaries have\n keys that are tuples of `cirq.GridQubit`s and values that are lists of the\n metric values for those qubits. If a metric acts globally and is attached\n to no specified number of qubits, the map will be from the empty tuple\n to the metrics values.\n\n Calibrations act just like a python dictionary. For example you can get\n a list of all of the metric names using\n\n `calibration.keys()`\n\n and query a single value by looking up the name by index:\n\n `calibration['t1']`\n\n Attributes:\n timestamp: The time that this calibration was run, in milliseconds since\n the epoch.\n \"\"\"\n\n def __init__(self, calibration: v2.metrics_pb2.MetricsSnapshot) -> None:\n self.timestamp = calibration.timestamp_ms\n self._metric_dict = self._compute_metric_dict(calibration.metrics)\n\n def _compute_metric_dict(\n self, metrics: v2.metrics_pb2.MetricsSnapshot\n ) -> Dict[str, Dict[Tuple['cirq.GridQubit', ...], Any]]:\n results: Dict[str, Dict[Tuple[devices.\n GridQubit, ...], Any]] = defaultdict(dict)\n for metric in metrics:\n name = metric.name\n # Flatten the values to a list, removing keys containing type names\n # (e.g. 
proto version of each value is {<type>: value}).\n flat_values = [\n getattr(v, v.WhichOneof('val')) for v in metric.values\n ]\n if metric.targets:\n targets = [\n t[1:] if t.startswith('q') else t for t in metric.targets\n ]\n # TODO: Remove when calibrations don't prepend this.\n # Github issue: https://github.com/quantumlib/Cirq/issues/2963\n qubits = tuple(v2.grid_qubit_from_proto_id(t) for t in targets)\n results[name][qubits] = flat_values\n else:\n assert len(results[name]) == 0, (\n 'Only one metric of a given name can have no targets. '\n 'Found multiple for key {}'.format(name))\n results[name][()] = flat_values\n return results\n\n def __getitem__(self, key: str) -> Dict[Tuple['cirq.GridQubit', ...], Any]:\n \"\"\"Supports getting calibrations by index.\n\n Calibration may be accessed by key:\n\n `calibration['t1']`.\n\n This returns a map from tuples of `cirq.GridQubit`s to a list of the\n values of the metric. If there are no targets, the only key will only\n be an empty tuple.\n \"\"\"\n if not isinstance(key, str):\n raise TypeError(\n 'Calibration metrics only have string keys. Key was {}'.format(\n key))\n if key not in self._metric_dict:\n raise KeyError('Metric named {} not in calibration'.format(key))\n return self._metric_dict[key]\n\n def __iter__(self) -> Iterator:\n return iter(self._metric_dict)\n\n def __len__(self) -> int:\n return len(self._metric_dict)\n\n def __str__(self) -> str:\n\n return 'Calibration(keys={})'.format(list(sorted(self.keys())))\n\n def timestamp_str(self,\n tz: Optional[datetime.tzinfo] = None,\n timespec: str = 'auto') -> str:\n \"\"\"Return a string for the calibration timestamp.\n\n Args:\n tz: The timezone for the string. If None, the method uses the\n platform's local date and time.\n timespec: See datetime.isoformat for valid values.\n\n Returns:\n The string in ISO 8601 format YYYY-MM-DDTHH:MM:SS.ffffff.\n \"\"\"\n dt = datetime.datetime.fromtimestamp(self.timestamp / 1000, tz)\n dt += datetime.timedelta(microseconds=self.timestamp % 1000000)\n return dt.isoformat(sep=' ', timespec=timespec)\n\n def heatmap(self, key: str) -> vis.Heatmap:\n \"\"\"Return a heatmap for metrics that target single qubits.\n\n Args:\n key: The metric key to return a heatmap for.\n\n Returns:\n A `cirq.Heatmap` for the metric.\n\n Raises:\n AssertionError if the heatmap is not for single qubits or the metric\n values are not single floats.\n \"\"\"\n metrics = self[key]\n assert all(len(k) == 1 for k in metrics.keys()), (\n 'Heatmaps are only supported if all the targets in a metric'\n ' are single qubits.')\n assert all(len(k) == 1 for k in metrics.values()), (\n 'Heatmaps are only supported if all the values in a metric'\n ' are single metric values.')\n value_map = {qubit: value for (qubit,), (value,) in metrics.items()}\n return vis.Heatmap(value_map)\n", "path": "cirq/google/engine/calibration.py"}], "after_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Calibration wrapper for 
calibrations returned from the Quantum Engine.\"\"\"\n\nfrom collections import abc, defaultdict\nimport datetime\n\nfrom typing import Any, Dict, Iterator, Optional, Tuple, TYPE_CHECKING\n\nfrom cirq import devices, vis\nfrom cirq.google.api import v2\n\nif TYPE_CHECKING:\n import cirq\n\n\nclass Calibration(abc.Mapping):\n \"\"\"A convenience wrapper for calibrations that acts like a dictionary.\n\n Calibrations act as dictionaries whose keys are the names of the metric,\n and whose values are the metric values. The metric values themselves are\n represented as a dictionary. These metric value dictionaries have\n keys that are tuples of `cirq.GridQubit`s and values that are lists of the\n metric values for those qubits. If a metric acts globally and is attached\n to no specified number of qubits, the map will be from the empty tuple\n to the metrics values.\n\n Calibrations act just like a python dictionary. For example you can get\n a list of all of the metric names using\n\n `calibration.keys()`\n\n and query a single value by looking up the name by index:\n\n `calibration['t1']`\n\n Attributes:\n timestamp: The time that this calibration was run, in milliseconds since\n the epoch.\n \"\"\"\n\n def __init__(self, calibration: v2.metrics_pb2.MetricsSnapshot) -> None:\n self.timestamp = calibration.timestamp_ms\n self._metric_dict = self._compute_metric_dict(calibration.metrics)\n\n def _compute_metric_dict(\n self, metrics: v2.metrics_pb2.MetricsSnapshot\n ) -> Dict[str, Dict[Tuple['cirq.GridQubit', ...], Any]]:\n results: Dict[str, Dict[Tuple[devices.\n GridQubit, ...], Any]] = defaultdict(dict)\n for metric in metrics:\n name = metric.name\n # Flatten the values to a list, removing keys containing type names\n # (e.g. proto version of each value is {<type>: value}).\n flat_values = [\n getattr(v, v.WhichOneof('val')) for v in metric.values\n ]\n if metric.targets:\n qubits = tuple(\n v2.grid_qubit_from_proto_id(t) for t in metric.targets)\n results[name][qubits] = flat_values\n else:\n assert len(results[name]) == 0, (\n 'Only one metric of a given name can have no targets. '\n 'Found multiple for key {}'.format(name))\n results[name][()] = flat_values\n return results\n\n def __getitem__(self, key: str) -> Dict[Tuple['cirq.GridQubit', ...], Any]:\n \"\"\"Supports getting calibrations by index.\n\n Calibration may be accessed by key:\n\n `calibration['t1']`.\n\n This returns a map from tuples of `cirq.GridQubit`s to a list of the\n values of the metric. If there are no targets, the only key will only\n be an empty tuple.\n \"\"\"\n if not isinstance(key, str):\n raise TypeError(\n 'Calibration metrics only have string keys. Key was {}'.format(\n key))\n if key not in self._metric_dict:\n raise KeyError('Metric named {} not in calibration'.format(key))\n return self._metric_dict[key]\n\n def __iter__(self) -> Iterator:\n return iter(self._metric_dict)\n\n def __len__(self) -> int:\n return len(self._metric_dict)\n\n def __str__(self) -> str:\n\n return 'Calibration(keys={})'.format(list(sorted(self.keys())))\n\n def timestamp_str(self,\n tz: Optional[datetime.tzinfo] = None,\n timespec: str = 'auto') -> str:\n \"\"\"Return a string for the calibration timestamp.\n\n Args:\n tz: The timezone for the string. 
If None, the method uses the\n platform's local date and time.\n timespec: See datetime.isoformat for valid values.\n\n Returns:\n The string in ISO 8601 format YYYY-MM-DDTHH:MM:SS.ffffff.\n \"\"\"\n dt = datetime.datetime.fromtimestamp(self.timestamp / 1000, tz)\n dt += datetime.timedelta(microseconds=self.timestamp % 1000000)\n return dt.isoformat(sep=' ', timespec=timespec)\n\n def heatmap(self, key: str) -> vis.Heatmap:\n \"\"\"Return a heatmap for metrics that target single qubits.\n\n Args:\n key: The metric key to return a heatmap for.\n\n Returns:\n A `cirq.Heatmap` for the metric.\n\n Raises:\n AssertionError if the heatmap is not for single qubits or the metric\n values are not single floats.\n \"\"\"\n metrics = self[key]\n assert all(len(k) == 1 for k in metrics.keys()), (\n 'Heatmaps are only supported if all the targets in a metric'\n ' are single qubits.')\n assert all(len(k) == 1 for k in metrics.values()), (\n 'Heatmaps are only supported if all the values in a metric'\n ' are single metric values.')\n value_map = {qubit: value for (qubit,), (value,) in metrics.items()}\n return vis.Heatmap(value_map)\n", "path": "cirq/google/engine/calibration.py"}]} | 2,045 | 215 |
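The merged patch takes the first branch of the issue — drop the `q`-stripping and trust the API — but the second option, folding prefix support into the proto-id parsing, would have looked roughly like the helper below. The function name and regex are mine, not Cirq's; per the issue, `v2.grid_qubit_from_proto_id` itself only handled the bare `<row>_<col>` form at the time:

```python
import re

import cirq

_GRID_ID = re.compile(r'^q?(-?\d+)_(-?\d+)$')  # optionally tolerate a 'q' prefix


def grid_qubit_from_metric_target(target: str) -> cirq.GridQubit:
    """Hypothetical parser accepting both '5_2' and the legacy 'q5_2' form."""
    match = _GRID_ID.match(target)
    if match is None:
        raise ValueError(f'{target!r} is not a grid qubit id')
    return cirq.GridQubit(int(match.group(1)), int(match.group(2)))


assert grid_qubit_from_metric_target('q5_2') == cirq.GridQubit(5, 2)
assert grid_qubit_from_metric_target('5_2') == cirq.GridQubit(5, 2)
```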
gh_patches_debug_35158 | rasdani/github-patches | git_diff | python-discord__bot-527 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Restricting/redirecting output of commands to bot-commands for regular users
**Edit:** I've decided to make this a general issue, since the `!free` command turns out to be a bit disruptive as well. In python-discussion, there have just been four consecutive calls to `!free`, and it looks very disruptive (see below).

My idea would be to create an easy way to redirect the output of certain commands (`free` and `help`) to bot-commands for regular users, with the bot sending a temporary in-channel message redirecting them there. The bot could then delete that message (and maybe the invoking message as well) after a certain amount of time has passed.
**Four times !free in python-discussion:**

**Old message for context:**
Now that the `!help` command works outside of bot-commands again, we may want to think about restricting its use/output to bot-commands for regular users. The help embeds are quite sizeable and therefore quite disruptive in on-topic channels.
I want to propose to redirect the **output** of help (the help-embed) to bot-commands for non-staff members, prepended by a mention of the user invoking the command. In addition, we could display a small, non-disruptive embed in the original context channel redirecting the user to bot commands. I think this should also be the case for help-calls on specific commands, as the embeds for those can be quite sizeable as well (e.g., `!help site`).
Personally, I'd like this restriction to only apply to regular users, so staff can pull up help on a command quickly in channel and use help in the staff channels.
--- END ISSUE ---
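Before looking at the cog below, here is a rough shape such a redirect decorator could take. The signature mirrors the `redirect_output(destination_channel=..., bypass_roles=...)` usage visible in `free.py` further down, but everything else — the channel lookup, the self-deleting pointer message the issue asks for, rerouting via `ctx.send` — is my guess at an implementation, not python-discord's actual `bot.decorators` code:

```python
import functools


def redirect_output(destination_channel, bypass_roles=()):
    """Hypothetical sketch: for regular users, post the command's output in
    the destination channel and leave a short self-deleting pointer behind."""
    def decorator(func):
        @functools.wraps(func)
        async def wrapper(self, ctx, *args, **kwargs):
            if any(role.id in bypass_roles for role in ctx.author.roles):
                # Staff bypass: output stays in the invoking channel.
                return await func(self, ctx, *args, **kwargs)

            redirect = ctx.bot.get_channel(destination_channel)
            await ctx.send(
                f"{ctx.author.mention} I've sent the output to {redirect.mention}.",
                delete_after=30.0,  # discord.py removes the pointer for us
            )
            ctx.send = redirect.send  # downstream ctx.send() now posts there
            return await func(self, ctx, *args, **kwargs)
        return wrapper
    return decorator
```

Shadowing the bound `ctx.send` with an instance attribute is the lightest way to reroute output without touching the command body; deleting the invoking message as well would just be one extra `await ctx.message.delete()` before the redirect.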
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/cogs/free.py`
Content:
```
1 import logging
2 from datetime import datetime
3 from operator import itemgetter
4
5 from discord import Colour, Embed, Member, utils
6 from discord.ext.commands import Bot, Cog, Context, command
7
8 from bot.constants import Categories, Channels, Free, STAFF_ROLES
9 from bot.decorators import redirect_output
10
11 log = logging.getLogger(__name__)
12
13 TIMEOUT = Free.activity_timeout
14 RATE = Free.cooldown_rate
15 PER = Free.cooldown_per
16
17
18 class Free(Cog):
19 """Tries to figure out which help channels are free."""
20
21 PYTHON_HELP_ID = Categories.python_help
22
23 @command(name="free", aliases=('f',))
24 @redirect_output(destination_channel=Channels.bot, bypass_roles=STAFF_ROLES)
25 async def free(self, ctx: Context, user: Member = None, seek: int = 2) -> None:
26 """
27 Lists free help channels by likeliness of availability.
28
29 seek is used only when this command is invoked in a help channel.
30 You cannot override seek without mentioning a user first.
31
32 When seek is 2, we are avoiding considering the last active message
33 in a channel to be the one that invoked this command.
34
35 When seek is 3 or more, a user has been mentioned on the assumption
36 that they asked if the channel is free or they asked their question
37 in an active channel, and we want the message before that happened.
38 """
39 free_channels = []
40 python_help = utils.get(ctx.guild.categories, id=self.PYTHON_HELP_ID)
41
42 if user is not None and seek == 2:
43 seek = 3
44 elif not 0 < seek < 10:
45 seek = 3
46
47 # Iterate through all the help channels
48 # to check latest activity
49 for channel in python_help.channels:
50 # Seek further back in the help channel
51 # the command was invoked in
52 if channel.id == ctx.channel.id:
53 messages = await channel.history(limit=seek).flatten()
54 msg = messages[seek - 1]
55 # Otherwise get last message
56 else:
57 msg = await channel.history(limit=1).next() # noqa (False positive)
58
59 inactive = (datetime.utcnow() - msg.created_at).seconds
60 if inactive > TIMEOUT:
61 free_channels.append((inactive, channel))
62
63 embed = Embed()
64 embed.colour = Colour.blurple()
65 embed.title = "**Looking for a free help channel?**"
66
67 if user is not None:
68 embed.description = f"**Hey {user.mention}!**\n\n"
69 else:
70 embed.description = ""
71
72 # Display all potentially inactive channels
73 # in descending order of inactivity
74 if free_channels:
75 embed.description += "**The following channel{0} look{1} free:**\n\n**".format(
76 's' if len(free_channels) > 1 else '',
77 '' if len(free_channels) > 1 else 's'
78 )
79
80 # Sort channels in descending order by seconds
81 # Get position in list, inactivity, and channel object
82 # For each channel, add to embed.description
83 sorted_channels = sorted(free_channels, key=itemgetter(0), reverse=True)
84 for i, (inactive, channel) in enumerate(sorted_channels, 1):
85 minutes, seconds = divmod(inactive, 60)
86 if minutes > 59:
87 hours, minutes = divmod(minutes, 60)
88 embed.description += f"{i}. {channel.mention} inactive for {hours}h{minutes}m{seconds}s\n\n"
89 else:
90 embed.description += f"{i}. {channel.mention} inactive for {minutes}m{seconds}s\n\n"
91
92 embed.description += ("**\nThese channels aren't guaranteed to be free, "
93 "so use your best judgement and check for yourself.")
94 else:
95 embed.description = ("**Doesn't look like any channels are available right now. "
96 "You're welcome to check for yourself to be sure. "
97 "If all channels are truly busy, please be patient "
98 "as one will likely be available soon.**")
99
100 await ctx.send(embed=embed)
101
102
103 def setup(bot: Bot) -> None:
104 """Free cog load."""
105 bot.add_cog(Free())
106 log.info("Cog loaded: Free")
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/cogs/free.py b/bot/cogs/free.py
--- a/bot/cogs/free.py
+++ b/bot/cogs/free.py
@@ -72,30 +72,27 @@
# Display all potentially inactive channels
# in descending order of inactivity
if free_channels:
- embed.description += "**The following channel{0} look{1} free:**\n\n**".format(
- 's' if len(free_channels) > 1 else '',
- '' if len(free_channels) > 1 else 's'
- )
-
# Sort channels in descending order by seconds
# Get position in list, inactivity, and channel object
# For each channel, add to embed.description
sorted_channels = sorted(free_channels, key=itemgetter(0), reverse=True)
- for i, (inactive, channel) in enumerate(sorted_channels, 1):
+
+ for (inactive, channel) in sorted_channels[:3]:
minutes, seconds = divmod(inactive, 60)
if minutes > 59:
hours, minutes = divmod(minutes, 60)
- embed.description += f"{i}. {channel.mention} inactive for {hours}h{minutes}m{seconds}s\n\n"
+ embed.description += f"{channel.mention} **{hours}h {minutes}m {seconds}s** inactive\n"
else:
- embed.description += f"{i}. {channel.mention} inactive for {minutes}m{seconds}s\n\n"
+ embed.description += f"{channel.mention} **{minutes}m {seconds}s** inactive\n"
- embed.description += ("**\nThese channels aren't guaranteed to be free, "
- "so use your best judgement and check for yourself.")
+ embed.set_footer(text="Please confirm these channels are free before posting")
else:
- embed.description = ("**Doesn't look like any channels are available right now. "
- "You're welcome to check for yourself to be sure. "
- "If all channels are truly busy, please be patient "
- "as one will likely be available soon.**")
+ embed.description = (
+ "Doesn't look like any channels are available right now. "
+ "You're welcome to check for yourself to be sure. "
+ "If all channels are truly busy, please be patient "
+ "as one will likely be available soon."
+ )
await ctx.send(embed=embed)
| {"golden_diff": "diff --git a/bot/cogs/free.py b/bot/cogs/free.py\n--- a/bot/cogs/free.py\n+++ b/bot/cogs/free.py\n@@ -72,30 +72,27 @@\n # Display all potentially inactive channels\n # in descending order of inactivity\n if free_channels:\n- embed.description += \"**The following channel{0} look{1} free:**\\n\\n**\".format(\n- 's' if len(free_channels) > 1 else '',\n- '' if len(free_channels) > 1 else 's'\n- )\n-\n # Sort channels in descending order by seconds\n # Get position in list, inactivity, and channel object\n # For each channel, add to embed.description\n sorted_channels = sorted(free_channels, key=itemgetter(0), reverse=True)\n- for i, (inactive, channel) in enumerate(sorted_channels, 1):\n+\n+ for (inactive, channel) in sorted_channels[:3]:\n minutes, seconds = divmod(inactive, 60)\n if minutes > 59:\n hours, minutes = divmod(minutes, 60)\n- embed.description += f\"{i}. {channel.mention} inactive for {hours}h{minutes}m{seconds}s\\n\\n\"\n+ embed.description += f\"{channel.mention} **{hours}h {minutes}m {seconds}s** inactive\\n\"\n else:\n- embed.description += f\"{i}. {channel.mention} inactive for {minutes}m{seconds}s\\n\\n\"\n+ embed.description += f\"{channel.mention} **{minutes}m {seconds}s** inactive\\n\"\n \n- embed.description += (\"**\\nThese channels aren't guaranteed to be free, \"\n- \"so use your best judgement and check for yourself.\")\n+ embed.set_footer(text=\"Please confirm these channels are free before posting\")\n else:\n- embed.description = (\"**Doesn't look like any channels are available right now. \"\n- \"You're welcome to check for yourself to be sure. \"\n- \"If all channels are truly busy, please be patient \"\n- \"as one will likely be available soon.**\")\n+ embed.description = (\n+ \"Doesn't look like any channels are available right now. \"\n+ \"You're welcome to check for yourself to be sure. \"\n+ \"If all channels are truly busy, please be patient \"\n+ \"as one will likely be available soon.\"\n+ )\n \n await ctx.send(embed=embed)\n", "issue": "Restricting/redirecting output of commands to bot-commands for regular users\n**Edit:** I've decided to make this a general issue, since the `!free` command turns out to be a bit disruptive as well. In python-discussions, there have just been four consecutive calls to `!free` and it looks very disruptive (see below).\r\n\r\nMy idea would be to create an easy way to redirect the output of certain commands, free and help, to bot commands for regular users, with the bot sending a temporary message to the user in channel redirecting them there. The bot could then delete that message (and, maybe, the invoking message as well) after a certain amount of time has passed.\r\n\r\n**Four times !free in python-discussion:**\r\n\r\n\r\n**Old message for context:**\r\nNow the `!help` command works outside of bot-commands again, we may want to think about restricting the use/output to bot-commands for regular users. The help-embeds are quite sizeable and, therefore, quite disruptive in on-topic channels.\r\n\r\nI want to propose to redirect the **output** of help (the help-embed) to bot-commands for non-staff members, prepended by a mention of the user invoking the command. In addition, we could display a small, non-disruptive embed in the original context channel redirecting the user to bot commands. 
I think this should also be the case for help-calls on specific commands, as the embeds for those can be quite sizeable as well (e.g., `!help site`).\r\n\r\nPersonally, I'd like this restriction to only apply to regular users, so staff can pull up help on a command quickly in channel and use help in the staff channels.\n", "before_files": [{"content": "import logging\nfrom datetime import datetime\nfrom operator import itemgetter\n\nfrom discord import Colour, Embed, Member, utils\nfrom discord.ext.commands import Bot, Cog, Context, command\n\nfrom bot.constants import Categories, Channels, Free, STAFF_ROLES\nfrom bot.decorators import redirect_output\n\nlog = logging.getLogger(__name__)\n\nTIMEOUT = Free.activity_timeout\nRATE = Free.cooldown_rate\nPER = Free.cooldown_per\n\n\nclass Free(Cog):\n \"\"\"Tries to figure out which help channels are free.\"\"\"\n\n PYTHON_HELP_ID = Categories.python_help\n\n @command(name=\"free\", aliases=('f',))\n @redirect_output(destination_channel=Channels.bot, bypass_roles=STAFF_ROLES)\n async def free(self, ctx: Context, user: Member = None, seek: int = 2) -> None:\n \"\"\"\n Lists free help channels by likeliness of availability.\n\n seek is used only when this command is invoked in a help channel.\n You cannot override seek without mentioning a user first.\n\n When seek is 2, we are avoiding considering the last active message\n in a channel to be the one that invoked this command.\n\n When seek is 3 or more, a user has been mentioned on the assumption\n that they asked if the channel is free or they asked their question\n in an active channel, and we want the message before that happened.\n \"\"\"\n free_channels = []\n python_help = utils.get(ctx.guild.categories, id=self.PYTHON_HELP_ID)\n\n if user is not None and seek == 2:\n seek = 3\n elif not 0 < seek < 10:\n seek = 3\n\n # Iterate through all the help channels\n # to check latest activity\n for channel in python_help.channels:\n # Seek further back in the help channel\n # the command was invoked in\n if channel.id == ctx.channel.id:\n messages = await channel.history(limit=seek).flatten()\n msg = messages[seek - 1]\n # Otherwise get last message\n else:\n msg = await channel.history(limit=1).next() # noqa (False positive)\n\n inactive = (datetime.utcnow() - msg.created_at).seconds\n if inactive > TIMEOUT:\n free_channels.append((inactive, channel))\n\n embed = Embed()\n embed.colour = Colour.blurple()\n embed.title = \"**Looking for a free help channel?**\"\n\n if user is not None:\n embed.description = f\"**Hey {user.mention}!**\\n\\n\"\n else:\n embed.description = \"\"\n\n # Display all potentially inactive channels\n # in descending order of inactivity\n if free_channels:\n embed.description += \"**The following channel{0} look{1} free:**\\n\\n**\".format(\n 's' if len(free_channels) > 1 else '',\n '' if len(free_channels) > 1 else 's'\n )\n\n # Sort channels in descending order by seconds\n # Get position in list, inactivity, and channel object\n # For each channel, add to embed.description\n sorted_channels = sorted(free_channels, key=itemgetter(0), reverse=True)\n for i, (inactive, channel) in enumerate(sorted_channels, 1):\n minutes, seconds = divmod(inactive, 60)\n if minutes > 59:\n hours, minutes = divmod(minutes, 60)\n embed.description += f\"{i}. {channel.mention} inactive for {hours}h{minutes}m{seconds}s\\n\\n\"\n else:\n embed.description += f\"{i}. 
{channel.mention} inactive for {minutes}m{seconds}s\\n\\n\"\n\n embed.description += (\"**\\nThese channels aren't guaranteed to be free, \"\n \"so use your best judgement and check for yourself.\")\n else:\n embed.description = (\"**Doesn't look like any channels are available right now. \"\n \"You're welcome to check for yourself to be sure. \"\n \"If all channels are truly busy, please be patient \"\n \"as one will likely be available soon.**\")\n\n await ctx.send(embed=embed)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Free cog load.\"\"\"\n bot.add_cog(Free())\n log.info(\"Cog loaded: Free\")\n", "path": "bot/cogs/free.py"}], "after_files": [{"content": "import logging\nfrom datetime import datetime\nfrom operator import itemgetter\n\nfrom discord import Colour, Embed, Member, utils\nfrom discord.ext.commands import Bot, Cog, Context, command\n\nfrom bot.constants import Categories, Channels, Free, STAFF_ROLES\nfrom bot.decorators import redirect_output\n\nlog = logging.getLogger(__name__)\n\nTIMEOUT = Free.activity_timeout\nRATE = Free.cooldown_rate\nPER = Free.cooldown_per\n\n\nclass Free(Cog):\n \"\"\"Tries to figure out which help channels are free.\"\"\"\n\n PYTHON_HELP_ID = Categories.python_help\n\n @command(name=\"free\", aliases=('f',))\n @redirect_output(destination_channel=Channels.bot, bypass_roles=STAFF_ROLES)\n async def free(self, ctx: Context, user: Member = None, seek: int = 2) -> None:\n \"\"\"\n Lists free help channels by likeliness of availability.\n\n seek is used only when this command is invoked in a help channel.\n You cannot override seek without mentioning a user first.\n\n When seek is 2, we are avoiding considering the last active message\n in a channel to be the one that invoked this command.\n\n When seek is 3 or more, a user has been mentioned on the assumption\n that they asked if the channel is free or they asked their question\n in an active channel, and we want the message before that happened.\n \"\"\"\n free_channels = []\n python_help = utils.get(ctx.guild.categories, id=self.PYTHON_HELP_ID)\n\n if user is not None and seek == 2:\n seek = 3\n elif not 0 < seek < 10:\n seek = 3\n\n # Iterate through all the help channels\n # to check latest activity\n for channel in python_help.channels:\n # Seek further back in the help channel\n # the command was invoked in\n if channel.id == ctx.channel.id:\n messages = await channel.history(limit=seek).flatten()\n msg = messages[seek - 1]\n # Otherwise get last message\n else:\n msg = await channel.history(limit=1).next() # noqa (False positive)\n\n inactive = (datetime.utcnow() - msg.created_at).seconds\n if inactive > TIMEOUT:\n free_channels.append((inactive, channel))\n\n embed = Embed()\n embed.colour = Colour.blurple()\n embed.title = \"**Looking for a free help channel?**\"\n\n if user is not None:\n embed.description = f\"**Hey {user.mention}!**\\n\\n\"\n else:\n embed.description = \"\"\n\n # Display all potentially inactive channels\n # in descending order of inactivity\n if free_channels:\n # Sort channels in descending order by seconds\n # Get position in list, inactivity, and channel object\n # For each channel, add to embed.description\n sorted_channels = sorted(free_channels, key=itemgetter(0), reverse=True)\n\n for (inactive, channel) in sorted_channels[:3]:\n minutes, seconds = divmod(inactive, 60)\n if minutes > 59:\n hours, minutes = divmod(minutes, 60)\n embed.description += f\"{channel.mention} **{hours}h {minutes}m {seconds}s** inactive\\n\"\n else:\n embed.description += 
f\"{channel.mention} **{minutes}m {seconds}s** inactive\\n\"\n\n embed.set_footer(text=\"Please confirm these channels are free before posting\")\n else:\n embed.description = (\n \"Doesn't look like any channels are available right now. \"\n \"You're welcome to check for yourself to be sure. \"\n \"If all channels are truly busy, please be patient \"\n \"as one will likely be available soon.\"\n )\n\n await ctx.send(embed=embed)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Free cog load.\"\"\"\n bot.add_cog(Free())\n log.info(\"Cog loaded: Free\")\n", "path": "bot/cogs/free.py"}]} | 1,845 | 551 |
gh_patches_debug_11955 | rasdani/github-patches | git_diff | MongoEngine__mongoengine-1430 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop Python 2.6 support
For the most relevant discussion about the topic, see #1294.
Plan:
1. In the upcoming minor release, I'm going to include `warnings.warn(msg, DeprecationWarning)` with the message saying "Python v2.6 support is deprecated and is going to be dropped entirely in the upcoming v0.11.0 release. Update your Python version if you want to have access to the latest features and bug fixes in MongoEngine." (a sketch of this warning follows the list)
2. In v0.11.0 (most likely shipped with #1428), I'll update the way we do dict comprehensions and other relics of the past, thus making it truly incompatible with v2.6.
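A minimal sketch of step 1, with the warning text taken verbatim from the plan; the module it lives in and the exact version check are assumptions:

```python
# Step-1 sketch: warn Python 2.6 users on import.
import sys
import warnings

if sys.version_info[:2] == (2, 6):
    warnings.warn(
        'Python v2.6 support is deprecated and is going to be dropped '
        'entirely in the upcoming v0.11.0 release. Update your Python '
        'version if you want to have access to the latest features and '
        'bug fixes in MongoEngine.',
        DeprecationWarning
    )
```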
Cc @lafrech @gukoff
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mongoengine/python_support.py`
Content:
```
1 """Helper functions and types to aid with Python 2.5 - 3 support."""
2
3 import sys
4 import pymongo
5
6
7 if pymongo.version_tuple[0] < 3:
8 IS_PYMONGO_3 = False
9 else:
10 IS_PYMONGO_3 = True
11
12 PY3 = sys.version_info[0] == 3
13
14 if PY3:
15 import codecs
16 from io import BytesIO as StringIO
17
18 # return s converted to binary. b('test') should be equivalent to b'test'
19 def b(s):
20 return codecs.latin_1_encode(s)[0]
21
22 bin_type = bytes
23 txt_type = str
24 else:
25 try:
26 from cStringIO import StringIO
27 except ImportError:
28 from StringIO import StringIO
29
30 # Conversion to binary only necessary in Python 3
31 def b(s):
32 return s
33
34 bin_type = str
35 txt_type = unicode
36
37 str_types = (bin_type, txt_type)
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mongoengine/python_support.py b/mongoengine/python_support.py
--- a/mongoengine/python_support.py
+++ b/mongoengine/python_support.py
@@ -1,9 +1,22 @@
-"""Helper functions and types to aid with Python 2.5 - 3 support."""
+"""Helper functions and types to aid with Python 2.6 - 3 support."""
import sys
+import warnings
+
import pymongo
+# Show a deprecation warning for people using Python v2.6
+# TODO remove in mongoengine v0.11.0
+if sys.version_info[0] == 2 and sys.version_info[1] == 6:
+ warnings.warn(
+ 'Python v2.6 support is deprecated and is going to be dropped '
+ 'entirely in the upcoming v0.11.0 release. Update your Python '
+ 'version if you want to have access to the latest features and '
+ 'bug fixes in MongoEngine.',
+ DeprecationWarning
+ )
+
if pymongo.version_tuple[0] < 3:
IS_PYMONGO_3 = False
else:
| {"golden_diff": "diff --git a/mongoengine/python_support.py b/mongoengine/python_support.py\n--- a/mongoengine/python_support.py\n+++ b/mongoengine/python_support.py\n@@ -1,9 +1,22 @@\n-\"\"\"Helper functions and types to aid with Python 2.5 - 3 support.\"\"\"\n+\"\"\"Helper functions and types to aid with Python 2.6 - 3 support.\"\"\"\n \n import sys\n+import warnings\n+\n import pymongo\n \n \n+# Show a deprecation warning for people using Python v2.6\n+# TODO remove in mongoengine v0.11.0\n+if sys.version_info[0] == 2 and sys.version_info[1] == 6:\n+ warnings.warn(\n+ 'Python v2.6 support is deprecated and is going to be dropped '\n+ 'entirely in the upcoming v0.11.0 release. Update your Python '\n+ 'version if you want to have access to the latest features and '\n+ 'bug fixes in MongoEngine.',\n+ DeprecationWarning\n+ )\n+\n if pymongo.version_tuple[0] < 3:\n IS_PYMONGO_3 = False\n else:\n", "issue": "Drop Python 2.6 support\nFor the most relevant discussion about the topic, see #1294.\r\n\r\nPlan:\r\n1. In the upcoming minor release, I'm going to include `warnings.warn(msg, DeprecationWarning)`. with the message saying \"Python v2.6 support is deprecated and is going to be dropped entirely in the upcoming v0.11.0 release. Update your Python version if you want to have access to the latest features and bug fixes in MongoEngine.\"\r\n2. In v0.11.0 (most likely shipped with #1428), I'll update the way we do dict comprehensions and other relics of the past, thus making it truly incompatible with v2.6.\r\n\r\nCc @lafrech @gukoff \n", "before_files": [{"content": "\"\"\"Helper functions and types to aid with Python 2.5 - 3 support.\"\"\"\n\nimport sys\nimport pymongo\n\n\nif pymongo.version_tuple[0] < 3:\n IS_PYMONGO_3 = False\nelse:\n IS_PYMONGO_3 = True\n\nPY3 = sys.version_info[0] == 3\n\nif PY3:\n import codecs\n from io import BytesIO as StringIO\n\n # return s converted to binary. b('test') should be equivalent to b'test'\n def b(s):\n return codecs.latin_1_encode(s)[0]\n\n bin_type = bytes\n txt_type = str\nelse:\n try:\n from cStringIO import StringIO\n except ImportError:\n from StringIO import StringIO\n\n # Conversion to binary only necessary in Python 3\n def b(s):\n return s\n\n bin_type = str\n txt_type = unicode\n\nstr_types = (bin_type, txt_type)\n", "path": "mongoengine/python_support.py"}], "after_files": [{"content": "\"\"\"Helper functions and types to aid with Python 2.6 - 3 support.\"\"\"\n\nimport sys\nimport warnings\n\nimport pymongo\n\n\n# Show a deprecation warning for people using Python v2.6\n# TODO remove in mongoengine v0.11.0\nif sys.version_info[0] == 2 and sys.version_info[1] == 6:\n warnings.warn(\n 'Python v2.6 support is deprecated and is going to be dropped '\n 'entirely in the upcoming v0.11.0 release. Update your Python '\n 'version if you want to have access to the latest features and '\n 'bug fixes in MongoEngine.',\n DeprecationWarning\n )\n\nif pymongo.version_tuple[0] < 3:\n IS_PYMONGO_3 = False\nelse:\n IS_PYMONGO_3 = True\n\nPY3 = sys.version_info[0] == 3\n\nif PY3:\n import codecs\n from io import BytesIO as StringIO\n\n # return s converted to binary. 
b('test') should be equivalent to b'test'\n def b(s):\n return codecs.latin_1_encode(s)[0]\n\n bin_type = bytes\n txt_type = str\nelse:\n try:\n from cStringIO import StringIO\n except ImportError:\n from StringIO import StringIO\n\n # Conversion to binary only necessary in Python 3\n def b(s):\n return s\n\n bin_type = str\n txt_type = unicode\n\nstr_types = (bin_type, txt_type)\n", "path": "mongoengine/python_support.py"}]} | 702 | 253 |
gh_patches_debug_11989 | rasdani/github-patches | git_diff | sagemath__sage-36173 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Suboptimal memory complexity of `sage.matrix.berlekamp`
The code here is suboptimal:
https://github.com/sagemath/sage/blob/6695becb762aebab78ef47d0fb12eae52be5d79d/src/sage/matrix/berlekamp_massey.py#L90-L98
For example, the following code uses a lot of memory:
```python
sage: from sage.matrix.berlekamp_massey import berlekamp_massey
sage: p = next_prime(2**64)
sage: ls = [GF(p).random_element() for _ in range(20000)]
sage: berlekamp_massey(ls);
```
To be more specific, the dictionaries are not necessary: only `f[j - 2]` and `f[j - 1]` are used on each iteration, and the same holds for `s`, so both can be kept in plain temporary variables.
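A minimal sketch of that constant-memory loop, mirroring the names in the function below; the Sage-specific calls (`quo_rem`, `reverse`, coefficient indexing) are used exactly as in the original:

```python
def berlekamp_massey_lowmem(a):
    # Sketch only: same algorithm, but just two remainders/multipliers are
    # retained at a time. Input validation from the original is omitted.
    K = a[0].parent().fraction_field()
    R = K['x']
    x = R.gen()
    M = len(a) // 2
    f0, f1 = R(a), x**(2 * M)
    s0, s1 = R.one(), R.zero()
    while f1.degree() >= M:
        q, r = f0.quo_rem(f1)
        f0, f1 = f1, r
        s0, s1 = s1, s0 - q * s1
    t = s1.reverse()
    return ~(t[t.degree()]) * t  # make monic, as in the original
```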
### Additional Information
I am fixing it.
### Checklist
- [X] I have searched the existing issues for a bug report that matches the one I want to file, without success.
- [X] I have read the documentation and troubleshoot guide
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sage/matrix/berlekamp_massey.py`
Content:
```
1 """
2 Minimal Polynomials of Linear Recurrence Sequences
3
4 AUTHORS:
5
6 - William Stein
7 """
8 # ****************************************************************************
9 # Copyright (C) 2005 William Stein <[email protected]>
10 #
11 # Distributed under the terms of the GNU General Public License (GPL)
12 #
13 # This code is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 # General Public License for more details.
17 #
18 # The full text of the GPL is available at:
19 #
20 # https://www.gnu.org/licenses/
21 # ****************************************************************************
22
23 import sage.rings.rational_field
24
25
26 def berlekamp_massey(a):
27 r"""
28 Use the Berlekamp-Massey algorithm to find the minimal polynomial
29 of a linear recurrence sequence `a`.
30
31 The minimal polynomial of a linear recurrence `\{a_r\}` is
32 by definition the unique monic polynomial `g`, such that if
33 `\{a_r\}` satisfies a linear recurrence
34 `a_{j+k} + b_{j-1} a_{j-1+k} + \cdots + b_0 a_k=0`
35 (for all `k\geq 0`), then `g` divides the
36 polynomial `x^j + \sum_{i=0}^{j-1} b_i x^i`.
37
38 INPUT:
39
40 - ``a`` -- a list of even length of elements of a field (or domain)
41
42 OUTPUT:
43
44 the minimal polynomial of the sequence, as a polynomial over the
45 field in which the entries of `a` live
46
47 .. WARNING::
48
49 The result is only guaranteed to be correct on the full
50 sequence if there exists a linear recurrence of length less
51 than half the length of `a`.
52
53 EXAMPLES::
54
55 sage: from sage.matrix.berlekamp_massey import berlekamp_massey
56 sage: berlekamp_massey([1,2,1,2,1,2])
57 x^2 - 1
58 sage: berlekamp_massey([GF(7)(1), 19, 1, 19])
59 x^2 + 6
60 sage: berlekamp_massey([2,2,1,2,1,191,393,132])
61 x^4 - 36727/11711*x^3 + 34213/5019*x^2 + 7024942/35133*x - 335813/1673
62 sage: berlekamp_massey(prime_range(2, 38)) # needs sage.libs.pari
63 x^6 - 14/9*x^5 - 7/9*x^4 + 157/54*x^3 - 25/27*x^2 - 73/18*x + 37/9
64
65 TESTS::
66
67 sage: berlekamp_massey("banana")
68 Traceback (most recent call last):
69 ...
70 TypeError: argument must be a list or tuple
71 sage: berlekamp_massey([1,2,5])
72 Traceback (most recent call last):
73 ...
74 ValueError: argument must have an even number of terms
75 """
76 if not isinstance(a, (list, tuple)):
77 raise TypeError("argument must be a list or tuple")
78 if len(a) % 2:
79 raise ValueError("argument must have an even number of terms")
80
81 M = len(a) // 2
82
83 try:
84 K = a[0].parent().fraction_field()
85 except AttributeError:
86 K = sage.rings.rational_field.RationalField()
87 R = K['x']
88 x = R.gen()
89
90 f = {-1: R(a), 0: x**(2 * M)}
91 s = {-1: 1, 0: 0}
92 j = 0
93 while f[j].degree() >= M:
94 j += 1
95 qj, f[j] = f[j - 2].quo_rem(f[j - 1])
96 s[j] = s[j - 2] - qj * s[j - 1]
97 t = s[j].reverse()
98 return ~(t[t.degree()]) * t # make monic (~ is inverse in python)
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sage/matrix/berlekamp_massey.py b/src/sage/matrix/berlekamp_massey.py
--- a/src/sage/matrix/berlekamp_massey.py
+++ b/src/sage/matrix/berlekamp_massey.py
@@ -84,15 +84,11 @@
K = a[0].parent().fraction_field()
except AttributeError:
K = sage.rings.rational_field.RationalField()
- R = K['x']
- x = R.gen()
-
- f = {-1: R(a), 0: x**(2 * M)}
- s = {-1: 1, 0: 0}
- j = 0
- while f[j].degree() >= M:
- j += 1
- qj, f[j] = f[j - 2].quo_rem(f[j - 1])
- s[j] = s[j - 2] - qj * s[j - 1]
- t = s[j].reverse()
- return ~(t[t.degree()]) * t # make monic (~ is inverse in python)
+
+ R, x = K['x'].objgen()
+ f0, f1 = R(a), x**(2 * M)
+ s0, s1 = 1, 0
+ while f1.degree() >= M:
+ f0, (q, f1) = f1, f0.quo_rem(f1)
+ s0, s1 = s1, s0 - q * s1
+ return s1.reverse().monic()
| {"golden_diff": "diff --git a/src/sage/matrix/berlekamp_massey.py b/src/sage/matrix/berlekamp_massey.py\n--- a/src/sage/matrix/berlekamp_massey.py\n+++ b/src/sage/matrix/berlekamp_massey.py\n@@ -84,15 +84,11 @@\n K = a[0].parent().fraction_field()\n except AttributeError:\n K = sage.rings.rational_field.RationalField()\n- R = K['x']\n- x = R.gen()\n-\n- f = {-1: R(a), 0: x**(2 * M)}\n- s = {-1: 1, 0: 0}\n- j = 0\n- while f[j].degree() >= M:\n- j += 1\n- qj, f[j] = f[j - 2].quo_rem(f[j - 1])\n- s[j] = s[j - 2] - qj * s[j - 1]\n- t = s[j].reverse()\n- return ~(t[t.degree()]) * t # make monic (~ is inverse in python)\n+\n+ R, x = K['x'].objgen()\n+ f0, f1 = R(a), x**(2 * M)\n+ s0, s1 = 1, 0\n+ while f1.degree() >= M:\n+ f0, (q, f1) = f1, f0.quo_rem(f1)\n+ s0, s1 = s1, s0 - q * s1\n+ return s1.reverse().monic()\n", "issue": "Unoptimal memory complexity of `sage.matrix.berlekamp`\nThe code here is unoptimal:\r\n\r\nhttps://github.com/sagemath/sage/blob/6695becb762aebab78ef47d0fb12eae52be5d79d/src/sage/matrix/berlekamp_massey.py#L90-L98\r\n\r\nFor example, the following code uses a lot of memory:\r\n\r\n```python\r\nsage: from sage.matrix.berlekamp_massey import berlekamp_massey\r\nsage: p = next_prime(2**64)\r\nsage: ls = [GF(p).random_element() for _ in range(20000)]\r\nsage: berlekamp_massey(ls);\r\n```\r\n\r\nTo be more specific, the dictionaries are not necessarily and only `f[j - 2]` and `f[j - 1]` are used every time, same for `s`. So they can be stored as temporary variables.\r\n\r\n### Additional Information\r\n\r\nI am fixing it.\r\n\r\n### Checklist\r\n\r\n- [X] I have searched the existing issues for a bug report that matches the one I want to file, without success.\r\n- [X] I have read the documentation and troubleshoot guide\n", "before_files": [{"content": "\"\"\"\nMinimal Polynomials of Linear Recurrence Sequences\n\nAUTHORS:\n\n- William Stein\n\"\"\"\n# ****************************************************************************\n# Copyright (C) 2005 William Stein <[email protected]>\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# The full text of the GPL is available at:\n#\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nimport sage.rings.rational_field\n\n\ndef berlekamp_massey(a):\n r\"\"\"\n Use the Berlekamp-Massey algorithm to find the minimal polynomial\n of a linear recurrence sequence `a`.\n\n The minimal polynomial of a linear recurrence `\\{a_r\\}` is\n by definition the unique monic polynomial `g`, such that if\n `\\{a_r\\}` satisfies a linear recurrence\n `a_{j+k} + b_{j-1} a_{j-1+k} + \\cdots + b_0 a_k=0`\n (for all `k\\geq 0`), then `g` divides the\n polynomial `x^j + \\sum_{i=0}^{j-1} b_i x^i`.\n\n INPUT:\n\n - ``a`` -- a list of even length of elements of a field (or domain)\n\n OUTPUT:\n\n the minimal polynomial of the sequence, as a polynomial over the\n field in which the entries of `a` live\n\n .. 
WARNING::\n\n The result is only guaranteed to be correct on the full\n sequence if there exists a linear recurrence of length less\n than half the length of `a`.\n\n EXAMPLES::\n\n sage: from sage.matrix.berlekamp_massey import berlekamp_massey\n sage: berlekamp_massey([1,2,1,2,1,2])\n x^2 - 1\n sage: berlekamp_massey([GF(7)(1), 19, 1, 19])\n x^2 + 6\n sage: berlekamp_massey([2,2,1,2,1,191,393,132])\n x^4 - 36727/11711*x^3 + 34213/5019*x^2 + 7024942/35133*x - 335813/1673\n sage: berlekamp_massey(prime_range(2, 38)) # needs sage.libs.pari\n x^6 - 14/9*x^5 - 7/9*x^4 + 157/54*x^3 - 25/27*x^2 - 73/18*x + 37/9\n\n TESTS::\n\n sage: berlekamp_massey(\"banana\")\n Traceback (most recent call last):\n ...\n TypeError: argument must be a list or tuple\n sage: berlekamp_massey([1,2,5])\n Traceback (most recent call last):\n ...\n ValueError: argument must have an even number of terms\n \"\"\"\n if not isinstance(a, (list, tuple)):\n raise TypeError(\"argument must be a list or tuple\")\n if len(a) % 2:\n raise ValueError(\"argument must have an even number of terms\")\n\n M = len(a) // 2\n\n try:\n K = a[0].parent().fraction_field()\n except AttributeError:\n K = sage.rings.rational_field.RationalField()\n R = K['x']\n x = R.gen()\n\n f = {-1: R(a), 0: x**(2 * M)}\n s = {-1: 1, 0: 0}\n j = 0\n while f[j].degree() >= M:\n j += 1\n qj, f[j] = f[j - 2].quo_rem(f[j - 1])\n s[j] = s[j - 2] - qj * s[j - 1]\n t = s[j].reverse()\n return ~(t[t.degree()]) * t # make monic (~ is inverse in python)\n", "path": "src/sage/matrix/berlekamp_massey.py"}], "after_files": [{"content": "\"\"\"\nMinimal Polynomials of Linear Recurrence Sequences\n\nAUTHORS:\n\n- William Stein\n\"\"\"\n# ****************************************************************************\n# Copyright (C) 2005 William Stein <[email protected]>\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# The full text of the GPL is available at:\n#\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nimport sage.rings.rational_field\n\n\ndef berlekamp_massey(a):\n r\"\"\"\n Use the Berlekamp-Massey algorithm to find the minimal polynomial\n of a linear recurrence sequence `a`.\n\n The minimal polynomial of a linear recurrence `\\{a_r\\}` is\n by definition the unique monic polynomial `g`, such that if\n `\\{a_r\\}` satisfies a linear recurrence\n `a_{j+k} + b_{j-1} a_{j-1+k} + \\cdots + b_0 a_k=0`\n (for all `k\\geq 0`), then `g` divides the\n polynomial `x^j + \\sum_{i=0}^{j-1} b_i x^i`.\n\n INPUT:\n\n - ``a`` -- a list of even length of elements of a field (or domain)\n\n OUTPUT:\n\n the minimal polynomial of the sequence, as a polynomial over the\n field in which the entries of `a` live\n\n .. 
WARNING::\n\n The result is only guaranteed to be correct on the full\n sequence if there exists a linear recurrence of length less\n than half the length of `a`.\n\n EXAMPLES::\n\n sage: from sage.matrix.berlekamp_massey import berlekamp_massey\n sage: berlekamp_massey([1,2,1,2,1,2])\n x^2 - 1\n sage: berlekamp_massey([GF(7)(1),19,1,19])\n x^2 + 6\n sage: berlekamp_massey([2,2,1,2,1,191,393,132])\n x^4 - 36727/11711*x^3 + 34213/5019*x^2 + 7024942/35133*x - 335813/1673\n sage: berlekamp_massey(prime_range(2,38))\n x^6 - 14/9*x^5 - 7/9*x^4 + 157/54*x^3 - 25/27*x^2 - 73/18*x + 37/9\n\n TESTS::\n\n sage: berlekamp_massey(\"banana\")\n Traceback (most recent call last):\n ...\n TypeError: argument must be a list or tuple\n sage: berlekamp_massey([1,2,5])\n Traceback (most recent call last):\n ...\n ValueError: argument must have an even number of terms\n \"\"\"\n if not isinstance(a, (list, tuple)):\n raise TypeError(\"argument must be a list or tuple\")\n if len(a) % 2:\n raise ValueError(\"argument must have an even number of terms\")\n\n M = len(a) // 2\n\n try:\n K = a[0].parent().fraction_field()\n except AttributeError:\n K = sage.rings.rational_field.RationalField()\n\n R, x = K['x'].objgen()\n f0, f1 = R(a), x**(2 * M)\n s0, s1 = 1, 0\n while f1.degree() >= M:\n f0, (q, f1) = f1, f0.quo_rem(f1)\n s0, s1 = s1, s0 - q * s1\n return s1.reverse().monic()\n", "path": "src/sage/matrix/berlekamp_massey.py"}]} | 1,705 | 362 |
gh_patches_debug_2024 | rasdani/github-patches | git_diff | facebookresearch__fairseq-214 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Size Mismatch in AdaptiveSoftmax when targets are not specified
Following up on #212, I'm updating `sequence_generator.py` to generate text from a pre-trained language model (initially trained with adaptive softmax). When computing log probabilities with the targets set to `None`, I receive a size mismatch exception in the line below, possibly because the dictionary size is smaller than the adaptive softmax cut-off:
https://github.com/pytorch/fairseq/blob/388c520be21752cacb9fe3b1712038f32e0e9a5f/fairseq/modules/adaptive_softmax.py#L126
I imagine this could be solved by some sort of truncation of the output of `tail[i](input)`.
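For illustration only, a hedged sketch of what such a truncation could look like inside `get_log_prob`'s loop — note that renormalising over a truncated slice changes the distribution, so this is merely the reporter's suggestion made concrete, not a vetted fix:

```python
# Hypothetical: clamp each tail slice to the real vocabulary size so the
# copy in the targets-None path can never overrun log_probs.
for i in range(len(self.tail)):
    start = self.cutoff[i]
    end = min(self.cutoff[i + 1], self.vocab_size)  # truncate the last cluster
    if target_idxs is None:
        tail_out = self.tail[i](input)[:, : end - start]
        log_probs[:, start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None])
```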
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fairseq/modules/adaptive_softmax.py`
Content:
```
1 # Copyright (c) 2017-present, Facebook, Inc.
2 # All rights reserved.
3 #
4 # This source code is licensed under the license found in the LICENSE file in
5 # the root directory of this source tree. An additional grant of patent rights
6 # can be found in the PATENTS file in the same directory.
7
8
9 import torch.nn.functional as F
10 from torch import nn
11
12
13 class AdaptiveSoftmax(nn.Module):
14 """
15 This is an implementation of the efficient softmax approximation for
16 graphical processing units (GPU), described in the paper "Efficient softmax
17 approximation for GPUs" (http://arxiv.org/abs/1609.04309).
18 """
19
20 def __init__(self, vocab_size, input_dim, cutoff, dropout):
21 super().__init__()
22
23 if vocab_size > cutoff[-1]:
24 cutoff = cutoff + [vocab_size]
25
26 output_dim = cutoff[0] + len(cutoff) - 1
27
28 self.vocab_size = vocab_size
29 self.cutoff = cutoff
30 self.dropout = dropout
31
32 self.lsm = nn.LogSoftmax(dim=1)
33 self.head = nn.Linear(input_dim, output_dim, bias=False)
34 self.tail = nn.ModuleList()
35
36 for i in range(len(cutoff) - 1):
37 self.tail.append(
38 nn.Sequential(
39 nn.Linear(input_dim, input_dim // 4 ** i, bias=False),
40 nn.Dropout(dropout),
41 nn.Linear(input_dim // 4 ** i, cutoff[i + 1] - cutoff[i], bias=False)
42 )
43 )
44
45 def init_weights(m):
46 if hasattr(m, 'weight'):
47 nn.init.xavier_uniform_(m.weight)
48
49 self.apply(init_weights)
50
51 def adapt_target(self, target):
52 """
53 In order to be efficient, the AdaptiveSoftMax does not compute the
54 scores for all the word of the vocabulary for all the examples. It is
55 thus necessary to call the method adapt_target of the AdaptiveSoftMax
56 layer inside each forward pass.
57 """
58
59 target = target.view(-1)
60 new_target = [target.clone()]
61 target_idxs = []
62
63 for i in range(len(self.cutoff) - 1):
64 mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))
65 new_target[0][mask] = self.cutoff[0] + i - 1
66
67 if mask.any():
68 target_idxs.append(mask.nonzero().squeeze(1))
69 new_target.append(target[mask].add(-self.cutoff[i]))
70 else:
71 target_idxs.append(None)
72 new_target.append(None)
73
74 return new_target, target_idxs
75
76 def forward(self, input, target):
77 """
78 Args:
79 input: (b x t x d)
80 target: (b x t)
81 Returns:
82 2 lists: output for each cutoff section and new targets by cut off
83 """
84
85 input = input.contiguous().view(-1, input.size(-1))
86 input = F.dropout(input, p=self.dropout, training=self.training)
87
88 new_target, target_idxs = self.adapt_target(target)
89 output = [self.head(input)]
90
91 for i in range(len(target_idxs)):
92 if target_idxs[i] is not None:
93 output.append(self.tail[i](input.index_select(0, target_idxs[i])))
94 else:
95 output.append(None)
96
97 return output, new_target
98
99 def get_log_prob(self, input, target):
100 """
101 Computes the log probabilities for all the words of the vocabulary,
102 given a 2D tensor of hidden vectors.
103 """
104
105 bsz, length, dim = input.size()
106 input = input.contiguous().view(-1, dim)
107
108 if target is not None:
109 _, target_idxs = self.adapt_target(target)
110 else:
111 target_idxs = None
112
113 head_y = self.head(input)
114 log_probs = head_y.new_zeros(input.size(0), self.vocab_size)
115
116 head_sz = self.cutoff[0] + len(self.tail)
117 log_probs[:, :head_sz] = self.lsm(head_y)
118 tail_priors = log_probs[:, self.cutoff[0] - 1: head_sz - 1].clone()
119
120 for i in range(len(self.tail)):
121 start = self.cutoff[i]
122 end = self.cutoff[i + 1]
123
124 if target_idxs is None:
125 tail_out = log_probs[:, start:end]
126 tail_out.copy_(self.tail[i](input))
127 log_probs[:, start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None])
128 elif target_idxs[i] is not None:
129 idxs = target_idxs[i]
130 tail_out = log_probs[idxs, start:end]
131 tail_out.copy_(self.tail[i](input[idxs]))
132 log_probs[idxs, start:end] = self.lsm(tail_out).add_(tail_priors[idxs, i, None])
133
134 log_probs = log_probs.view(bsz, length, -1)
135 return log_probs
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fairseq/modules/adaptive_softmax.py b/fairseq/modules/adaptive_softmax.py
--- a/fairseq/modules/adaptive_softmax.py
+++ b/fairseq/modules/adaptive_softmax.py
@@ -22,6 +22,9 @@
if vocab_size > cutoff[-1]:
cutoff = cutoff + [vocab_size]
+ else:
+ assert vocab_size == cutoff[
+ -1], 'cannot specify cutoff smaller than vocab size'
output_dim = cutoff[0] + len(cutoff) - 1
| {"golden_diff": "diff --git a/fairseq/modules/adaptive_softmax.py b/fairseq/modules/adaptive_softmax.py\n--- a/fairseq/modules/adaptive_softmax.py\n+++ b/fairseq/modules/adaptive_softmax.py\n@@ -22,6 +22,9 @@\n \n if vocab_size > cutoff[-1]:\n cutoff = cutoff + [vocab_size]\n+ else:\n+ assert vocab_size == cutoff[\n+ -1], 'cannot specify cutoff smaller than vocab size'\n \n output_dim = cutoff[0] + len(cutoff) - 1\n", "issue": "Size Mismatch in AdaptiveSoftmax when targets are not specified\nFollowing up on #212 , I'm updating `sequence_generator.py` to generate text from a pre-trained language model (initially trained with adaptive softmax). When computing log probabilities, and the targets are set to none, I receive a size mismatch exception in the line below, possibly because the dictionary size is smaller than the adaptive softmax cut-off: \r\n\r\nhttps://github.com/pytorch/fairseq/blob/388c520be21752cacb9fe3b1712038f32e0e9a5f/fairseq/modules/adaptive_softmax.py#L126\r\n\r\nI imagine this could be solved by some sort of truncation to the output of tail[i].input\n", "before_files": [{"content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\n\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass AdaptiveSoftmax(nn.Module):\n \"\"\"\n This is an implementation of the efficient softmax approximation for\n graphical processing units (GPU), described in the paper \"Efficient softmax\n approximation for GPUs\" (http://arxiv.org/abs/1609.04309).\n \"\"\"\n\n def __init__(self, vocab_size, input_dim, cutoff, dropout):\n super().__init__()\n\n if vocab_size > cutoff[-1]:\n cutoff = cutoff + [vocab_size]\n\n output_dim = cutoff[0] + len(cutoff) - 1\n\n self.vocab_size = vocab_size\n self.cutoff = cutoff\n self.dropout = dropout\n\n self.lsm = nn.LogSoftmax(dim=1)\n self.head = nn.Linear(input_dim, output_dim, bias=False)\n self.tail = nn.ModuleList()\n\n for i in range(len(cutoff) - 1):\n self.tail.append(\n nn.Sequential(\n nn.Linear(input_dim, input_dim // 4 ** i, bias=False),\n nn.Dropout(dropout),\n nn.Linear(input_dim // 4 ** i, cutoff[i + 1] - cutoff[i], bias=False)\n )\n )\n\n def init_weights(m):\n if hasattr(m, 'weight'):\n nn.init.xavier_uniform_(m.weight)\n\n self.apply(init_weights)\n\n def adapt_target(self, target):\n \"\"\"\n In order to be efficient, the AdaptiveSoftMax does not compute the\n scores for all the word of the vocabulary for all the examples. 
It is\n thus necessary to call the method adapt_target of the AdaptiveSoftMax\n layer inside each forward pass.\n \"\"\"\n\n target = target.view(-1)\n new_target = [target.clone()]\n target_idxs = []\n\n for i in range(len(self.cutoff) - 1):\n mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))\n new_target[0][mask] = self.cutoff[0] + i - 1\n\n if mask.any():\n target_idxs.append(mask.nonzero().squeeze(1))\n new_target.append(target[mask].add(-self.cutoff[i]))\n else:\n target_idxs.append(None)\n new_target.append(None)\n\n return new_target, target_idxs\n\n def forward(self, input, target):\n \"\"\"\n Args:\n input: (b x t x d)\n target: (b x t)\n Returns:\n 2 lists: output for each cutoff section and new targets by cut off\n \"\"\"\n\n input = input.contiguous().view(-1, input.size(-1))\n input = F.dropout(input, p=self.dropout, training=self.training)\n\n new_target, target_idxs = self.adapt_target(target)\n output = [self.head(input)]\n\n for i in range(len(target_idxs)):\n if target_idxs[i] is not None:\n output.append(self.tail[i](input.index_select(0, target_idxs[i])))\n else:\n output.append(None)\n\n return output, new_target\n\n def get_log_prob(self, input, target):\n \"\"\"\n Computes the log probabilities for all the words of the vocabulary,\n given a 2D tensor of hidden vectors.\n \"\"\"\n\n bsz, length, dim = input.size()\n input = input.contiguous().view(-1, dim)\n\n if target is not None:\n _, target_idxs = self.adapt_target(target)\n else:\n target_idxs = None\n\n head_y = self.head(input)\n log_probs = head_y.new_zeros(input.size(0), self.vocab_size)\n\n head_sz = self.cutoff[0] + len(self.tail)\n log_probs[:, :head_sz] = self.lsm(head_y)\n tail_priors = log_probs[:, self.cutoff[0] - 1: head_sz - 1].clone()\n\n for i in range(len(self.tail)):\n start = self.cutoff[i]\n end = self.cutoff[i + 1]\n\n if target_idxs is None:\n tail_out = log_probs[:, start:end]\n tail_out.copy_(self.tail[i](input))\n log_probs[:, start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None])\n elif target_idxs[i] is not None:\n idxs = target_idxs[i]\n tail_out = log_probs[idxs, start:end]\n tail_out.copy_(self.tail[i](input[idxs]))\n log_probs[idxs, start:end] = self.lsm(tail_out).add_(tail_priors[idxs, i, None])\n\n log_probs = log_probs.view(bsz, length, -1)\n return log_probs\n", "path": "fairseq/modules/adaptive_softmax.py"}], "after_files": [{"content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. 
An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\n\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass AdaptiveSoftmax(nn.Module):\n \"\"\"\n This is an implementation of the efficient softmax approximation for\n graphical processing units (GPU), described in the paper \"Efficient softmax\n approximation for GPUs\" (http://arxiv.org/abs/1609.04309).\n \"\"\"\n\n def __init__(self, vocab_size, input_dim, cutoff, dropout):\n super().__init__()\n\n if vocab_size > cutoff[-1]:\n cutoff = cutoff + [vocab_size]\n else:\n assert vocab_size == cutoff[\n -1], 'cannot specify cutoff smaller than vocab size'\n\n output_dim = cutoff[0] + len(cutoff) - 1\n\n self.vocab_size = vocab_size\n self.cutoff = cutoff\n self.dropout = dropout\n\n self.lsm = nn.LogSoftmax(dim=1)\n self.head = nn.Linear(input_dim, output_dim, bias=False)\n self.tail = nn.ModuleList()\n\n for i in range(len(cutoff) - 1):\n self.tail.append(\n nn.Sequential(\n nn.Linear(input_dim, input_dim // 4 ** i, bias=False),\n nn.Dropout(dropout),\n nn.Linear(input_dim // 4 ** i, cutoff[i + 1] - cutoff[i], bias=False)\n )\n )\n\n def init_weights(m):\n if hasattr(m, 'weight'):\n nn.init.xavier_uniform_(m.weight)\n\n self.apply(init_weights)\n\n def adapt_target(self, target):\n \"\"\"\n In order to be efficient, the AdaptiveSoftMax does not compute the\n scores for all the word of the vocabulary for all the examples. It is\n thus necessary to call the method adapt_target of the AdaptiveSoftMax\n layer inside each forward pass.\n \"\"\"\n\n target = target.view(-1)\n new_target = [target.clone()]\n target_idxs = []\n\n for i in range(len(self.cutoff) - 1):\n mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))\n new_target[0][mask] = self.cutoff[0] + i - 1\n\n if mask.any():\n target_idxs.append(mask.nonzero().squeeze(1))\n new_target.append(target[mask].add(-self.cutoff[i]))\n else:\n target_idxs.append(None)\n new_target.append(None)\n\n return new_target, target_idxs\n\n def forward(self, input, target):\n \"\"\"\n Args:\n input: (b x t x d)\n target: (b x t)\n Returns:\n 2 lists: output for each cutoff section and new targets by cut off\n \"\"\"\n\n input = input.contiguous().view(-1, input.size(-1))\n input = F.dropout(input, p=self.dropout, training=self.training)\n\n new_target, target_idxs = self.adapt_target(target)\n output = [self.head(input)]\n\n for i in range(len(target_idxs)):\n if target_idxs[i] is not None:\n output.append(self.tail[i](input.index_select(0, target_idxs[i])))\n else:\n output.append(None)\n\n return output, new_target\n\n def get_log_prob(self, input, target):\n \"\"\"\n Computes the log probabilities for all the words of the vocabulary,\n given a 2D tensor of hidden vectors.\n \"\"\"\n\n bsz, length, dim = input.size()\n input = input.contiguous().view(-1, dim)\n\n if target is not None:\n _, target_idxs = self.adapt_target(target)\n else:\n target_idxs = None\n\n head_y = self.head(input)\n log_probs = head_y.new_zeros(input.size(0), self.vocab_size)\n\n head_sz = self.cutoff[0] + len(self.tail)\n log_probs[:, :head_sz] = self.lsm(head_y)\n tail_priors = log_probs[:, self.cutoff[0] - 1: head_sz - 1].clone()\n\n for i in range(len(self.tail)):\n start = self.cutoff[i]\n end = self.cutoff[i + 1]\n\n if target_idxs is None:\n tail_out = log_probs[:, start:end]\n tail_out.copy_(self.tail[i](input))\n log_probs[:, start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None])\n elif target_idxs[i] is not None:\n idxs = target_idxs[i]\n 
tail_out = log_probs[idxs, start:end]\n tail_out.copy_(self.tail[i](input[idxs]))\n log_probs[idxs, start:end] = self.lsm(tail_out).add_(tail_priors[idxs, i, None])\n\n log_probs = log_probs.view(bsz, length, -1)\n return log_probs\n", "path": "fairseq/modules/adaptive_softmax.py"}]} | 1,818 | 119 |
gh_patches_debug_34863 | rasdani/github-patches | git_diff | microsoft__lisa-836 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ubuntu 20.04 - platform.dist() is deprecated since Python 3.5 and removed in Python 3.8
Affected distro: Ubuntu 20.04 (which uses Python 3.8)
Affected case: WALA-VERIFY-VERBOSE-ENABLED-LOGS
Use `distro.linux_distribution(full_distribution_name=False)` instead.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py`
Content:
```
1 #!/usr/bin/env python
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the Apache License.
4 from azuremodules import *
5
6 import argparse
7 import os
8 import platform
9 import time
10
11 parser = argparse.ArgumentParser()
12
13 file_path = os.path.dirname(os.path.realpath(__file__))
14 constants_path = os.path.join(file_path, "constants.sh")
15 params = GetParams(constants_path)
16 passwd = params["PASSWORD"]
17
18 distro = platform.dist()
19
20
21 def RunTest():
22 UpdateState("TestRunning")
23 if(distro[0] == "CoreOS"):
24 versionOutPut = Run("waagent --version")
25 else:
26 output = Run("pgrep -fa python3.*waagent")
27 if ("python3" in output) :
28 versionOutPut = Run("/usr/bin/python3 /usr/sbin/waagent --version")
29 else :
30 versionOutPut = Run("/usr/sbin/waagent --version")
31
32 RunLog.info("Checking log waagent.log...")
33 if("2.0." in versionOutPut):
34 output = Run("grep -i 'iptables -I INPUT -p udp --dport' /var/log/waagent* | wc -l | tr -d '\n'")
35 RunLog.info("agent version is 2.0")
36 else:
37 output = Run("grep -i 'VERBOSE' /var/log/waagent* | wc -l | tr -d '\n'")
38 RunLog.info("agent version > 2.0")
39
40 if not (output == "0") :
41 RunLog.info('The log file contains the verbose logs')
42 ResultLog.info('PASS')
43 UpdateState("TestCompleted")
44 else :
45 RunLog.error('Verify waagent.log fail, the log file does not contain the verbose logs')
46 ResultLog.error('FAIL')
47 UpdateState("TestCompleted")
48
49
50 def Restartwaagent():
51 if (distro[0] == "CoreOS"):
52 Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /usr/share/oem/waagent.conf")
53 elif (DetectDistro()[0] == 'clear-linux-os'):
54 Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g \
55 /usr/share/defaults/waagent/waagent.conf")
56 else:
57 Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /etc/waagent.conf")
58 RunLog.info("Restart waagent service...")
59 result = Run("echo '"+passwd+"' | sudo -S find / -name systemctl |wc -l | tr -d '\n'")
60 if (distro[0] == "Ubuntu") or (distro[0] == "debian"):
61 Run("echo '"+passwd+"' | sudo -S service walinuxagent restart")
62 else:
63 if (result == "0") :
64 os.system("echo '"+passwd+"' | sudo -S service waagent restart")
65 else:
66 os.system("echo '"+passwd+"' | sudo -S systemctl restart waagent")
67 time.sleep(60)
68
69 Restartwaagent()
70 RunTest()
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py b/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py
--- a/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py
+++ b/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py
@@ -7,6 +7,7 @@
import os
import platform
import time
+import sys
parser = argparse.ArgumentParser()
@@ -14,13 +15,16 @@
constants_path = os.path.join(file_path, "constants.sh")
params = GetParams(constants_path)
passwd = params["PASSWORD"]
-
-distro = platform.dist()
+if sys.version_info[0] >= 3:
+ import distro
+ distro = distro.linux_distribution(full_distribution_name=False)
+else:
+ distro = platform.dist()
def RunTest():
UpdateState("TestRunning")
- if(distro[0] == "CoreOS"):
+ if(distro[0].upper() == "COREOS"):
versionOutPut = Run("waagent --version")
else:
output = Run("pgrep -fa python3.*waagent")
@@ -48,7 +52,7 @@
def Restartwaagent():
- if (distro[0] == "CoreOS"):
+ if (distro[0].upper() == "COREOS"):
Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /usr/share/oem/waagent.conf")
elif (DetectDistro()[0] == 'clear-linux-os'):
Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g \
@@ -57,7 +61,7 @@
Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /etc/waagent.conf")
RunLog.info("Restart waagent service...")
result = Run("echo '"+passwd+"' | sudo -S find / -name systemctl |wc -l | tr -d '\n'")
- if (distro[0] == "Ubuntu") or (distro[0] == "debian"):
+ if (distro[0].upper() == "UBUNTU") or (distro[0].upper() == "DEBIAN"):
Run("echo '"+passwd+"' | sudo -S service walinuxagent restart")
else:
if (result == "0") :
| {"golden_diff": "diff --git a/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py b/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py\n--- a/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py\n+++ b/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py\n@@ -7,6 +7,7 @@\n import os\n import platform\n import time\n+import sys\n \n parser = argparse.ArgumentParser()\n \n@@ -14,13 +15,16 @@\n constants_path = os.path.join(file_path, \"constants.sh\")\n params = GetParams(constants_path)\n passwd = params[\"PASSWORD\"]\n-\n-distro = platform.dist()\n+if sys.version_info[0] >= 3:\n+ import distro\n+ distro = distro.linux_distribution(full_distribution_name=False)\n+else:\n+ distro = platform.dist()\n \n \n def RunTest():\n UpdateState(\"TestRunning\")\n- if(distro[0] == \"CoreOS\"):\n+ if(distro[0].upper() == \"COREOS\"):\n versionOutPut = Run(\"waagent --version\")\n else:\n output = Run(\"pgrep -fa python3.*waagent\")\n@@ -48,7 +52,7 @@\n \n \n def Restartwaagent():\n- if (distro[0] == \"CoreOS\"):\n+ if (distro[0].upper() == \"COREOS\"):\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /usr/share/oem/waagent.conf\")\n elif (DetectDistro()[0] == 'clear-linux-os'):\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g \\\n@@ -57,7 +61,7 @@\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /etc/waagent.conf\")\n RunLog.info(\"Restart waagent service...\")\n result = Run(\"echo '\"+passwd+\"' | sudo -S find / -name systemctl |wc -l | tr -d '\\n'\")\n- if (distro[0] == \"Ubuntu\") or (distro[0] == \"debian\"):\n+ if (distro[0].upper() == \"UBUNTU\") or (distro[0].upper() == \"DEBIAN\"):\n Run(\"echo '\"+passwd+\"' | sudo -S service walinuxagent restart\")\n else:\n if (result == \"0\") :\n", "issue": "Ubuntu 20.04 - platform.dist() is deprecated since Python 3.5 and removed in Python 3.8\nAffected distro - ubuntu 20.04 (use python 3.8)\r\nAffected case - WALA-VERIFY-VERBOSE-ENABLED-LOGS\r\nUse distro.linux_distribution(full_distribution_name=False) instead\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the Apache License.\nfrom azuremodules import *\n\nimport argparse\nimport os\nimport platform\nimport time\n\nparser = argparse.ArgumentParser()\n\nfile_path = os.path.dirname(os.path.realpath(__file__))\nconstants_path = os.path.join(file_path, \"constants.sh\")\nparams = GetParams(constants_path)\npasswd = params[\"PASSWORD\"]\n\ndistro = platform.dist()\n\n\ndef RunTest():\n UpdateState(\"TestRunning\")\n if(distro[0] == \"CoreOS\"):\n versionOutPut = Run(\"waagent --version\")\n else:\n output = Run(\"pgrep -fa python3.*waagent\")\n if (\"python3\" in output) :\n versionOutPut = Run(\"/usr/bin/python3 /usr/sbin/waagent --version\")\n else :\n versionOutPut = Run(\"/usr/sbin/waagent --version\")\n\n RunLog.info(\"Checking log waagent.log...\")\n if(\"2.0.\" in versionOutPut):\n output = Run(\"grep -i 'iptables -I INPUT -p udp --dport' /var/log/waagent* | wc -l | tr -d '\\n'\")\n RunLog.info(\"agent version is 2.0\")\n else:\n output = Run(\"grep -i 'VERBOSE' /var/log/waagent* | wc -l | tr -d '\\n'\")\n RunLog.info(\"agent version > 2.0\")\n\n if not (output == \"0\") :\n RunLog.info('The log file contains the verbose logs')\n ResultLog.info('PASS')\n UpdateState(\"TestCompleted\")\n else :\n RunLog.error('Verify waagent.log fail, the log file does not contain the verbose logs')\n ResultLog.error('FAIL')\n UpdateState(\"TestCompleted\")\n\n\ndef Restartwaagent():\n if (distro[0] == \"CoreOS\"):\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /usr/share/oem/waagent.conf\")\n elif (DetectDistro()[0] == 'clear-linux-os'):\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g \\\n /usr/share/defaults/waagent/waagent.conf\")\n else:\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /etc/waagent.conf\")\n RunLog.info(\"Restart waagent service...\")\n result = Run(\"echo '\"+passwd+\"' | sudo -S find / -name systemctl |wc -l | tr -d '\\n'\")\n if (distro[0] == \"Ubuntu\") or (distro[0] == \"debian\"):\n Run(\"echo '\"+passwd+\"' | sudo -S service walinuxagent restart\")\n else:\n if (result == \"0\") :\n os.system(\"echo '\"+passwd+\"' | sudo -S service waagent restart\")\n else:\n os.system(\"echo '\"+passwd+\"' | sudo -S systemctl restart waagent\")\n time.sleep(60)\n\nRestartwaagent()\nRunTest()\n", "path": "Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the Apache License.\nfrom azuremodules import *\n\nimport argparse\nimport os\nimport platform\nimport time\nimport sys\n\nparser = argparse.ArgumentParser()\n\nfile_path = os.path.dirname(os.path.realpath(__file__))\nconstants_path = os.path.join(file_path, \"constants.sh\")\nparams = GetParams(constants_path)\npasswd = params[\"PASSWORD\"]\nif sys.version_info[0] >= 3:\n import distro\n distro = distro.linux_distribution(full_distribution_name=False)\nelse:\n distro = platform.dist()\n\n\ndef RunTest():\n UpdateState(\"TestRunning\")\n if(distro[0].upper() == \"COREOS\"):\n versionOutPut = Run(\"waagent --version\")\n else:\n output = Run(\"pgrep -fa python3.*waagent\")\n if (\"python3\" in output) :\n versionOutPut = Run(\"/usr/bin/python3 /usr/sbin/waagent --version\")\n else :\n versionOutPut = Run(\"/usr/sbin/waagent --version\")\n\n RunLog.info(\"Checking log waagent.log...\")\n if(\"2.0.\" in versionOutPut):\n output = Run(\"grep -i 'iptables -I INPUT -p udp --dport' /var/log/waagent* | wc -l | tr -d '\\n'\")\n RunLog.info(\"agent version is 2.0\")\n else:\n output = Run(\"grep -i 'VERBOSE' /var/log/waagent* | wc -l | tr -d '\\n'\")\n RunLog.info(\"agent version > 2.0\")\n\n if not (output == \"0\") :\n RunLog.info('The log file contains the verbose logs')\n ResultLog.info('PASS')\n UpdateState(\"TestCompleted\")\n else :\n RunLog.error('Verify waagent.log fail, the log file does not contain the verbose logs')\n ResultLog.error('FAIL')\n UpdateState(\"TestCompleted\")\n\n\ndef Restartwaagent():\n if (distro[0].upper() == \"COREOS\"):\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /usr/share/oem/waagent.conf\")\n elif (DetectDistro()[0] == 'clear-linux-os'):\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g \\\n /usr/share/defaults/waagent/waagent.conf\")\n else:\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /etc/waagent.conf\")\n RunLog.info(\"Restart waagent service...\")\n result = Run(\"echo '\"+passwd+\"' | sudo -S find / -name systemctl |wc -l | tr -d '\\n'\")\n if (distro[0].upper() == \"UBUNTU\") or (distro[0].upper() == \"DEBIAN\"):\n Run(\"echo '\"+passwd+\"' | sudo -S service walinuxagent restart\")\n else:\n if (result == \"0\") :\n os.system(\"echo '\"+passwd+\"' | sudo -S service waagent restart\")\n else:\n os.system(\"echo '\"+passwd+\"' | sudo -S systemctl restart waagent\")\n time.sleep(60)\n\nRestartwaagent()\nRunTest()\n", "path": "Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py"}]} | 1,169 | 564 |
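The patch above hinges on a version guard, since `platform.dist()` was removed in Python 3.8. A minimal sketch of the guarded lookup (the `distro_info` name is illustrative):

```python
import sys

if sys.version_info[0] >= 3:
    import distro  # third-party package, the suggested replacement
    distro_info = distro.linux_distribution(full_distribution_name=False)
else:
    import platform
    distro_info = platform.dist()  # deprecated since 3.5, removed in 3.8
print(distro_info)
```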
gh_patches_debug_9987 | rasdani/github-patches | git_diff | cltk__cltk-906 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix broken NER doctest
Open to anyone.
This simple NER doctest: https://github.com/cltk/cltk/blob/9b9cdb42dcc1c707ab3db3ef8214837bb7c262b5/cltk/tag/ner.py#L78
is all of a sudden failing (example: https://travis-ci.org/cltk/cltk/jobs/525125856#L1935).
The test expects 4 padded spaces on the left.
I have no idea why this would break all of a sudden.
--- END ISSUE ---
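A quick way to exercise the failing example outside the doctest runner, assuming cltk and its Latin models are installed:

```python
# Reproduction sketch; mirrors the doctest embedded in cltk/tag/ner.py
from cltk.tag.ner import tag_ner

result = tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list)
print(result)
# The doctest expects:
# [('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')]
```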
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cltk/tag/ner.py`
Content:
```
1 """Named entity recognition (NER)."""
2
3 from cltk.corpus.utils.importer import CorpusImporter
4 from nltk.tokenize.punkt import PunktLanguageVars
5 from cltk.tokenize.word import WordTokenizer
6 import os
7 import importlib.machinery
8
9 __author__ = ['Natasha Voake <[email protected]>']
10 __license__ = 'MIT License. See LICENSE.'
11
12 NER_DICT = {'greek': '~/cltk_data/greek/model/greek_models_cltk/ner/proper_names.txt',
13 'latin': '~/cltk_data/latin/model/latin_models_cltk/ner/proper_names.txt'}
14
15
16 class NamedEntityReplacer(object):
17
18 def __init__(self):
19
20 self.entities = self._load_necessary_data()
21
22
23 def _load_necessary_data(self):
24 rel_path = os.path.join('~', 'cltk_data',
25 'french',
26 'text', 'french_data_cltk',
27 'named_entities_fr.py')
28 path = os.path.expanduser(rel_path)
29 # logger.info('Loading entries. This may take a minute.')
30 loader = importlib.machinery.SourceFileLoader('entities', path)
31 module = loader.load_module()
32 entities = module.entities
33 return entities
34
35 """tags named entities in a string and outputs a list of tuples in the following format:
36 (name, "entity", kind_of_entity)"""
37
38 def tag_ner_fr(self, input_text, output_type=list):
39
40 entities = self.entities
41
42 for entity in entities:
43 (name, kind) = entity
44
45 word_tokenizer = WordTokenizer('french')
46 tokenized_text = word_tokenizer.tokenize(input_text)
47 ner_tuple_list = []
48
49 match = False
50 for word in tokenized_text:
51 for name, kind in entities:
52 if word == name:
53 named_things = ([(name, 'entity', kind)])
54 ner_tuple_list.append(named_things)
55 match = True
56 break
57 else:
58 ner_tuple_list.append((word,))
59 return ner_tuple_list
60
61
62 def _check_latest_data(lang):
63 """Check for presence of proper names dir, clone if not."""
64
65 assert lang in NER_DICT.keys(), \
66 'Invalid language. Choose from: {}'.format(', '.join(NER_DICT.keys()))
67
68 ner_file_path = os.path.expanduser(NER_DICT[lang])
69
70 if not os.path.isfile(ner_file_path):
71 corpus_importer = CorpusImporter(lang)
72 corpus_importer.import_corpus('{}_models_cltk'.format(lang))
73
74
75 def tag_ner(lang, input_text, output_type=list):
76 """Run NER for chosen language.
77 Choosing output_type=list, returns a list of tuples:
78
79 >>> tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list)
80 [('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')]
81 """
82
83 _check_latest_data(lang)
84
85 assert lang in NER_DICT.keys(), \
86 'Invalid language. Choose from: {}'.format(', '.join(NER_DICT.keys()))
87 types = [str, list]
88 assert type(input_text) in types, 'Input must be: {}.'.format(', '.join(types))
89 assert output_type in types, 'Output must be a {}.'.format(', '.join(types))
90
91 if type(input_text) == str:
92 punkt = PunktLanguageVars()
93 tokens = punkt.word_tokenize(input_text)
94 new_tokens = []
95 for word in tokens:
96 if word.endswith('.'):
97 new_tokens.append(word[:-1])
98 new_tokens.append('.')
99 else:
100 new_tokens.append(word)
101 input_text = new_tokens
102
103 ner_file_path = os.path.expanduser(NER_DICT[lang])
104 with open(ner_file_path) as file_open:
105 ner_str = file_open.read()
106 ner_list = ner_str.split('\n')
107
108 ner_tuple_list = []
109 for count, word_token in enumerate(input_text):
110 match = False
111 for ner_word in ner_list:
112 # the replacer slows things down, but is necessary
113 if word_token == ner_word:
114 ner_tuple = (word_token, 'Entity')
115 ner_tuple_list.append(ner_tuple)
116 match = True
117 break
118 if not match:
119 ner_tuple_list.append((word_token,))
120
121 if output_type is str:
122 string = ''
123 for tup in ner_tuple_list:
124 start_space = ' '
125 final_space = ''
126 # this is some mediocre string reconstitution
127 # maybe not worth the effort
128 if tup[0] in [',', '.', ';', ':', '?', '!']:
129 start_space = ''
130 if len(tup) == 2:
131 string += start_space + tup[0] + '/' + tup[1] + final_space
132 else:
133 string += start_space + tup[0] + final_space
134 return string
135
136 return ner_tuple_list
137
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cltk/tag/ner.py b/cltk/tag/ner.py
--- a/cltk/tag/ner.py
+++ b/cltk/tag/ner.py
@@ -74,10 +74,6 @@
def tag_ner(lang, input_text, output_type=list):
"""Run NER for chosen language.
- Choosing output_type=list, returns a list of tuples:
-
- >>> tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list)
- [('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')]
"""
_check_latest_data(lang)
@@ -134,4 +130,3 @@
return string
return ner_tuple_list
-
| {"golden_diff": "diff --git a/cltk/tag/ner.py b/cltk/tag/ner.py\n--- a/cltk/tag/ner.py\n+++ b/cltk/tag/ner.py\n@@ -74,10 +74,6 @@\n \n def tag_ner(lang, input_text, output_type=list):\n \"\"\"Run NER for chosen language.\n- Choosing output_type=list, returns a list of tuples:\n- \n- >>> tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list)\n- [('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')]\n \"\"\"\n \n _check_latest_data(lang)\n@@ -134,4 +130,3 @@\n return string\n \n return ner_tuple_list\n-\n", "issue": "Fix broken NER doctest\nOpen to anyone.\r\n\r\nThis simple NER doctest: https://github.com/cltk/cltk/blob/9b9cdb42dcc1c707ab3db3ef8214837bb7c262b5/cltk/tag/ner.py#L78\r\n\r\nis all of a sudden failing (example: https://travis-ci.org/cltk/cltk/jobs/525125856#L1935 ).\r\n\r\nThe test expects 4 padded spaces on the left.\r\n\r\nI have no idea why this would break all of a sudden.\r\n\r\n\n", "before_files": [{"content": "\"\"\"Named entity recognition (NER).\"\"\"\n\nfrom cltk.corpus.utils.importer import CorpusImporter\nfrom nltk.tokenize.punkt import PunktLanguageVars\nfrom cltk.tokenize.word import WordTokenizer\nimport os\nimport importlib.machinery\n\n__author__ = ['Natasha Voake <[email protected]>']\n__license__ = 'MIT License. See LICENSE.'\n\nNER_DICT = {'greek': '~/cltk_data/greek/model/greek_models_cltk/ner/proper_names.txt',\n 'latin': '~/cltk_data/latin/model/latin_models_cltk/ner/proper_names.txt'}\n\n\nclass NamedEntityReplacer(object):\n\n def __init__(self):\n\n self.entities = self._load_necessary_data()\n\n\n def _load_necessary_data(self):\n rel_path = os.path.join('~', 'cltk_data',\n 'french',\n 'text', 'french_data_cltk',\n 'named_entities_fr.py')\n path = os.path.expanduser(rel_path)\n # logger.info('Loading entries. This may take a minute.')\n loader = importlib.machinery.SourceFileLoader('entities', path)\n module = loader.load_module()\n entities = module.entities\n return entities\n\n \"\"\"tags named entities in a string and outputs a list of tuples in the following format:\n (name, \"entity\", kind_of_entity)\"\"\"\n\n def tag_ner_fr(self, input_text, output_type=list):\n\n entities = self.entities\n\n for entity in entities:\n (name, kind) = entity\n\n word_tokenizer = WordTokenizer('french')\n tokenized_text = word_tokenizer.tokenize(input_text)\n ner_tuple_list = []\n\n match = False\n for word in tokenized_text:\n for name, kind in entities:\n if word == name:\n named_things = ([(name, 'entity', kind)])\n ner_tuple_list.append(named_things)\n match = True\n break\n else:\n ner_tuple_list.append((word,))\n return ner_tuple_list\n\n\ndef _check_latest_data(lang):\n \"\"\"Check for presence of proper names dir, clone if not.\"\"\"\n\n assert lang in NER_DICT.keys(), \\\n 'Invalid language. Choose from: {}'.format(', '.join(NER_DICT.keys()))\n\n ner_file_path = os.path.expanduser(NER_DICT[lang])\n\n if not os.path.isfile(ner_file_path):\n corpus_importer = CorpusImporter(lang)\n corpus_importer.import_corpus('{}_models_cltk'.format(lang))\n\n\ndef tag_ner(lang, input_text, output_type=list):\n \"\"\"Run NER for chosen language.\n Choosing output_type=list, returns a list of tuples:\n \n >>> tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list)\n [('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')]\n \"\"\"\n\n _check_latest_data(lang)\n\n assert lang in NER_DICT.keys(), \\\n 'Invalid language. 
Choose from: {}'.format(', '.join(NER_DICT.keys()))\n types = [str, list]\n assert type(input_text) in types, 'Input must be: {}.'.format(', '.join(types))\n assert output_type in types, 'Output must be a {}.'.format(', '.join(types))\n\n if type(input_text) == str:\n punkt = PunktLanguageVars()\n tokens = punkt.word_tokenize(input_text)\n new_tokens = []\n for word in tokens:\n if word.endswith('.'):\n new_tokens.append(word[:-1])\n new_tokens.append('.')\n else:\n new_tokens.append(word)\n input_text = new_tokens\n\n ner_file_path = os.path.expanduser(NER_DICT[lang])\n with open(ner_file_path) as file_open:\n ner_str = file_open.read()\n ner_list = ner_str.split('\\n')\n\n ner_tuple_list = []\n for count, word_token in enumerate(input_text):\n match = False\n for ner_word in ner_list:\n # the replacer slows things down, but is necessary\n if word_token == ner_word:\n ner_tuple = (word_token, 'Entity')\n ner_tuple_list.append(ner_tuple)\n match = True\n break\n if not match:\n ner_tuple_list.append((word_token,))\n\n if output_type is str:\n string = ''\n for tup in ner_tuple_list:\n start_space = ' '\n final_space = ''\n # this is some mediocre string reconstitution\n # maybe not worth the effort\n if tup[0] in [',', '.', ';', ':', '?', '!']:\n start_space = ''\n if len(tup) == 2:\n string += start_space + tup[0] + '/' + tup[1] + final_space\n else:\n string += start_space + tup[0] + final_space\n return string\n\n return ner_tuple_list\n\n", "path": "cltk/tag/ner.py"}], "after_files": [{"content": "\"\"\"Named entity recognition (NER).\"\"\"\n\nfrom cltk.corpus.utils.importer import CorpusImporter\nfrom nltk.tokenize.punkt import PunktLanguageVars\nfrom cltk.tokenize.word import WordTokenizer\nimport os\nimport importlib.machinery\n\n__author__ = ['Natasha Voake <[email protected]>']\n__license__ = 'MIT License. See LICENSE.'\n\nNER_DICT = {'greek': '~/cltk_data/greek/model/greek_models_cltk/ner/proper_names.txt',\n 'latin': '~/cltk_data/latin/model/latin_models_cltk/ner/proper_names.txt'}\n\n\nclass NamedEntityReplacer(object):\n\n def __init__(self):\n\n self.entities = self._load_necessary_data()\n\n\n def _load_necessary_data(self):\n rel_path = os.path.join('~', 'cltk_data',\n 'french',\n 'text', 'french_data_cltk',\n 'named_entities_fr.py')\n path = os.path.expanduser(rel_path)\n # logger.info('Loading entries. This may take a minute.')\n loader = importlib.machinery.SourceFileLoader('entities', path)\n module = loader.load_module()\n entities = module.entities\n return entities\n\n \"\"\"tags named entities in a string and outputs a list of tuples in the following format:\n (name, \"entity\", kind_of_entity)\"\"\"\n\n def tag_ner_fr(self, input_text, output_type=list):\n\n entities = self.entities\n\n for entity in entities:\n (name, kind) = entity\n\n word_tokenizer = WordTokenizer('french')\n tokenized_text = word_tokenizer.tokenize(input_text)\n ner_tuple_list = []\n\n match = False\n for word in tokenized_text:\n for name, kind in entities:\n if word == name:\n named_things = ([(name, 'entity', kind)])\n ner_tuple_list.append(named_things)\n match = True\n break\n else:\n ner_tuple_list.append((word,))\n return ner_tuple_list\n\n\ndef _check_latest_data(lang):\n \"\"\"Check for presence of proper names dir, clone if not.\"\"\"\n\n assert lang in NER_DICT.keys(), \\\n 'Invalid language. 
Choose from: {}'.format(', '.join(NER_DICT.keys()))\n\n ner_file_path = os.path.expanduser(NER_DICT[lang])\n\n if not os.path.isfile(ner_file_path):\n corpus_importer = CorpusImporter(lang)\n corpus_importer.import_corpus('{}_models_cltk'.format(lang))\n\n\ndef tag_ner(lang, input_text, output_type=list):\n \"\"\"Run NER for chosen language.\n \"\"\"\n\n _check_latest_data(lang)\n\n assert lang in NER_DICT.keys(), \\\n 'Invalid language. Choose from: {}'.format(', '.join(NER_DICT.keys()))\n types = [str, list]\n assert type(input_text) in types, 'Input must be: {}.'.format(', '.join(types))\n assert output_type in types, 'Output must be a {}.'.format(', '.join(types))\n\n if type(input_text) == str:\n punkt = PunktLanguageVars()\n tokens = punkt.word_tokenize(input_text)\n new_tokens = []\n for word in tokens:\n if word.endswith('.'):\n new_tokens.append(word[:-1])\n new_tokens.append('.')\n else:\n new_tokens.append(word)\n input_text = new_tokens\n\n ner_file_path = os.path.expanduser(NER_DICT[lang])\n with open(ner_file_path) as file_open:\n ner_str = file_open.read()\n ner_list = ner_str.split('\\n')\n\n ner_tuple_list = []\n for count, word_token in enumerate(input_text):\n match = False\n for ner_word in ner_list:\n # the replacer slows things down, but is necessary\n if word_token == ner_word:\n ner_tuple = (word_token, 'Entity')\n ner_tuple_list.append(ner_tuple)\n match = True\n break\n if not match:\n ner_tuple_list.append((word_token,))\n\n if output_type is str:\n string = ''\n for tup in ner_tuple_list:\n start_space = ' '\n final_space = ''\n # this is some mediocre string reconstitution\n # maybe not worth the effort\n if tup[0] in [',', '.', ';', ':', '?', '!']:\n start_space = ''\n if len(tup) == 2:\n string += start_space + tup[0] + '/' + tup[1] + final_space\n else:\n string += start_space + tup[0] + final_space\n return string\n\n return ner_tuple_list\n", "path": "cltk/tag/ner.py"}]} | 1,786 | 189 |
gh_patches_debug_10497 | rasdani/github-patches | git_diff | lhotse-speech__lhotse-138 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: broken import from augmentations
Hi, I installed the latest pip version of lhotse, and I am getting an import error when using the lhotse CLI:
Setup:
```
python3.7.0
lhotse==0.2.0
```
To reproduce, try either of the following lines:
```
lhotse convert-kaldi <data-dir> 16000 <other-data-dir>
python -c "from lhotse.augmentation import available_wav_augmentations"
```
--- END ISSUE ---
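For context, the function itself exists; it is simply not re-exported from the package `__init__`. A minimal check, assuming lhotse 0.2.0:

```python
# Sketch of the symptom on lhotse 0.2.0
try:
    from lhotse.augmentation import available_wav_augmentations
except ImportError:
    # The symbol is defined in the submodule but never re-exported
    # by lhotse/augmentation/__init__.py on 0.2.0
    from lhotse.augmentation.wavaugment import available_wav_augmentations
print(available_wav_augmentations)
```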
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lhotse/augmentation/__init__.py`
Content:
```
1 from .common import AugmentFn
2 from .torchaudio import *
3 from .wavaugment import WavAugmenter, is_wav_augment_available
4
```
Path: `setup.py`
Content:
```
1 # coding=utf-8
2 import os
3 from pathlib import Path
4
5 from setuptools import find_packages, setup
6
7 project_root = Path(__file__).parent
8
9 install_requires = (project_root / 'requirements.txt').read_text().splitlines()
10 docs_require = (project_root / 'docs' / 'requirements.txt').read_text().splitlines()
11 tests_require = ['pytest==5.4.3', 'flake8==3.8.3', 'coverage==5.1', 'hypothesis==5.41.2']
12 dev_requires = docs_require + tests_require + ['jupyterlab', 'matplotlib', 'isort']
13
14 if os.environ.get('READTHEDOCS', False):
15 # When building documentation, omit torchaudio installation and mock it instead.
16 # This works around the inability to install libsoundfile1 in read-the-docs env,
17 # which caused the documentation builds to silently crash.
18 install_requires = [req for req in install_requires if not req.startswith('torchaudio')]
19
20 setup(
21 name='lhotse',
22 version='0.2.0',
23 python_requires='>=3.7.0',
24 description='Data preparation for speech processing models training.',
25 author='The Lhotse Development Team',
26 author_email="[email protected]",
27 long_description=(project_root / 'README.md').read_text(),
28 long_description_content_type="text/markdown",
29 license='Apache-2.0 License',
30 packages=find_packages(),
31 # The line below makes every script in the list an executable that's inserted in PATH
32 # as long as the virtualenv/conda env is active; they can be used like any other shell program
33 scripts=['lhotse/bin/lhotse'],
34 install_requires=install_requires,
35 extras_require={
36 'docs': docs_require,
37 'tests': tests_require,
38 'dev': docs_require + tests_require
39 },
40 classifiers=[
41 "Development Status :: 3 - Alpha",
42 "Programming Language :: Python :: 3.7",
43 "Programming Language :: Python :: 3.8",
44 "Intended Audience :: Science/Research",
45 "Operating System :: POSIX :: Linux",
46 "Operating System :: MacOS :: MacOS X",
47 "License :: OSI Approved :: Apache Software License",
48 "Topic :: Multimedia :: Sound/Audio :: Speech",
49 "Topic :: Scientific/Engineering :: Artificial Intelligence",
50 "Topic :: Software Development :: Libraries :: Python Modules",
51 "Typing :: Typed"
52 ],
53 )
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lhotse/augmentation/__init__.py b/lhotse/augmentation/__init__.py
--- a/lhotse/augmentation/__init__.py
+++ b/lhotse/augmentation/__init__.py
@@ -1,3 +1,3 @@
from .common import AugmentFn
from .torchaudio import *
-from .wavaugment import WavAugmenter, is_wav_augment_available
+from .wavaugment import WavAugmenter, is_wav_augment_available, available_wav_augmentations
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,7 @@
setup(
name='lhotse',
- version='0.2.0',
+ version='0.2.1',
python_requires='>=3.7.0',
description='Data preparation for speech processing models training.',
author='The Lhotse Development Team',
| {"golden_diff": "diff --git a/lhotse/augmentation/__init__.py b/lhotse/augmentation/__init__.py\n--- a/lhotse/augmentation/__init__.py\n+++ b/lhotse/augmentation/__init__.py\n@@ -1,3 +1,3 @@\n from .common import AugmentFn\n from .torchaudio import *\n-from .wavaugment import WavAugmenter, is_wav_augment_available\n+from .wavaugment import WavAugmenter, is_wav_augment_available, available_wav_augmentations\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -19,7 +19,7 @@\n \n setup(\n name='lhotse',\n- version='0.2.0',\n+ version='0.2.1',\n python_requires='>=3.7.0',\n description='Data preparation for speech processing models training.',\n author='The Lhotse Development Team',\n", "issue": "Bug: broken import from augmentations\nHi I installed the latest pip version of lhotse and I am getting an import error when using the lhotse CLI:\r\n\r\nSetup:\r\n```\r\npython3.7.0 \r\nlhotse==0.2.0\r\n```\r\n\r\nTo reproduce, try either from the following lines:\r\n```\r\nlhotse convert-kaldi <data-dir> 16000 <other-data-dir>\r\npython -c \"from lhotse.augmentation import available_wav_augmentations\"\r\n```\n", "before_files": [{"content": "from .common import AugmentFn\nfrom .torchaudio import *\nfrom .wavaugment import WavAugmenter, is_wav_augment_available\n", "path": "lhotse/augmentation/__init__.py"}, {"content": "# coding=utf-8\nimport os\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup\n\nproject_root = Path(__file__).parent\n\ninstall_requires = (project_root / 'requirements.txt').read_text().splitlines()\ndocs_require = (project_root / 'docs' / 'requirements.txt').read_text().splitlines()\ntests_require = ['pytest==5.4.3', 'flake8==3.8.3', 'coverage==5.1', 'hypothesis==5.41.2']\ndev_requires = docs_require + tests_require + ['jupyterlab', 'matplotlib', 'isort']\n\nif os.environ.get('READTHEDOCS', False):\n # When building documentation, omit torchaudio installation and mock it instead.\n # This works around the inability to install libsoundfile1 in read-the-docs env,\n # which caused the documentation builds to silently crash.\n install_requires = [req for req in install_requires if not req.startswith('torchaudio')]\n\nsetup(\n name='lhotse',\n version='0.2.0',\n python_requires='>=3.7.0',\n description='Data preparation for speech processing models training.',\n author='The Lhotse Development Team',\n author_email=\"[email protected]\",\n long_description=(project_root / 'README.md').read_text(),\n long_description_content_type=\"text/markdown\",\n license='Apache-2.0 License',\n packages=find_packages(),\n # The line below makes every script in the list an executable that's inserted in PATH\n # as long as the virtualenv/conda env is active; they can be used like any other shell program\n scripts=['lhotse/bin/lhotse'],\n install_requires=install_requires,\n extras_require={\n 'docs': docs_require,\n 'tests': tests_require,\n 'dev': docs_require + tests_require\n },\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Multimedia :: Sound/Audio :: Speech\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Typing :: Typed\"\n ],\n)\n", "path": "setup.py"}], "after_files": 
[{"content": "from .common import AugmentFn\nfrom .torchaudio import *\nfrom .wavaugment import WavAugmenter, is_wav_augment_available, available_wav_augmentations\n", "path": "lhotse/augmentation/__init__.py"}, {"content": "# coding=utf-8\nimport os\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup\n\nproject_root = Path(__file__).parent\n\ninstall_requires = (project_root / 'requirements.txt').read_text().splitlines()\ndocs_require = (project_root / 'docs' / 'requirements.txt').read_text().splitlines()\ntests_require = ['pytest==5.4.3', 'flake8==3.8.3', 'coverage==5.1', 'hypothesis==5.41.2']\ndev_requires = docs_require + tests_require + ['jupyterlab', 'matplotlib', 'isort']\n\nif os.environ.get('READTHEDOCS', False):\n # When building documentation, omit torchaudio installation and mock it instead.\n # This works around the inability to install libsoundfile1 in read-the-docs env,\n # which caused the documentation builds to silently crash.\n install_requires = [req for req in install_requires if not req.startswith('torchaudio')]\n\nsetup(\n name='lhotse',\n version='0.2.1',\n python_requires='>=3.7.0',\n description='Data preparation for speech processing models training.',\n author='The Lhotse Development Team',\n author_email=\"[email protected]\",\n long_description=(project_root / 'README.md').read_text(),\n long_description_content_type=\"text/markdown\",\n license='Apache-2.0 License',\n packages=find_packages(),\n # The line below makes every script in the list an executable that's inserted in PATH\n # as long as the virtualenv/conda env is active; they can be used like any other shell program\n scripts=['lhotse/bin/lhotse'],\n install_requires=install_requires,\n extras_require={\n 'docs': docs_require,\n 'tests': tests_require,\n 'dev': docs_require + tests_require\n },\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Multimedia :: Sound/Audio :: Speech\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Typing :: Typed\"\n ],\n)\n", "path": "setup.py"}]} | 1,057 | 219 |
gh_patches_debug_15590 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-3688 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update package metadata in PyPI
--- END ISSUE ---
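Concretely, this means filling in the setuptools fields that PyPI renders on a project page. A sketch of the `project_urls` mapping (URLs illustrative; the homepage matches the existing `url` field):

```python
from setuptools import setup

setup(
    name='Products.CMFPlone',
    # ... existing arguments ...
    project_urls={
        # Rendered as sidebar links on the PyPI project page
        "Homepage": "https://plone.org",
        "Documentation": "https://docs.plone.org",
        "Source": "https://github.com/plone/Products.CMFPlone",
    },
)
```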
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import find_packages
2 from setuptools import setup
3
4
5 version = '6.0.0rc2.dev0'
6
7
8 setup(
9 name='Products.CMFPlone',
10 version=version,
11 description="The Plone Content Management System (core)",
12 long_description=open("README.rst").read() + "\n" +
13 open("CHANGES.rst").read(),
14 classifiers=[
15 "Development Status :: 5 - Production/Stable",
16 "Environment :: Web Environment",
17 "Framework :: Plone",
18 "Framework :: Plone :: 6.0",
19 "Framework :: Plone :: Core",
20 "Framework :: Zope :: 5",
21 "License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
22 "Operating System :: OS Independent",
23 "Programming Language :: Python",
24 "Programming Language :: Python :: 3.8",
25 "Programming Language :: Python :: 3.9",
26 "Programming Language :: Python :: 3.10",
27 "Programming Language :: Python :: 3.11",
28 ],
29 python_requires='>=3.8',
30 keywords='Plone CMF Python Zope CMS Webapplication',
31 author='Plone Foundation',
32 author_email='[email protected]',
33 url='https://plone.org',
34 license='GPL version 2',
35 packages=find_packages(),
36 namespace_packages=['Products'],
37 include_package_data=True,
38 zip_safe=False,
39 install_requires=[
40 'borg.localrole',
41 'five.customerize',
42 'lxml',
43 'plone.api >= 1.4.4',
44 'plone.app.content',
45 'plone.app.contentlisting',
46 'plone.app.contentmenu >= 2.0.1',
47 'plone.app.contentrules',
48 'plone.app.contenttypes',
49 'plone.app.customerize',
50 'plone.app.dexterity',
51 'plone.app.discussion',
52 'plone.app.i18n',
53 'plone.app.layout >= 2.5.15',
54 'plone.app.linkintegrity >=1.0.3',
55 'plone.app.locales',
56 'plone.app.multilingual',
57 'plone.app.portlets',
58 'plone.app.redirector',
59 'plone.app.registry',
60 'plone.app.theming',
61 'plone.app.users',
62 'plone.app.uuid',
63 'plone.app.viewletmanager',
64 'plone.app.vocabularies',
65 'plone.app.workflow',
66 'plone.base',
67 'plone.browserlayer >= 2.1.5',
68 'plone.contentrules',
69 'plone.folder',
70 'plone.i18n >= 4.0.5',
71 'plone.indexer',
72 'plone.intelligenttext',
73 'plone.locking',
74 'plone.memoize',
75 'plone.outputfilters',
76 'plone.portlet.collection',
77 'plone.portlet.static',
78 'plone.portlets',
79 'plone.protect >= 3.0.0',
80 'plone.resource',
81 'plone.schema',
82 'plone.session',
83 'plone.staticresources',
84 'plone.theme',
85 'plonetheme.barceloneta',
86 'Products.CMFEditions',
87 'Products.DCWorkflow',
88 'Products.ExtendedPathIndex',
89 'Products.isurlinportal',
90 'Products.MimetypesRegistry',
91 'Products.PlonePAS',
92 'Products.PortalTransforms',
93 'Products.SiteErrorLog',
94 'Products.statusmessages',
95 'setuptools>=36.2',
96 'plone.autoinclude',
97 'webresource>=1.1',
98 'Zope[wsgi] >= 5.0',
99 'zope.app.locales >= 3.6.0',
100 'zope.cachedescriptors',
101 'zope.deferredimport',
102 'zope.deprecation',
103 'zope.dottedname',
104 'zope.i18n',
105 'zope.i18nmessageid',
106 'zope.structuredtext',
107 ],
108 extras_require={
109 'test': [
110 'lxml',
111 'mock',
112 'plone.app.robotframework>=1.0',
113 'robotframework-debuglibrary',
114 'plone.app.testing',
115 'zope.globalrequest',
116 'zope.testing',
117 'gunicorn',
118 ]
119 },
120 )
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -32,6 +32,19 @@
author_email='[email protected]',
url='https://plone.org',
license='GPL version 2',
+ project_urls={
+ "Homepage": "https://plone.org",
+ "Documentation": "https://docs.plone.org",
+ "PyPI": "https://pypi.python.org/pypi/Products.CMFPlone",
+ "Source": "https://github.com/plone/Products.CMFPlone",
+ "Issues": "https://github.com/plone/plone.org/Products.CMFPlone",
+ "Forum": "https://community.plone.org/",
+ "Chat": "https://discord.gg/zFY3EBbjaj",
+ "Mastodon": "https://plone.social/@plone",
+ "Twitter": "https://twitter.com/plone",
+ "Videos": "https://youtube.com/@plonecms",
+ "Sponsor": "https://github.com/sponsors/plone",
+ },
packages=find_packages(),
namespace_packages=['Products'],
include_package_data=True,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -32,6 +32,19 @@\n author_email='[email protected]',\n url='https://plone.org',\n license='GPL version 2',\n+ project_urls={\n+ \"Homepage\": \"https://plone.org\",\n+ \"Documentation\": \"https://docs.plone.org\",\n+ \"PyPI\": \"https://pypi.python.org/pypi/Products.CMFPlone\",\n+ \"Source\": \"https://github.com/plone/Products.CMFPlone\",\n+ \"Issues\": \"https://github.com/plone/plone.org/Products.CMFPlone\",\n+ \"Forum\": \"https://community.plone.org/\",\n+ \"Chat\": \"https://discord.gg/zFY3EBbjaj\",\n+ \"Mastodon\": \"https://plone.social/@plone\",\n+ \"Twitter\": \"https://twitter.com/plone\",\n+ \"Videos\": \"https://youtube.com/@plonecms\",\n+ \"Sponsor\": \"https://github.com/sponsors/plone\",\n+ },\n packages=find_packages(),\n namespace_packages=['Products'],\n include_package_data=True,\n", "issue": "Update package metadata in PyPi\n\n", "before_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nversion = '6.0.0rc2.dev0'\n\n\nsetup(\n name='Products.CMFPlone',\n version=version,\n description=\"The Plone Content Management System (core)\",\n long_description=open(\"README.rst\").read() + \"\\n\" +\n open(\"CHANGES.rst\").read(),\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Plone\",\n \"Framework :: Plone :: 6.0\",\n \"Framework :: Plone :: Core\",\n \"Framework :: Zope :: 5\",\n \"License :: OSI Approved :: GNU General Public License v2 (GPLv2)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n python_requires='>=3.8',\n keywords='Plone CMF Python Zope CMS Webapplication',\n author='Plone Foundation',\n author_email='[email protected]',\n url='https://plone.org',\n license='GPL version 2',\n packages=find_packages(),\n namespace_packages=['Products'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'borg.localrole',\n 'five.customerize',\n 'lxml',\n 'plone.api >= 1.4.4',\n 'plone.app.content',\n 'plone.app.contentlisting',\n 'plone.app.contentmenu >= 2.0.1',\n 'plone.app.contentrules',\n 'plone.app.contenttypes',\n 'plone.app.customerize',\n 'plone.app.dexterity',\n 'plone.app.discussion',\n 'plone.app.i18n',\n 'plone.app.layout >= 2.5.15',\n 'plone.app.linkintegrity >=1.0.3',\n 'plone.app.locales',\n 'plone.app.multilingual',\n 'plone.app.portlets',\n 'plone.app.redirector',\n 'plone.app.registry',\n 'plone.app.theming',\n 'plone.app.users',\n 'plone.app.uuid',\n 'plone.app.viewletmanager',\n 'plone.app.vocabularies',\n 'plone.app.workflow',\n 'plone.base',\n 'plone.browserlayer >= 2.1.5',\n 'plone.contentrules',\n 'plone.folder',\n 'plone.i18n >= 4.0.5',\n 'plone.indexer',\n 'plone.intelligenttext',\n 'plone.locking',\n 'plone.memoize',\n 'plone.outputfilters',\n 'plone.portlet.collection',\n 'plone.portlet.static',\n 'plone.portlets',\n 'plone.protect >= 3.0.0',\n 'plone.resource',\n 'plone.schema',\n 'plone.session',\n 'plone.staticresources',\n 'plone.theme',\n 'plonetheme.barceloneta',\n 'Products.CMFEditions',\n 'Products.DCWorkflow',\n 'Products.ExtendedPathIndex',\n 'Products.isurlinportal',\n 'Products.MimetypesRegistry',\n 'Products.PlonePAS',\n 'Products.PortalTransforms',\n 'Products.SiteErrorLog',\n 
'Products.statusmessages',\n 'setuptools>=36.2',\n 'plone.autoinclude',\n 'webresource>=1.1',\n 'Zope[wsgi] >= 5.0',\n 'zope.app.locales >= 3.6.0',\n 'zope.cachedescriptors',\n 'zope.deferredimport',\n 'zope.deprecation',\n 'zope.dottedname',\n 'zope.i18n',\n 'zope.i18nmessageid',\n 'zope.structuredtext',\n ],\n extras_require={\n 'test': [\n 'lxml',\n 'mock',\n 'plone.app.robotframework>=1.0',\n 'robotframework-debuglibrary',\n 'plone.app.testing',\n 'zope.globalrequest',\n 'zope.testing',\n 'gunicorn',\n ]\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nversion = '6.0.0rc2.dev0'\n\n\nsetup(\n name='Products.CMFPlone',\n version=version,\n description=\"The Plone Content Management System (core)\",\n long_description=open(\"README.rst\").read() + \"\\n\" +\n open(\"CHANGES.rst\").read(),\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Plone\",\n \"Framework :: Plone :: 6.0\",\n \"Framework :: Plone :: Core\",\n \"Framework :: Zope :: 5\",\n \"License :: OSI Approved :: GNU General Public License v2 (GPLv2)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n python_requires='>=3.8',\n keywords='Plone CMF Python Zope CMS Webapplication',\n author='Plone Foundation',\n author_email='[email protected]',\n url='https://plone.org',\n license='GPL version 2',\n project_urls={\n \"Homepage\": \"https://plone.org\",\n \"Documentation\": \"https://docs.plone.org\",\n \"PyPI\": \"https://pypi.python.org/pypi/Products.CMFPlone\",\n \"Source\": \"https://github.com/plone/Products.CMFPlone\",\n \"Issues\": \"https://github.com/plone/plone.org/Products.CMFPlone\",\n \"Forum\": \"https://community.plone.org/\",\n \"Chat\": \"https://discord.gg/zFY3EBbjaj\",\n \"Mastodon\": \"https://plone.social/@plone\",\n \"Twitter\": \"https://twitter.com/plone\",\n \"Videos\": \"https://youtube.com/@plonecms\",\n \"Sponsor\": \"https://github.com/sponsors/plone\",\n },\n packages=find_packages(),\n namespace_packages=['Products'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'borg.localrole',\n 'five.customerize',\n 'lxml',\n 'plone.api >= 1.4.4',\n 'plone.app.content',\n 'plone.app.contentlisting',\n 'plone.app.contentmenu >= 2.0.1',\n 'plone.app.contentrules',\n 'plone.app.contenttypes',\n 'plone.app.customerize',\n 'plone.app.dexterity',\n 'plone.app.discussion',\n 'plone.app.i18n',\n 'plone.app.layout >= 2.5.15',\n 'plone.app.linkintegrity >=1.0.3',\n 'plone.app.locales',\n 'plone.app.multilingual',\n 'plone.app.portlets',\n 'plone.app.redirector',\n 'plone.app.registry',\n 'plone.app.theming',\n 'plone.app.users',\n 'plone.app.uuid',\n 'plone.app.viewletmanager',\n 'plone.app.vocabularies',\n 'plone.app.workflow',\n 'plone.base',\n 'plone.browserlayer >= 2.1.5',\n 'plone.contentrules',\n 'plone.folder',\n 'plone.i18n >= 4.0.5',\n 'plone.indexer',\n 'plone.intelligenttext',\n 'plone.locking',\n 'plone.memoize',\n 'plone.outputfilters',\n 'plone.portlet.collection',\n 'plone.portlet.static',\n 'plone.portlets',\n 'plone.protect >= 3.0.0',\n 'plone.resource',\n 'plone.schema',\n 'plone.session',\n 'plone.staticresources',\n 'plone.theme',\n 'plonetheme.barceloneta',\n 'Products.CMFEditions',\n 
'Products.DCWorkflow',\n 'Products.ExtendedPathIndex',\n 'Products.isurlinportal',\n 'Products.MimetypesRegistry',\n 'Products.PlonePAS',\n 'Products.PortalTransforms',\n 'Products.SiteErrorLog',\n 'Products.statusmessages',\n 'setuptools>=36.2',\n 'plone.autoinclude',\n 'webresource>=1.1',\n 'Zope[wsgi] >= 5.0',\n 'zope.app.locales >= 3.6.0',\n 'zope.cachedescriptors',\n 'zope.deferredimport',\n 'zope.deprecation',\n 'zope.dottedname',\n 'zope.i18n',\n 'zope.i18nmessageid',\n 'zope.structuredtext',\n ],\n extras_require={\n 'test': [\n 'lxml',\n 'mock',\n 'plone.app.robotframework>=1.0',\n 'robotframework-debuglibrary',\n 'plone.app.testing',\n 'zope.globalrequest',\n 'zope.testing',\n 'gunicorn',\n ]\n },\n)\n", "path": "setup.py"}]} | 1,474 | 268 |
gh_patches_debug_42382 | rasdani/github-patches | git_diff | lutris__lutris-2973 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add option to turn columns on/off in List View
When right-clicking the table header bar in List View, you would expect to get a menu for turning columns on/off, but instead you just select the first game in the list.
--- END ISSUE ---
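In GTK, header clicks can be intercepted on the column's header button rather than on the tree view itself. A minimal sketch of the expected behavior (assumes `column` is a `Gtk.TreeViewColumn` and `menu` is a `Gtk.Menu` of column toggles built elsewhere):

```python
from gi.repository import Gtk  # GTK 3, as used by Lutris

def on_header_button_pressed(button, event):
    if event.button == 3:  # right mouse button
        menu.popup_at_pointer(None)
        return True  # swallow the click so no row gets selected
    return False

column.get_button().connect("button-press-event", on_header_button_pressed)
```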
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/gui/views/list.py`
Content:
```
1 """TreeView based game list"""
2 from gettext import gettext as _
3
4 # Third Party Libraries
5 # pylint: disable=no-member
6 from gi.repository import Gtk, Pango
7
8 # Lutris Modules
9 from lutris import settings
10 from lutris.gui.views import (
11 COL_ICON, COL_INSTALLED_AT, COL_INSTALLED_AT_TEXT, COL_LASTPLAYED, COL_LASTPLAYED_TEXT, COL_NAME, COL_PLATFORM,
12 COL_PLAYTIME, COL_PLAYTIME_TEXT, COL_RUNNER_HUMAN_NAME, COL_YEAR, COLUMN_NAMES
13 )
14 from lutris.gui.views.base import GameView
15 from lutris.gui.views.store import sort_func
16
17
18 class GameListView(Gtk.TreeView, GameView):
19
20 """Show the main list of games."""
21
22 __gsignals__ = GameView.__gsignals__
23
24 def __init__(self, store):
25 self.game_store = store
26 self.model = self.game_store.modelsort
27 super().__init__(self.model)
28 self.set_rules_hint(True)
29
30 # Icon column
31 image_cell = Gtk.CellRendererPixbuf()
32 column = Gtk.TreeViewColumn("", image_cell, pixbuf=COL_ICON)
33 column.set_reorderable(True)
34 column.set_sort_indicator(False)
35 self.append_column(column)
36
37 # Text columns
38 default_text_cell = self.set_text_cell()
39 name_cell = self.set_text_cell()
40 name_cell.set_padding(5, 0)
41
42 self.set_column(name_cell, _("Name"), COL_NAME, 200)
43 self.set_column(default_text_cell, _("Year"), COL_YEAR, 60)
44 self.set_column(default_text_cell, _("Runner"), COL_RUNNER_HUMAN_NAME, 120)
45 self.set_column(default_text_cell, _("Platform"), COL_PLATFORM, 120)
46 self.set_column(default_text_cell, _("Last Played"), COL_LASTPLAYED_TEXT, 120)
47 self.set_sort_with_column(COL_LASTPLAYED_TEXT, COL_LASTPLAYED)
48 self.set_column(default_text_cell, _("Installed At"), COL_INSTALLED_AT_TEXT, 120)
49 self.set_sort_with_column(COL_INSTALLED_AT_TEXT, COL_INSTALLED_AT)
50 self.set_column(default_text_cell, _("Play Time"), COL_PLAYTIME_TEXT, 100)
51 self.set_sort_with_column(COL_PLAYTIME_TEXT, COL_PLAYTIME)
52
53 self.get_selection().set_mode(Gtk.SelectionMode.SINGLE)
54
55 self.connect_signals()
56 self.connect("row-activated", self.on_row_activated)
57 self.get_selection().connect("changed", self.on_cursor_changed)
58
59 @staticmethod
60 def set_text_cell():
61 text_cell = Gtk.CellRendererText()
62 text_cell.set_padding(10, 0)
63 text_cell.set_property("ellipsize", Pango.EllipsizeMode.END)
64 return text_cell
65
66 def set_column(self, cell, header, column_id, default_width, sort_id=None):
67 column = Gtk.TreeViewColumn(header, cell, markup=column_id)
68 column.set_sort_indicator(True)
69 column.set_sort_column_id(column_id if sort_id is None else sort_id)
70 self.set_column_sort(column_id if sort_id is None else sort_id)
71 column.set_resizable(True)
72 column.set_reorderable(True)
73 width = settings.read_setting("%s_column_width" % COLUMN_NAMES[column_id], "list view")
74 column.set_fixed_width(int(width) if width else default_width)
75 self.append_column(column)
76 column.connect("notify::width", self.on_column_width_changed)
77 return column
78
79 def set_column_sort(self, col):
80 """Sort a column and fallback to sorting by name and runner."""
81 self.model.set_sort_func(col, sort_func, col)
82
83 def set_sort_with_column(self, col, sort_col):
84 """Sort a column by using another column's data"""
85 self.model.set_sort_func(col, sort_func, sort_col)
86
87 def get_selected_item(self):
88 """Return the currently selected game's id."""
89 selection = self.get_selection()
90 if not selection:
91 return None
92 _model, select_iter = selection.get_selected()
93 if select_iter:
94 return select_iter
95
96 def select(self):
97 self.set_cursor(self.current_path[0])
98
99 def set_selected_game(self, game_id):
100 row = self.game_store.get_row_by_id(game_id, filtered=True)
101 if row:
102 self.set_cursor(row.path)
103
104 def on_row_activated(self, widget, line=None, column=None):
105 """Handles double clicks"""
106 selected_item = self.get_selected_item()
107 if selected_item:
108 selected_game = self.get_selected_game(selected_item)
109 else:
110 selected_game = None
111 self.emit("game-activated", selected_game)
112
113 def on_cursor_changed(self, widget, _line=None, _column=None):
114 selected_item = self.get_selected_item()
115 if selected_item:
116 self.selected_game = self.get_selected_game(selected_item)
117 else:
118 self.selected_game = None
119 self.emit("game-selected", self.selected_game)
120
121 @staticmethod
122 def on_column_width_changed(col, *args):
123 col_name = col.get_title()
124 if col_name:
125 settings.write_setting(
126 col_name.replace(" ", "") + "_column_width",
127 col.get_fixed_width(),
128 "list view",
129 )
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/gui/views/list.py b/lutris/gui/views/list.py
--- a/lutris/gui/views/list.py
+++ b/lutris/gui/views/list.py
@@ -39,7 +39,7 @@
name_cell = self.set_text_cell()
name_cell.set_padding(5, 0)
- self.set_column(name_cell, _("Name"), COL_NAME, 200)
+ self.set_column(name_cell, _("Name"), COL_NAME, 200, always_visible=True)
self.set_column(default_text_cell, _("Year"), COL_YEAR, 60)
self.set_column(default_text_cell, _("Runner"), COL_RUNNER_HUMAN_NAME, 120)
self.set_column(default_text_cell, _("Platform"), COL_PLATFORM, 120)
@@ -63,7 +63,7 @@
text_cell.set_property("ellipsize", Pango.EllipsizeMode.END)
return text_cell
- def set_column(self, cell, header, column_id, default_width, sort_id=None):
+ def set_column(self, cell, header, column_id, default_width, always_visible=False, sort_id=None):
column = Gtk.TreeViewColumn(header, cell, markup=column_id)
column.set_sort_indicator(True)
column.set_sort_column_id(column_id if sort_id is None else sort_id)
@@ -71,9 +71,12 @@
column.set_resizable(True)
column.set_reorderable(True)
width = settings.read_setting("%s_column_width" % COLUMN_NAMES[column_id], "list view")
+ is_visible = settings.read_setting("%s_visible" % COLUMN_NAMES[column_id], "list view")
column.set_fixed_width(int(width) if width else default_width)
+ column.set_visible(is_visible == "True" or always_visible if is_visible else True)
self.append_column(column)
column.connect("notify::width", self.on_column_width_changed)
+ column.get_button().connect('button-press-event', self.on_column_header_button_pressed)
return column
def set_column_sort(self, col):
@@ -101,6 +104,13 @@
if row:
self.set_cursor(row.path)
+ def on_column_header_button_pressed(self, button, event):
+ """Handles column header button press events"""
+ if event.button == 3:
+ menu = GameListColumnToggleMenu(self.get_columns())
+ menu.popup_at_pointer(None)
+ return True
+
def on_row_activated(self, widget, line=None, column=None):
"""Handles double clicks"""
selected_item = self.get_selected_item()
@@ -127,3 +137,37 @@
col.get_fixed_width(),
"list view",
)
+
+
+class GameListColumnToggleMenu(Gtk.Menu):
+
+ def __init__(self, columns):
+ super().__init__()
+ self.columns = columns
+ self.column_map = {}
+ self.create_menuitems()
+ self.show_all()
+
+ def create_menuitems(self):
+ for column in self.columns:
+ title = column.get_title()
+ if title == "":
+ continue
+ checkbox = Gtk.CheckMenuItem(title)
+ checkbox.set_active(column.get_visible())
+ if title == _("Name"):
+ checkbox.set_sensitive(False)
+ else:
+ checkbox.connect("toggled", self.on_toggle_column)
+ self.column_map[checkbox] = column
+ self.append(checkbox)
+
+ def on_toggle_column(self, check_menu_item):
+ column = self.column_map[check_menu_item]
+ is_visible = check_menu_item.get_active()
+ column.set_visible(is_visible)
+ settings.write_setting(
+ column.get_title().replace(" ", "") + "_visible",
+ str(is_visible),
+ "list view",
+ )
| {"golden_diff": "diff --git a/lutris/gui/views/list.py b/lutris/gui/views/list.py\n--- a/lutris/gui/views/list.py\n+++ b/lutris/gui/views/list.py\n@@ -39,7 +39,7 @@\n name_cell = self.set_text_cell()\n name_cell.set_padding(5, 0)\n \n- self.set_column(name_cell, _(\"Name\"), COL_NAME, 200)\n+ self.set_column(name_cell, _(\"Name\"), COL_NAME, 200, always_visible=True)\n self.set_column(default_text_cell, _(\"Year\"), COL_YEAR, 60)\n self.set_column(default_text_cell, _(\"Runner\"), COL_RUNNER_HUMAN_NAME, 120)\n self.set_column(default_text_cell, _(\"Platform\"), COL_PLATFORM, 120)\n@@ -63,7 +63,7 @@\n text_cell.set_property(\"ellipsize\", Pango.EllipsizeMode.END)\n return text_cell\n \n- def set_column(self, cell, header, column_id, default_width, sort_id=None):\n+ def set_column(self, cell, header, column_id, default_width, always_visible=False, sort_id=None):\n column = Gtk.TreeViewColumn(header, cell, markup=column_id)\n column.set_sort_indicator(True)\n column.set_sort_column_id(column_id if sort_id is None else sort_id)\n@@ -71,9 +71,12 @@\n column.set_resizable(True)\n column.set_reorderable(True)\n width = settings.read_setting(\"%s_column_width\" % COLUMN_NAMES[column_id], \"list view\")\n+ is_visible = settings.read_setting(\"%s_visible\" % COLUMN_NAMES[column_id], \"list view\")\n column.set_fixed_width(int(width) if width else default_width)\n+ column.set_visible(is_visible == \"True\" or always_visible if is_visible else True)\n self.append_column(column)\n column.connect(\"notify::width\", self.on_column_width_changed)\n+ column.get_button().connect('button-press-event', self.on_column_header_button_pressed)\n return column\n \n def set_column_sort(self, col):\n@@ -101,6 +104,13 @@\n if row:\n self.set_cursor(row.path)\n \n+ def on_column_header_button_pressed(self, button, event):\n+ \"\"\"Handles column header button press events\"\"\"\n+ if event.button == 3:\n+ menu = GameListColumnToggleMenu(self.get_columns())\n+ menu.popup_at_pointer(None)\n+ return True\n+\n def on_row_activated(self, widget, line=None, column=None):\n \"\"\"Handles double clicks\"\"\"\n selected_item = self.get_selected_item()\n@@ -127,3 +137,37 @@\n col.get_fixed_width(),\n \"list view\",\n )\n+\n+\n+class GameListColumnToggleMenu(Gtk.Menu):\n+\n+ def __init__(self, columns):\n+ super().__init__()\n+ self.columns = columns\n+ self.column_map = {}\n+ self.create_menuitems()\n+ self.show_all()\n+\n+ def create_menuitems(self):\n+ for column in self.columns:\n+ title = column.get_title()\n+ if title == \"\":\n+ continue\n+ checkbox = Gtk.CheckMenuItem(title)\n+ checkbox.set_active(column.get_visible())\n+ if title == _(\"Name\"):\n+ checkbox.set_sensitive(False)\n+ else:\n+ checkbox.connect(\"toggled\", self.on_toggle_column)\n+ self.column_map[checkbox] = column\n+ self.append(checkbox)\n+\n+ def on_toggle_column(self, check_menu_item):\n+ column = self.column_map[check_menu_item]\n+ is_visible = check_menu_item.get_active()\n+ column.set_visible(is_visible)\n+ settings.write_setting(\n+ column.get_title().replace(\" \", \"\") + \"_visible\",\n+ str(is_visible),\n+ \"list view\",\n+ )\n", "issue": "Add option to turn columns on/off in List View\nWhen right-clicking to table headbar in List View, you expect to get a menu for turning columns on/off, but you just select first game in the list.\n", "before_files": [{"content": "\"\"\"TreeView based game list\"\"\"\nfrom gettext import gettext as _\n\n# Third Party Libraries\n# pylint: disable=no-member\nfrom gi.repository import Gtk, Pango\n\n# Lutris 
Modules\nfrom lutris import settings\nfrom lutris.gui.views import (\n COL_ICON, COL_INSTALLED_AT, COL_INSTALLED_AT_TEXT, COL_LASTPLAYED, COL_LASTPLAYED_TEXT, COL_NAME, COL_PLATFORM,\n COL_PLAYTIME, COL_PLAYTIME_TEXT, COL_RUNNER_HUMAN_NAME, COL_YEAR, COLUMN_NAMES\n)\nfrom lutris.gui.views.base import GameView\nfrom lutris.gui.views.store import sort_func\n\n\nclass GameListView(Gtk.TreeView, GameView):\n\n \"\"\"Show the main list of games.\"\"\"\n\n __gsignals__ = GameView.__gsignals__\n\n def __init__(self, store):\n self.game_store = store\n self.model = self.game_store.modelsort\n super().__init__(self.model)\n self.set_rules_hint(True)\n\n # Icon column\n image_cell = Gtk.CellRendererPixbuf()\n column = Gtk.TreeViewColumn(\"\", image_cell, pixbuf=COL_ICON)\n column.set_reorderable(True)\n column.set_sort_indicator(False)\n self.append_column(column)\n\n # Text columns\n default_text_cell = self.set_text_cell()\n name_cell = self.set_text_cell()\n name_cell.set_padding(5, 0)\n\n self.set_column(name_cell, _(\"Name\"), COL_NAME, 200)\n self.set_column(default_text_cell, _(\"Year\"), COL_YEAR, 60)\n self.set_column(default_text_cell, _(\"Runner\"), COL_RUNNER_HUMAN_NAME, 120)\n self.set_column(default_text_cell, _(\"Platform\"), COL_PLATFORM, 120)\n self.set_column(default_text_cell, _(\"Last Played\"), COL_LASTPLAYED_TEXT, 120)\n self.set_sort_with_column(COL_LASTPLAYED_TEXT, COL_LASTPLAYED)\n self.set_column(default_text_cell, _(\"Installed At\"), COL_INSTALLED_AT_TEXT, 120)\n self.set_sort_with_column(COL_INSTALLED_AT_TEXT, COL_INSTALLED_AT)\n self.set_column(default_text_cell, _(\"Play Time\"), COL_PLAYTIME_TEXT, 100)\n self.set_sort_with_column(COL_PLAYTIME_TEXT, COL_PLAYTIME)\n\n self.get_selection().set_mode(Gtk.SelectionMode.SINGLE)\n\n self.connect_signals()\n self.connect(\"row-activated\", self.on_row_activated)\n self.get_selection().connect(\"changed\", self.on_cursor_changed)\n\n @staticmethod\n def set_text_cell():\n text_cell = Gtk.CellRendererText()\n text_cell.set_padding(10, 0)\n text_cell.set_property(\"ellipsize\", Pango.EllipsizeMode.END)\n return text_cell\n\n def set_column(self, cell, header, column_id, default_width, sort_id=None):\n column = Gtk.TreeViewColumn(header, cell, markup=column_id)\n column.set_sort_indicator(True)\n column.set_sort_column_id(column_id if sort_id is None else sort_id)\n self.set_column_sort(column_id if sort_id is None else sort_id)\n column.set_resizable(True)\n column.set_reorderable(True)\n width = settings.read_setting(\"%s_column_width\" % COLUMN_NAMES[column_id], \"list view\")\n column.set_fixed_width(int(width) if width else default_width)\n self.append_column(column)\n column.connect(\"notify::width\", self.on_column_width_changed)\n return column\n\n def set_column_sort(self, col):\n \"\"\"Sort a column and fallback to sorting by name and runner.\"\"\"\n self.model.set_sort_func(col, sort_func, col)\n\n def set_sort_with_column(self, col, sort_col):\n \"\"\"Sort a column by using another column's data\"\"\"\n self.model.set_sort_func(col, sort_func, sort_col)\n\n def get_selected_item(self):\n \"\"\"Return the currently selected game's id.\"\"\"\n selection = self.get_selection()\n if not selection:\n return None\n _model, select_iter = selection.get_selected()\n if select_iter:\n return select_iter\n\n def select(self):\n self.set_cursor(self.current_path[0])\n\n def set_selected_game(self, game_id):\n row = self.game_store.get_row_by_id(game_id, filtered=True)\n if row:\n self.set_cursor(row.path)\n\n def 
on_row_activated(self, widget, line=None, column=None):\n \"\"\"Handles double clicks\"\"\"\n selected_item = self.get_selected_item()\n if selected_item:\n selected_game = self.get_selected_game(selected_item)\n else:\n selected_game = None\n self.emit(\"game-activated\", selected_game)\n\n def on_cursor_changed(self, widget, _line=None, _column=None):\n selected_item = self.get_selected_item()\n if selected_item:\n self.selected_game = self.get_selected_game(selected_item)\n else:\n self.selected_game = None\n self.emit(\"game-selected\", self.selected_game)\n\n @staticmethod\n def on_column_width_changed(col, *args):\n col_name = col.get_title()\n if col_name:\n settings.write_setting(\n col_name.replace(\" \", \"\") + \"_column_width\",\n col.get_fixed_width(),\n \"list view\",\n )\n", "path": "lutris/gui/views/list.py"}], "after_files": [{"content": "\"\"\"TreeView based game list\"\"\"\nfrom gettext import gettext as _\n\n# Third Party Libraries\n# pylint: disable=no-member\nfrom gi.repository import Gtk, Pango\n\n# Lutris Modules\nfrom lutris import settings\nfrom lutris.gui.views import (\n COL_ICON, COL_INSTALLED_AT, COL_INSTALLED_AT_TEXT, COL_LASTPLAYED, COL_LASTPLAYED_TEXT, COL_NAME, COL_PLATFORM,\n COL_PLAYTIME, COL_PLAYTIME_TEXT, COL_RUNNER_HUMAN_NAME, COL_YEAR, COLUMN_NAMES\n)\nfrom lutris.gui.views.base import GameView\nfrom lutris.gui.views.store import sort_func\n\n\nclass GameListView(Gtk.TreeView, GameView):\n\n \"\"\"Show the main list of games.\"\"\"\n\n __gsignals__ = GameView.__gsignals__\n\n def __init__(self, store):\n self.game_store = store\n self.model = self.game_store.modelsort\n super().__init__(self.model)\n self.set_rules_hint(True)\n\n # Icon column\n image_cell = Gtk.CellRendererPixbuf()\n column = Gtk.TreeViewColumn(\"\", image_cell, pixbuf=COL_ICON)\n column.set_reorderable(True)\n column.set_sort_indicator(False)\n self.append_column(column)\n\n # Text columns\n default_text_cell = self.set_text_cell()\n name_cell = self.set_text_cell()\n name_cell.set_padding(5, 0)\n\n self.set_column(name_cell, _(\"Name\"), COL_NAME, 200, always_visible=True)\n self.set_column(default_text_cell, _(\"Year\"), COL_YEAR, 60)\n self.set_column(default_text_cell, _(\"Runner\"), COL_RUNNER_HUMAN_NAME, 120)\n self.set_column(default_text_cell, _(\"Platform\"), COL_PLATFORM, 120)\n self.set_column(default_text_cell, _(\"Last Played\"), COL_LASTPLAYED_TEXT, 120)\n self.set_sort_with_column(COL_LASTPLAYED_TEXT, COL_LASTPLAYED)\n self.set_column(default_text_cell, _(\"Installed At\"), COL_INSTALLED_AT_TEXT, 120)\n self.set_sort_with_column(COL_INSTALLED_AT_TEXT, COL_INSTALLED_AT)\n self.set_column(default_text_cell, _(\"Play Time\"), COL_PLAYTIME_TEXT, 100)\n self.set_sort_with_column(COL_PLAYTIME_TEXT, COL_PLAYTIME)\n\n self.get_selection().set_mode(Gtk.SelectionMode.SINGLE)\n\n self.connect_signals()\n self.connect(\"row-activated\", self.on_row_activated)\n self.get_selection().connect(\"changed\", self.on_cursor_changed)\n\n @staticmethod\n def set_text_cell():\n text_cell = Gtk.CellRendererText()\n text_cell.set_padding(10, 0)\n text_cell.set_property(\"ellipsize\", Pango.EllipsizeMode.END)\n return text_cell\n\n def set_column(self, cell, header, column_id, default_width, always_visible=False, sort_id=None):\n column = Gtk.TreeViewColumn(header, cell, markup=column_id)\n column.set_sort_indicator(True)\n column.set_sort_column_id(column_id if sort_id is None else sort_id)\n self.set_column_sort(column_id if sort_id is None else sort_id)\n column.set_resizable(True)\n 
column.set_reorderable(True)\n width = settings.read_setting(\"%s_column_width\" % COLUMN_NAMES[column_id], \"list view\")\n is_visible = settings.read_setting(\"%s_visible\" % COLUMN_NAMES[column_id], \"list view\")\n column.set_fixed_width(int(width) if width else default_width)\n column.set_visible(is_visible == \"True\" or always_visible if is_visible else True)\n self.append_column(column)\n column.connect(\"notify::width\", self.on_column_width_changed)\n column.get_button().connect('button-press-event', self.on_column_header_button_pressed)\n return column\n\n def set_column_sort(self, col):\n \"\"\"Sort a column and fallback to sorting by name and runner.\"\"\"\n self.model.set_sort_func(col, sort_func, col)\n\n def set_sort_with_column(self, col, sort_col):\n \"\"\"Sort a column by using another column's data\"\"\"\n self.model.set_sort_func(col, sort_func, sort_col)\n\n def get_selected_item(self):\n \"\"\"Return the currently selected game's id.\"\"\"\n selection = self.get_selection()\n if not selection:\n return None\n _model, select_iter = selection.get_selected()\n if select_iter:\n return select_iter\n\n def select(self):\n self.set_cursor(self.current_path[0])\n\n def set_selected_game(self, game_id):\n row = self.game_store.get_row_by_id(game_id, filtered=True)\n if row:\n self.set_cursor(row.path)\n\n def on_column_header_button_pressed(self, button, event):\n \"\"\"Handles column header button press events\"\"\"\n if event.button == 3:\n menu = GameListColumnToggleMenu(self.get_columns())\n menu.popup_at_pointer(None)\n return True\n\n def on_row_activated(self, widget, line=None, column=None):\n \"\"\"Handles double clicks\"\"\"\n selected_item = self.get_selected_item()\n if selected_item:\n selected_game = self.get_selected_game(selected_item)\n else:\n selected_game = None\n self.emit(\"game-activated\", selected_game)\n\n def on_cursor_changed(self, widget, _line=None, _column=None):\n selected_item = self.get_selected_item()\n if selected_item:\n self.selected_game = self.get_selected_game(selected_item)\n else:\n self.selected_game = None\n self.emit(\"game-selected\", self.selected_game)\n\n @staticmethod\n def on_column_width_changed(col, *args):\n col_name = col.get_title()\n if col_name:\n settings.write_setting(\n col_name.replace(\" \", \"\") + \"_column_width\",\n col.get_fixed_width(),\n \"list view\",\n )\n\n\nclass GameListColumnToggleMenu(Gtk.Menu):\n\n def __init__(self, columns):\n super().__init__()\n self.columns = columns\n self.column_map = {}\n self.create_menuitems()\n self.show_all()\n\n def create_menuitems(self):\n for column in self.columns:\n title = column.get_title()\n if title == \"\":\n continue\n checkbox = Gtk.CheckMenuItem(title)\n checkbox.set_active(column.get_visible())\n if title == _(\"Name\"):\n checkbox.set_sensitive(False)\n else:\n checkbox.connect(\"toggled\", self.on_toggle_column)\n self.column_map[checkbox] = column\n self.append(checkbox)\n\n def on_toggle_column(self, check_menu_item):\n column = self.column_map[check_menu_item]\n is_visible = check_menu_item.get_active()\n column.set_visible(is_visible)\n settings.write_setting(\n column.get_title().replace(\" \", \"\") + \"_visible\",\n str(is_visible),\n \"list view\",\n )\n", "path": "lutris/gui/views/list.py"}]} | 1,718 | 839 |
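The merged change works because GTK exposes each column's header widget through `Gtk.TreeViewColumn.get_button()`, so a `button-press-event` handler can claim the right click before the tree view turns it into a row selection. The following self-contained sketch shows the same pattern outside Lutris; the window, model and column titles are invented for illustration, and `popup_at_pointer` assumes GTK 3.22 or newer:

```
import gi

gi.require_version("Gtk", "3.0")
from gi.repository import Gtk


def on_header_press(button, event, treeview):
    if event.button != 3:  # GDK numbers the right mouse button as 3
        return False
    menu = Gtk.Menu()
    for column in treeview.get_columns():
        title = column.get_title()
        if not title:  # skip the unnamed icon column, as the patch does
            continue
        item = Gtk.CheckMenuItem(label=title)
        item.set_active(column.get_visible())
        # Toggling the menu item shows or hides the column in place.
        item.connect("toggled", lambda it, col=column: col.set_visible(it.get_active()))
        menu.append(item)
    menu.show_all()
    menu.popup_at_pointer(event)
    return True  # stop the click from also reaching the tree view


store = Gtk.ListStore(str, str)
store.append(["Doom", "1993"])
view = Gtk.TreeView(model=store)
for index, title in enumerate(["Name", "Year"]):
    column = Gtk.TreeViewColumn(title, Gtk.CellRendererText(), text=index)
    view.append_column(column)
    # The header button exists once the column is attached to the view.
    column.get_button().connect("button-press-event", on_header_press, view)

window = Gtk.Window(title="Column toggle sketch")
window.add(view)
window.connect("destroy", Gtk.main_quit)
window.show_all()
Gtk.main()
```

A real implementation would also persist the visibility flags, which is exactly what the patched `on_toggle_column` does through `settings.write_setting`.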
gh_patches_debug_4556 | rasdani/github-patches | git_diff | scrapy__scrapy-4599 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
KeyError in is_generator_with_return_value
```Python traceback
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks
result = g.send(result)
File "/app/python/lib/python3.8/site-packages/scrapy/core/downloader/middleware.py", line 42, in process_request
defer.returnValue((yield download_func(request=request, spider=spider)))
File "/usr/local/lib/python3.8/site-packages/twisted/internet/defer.py", line 1362, in returnValue
raise _DefGen_Return(val)
twisted.internet.defer._DefGen_Return: <200 https://www.example.com>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/app/python/lib/python3.8/site-packages/scrapy/utils/defer.py", line 55, in mustbe_deferred
result = f(*args, **kw)
File "/app/python/lib/python3.8/site-packages/scrapy/core/spidermw.py", line 60, in process_spider_input
return scrape_func(response, request, spider)
File "/app/python/lib/python3.8/site-packages/scrapy/core/scraper.py", line 148, in call_spider
warn_on_generator_with_return_value(spider, callback)
File "/app/python/lib/python3.8/site-packages/scrapy/utils/misc.py", line 202, in warn_on_generator_with_return_value
if is_generator_with_return_value(callable):
File "/app/python/lib/python3.8/site-packages/scrapy/utils/misc.py", line 180, in is_generator_with_return_value
return _generator_callbacks_cache[callable]
File "/app/python/lib/python3.8/site-packages/scrapy/utils/datatypes.py", line 281, in __getitem__
return super(LocalWeakReferencedCache, self).__getitem__(key)
File "/usr/local/lib/python3.8/weakref.py", line 383, in __getitem__
return self.data[ref(key)]
KeyError: <weakref at 0x7f06ff011720; to 'method' at 0x7f07042b5e00 (parse_foo)>
```
This is Scrapy 2.0.1. The problem happens only sometimes, but in different spiders in the same project.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/datatypes.py`
Content:
```
1 """
2 This module contains data types used by Scrapy which are not included in the
3 Python Standard Library.
4
5 This module must not depend on any module outside the Standard Library.
6 """
7
8 import collections
9 import weakref
10 from collections.abc import Mapping
11
12
13 class CaselessDict(dict):
14
15 __slots__ = ()
16
17 def __init__(self, seq=None):
18 super(CaselessDict, self).__init__()
19 if seq:
20 self.update(seq)
21
22 def __getitem__(self, key):
23 return dict.__getitem__(self, self.normkey(key))
24
25 def __setitem__(self, key, value):
26 dict.__setitem__(self, self.normkey(key), self.normvalue(value))
27
28 def __delitem__(self, key):
29 dict.__delitem__(self, self.normkey(key))
30
31 def __contains__(self, key):
32 return dict.__contains__(self, self.normkey(key))
33 has_key = __contains__
34
35 def __copy__(self):
36 return self.__class__(self)
37 copy = __copy__
38
39 def normkey(self, key):
40 """Method to normalize dictionary key access"""
41 return key.lower()
42
43 def normvalue(self, value):
44 """Method to normalize values prior to be setted"""
45 return value
46
47 def get(self, key, def_val=None):
48 return dict.get(self, self.normkey(key), self.normvalue(def_val))
49
50 def setdefault(self, key, def_val=None):
51 return dict.setdefault(self, self.normkey(key), self.normvalue(def_val))
52
53 def update(self, seq):
54 seq = seq.items() if isinstance(seq, Mapping) else seq
55 iseq = ((self.normkey(k), self.normvalue(v)) for k, v in seq)
56 super(CaselessDict, self).update(iseq)
57
58 @classmethod
59 def fromkeys(cls, keys, value=None):
60 return cls((k, value) for k in keys)
61
62 def pop(self, key, *args):
63 return dict.pop(self, self.normkey(key), *args)
64
65
66 class LocalCache(collections.OrderedDict):
67 """Dictionary with a finite number of keys.
68
69 Older items expires first.
70 """
71
72 def __init__(self, limit=None):
73 super(LocalCache, self).__init__()
74 self.limit = limit
75
76 def __setitem__(self, key, value):
77 if self.limit:
78 while len(self) >= self.limit:
79 self.popitem(last=False)
80 super(LocalCache, self).__setitem__(key, value)
81
82
83 class LocalWeakReferencedCache(weakref.WeakKeyDictionary):
84 """
85 A weakref.WeakKeyDictionary implementation that uses LocalCache as its
86 underlying data structure, making it ordered and capable of being size-limited.
87
88 Useful for memoization, while avoiding keeping received
89 arguments in memory only because of the cached references.
90
91 Note: like LocalCache and unlike weakref.WeakKeyDictionary,
92 it cannot be instantiated with an initial dictionary.
93 """
94
95 def __init__(self, limit=None):
96 super(LocalWeakReferencedCache, self).__init__()
97 self.data = LocalCache(limit=limit)
98
99 def __setitem__(self, key, value):
100 try:
101 super(LocalWeakReferencedCache, self).__setitem__(key, value)
102 except TypeError:
103 pass # key is not weak-referenceable, skip caching
104
105 def __getitem__(self, key):
106 try:
107 return super(LocalWeakReferencedCache, self).__getitem__(key)
108 except TypeError:
109 return None # key is not weak-referenceable, it's not cached
110
111
112 class SequenceExclude:
113 """Object to test if an item is NOT within some sequence."""
114
115 def __init__(self, seq):
116 self.seq = seq
117
118 def __contains__(self, item):
119 return item not in self.seq
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/utils/datatypes.py b/scrapy/utils/datatypes.py
--- a/scrapy/utils/datatypes.py
+++ b/scrapy/utils/datatypes.py
@@ -105,8 +105,8 @@
def __getitem__(self, key):
try:
return super(LocalWeakReferencedCache, self).__getitem__(key)
- except TypeError:
- return None # key is not weak-referenceable, it's not cached
+ except (TypeError, KeyError):
+ return None # key is either not weak-referenceable or not cached
class SequenceExclude:
| {"golden_diff": "diff --git a/scrapy/utils/datatypes.py b/scrapy/utils/datatypes.py\n--- a/scrapy/utils/datatypes.py\n+++ b/scrapy/utils/datatypes.py\n@@ -105,8 +105,8 @@\n def __getitem__(self, key):\n try:\n return super(LocalWeakReferencedCache, self).__getitem__(key)\n- except TypeError:\n- return None # key is not weak-referenceable, it's not cached\n+ except (TypeError, KeyError):\n+ return None # key is either not weak-referenceable or not cached\n \n \n class SequenceExclude:\n", "issue": "KeyError in is_generator_with_return_value\n\r\n```Python traceback\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/site-packages/twisted/internet/defer.py\", line 1418, in _inlineCallbacks\r\n result = g.send(result)\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/core/downloader/middleware.py\", line 42, in process_request\r\n defer.returnValue((yield download_func(request=request, spider=spider)))\r\n File \"/usr/local/lib/python3.8/site-packages/twisted/internet/defer.py\", line 1362, in returnValue\r\n raise _DefGen_Return(val)\r\ntwisted.internet.defer._DefGen_Return: <200 https://www.example.com>\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/utils/defer.py\", line 55, in mustbe_deferred\r\n result = f(*args, **kw)\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/core/spidermw.py\", line 60, in process_spider_input\r\n return scrape_func(response, request, spider)\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/core/scraper.py\", line 148, in call_spider\r\n warn_on_generator_with_return_value(spider, callback)\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/utils/misc.py\", line 202, in warn_on_generator_with_return_value\r\n if is_generator_with_return_value(callable):\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/utils/misc.py\", line 180, in is_generator_with_return_value\r\n return _generator_callbacks_cache[callable]\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/utils/datatypes.py\", line 281, in __getitem__\r\n return super(LocalWeakReferencedCache, self).__getitem__(key)\r\n File \"/usr/local/lib/python3.8/weakref.py\", line 383, in __getitem__\r\n return self.data[ref(key)]\r\nKeyError: <weakref at 0x7f06ff011720; to 'method' at 0x7f07042b5e00 (parse_foo)>\r\n```\r\n\r\nThis is Scrapy 2.0.1. 
The problem happens only sometimes, but in different spiders in the same project.\n", "before_files": [{"content": "\"\"\"\nThis module contains data types used by Scrapy which are not included in the\nPython Standard Library.\n\nThis module must not depend on any module outside the Standard Library.\n\"\"\"\n\nimport collections\nimport weakref\nfrom collections.abc import Mapping\n\n\nclass CaselessDict(dict):\n\n __slots__ = ()\n\n def __init__(self, seq=None):\n super(CaselessDict, self).__init__()\n if seq:\n self.update(seq)\n\n def __getitem__(self, key):\n return dict.__getitem__(self, self.normkey(key))\n\n def __setitem__(self, key, value):\n dict.__setitem__(self, self.normkey(key), self.normvalue(value))\n\n def __delitem__(self, key):\n dict.__delitem__(self, self.normkey(key))\n\n def __contains__(self, key):\n return dict.__contains__(self, self.normkey(key))\n has_key = __contains__\n\n def __copy__(self):\n return self.__class__(self)\n copy = __copy__\n\n def normkey(self, key):\n \"\"\"Method to normalize dictionary key access\"\"\"\n return key.lower()\n\n def normvalue(self, value):\n \"\"\"Method to normalize values prior to be setted\"\"\"\n return value\n\n def get(self, key, def_val=None):\n return dict.get(self, self.normkey(key), self.normvalue(def_val))\n\n def setdefault(self, key, def_val=None):\n return dict.setdefault(self, self.normkey(key), self.normvalue(def_val))\n\n def update(self, seq):\n seq = seq.items() if isinstance(seq, Mapping) else seq\n iseq = ((self.normkey(k), self.normvalue(v)) for k, v in seq)\n super(CaselessDict, self).update(iseq)\n\n @classmethod\n def fromkeys(cls, keys, value=None):\n return cls((k, value) for k in keys)\n\n def pop(self, key, *args):\n return dict.pop(self, self.normkey(key), *args)\n\n\nclass LocalCache(collections.OrderedDict):\n \"\"\"Dictionary with a finite number of keys.\n\n Older items expires first.\n \"\"\"\n\n def __init__(self, limit=None):\n super(LocalCache, self).__init__()\n self.limit = limit\n\n def __setitem__(self, key, value):\n if self.limit:\n while len(self) >= self.limit:\n self.popitem(last=False)\n super(LocalCache, self).__setitem__(key, value)\n\n\nclass LocalWeakReferencedCache(weakref.WeakKeyDictionary):\n \"\"\"\n A weakref.WeakKeyDictionary implementation that uses LocalCache as its\n underlying data structure, making it ordered and capable of being size-limited.\n\n Useful for memoization, while avoiding keeping received\n arguments in memory only because of the cached references.\n\n Note: like LocalCache and unlike weakref.WeakKeyDictionary,\n it cannot be instantiated with an initial dictionary.\n \"\"\"\n\n def __init__(self, limit=None):\n super(LocalWeakReferencedCache, self).__init__()\n self.data = LocalCache(limit=limit)\n\n def __setitem__(self, key, value):\n try:\n super(LocalWeakReferencedCache, self).__setitem__(key, value)\n except TypeError:\n pass # key is not weak-referenceable, skip caching\n\n def __getitem__(self, key):\n try:\n return super(LocalWeakReferencedCache, self).__getitem__(key)\n except TypeError:\n return None # key is not weak-referenceable, it's not cached\n\n\nclass SequenceExclude:\n \"\"\"Object to test if an item is NOT within some sequence.\"\"\"\n\n def __init__(self, seq):\n self.seq = seq\n\n def __contains__(self, item):\n return item not in self.seq\n", "path": "scrapy/utils/datatypes.py"}], "after_files": [{"content": "\"\"\"\nThis module contains data types used by Scrapy which are not included in the\nPython Standard Library.\n\nThis 
module must not depend on any module outside the Standard Library.\n\"\"\"\n\nimport collections\nimport weakref\nfrom collections.abc import Mapping\n\n\nclass CaselessDict(dict):\n\n __slots__ = ()\n\n def __init__(self, seq=None):\n super(CaselessDict, self).__init__()\n if seq:\n self.update(seq)\n\n def __getitem__(self, key):\n return dict.__getitem__(self, self.normkey(key))\n\n def __setitem__(self, key, value):\n dict.__setitem__(self, self.normkey(key), self.normvalue(value))\n\n def __delitem__(self, key):\n dict.__delitem__(self, self.normkey(key))\n\n def __contains__(self, key):\n return dict.__contains__(self, self.normkey(key))\n has_key = __contains__\n\n def __copy__(self):\n return self.__class__(self)\n copy = __copy__\n\n def normkey(self, key):\n \"\"\"Method to normalize dictionary key access\"\"\"\n return key.lower()\n\n def normvalue(self, value):\n \"\"\"Method to normalize values prior to be setted\"\"\"\n return value\n\n def get(self, key, def_val=None):\n return dict.get(self, self.normkey(key), self.normvalue(def_val))\n\n def setdefault(self, key, def_val=None):\n return dict.setdefault(self, self.normkey(key), self.normvalue(def_val))\n\n def update(self, seq):\n seq = seq.items() if isinstance(seq, Mapping) else seq\n iseq = ((self.normkey(k), self.normvalue(v)) for k, v in seq)\n super(CaselessDict, self).update(iseq)\n\n @classmethod\n def fromkeys(cls, keys, value=None):\n return cls((k, value) for k in keys)\n\n def pop(self, key, *args):\n return dict.pop(self, self.normkey(key), *args)\n\n\nclass LocalCache(collections.OrderedDict):\n \"\"\"Dictionary with a finite number of keys.\n\n Older items expires first.\n \"\"\"\n\n def __init__(self, limit=None):\n super(LocalCache, self).__init__()\n self.limit = limit\n\n def __setitem__(self, key, value):\n if self.limit:\n while len(self) >= self.limit:\n self.popitem(last=False)\n super(LocalCache, self).__setitem__(key, value)\n\n\nclass LocalWeakReferencedCache(weakref.WeakKeyDictionary):\n \"\"\"\n A weakref.WeakKeyDictionary implementation that uses LocalCache as its\n underlying data structure, making it ordered and capable of being size-limited.\n\n Useful for memoization, while avoiding keeping received\n arguments in memory only because of the cached references.\n\n Note: like LocalCache and unlike weakref.WeakKeyDictionary,\n it cannot be instantiated with an initial dictionary.\n \"\"\"\n\n def __init__(self, limit=None):\n super(LocalWeakReferencedCache, self).__init__()\n self.data = LocalCache(limit=limit)\n\n def __setitem__(self, key, value):\n try:\n super(LocalWeakReferencedCache, self).__setitem__(key, value)\n except TypeError:\n pass # key is not weak-referenceable, skip caching\n\n def __getitem__(self, key):\n try:\n return super(LocalWeakReferencedCache, self).__getitem__(key)\n except (TypeError, KeyError):\n return None # key is either not weak-referenceable or not cached\n\n\nclass SequenceExclude:\n \"\"\"Object to test if an item is NOT within some sequence.\"\"\"\n\n def __init__(self, seq):\n self.seq = seq\n\n def __contains__(self, item):\n return item not in self.seq\n", "path": "scrapy/utils/datatypes.py"}]} | 1,895 | 130 |
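The root cause is that a bound method such as `spider.parse_foo` is a fresh object on every attribute access, and the cache holds its keys only weakly, so the entry stored for one bound-method object can be collected before a later lookup arrives with a new, equal key; `WeakKeyDictionary.__getitem__` then raises a plain `KeyError`, which the old `except TypeError:` never caught. A minimal sketch of the failure mode and of the widened except clause (illustrative code, not Scrapy's):

```
import gc
import weakref


class Spider:
    def parse(self):
        """A callback, as a Scrapy spider would define it."""


cache = weakref.WeakKeyDictionary()
spider = Spider()

method = spider.parse        # a bound-method object, created on access
cache[method] = True
del method                   # drop the only strong reference to the key
gc.collect()                 # the weakly referenced key is collected

# `spider.parse` builds a *new* bound method each time; the entry for the
# old, equal key is already gone, so the lookup raises a bare KeyError:
try:
    cache[spider.parse]
except KeyError:
    print("KeyError, exactly as in the traceback")


def cached_lookup(key):
    # The patch widens the except clause so this degrades to a cache miss.
    try:
        return cache[key]
    except (TypeError, KeyError):  # unhashable key OR expired weak key
        return None


print(cached_lookup(spider.parse))  # None: the caller simply recomputes
```

This is consistent with the report that the problem happens only sometimes: the cached key has to expire at just the wrong moment.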
gh_patches_debug_11812 | rasdani/github-patches | git_diff | craiga__will-of-the-prophets-196 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Visiting /roll raises an error if no rolls exist
```
Environment:
Request Method: GET
Request URL: http://localhost:8000/roll/
Django Version: 2.2
Python Version: 3.7.3
Installed Applications:
['raven.contrib.django.raven_compat',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'sass_processor',
'widget_tweaks',
'tz_detect',
'debug_toolbar',
's3direct',
'bootstrap',
'will_of_the_prophets']
Installed Middleware:
('raven.contrib.django.middleware.DjangoRestFrameworkCompatMiddleware',
'raven.contrib.django.middleware.SentryMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'tz_detect.middleware.TimezoneMiddleware')
Traceback:
File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/exception.py" in inner
34. response = get_response(request)
File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/base.py" in _get_response
115. response = self.process_exception_by_middleware(e, request)
File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/base.py" in _get_response
113. response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/base.py" in view
71. return self.dispatch(request, *args, **kwargs)
File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/contrib/auth/mixins.py" in dispatch
52. return super().dispatch(request, *args, **kwargs)
File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/base.py" in dispatch
97. return handler(request, *args, **kwargs)
File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/edit.py" in get
168. return super().get(request, *args, **kwargs)
File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/edit.py" in get
133. return self.render_to_response(self.get_context_data())
File "/Users/craiga/will_of_the_prophets/will_of_the_prophets/views.py" in get_context_data
82. board=board.Board(now=last_roll.embargo),
Exception Type: AttributeError at /roll/
Exception Value: 'NoneType' object has no attribute 'embargo'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `will_of_the_prophets/views.py`
Content:
```
1 """Views."""
2
3 from django.conf import settings
4 from django.contrib.auth.mixins import LoginRequiredMixin
5 from django.shortcuts import render
6 from django.urls import reverse
7 from django.utils import timezone
8 from django.views.decorators.cache import cache_control
9 from django.views.decorators.clickjacking import xframe_options_exempt
10 from django.views.decorators.http import condition
11 from django.views.generic.edit import CreateView
12
13 from will_of_the_prophets import board, forms, models
14
15
16 def get_last_modified(request):
17 """Get board's last modified datetime."""
18 try:
19 return (
20 models.Roll.objects.filter(embargo__lte=timezone.now())
21 .latest("embargo")
22 .embargo
23 )
24 except models.Roll.DoesNotExist:
25 return None
26
27
28 @xframe_options_exempt
29 @condition(last_modified_func=get_last_modified)
30 @cache_control(max_age=3600)
31 def public_board(request):
32 """
33 Board for the public.
34
35 Does not take embargoed rolls into account.
36 """
37 response = render(
38 request,
39 "will_of_the_prophets/public_board.html",
40 {
41 "board": board.Board(),
42 "special_square_types": models.SpecialSquareType.objects.all(),
43 },
44 )
45
46 canonical_url = settings.PUBLIC_BOARD_CANONICAL_URL
47 if canonical_url:
48 response["Link"] = f'<{canonical_url}>; rel="canonical"'
49
50 return response
51
52
53 @xframe_options_exempt
54 @condition(last_modified_func=get_last_modified)
55 @cache_control(max_age=3600)
56 def roll_frequency(request):
57 """
58 Show roll frequency.
59 """
60 roll_count = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
61 for roll in board.Board().rolls:
62 roll_count[roll] += 1
63
64 return render(
65 request,
66 "will_of_the_prophets/roll_frequency.html",
67 {"roll_frequency": roll_count},
68 )
69
70
71 class RollView(LoginRequiredMixin, CreateView):
72 """View for rolling the die."""
73
74 form_class = forms.RollForm
75 template_name = "will_of_the_prophets/roll.html"
76
77 def get_context_data(self, **kwargs):
78 last_roll = models.Roll.objects.order_by("-embargo").first()
79 return super().get_context_data(
80 **kwargs,
81 last_roll=last_roll,
82 board=board.Board(now=last_roll.embargo),
83 special_square_types=models.SpecialSquareType.objects.all(),
84 )
85
86 def get_success_url(self):
87 return reverse("roll") + "#chula"
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/will_of_the_prophets/views.py b/will_of_the_prophets/views.py
--- a/will_of_the_prophets/views.py
+++ b/will_of_the_prophets/views.py
@@ -76,10 +76,14 @@
def get_context_data(self, **kwargs):
last_roll = models.Roll.objects.order_by("-embargo").first()
+ last_roll_embargo = None
+ if last_roll:
+ last_roll_embargo = last_roll.embargo
+
return super().get_context_data(
**kwargs,
last_roll=last_roll,
- board=board.Board(now=last_roll.embargo),
+ board=board.Board(now=last_roll_embargo),
special_square_types=models.SpecialSquareType.objects.all(),
)
| {"golden_diff": "diff --git a/will_of_the_prophets/views.py b/will_of_the_prophets/views.py\n--- a/will_of_the_prophets/views.py\n+++ b/will_of_the_prophets/views.py\n@@ -76,10 +76,14 @@\n \n def get_context_data(self, **kwargs):\n last_roll = models.Roll.objects.order_by(\"-embargo\").first()\n+ last_roll_embargo = None\n+ if last_roll:\n+ last_roll_embargo = last_roll.embargo\n+\n return super().get_context_data(\n **kwargs,\n last_roll=last_roll,\n- board=board.Board(now=last_roll.embargo),\n+ board=board.Board(now=last_roll_embargo),\n special_square_types=models.SpecialSquareType.objects.all(),\n )\n", "issue": "Visiting /roll raises an error if no rolls exist\n```\r\nEnvironment:\r\n\r\n\r\nRequest Method: GET\r\nRequest URL: http://localhost:8000/roll/\r\n\r\nDjango Version: 2.2\r\nPython Version: 3.7.3\r\nInstalled Applications:\r\n['raven.contrib.django.raven_compat',\r\n 'django.contrib.admin',\r\n 'django.contrib.auth',\r\n 'django.contrib.contenttypes',\r\n 'django.contrib.sessions',\r\n 'django.contrib.messages',\r\n 'django.contrib.staticfiles',\r\n 'sass_processor',\r\n 'widget_tweaks',\r\n 'tz_detect',\r\n 'debug_toolbar',\r\n 's3direct',\r\n 'bootstrap',\r\n 'will_of_the_prophets']\r\nInstalled Middleware:\r\n('raven.contrib.django.middleware.DjangoRestFrameworkCompatMiddleware',\r\n 'raven.contrib.django.middleware.SentryMiddleware',\r\n 'whitenoise.middleware.WhiteNoiseMiddleware',\r\n 'raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',\r\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\r\n 'django.middleware.security.SecurityMiddleware',\r\n 'django.contrib.sessions.middleware.SessionMiddleware',\r\n 'django.middleware.common.CommonMiddleware',\r\n 'django.middleware.csrf.CsrfViewMiddleware',\r\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\r\n 'django.contrib.messages.middleware.MessageMiddleware',\r\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\r\n 'tz_detect.middleware.TimezoneMiddleware')\r\n\r\n\r\n\r\nTraceback:\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/exception.py\" in inner\r\n 34. response = get_response(request)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/base.py\" in _get_response\r\n 115. response = self.process_exception_by_middleware(e, request)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/base.py\" in _get_response\r\n 113. response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/base.py\" in view\r\n 71. return self.dispatch(request, *args, **kwargs)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/contrib/auth/mixins.py\" in dispatch\r\n 52. return super().dispatch(request, *args, **kwargs)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/base.py\" in dispatch\r\n 97. return handler(request, *args, **kwargs)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/edit.py\" in get\r\n 168. return super().get(request, *args, **kwargs)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/edit.py\" in get\r\n 133. 
return self.render_to_response(self.get_context_data())\r\n\r\nFile \"/Users/craiga/will_of_the_prophets/will_of_the_prophets/views.py\" in get_context_data\r\n 82. board=board.Board(now=last_roll.embargo),\r\n\r\nException Type: AttributeError at /roll/\r\nException Value: 'NoneType' object has no attribute 'embargo'\r\n```\n", "before_files": [{"content": "\"\"\"Views.\"\"\"\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.views.decorators.cache import cache_control\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.decorators.http import condition\nfrom django.views.generic.edit import CreateView\n\nfrom will_of_the_prophets import board, forms, models\n\n\ndef get_last_modified(request):\n \"\"\"Get board's last modified datetime.\"\"\"\n try:\n return (\n models.Roll.objects.filter(embargo__lte=timezone.now())\n .latest(\"embargo\")\n .embargo\n )\n except models.Roll.DoesNotExist:\n return None\n\n\n@xframe_options_exempt\n@condition(last_modified_func=get_last_modified)\n@cache_control(max_age=3600)\ndef public_board(request):\n \"\"\"\n Board for the public.\n\n Does not take embargoed rolls into account.\n \"\"\"\n response = render(\n request,\n \"will_of_the_prophets/public_board.html\",\n {\n \"board\": board.Board(),\n \"special_square_types\": models.SpecialSquareType.objects.all(),\n },\n )\n\n canonical_url = settings.PUBLIC_BOARD_CANONICAL_URL\n if canonical_url:\n response[\"Link\"] = f'<{canonical_url}>; rel=\"canonical\"'\n\n return response\n\n\n@xframe_options_exempt\n@condition(last_modified_func=get_last_modified)\n@cache_control(max_age=3600)\ndef roll_frequency(request):\n \"\"\"\n Show roll frequency.\n \"\"\"\n roll_count = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}\n for roll in board.Board().rolls:\n roll_count[roll] += 1\n\n return render(\n request,\n \"will_of_the_prophets/roll_frequency.html\",\n {\"roll_frequency\": roll_count},\n )\n\n\nclass RollView(LoginRequiredMixin, CreateView):\n \"\"\"View for rolling the die.\"\"\"\n\n form_class = forms.RollForm\n template_name = \"will_of_the_prophets/roll.html\"\n\n def get_context_data(self, **kwargs):\n last_roll = models.Roll.objects.order_by(\"-embargo\").first()\n return super().get_context_data(\n **kwargs,\n last_roll=last_roll,\n board=board.Board(now=last_roll.embargo),\n special_square_types=models.SpecialSquareType.objects.all(),\n )\n\n def get_success_url(self):\n return reverse(\"roll\") + \"#chula\"\n", "path": "will_of_the_prophets/views.py"}], "after_files": [{"content": "\"\"\"Views.\"\"\"\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.views.decorators.cache import cache_control\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.decorators.http import condition\nfrom django.views.generic.edit import CreateView\n\nfrom will_of_the_prophets import board, forms, models\n\n\ndef get_last_modified(request):\n \"\"\"Get board's last modified datetime.\"\"\"\n try:\n return (\n models.Roll.objects.filter(embargo__lte=timezone.now())\n .latest(\"embargo\")\n .embargo\n )\n except models.Roll.DoesNotExist:\n return 
None\n\n\n@xframe_options_exempt\n@condition(last_modified_func=get_last_modified)\n@cache_control(max_age=3600)\ndef public_board(request):\n \"\"\"\n Board for the public.\n\n Does not take embargoed rolls into account.\n \"\"\"\n response = render(\n request,\n \"will_of_the_prophets/public_board.html\",\n {\n \"board\": board.Board(),\n \"special_square_types\": models.SpecialSquareType.objects.all(),\n },\n )\n\n canonical_url = settings.PUBLIC_BOARD_CANONICAL_URL\n if canonical_url:\n response[\"Link\"] = f'<{canonical_url}>; rel=\"canonical\"'\n\n return response\n\n\n@xframe_options_exempt\n@condition(last_modified_func=get_last_modified)\n@cache_control(max_age=3600)\ndef roll_frequency(request):\n \"\"\"\n Show roll frequency.\n \"\"\"\n roll_count = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}\n for roll in board.Board().rolls:\n roll_count[roll] += 1\n\n return render(\n request,\n \"will_of_the_prophets/roll_frequency.html\",\n {\"roll_frequency\": roll_count},\n )\n\n\nclass RollView(LoginRequiredMixin, CreateView):\n \"\"\"View for rolling the die.\"\"\"\n\n form_class = forms.RollForm\n template_name = \"will_of_the_prophets/roll.html\"\n\n def get_context_data(self, **kwargs):\n last_roll = models.Roll.objects.order_by(\"-embargo\").first()\n last_roll_embargo = None\n if last_roll:\n last_roll_embargo = last_roll.embargo\n\n return super().get_context_data(\n **kwargs,\n last_roll=last_roll,\n board=board.Board(now=last_roll_embargo),\n special_square_types=models.SpecialSquareType.objects.all(),\n )\n\n def get_success_url(self):\n return reverse(\"roll\") + \"#chula\"\n", "path": "will_of_the_prophets/views.py"}]} | 1,798 | 176 |
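The failure is the usual `QuerySet.first()` returning `None` on an empty table, after which `.embargo` raises `AttributeError`; the patch dereferences the attribute only when a roll exists and otherwise hands `None` to `board.Board(now=...)`, which the parameterless `board.Board()` calls elsewhere in the same module suggest is an accepted default. The guard is easy to exercise in isolation; the stub below is illustrative and deliberately Django-free:

```
from collections import namedtuple

Roll = namedtuple("Roll", "embargo")


def board_now(last_roll):
    # Mirrors the patched get_context_data(): touch .embargo only when a
    # roll exists, otherwise fall back to None for Board(now=None).
    return last_roll.embargo if last_roll else None


assert board_now(Roll(embargo="2019-04-23T10:00:00Z")) == "2019-04-23T10:00:00Z"
assert board_now(None) is None  # fresh database: no AttributeError
```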
gh_patches_debug_3598 | rasdani/github-patches | git_diff | Zeroto521__my-data-toolkit-580 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MAINT: Simplify `register_method_factory`
<!--
Thanks for contributing a pull request!
Please follow these standard acronyms to start the commit message:
- ENH: enhancement
- BUG: bug fix
- DOC: documentation
- TYP: type annotations
- TST: addition or modification of tests
- MAINT: maintenance commit (refactoring, typos, etc.)
- BLD: change related to building
- REL: related to releasing
- API: an (incompatible) API change
- DEP: deprecate something, or remove a deprecated object
- DEV: development tool or utility
- REV: revert an earlier commit
- PERF: performance improvement
- BOT: always commit via a bot
- CI: related to CI or CD
- CLN: Code cleanup
-->
- [ ] closes #xxxx
- [x] whatsnew entry
Delete duplicate `wraps` lines
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dtoolkit/accessor/register.py`
Content:
```
1 from __future__ import annotations
2
3 from functools import wraps
4 from typing import Callable
5
6 from pandas.api.extensions import register_dataframe_accessor
7 from pandas.api.extensions import register_index_accessor
8 from pandas.api.extensions import register_series_accessor
9 from pandas.util._decorators import doc
10
11 from dtoolkit._typing import SeriesOrFrame
12
13
14 def register_method_factory(register_accessor):
15 """
16 Let pandas-object like accessor which only hooks class also hooks function easily.
17
18 Read more in the `User Guide`_.
19
20 .. _User Guide: ../../guide/tips_about_accessor.ipynb#Extend-to-Pandas-like-Object
21
22 Parameters
23 ----------
24 register_accessor : Pandas-object like accessor
25
26 See Also
27 --------
28 register_dataframe_method
29 register_series_method
30 register_index_method
31 dtoolkit.geoaccessor.register_geoseries_method
32 dtoolkit.geoaccessor.register_geodataframe_method
33 """
34
35 # based on pandas_flavor/register.py
36 def register_accessor_method(method: Callable, name: str):
37 def method_accessor(pd_obj: SeriesOrFrame):
38 @wraps(method)
39 def wrapper(*args, **kwargs):
40 return method(pd_obj, *args, **kwargs)
41
42 return wrapper
43
44 # Register method as pandas object inner method.
45 register_accessor(name)(method_accessor)
46
47 # Must return method itself, otherwise would get None.
48 return method
49
50 def register_accessor_alias(name: str = None):
51 def wrapper(method: Callable):
52 return register_accessor_method(method, name or method.__name__)
53
54 return wrapper
55
56 @wraps(register_accessor)
57 def decorator(name: Callable | str = None):
58 if callable(name): # Supports `@register_*_method` using.
59 method = name # This 'name' variable actually is a function.
60 return register_accessor_method(method, method.__name__)
61
62 # Supports `@register_*_method()` and `@register_*_method(name="")` using.
63 return register_accessor_alias(name)
64
65 return decorator
66
67
68 @register_method_factory
69 @doc(klass=":class:`~pandas.Series`")
70 def register_series_method(name: str = None):
71 """
72 {klass} register accessor for human.
73
74 Write method normally, use method naturally.
75
76 Read more in the `User Guide`_.
77
78 .. _User Guide: ../../guide/tips_about_accessor.ipynb
79
80 Parameters
81 ----------
82 name : str, optional
83 Use the ``method`` name as the default accessor entrance if ``name`` is None.
84
85 See Also
86 --------
87 register_dataframe_method
88 register_series_method
89 register_index_method
90 pandas.api.extensions.register_dataframe_accessor
91 pandas.api.extensions.register_series_accessor
92 pandas.api.extensions.register_index_accessor
93
94 Examples
95 --------
96 In your library code::
97
98 from __future__ import annotations
99
100 from dtoolkit.accessor import register_dataframe_method
101 from dtoolkit.accessor import register_series_method
102 from dtoolkit.accessor import register_index_method
103 import pandas as pd
104
105 @register_index_method("col") # Support alias name also.
106 @register_series_method("col")
107 @register_dataframe_method(name="col")
108 @register_index_method # Use accessor method's `__name__` as the entrance.
109 @register_series_method
110 @register_dataframe_method
111 def cols(pd_obj) -> int | str | list[int | str] | None:
112 '''
113 An API to gather :attr:`~pandas.Series.name` and
114 :attr:`~pandas.DataFrame.columns` to one.
115 '''
116
117 if isinstance(pd_obj, (pd.Series, pd.Index)):
118 return pd_obj.name
119
120 return pd_obj.columns.tolist()
121
122 Back in an interactive IPython session:
123
124 .. code-block:: ipython
125
126 In [1]: import pandas as pd
127
128 In [2]: df = pd.DataFrame(
129 ...: {{
130 ...: "a": [1, 2],
131 ...: "b": [3, 4],
132 ...: }},
133 ...: index=pd.Index(
134 ...: ["x", "y"],
135 ...: name="c",
136 ...: ),
137 ...: )
138
139 In [3]: df
140 Out[3]:
141 a b
142 c
143 x 1 3
144 y 2 4
145
146 Get the columns of DataFrame via `cols` or `col` method
147
148 In [4]: df.col()
149 Out[4]: ['a', 'b']
150
151 Get name of Series via `cols` or `col` method
152
153 In [5]: df.a.col()
154 Out[5]: 'a'
155
156 Get name of Index via `cols` or `col` method
157
158 In [6]: df.index.col()
159 Out[6]: 'c'
160 """
161
162 return register_series_accessor(name)
163
164
165 @register_method_factory
166 @doc(register_series_method, klass=":class:`~pandas.DataFrame`")
167 def register_dataframe_method(name: str = None):
168 return register_dataframe_accessor(name)
169
170
171 @register_method_factory
172 @doc(register_series_method, klass=":class:`~pandas.Index`")
173 def register_index_method(name: str = None):
174 return register_index_accessor(name)
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dtoolkit/accessor/register.py b/dtoolkit/accessor/register.py
--- a/dtoolkit/accessor/register.py
+++ b/dtoolkit/accessor/register.py
@@ -34,6 +34,7 @@
# based on pandas_flavor/register.py
def register_accessor_method(method: Callable, name: str):
+ @wraps(method)
def method_accessor(pd_obj: SeriesOrFrame):
@wraps(method)
def wrapper(*args, **kwargs):
| {"golden_diff": "diff --git a/dtoolkit/accessor/register.py b/dtoolkit/accessor/register.py\n--- a/dtoolkit/accessor/register.py\n+++ b/dtoolkit/accessor/register.py\n@@ -34,6 +34,7 @@\n \n # based on pandas_flavor/register.py\n def register_accessor_method(method: Callable, name: str):\n+ @wraps(method)\n def method_accessor(pd_obj: SeriesOrFrame):\n @wraps(method)\n def wrapper(*args, **kwargs):\n", "issue": "MAINT: Simplify `register_method_factory`\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [ ] closes #xxxx\r\n- [x] whatsnew entry\r\n\r\nDelete duplicate `wraps` lines\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom functools import wraps\nfrom typing import Callable\n\nfrom pandas.api.extensions import register_dataframe_accessor\nfrom pandas.api.extensions import register_index_accessor\nfrom pandas.api.extensions import register_series_accessor\nfrom pandas.util._decorators import doc\n\nfrom dtoolkit._typing import SeriesOrFrame\n\n\ndef register_method_factory(register_accessor):\n \"\"\"\n Let pandas-object like accessor which only hooks class also hooks function easily.\n\n Read more in the `User Guide`_.\n\n .. _User Guide: ../../guide/tips_about_accessor.ipynb#Extend-to-Pandas-like-Object\n\n Parameters\n ----------\n register_accessor : Pandas-object like accessor\n\n See Also\n --------\n register_dataframe_method\n register_series_method\n register_index_method\n dtoolkit.geoaccessor.register_geoseries_method\n dtoolkit.geoaccessor.register_geodataframe_method\n \"\"\"\n\n # based on pandas_flavor/register.py\n def register_accessor_method(method: Callable, name: str):\n def method_accessor(pd_obj: SeriesOrFrame):\n @wraps(method)\n def wrapper(*args, **kwargs):\n return method(pd_obj, *args, **kwargs)\n\n return wrapper\n\n # Register method as pandas object inner method.\n register_accessor(name)(method_accessor)\n\n # Must return method itself, otherwise would get None.\n return method\n\n def register_accessor_alias(name: str = None):\n def wrapper(method: Callable):\n return register_accessor_method(method, name or method.__name__)\n\n return wrapper\n\n @wraps(register_accessor)\n def decorator(name: Callable | str = None):\n if callable(name): # Supports `@register_*_method` using.\n method = name # This 'name' variable actually is a function.\n return register_accessor_method(method, method.__name__)\n\n # Supports `@register_*_method()` and `@register_*_method(name=\"\")` using.\n return register_accessor_alias(name)\n\n return decorator\n\n\n@register_method_factory\n@doc(klass=\":class:`~pandas.Series`\")\ndef register_series_method(name: str = None):\n \"\"\"\n {klass} register accessor for human.\n\n Write method normally, use method naturally.\n\n Read more in the `User Guide`_.\n\n .. 
_User Guide: ../../guide/tips_about_accessor.ipynb\n\n Parameters\n ----------\n name : str, optional\n Use the ``method`` name as the default accessor entrance if ``name`` is None.\n\n See Also\n --------\n register_dataframe_method\n register_series_method\n register_index_method\n pandas.api.extensions.register_dataframe_accessor\n pandas.api.extensions.register_series_accessor\n pandas.api.extensions.register_index_accessor\n\n Examples\n --------\n In your library code::\n\n from __future__ import annotations\n\n from dtoolkit.accessor import register_dataframe_method\n from dtoolkit.accessor import register_series_method\n from dtoolkit.accessor import register_index_method\n import pandas as pd\n\n @register_index_method(\"col\") # Support alias name also.\n @register_series_method(\"col\")\n @register_dataframe_method(name=\"col\")\n @register_index_method # Use accessor method's `__name__` as the entrance.\n @register_series_method\n @register_dataframe_method\n def cols(pd_obj) -> int | str | list[int | str] | None:\n '''\n An API to gather :attr:`~pandas.Series.name` and\n :attr:`~pandas.DataFrame.columns` to one.\n '''\n\n if isinstance(pd_obj, (pd.Series, pd.Index)):\n return pd_obj.name\n\n return pd_obj.columns.tolist()\n\n Back in an interactive IPython session:\n\n .. code-block:: ipython\n\n In [1]: import pandas as pd\n\n In [2]: df = pd.DataFrame(\n ...: {{\n ...: \"a\": [1, 2],\n ...: \"b\": [3, 4],\n ...: }},\n ...: index=pd.Index(\n ...: [\"x\", \"y\"],\n ...: name=\"c\",\n ...: ),\n ...: )\n\n In [3]: df\n Out[3]:\n a b\n c\n x 1 3\n y 2 4\n\n Get the columns of DataFrame via `cols` or `col` method\n\n In [4]: df.col()\n Out[4]: ['a', 'b']\n\n Get name of Series via `cols` or `col` method\n\n In [5]: df.a.col()\n Out[5]: 'a'\n\n Get name of Index via `cols` or `col` method\n\n In [6]: df.index.col()\n Out[6]: 'c'\n \"\"\"\n\n return register_series_accessor(name)\n\n\n@register_method_factory\n@doc(register_series_method, klass=\":class:`~pandas.DataFrame`\")\ndef register_dataframe_method(name: str = None):\n return register_dataframe_accessor(name)\n\n\n@register_method_factory\n@doc(register_series_method, klass=\":class:`~pandas.Index`\")\ndef register_index_method(name: str = None):\n return register_index_accessor(name)\n", "path": "dtoolkit/accessor/register.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom functools import wraps\nfrom typing import Callable\n\nfrom pandas.api.extensions import register_dataframe_accessor\nfrom pandas.api.extensions import register_index_accessor\nfrom pandas.api.extensions import register_series_accessor\nfrom pandas.util._decorators import doc\n\nfrom dtoolkit._typing import SeriesOrFrame\n\n\ndef register_method_factory(register_accessor):\n \"\"\"\n Let pandas-object like accessor which only hooks class also hooks function easily.\n\n Read more in the `User Guide`_.\n\n .. 
_User Guide: ../../guide/tips_about_accessor.ipynb#Extend-to-Pandas-like-Object\n\n Parameters\n ----------\n register_accessor : Pandas-object like accessor\n\n See Also\n --------\n register_dataframe_method\n register_series_method\n register_index_method\n dtoolkit.geoaccessor.register_geoseries_method\n dtoolkit.geoaccessor.register_geodataframe_method\n \"\"\"\n\n # based on pandas_flavor/register.py\n def register_accessor_method(method: Callable, name: str):\n @wraps(method)\n def method_accessor(pd_obj: SeriesOrFrame):\n @wraps(method)\n def wrapper(*args, **kwargs):\n return method(pd_obj, *args, **kwargs)\n\n return wrapper\n\n # Register method as pandas object inner method.\n register_accessor(name)(method_accessor)\n\n # Must return method itself, otherwise would get None.\n return method\n\n def register_accessor_alias(name: str = None):\n def wrapper(method: Callable):\n return register_accessor_method(method, name or method.__name__)\n\n return wrapper\n\n @wraps(register_accessor)\n def decorator(name: Callable | str = None):\n if callable(name): # Supports `@register_*_method` using.\n method = name # This 'name' variable actually is a function.\n return register_accessor_method(method, method.__name__)\n\n # Supports `@register_*_method()` and `@register_*_method(name=\"\")` using.\n return register_accessor_alias(name)\n\n return decorator\n\n\n@register_method_factory\n@doc(klass=\":class:`~pandas.Series`\")\ndef register_series_method(name: str = None):\n \"\"\"\n {klass} register accessor for human.\n\n Write method normally, use method naturally.\n\n Read more in the `User Guide`_.\n\n .. _User Guide: ../../guide/tips_about_accessor.ipynb\n\n Parameters\n ----------\n name : str, optional\n Use the ``method`` name as the default accessor entrance if ``name`` is None.\n\n See Also\n --------\n register_dataframe_method\n register_series_method\n register_index_method\n pandas.api.extensions.register_dataframe_accessor\n pandas.api.extensions.register_series_accessor\n pandas.api.extensions.register_index_accessor\n\n Examples\n --------\n In your library code::\n\n from __future__ import annotations\n\n from dtoolkit.accessor import register_dataframe_method\n from dtoolkit.accessor import register_series_method\n from dtoolkit.accessor import register_index_method\n import pandas as pd\n\n @register_index_method(\"col\") # Support alias name also.\n @register_series_method(\"col\")\n @register_dataframe_method(name=\"col\")\n @register_index_method # Use accessor method's `__name__` as the entrance.\n @register_series_method\n @register_dataframe_method\n def cols(pd_obj) -> int | str | list[int | str] | None:\n '''\n An API to gather :attr:`~pandas.Series.name` and\n :attr:`~pandas.DataFrame.columns` to one.\n '''\n\n if isinstance(pd_obj, (pd.Series, pd.Index)):\n return pd_obj.name\n\n return pd_obj.columns.tolist()\n\n Back in an interactive IPython session:\n\n .. 
code-block:: ipython\n\n In [1]: import pandas as pd\n\n In [2]: df = pd.DataFrame(\n ...: {{\n ...: \"a\": [1, 2],\n ...: \"b\": [3, 4],\n ...: }},\n ...: index=pd.Index(\n ...: [\"x\", \"y\"],\n ...: name=\"c\",\n ...: ),\n ...: )\n\n In [3]: df\n Out[3]:\n a b\n c\n x 1 3\n y 2 4\n\n Get the columns of DataFrame via `cols` or `col` method\n\n In [4]: df.col()\n Out[4]: ['a', 'b']\n\n Get name of Series via `cols` or `col` method\n\n In [5]: df.a.col()\n Out[5]: 'a'\n\n Get name of Index via `cols` or `col` method\n\n In [6]: df.index.col()\n Out[6]: 'c'\n \"\"\"\n\n return register_series_accessor(name)\n\n\n@register_method_factory\n@doc(register_series_method, klass=\":class:`~pandas.DataFrame`\")\ndef register_dataframe_method(name: str = None):\n return register_dataframe_accessor(name)\n\n\n@register_method_factory\n@doc(register_series_method, klass=\":class:`~pandas.Index`\")\ndef register_index_method(name: str = None):\n return register_index_accessor(name)\n", "path": "dtoolkit/accessor/register.py"}]} | 2,019 | 109 |
gh_patches_debug_5416 | rasdani/github-patches | git_diff | rasterio__rasterio-1192 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Query params in an https URL are not working
Hi
I wanted to use rasterio with an https URL that has query params, for example: https://.....?a=a&b=b
First I wanted to see whether GDAL supports that kind of URL, and it does, so I looked for the code in rasterio that is responsible for cutting the URL.
It can be found here:
https://github.com/mapbox/rasterio/blob/master/rasterio/vfs.py#L32
I added in my env the following code:
```
if parts.query:
    path += "?" + parts.query
```
and it solved the issue.
Is there a reason for cutting the query string?
Thanks, Guy
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/vfs.py`
Content:
```
1 """Implementation of Apache VFS schemes and URLs."""
2
3 import os
4
5 from rasterio.compat import urlparse
6
7
8 # NB: As not to propagate fallacies of distributed computing, Rasterio
9 # does not support HTTP or FTP URLs via GDAL's vsicurl handler. Only
10 # the following local filesystem schemes are supported.
11 SCHEMES = {
12 'gzip': 'gzip',
13 'gzip+file': 'gzip',
14 'zip': 'zip',
15 'zip+file': 'zip',
16 'tar': 'tar',
17 'tar+file': 'tar',
18 'https': 'curl',
19 'http': 'curl',
20 's3': 's3'}
21
22 FILE_SCHEMES = [
23 '', 'file', 'gzip', 'gzip+file', 'zip', 'zip+file', 'tar', 'tar+file']
24
25
26 def parse_path(uri, vfs=None):
27 """Parse a URI or Apache VFS URL into its parts
28
29 Returns: tuple
30 (path, archive, scheme)
31 """
32 archive = scheme = None
33 path = uri
34 if vfs:
35 parts = urlparse(vfs)
36 scheme = parts.scheme
37 archive = parts.path
38 if parts.netloc and parts.netloc != 'localhost': # pragma: no cover
39 archive = parts.netloc + archive
40 else:
41 parts = urlparse(path)
42 scheme = parts.scheme
43 path = parts.path
44 if parts.netloc and parts.netloc != 'localhost':
45 path = parts.netloc + path
46 # There are certain URI schemes we favor over GDAL's names.
47 if scheme in SCHEMES:
48 parts = path.split('!')
49 path = parts.pop() if parts else None
50 archive = parts.pop() if parts else None
51 # For filesystem paths.
52 elif scheme.lower() in FILE_SCHEMES:
53 pass
54 # We permit GDAL's idiosyncratic URI-like dataset paths such as
55 # 'netcdf':... to fall right through with no parsed archive
56 # or scheme.
57 else:
58 archive = scheme = None
59 path = uri
60
61 return path, archive, scheme
62
63
64 def vsi_path(path, archive=None, scheme=None):
65 """Convert a parsed path to a GDAL VSI path."""
66 # If a VSF and archive file are specified, we convert the path to
67 # a GDAL VSI path (see cpl_vsi.h).
68 if scheme and scheme.startswith('http'):
69 result = "/vsicurl/{0}://{1}".format(scheme, path)
70 elif scheme and scheme == 's3':
71 result = "/vsis3/{0}".format(path)
72 elif scheme and scheme != 'file':
73 if archive:
74 result = '/vsi{0}/{1}/{2}'.format(
75 scheme, archive, path.lstrip('/'))
76 else:
77 result = '/vsi{0}/{1}'.format(scheme, path.lstrip('/'))
78 else:
79 result = path
80 return result
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rasterio/vfs.py b/rasterio/vfs.py
--- a/rasterio/vfs.py
+++ b/rasterio/vfs.py
@@ -41,6 +41,8 @@
parts = urlparse(path)
scheme = parts.scheme
path = parts.path
+ if parts.query:
+ path += "?" + parts.query
if parts.netloc and parts.netloc != 'localhost':
path = parts.netloc + path
# There are certain URI schemes we favor over GDAL's names.
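As a quick sanity check, the sketch below exercises the patched `parse_path`/`vsi_path` pair; the URL is a made-up example and the expected values are worked out by hand from the diff above rather than taken from rasterio's test suite.
```python
from rasterio.vfs import parse_path, vsi_path

# With the patch, the query string survives parsing: urlparse() splits it off
# into parts.query, and the two new lines glue it back onto the path.
path, archive, scheme = parse_path("https://example.com/cog.tif?a=a&b=b")
assert scheme == "https"
assert path == "example.com/cog.tif?a=a&b=b"  # netloc + path + "?" + query
assert archive is None

# ...so it is forwarded unchanged into the GDAL /vsicurl/ dataset path.
assert vsi_path(path, archive, scheme) == "/vsicurl/https://example.com/cog.tif?a=a&b=b"
```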
| {"golden_diff": "diff --git a/rasterio/vfs.py b/rasterio/vfs.py\n--- a/rasterio/vfs.py\n+++ b/rasterio/vfs.py\n@@ -41,6 +41,8 @@\n parts = urlparse(path)\n scheme = parts.scheme\n path = parts.path\n+ if parts.query:\n+ path += \"?\" + parts.query\n if parts.netloc and parts.netloc != 'localhost':\n path = parts.netloc + path\n # There are certain URI schemes we favor over GDAL's names.\n", "issue": "query params in https url is not working\nHi \r\n\r\nI wanted to use rasterio with a https url that has query-params, for example: https://.....?a=a&b=b\r\nFirst I wanted to see if gdal supports that kind of url and gdal does, so I looked for the code in rasterio that is responsible for cutting the url, \r\n\r\nIt can be found here:\r\nhttps://github.com/mapbox/rasterio/blob/master/rasterio/vfs.py#L32\r\n\r\nI added in my env the following code:\r\n```\r\n if parts.query: \r\n path += \"?\" + parts.query \r\n```\r\n\r\nand it solved the issue, \r\n\r\nIs there a reason for cutting the querystring?\r\n\r\n\r\n\r\nThanks, Guy\r\n\nquery params in https url is not working\nHi \r\n\r\nI wanted to use rasterio with a https url that has query-params, for example: https://.....?a=a&b=b\r\nFirst I wanted to see if gdal supports that kind of url and gdal does, so I looked for the code in rasterio that is responsible for cutting the url, \r\n\r\nIt can be found here:\r\nhttps://github.com/mapbox/rasterio/blob/master/rasterio/vfs.py#L32\r\n\r\nI added in my env the following code:\r\n```\r\n if parts.query: \r\n path += \"?\" + parts.query \r\n```\r\n\r\nand it solved the issue, \r\n\r\nIs there a reason for cutting the querystring?\r\n\r\n\r\n\r\nThanks, Guy\r\n\n", "before_files": [{"content": "\"\"\"Implementation of Apache VFS schemes and URLs.\"\"\"\n\nimport os\n\nfrom rasterio.compat import urlparse\n\n\n# NB: As not to propagate fallacies of distributed computing, Rasterio\n# does not support HTTP or FTP URLs via GDAL's vsicurl handler. Only\n# the following local filesystem schemes are supported.\nSCHEMES = {\n 'gzip': 'gzip',\n 'gzip+file': 'gzip',\n 'zip': 'zip',\n 'zip+file': 'zip',\n 'tar': 'tar',\n 'tar+file': 'tar',\n 'https': 'curl',\n 'http': 'curl',\n 's3': 's3'}\n\nFILE_SCHEMES = [\n '', 'file', 'gzip', 'gzip+file', 'zip', 'zip+file', 'tar', 'tar+file']\n\n\ndef parse_path(uri, vfs=None):\n \"\"\"Parse a URI or Apache VFS URL into its parts\n\n Returns: tuple\n (path, archive, scheme)\n \"\"\"\n archive = scheme = None\n path = uri\n if vfs:\n parts = urlparse(vfs)\n scheme = parts.scheme\n archive = parts.path\n if parts.netloc and parts.netloc != 'localhost': # pragma: no cover\n archive = parts.netloc + archive\n else:\n parts = urlparse(path)\n scheme = parts.scheme\n path = parts.path\n if parts.netloc and parts.netloc != 'localhost':\n path = parts.netloc + path\n # There are certain URI schemes we favor over GDAL's names.\n if scheme in SCHEMES:\n parts = path.split('!')\n path = parts.pop() if parts else None\n archive = parts.pop() if parts else None\n # For filesystem paths.\n elif scheme.lower() in FILE_SCHEMES:\n pass\n # We permit GDAL's idiosyncratic URI-like dataset paths such as\n # 'netcdf':... 
to fall right through with no parsed archive\n # or scheme.\n else:\n archive = scheme = None\n path = uri\n\n return path, archive, scheme\n\n\ndef vsi_path(path, archive=None, scheme=None):\n \"\"\"Convert a parsed path to a GDAL VSI path.\"\"\"\n # If a VSF and archive file are specified, we convert the path to\n # a GDAL VSI path (see cpl_vsi.h).\n if scheme and scheme.startswith('http'):\n result = \"/vsicurl/{0}://{1}\".format(scheme, path)\n elif scheme and scheme == 's3':\n result = \"/vsis3/{0}\".format(path)\n elif scheme and scheme != 'file':\n if archive:\n result = '/vsi{0}/{1}/{2}'.format(\n scheme, archive, path.lstrip('/'))\n else:\n result = '/vsi{0}/{1}'.format(scheme, path.lstrip('/'))\n else:\n result = path\n return result\n", "path": "rasterio/vfs.py"}], "after_files": [{"content": "\"\"\"Implementation of Apache VFS schemes and URLs.\"\"\"\n\nimport os\n\nfrom rasterio.compat import urlparse\n\n\n# NB: As not to propagate fallacies of distributed computing, Rasterio\n# does not support HTTP or FTP URLs via GDAL's vsicurl handler. Only\n# the following local filesystem schemes are supported.\nSCHEMES = {\n 'gzip': 'gzip',\n 'gzip+file': 'gzip',\n 'zip': 'zip',\n 'zip+file': 'zip',\n 'tar': 'tar',\n 'tar+file': 'tar',\n 'https': 'curl',\n 'http': 'curl',\n 's3': 's3'}\n\nFILE_SCHEMES = [\n '', 'file', 'gzip', 'gzip+file', 'zip', 'zip+file', 'tar', 'tar+file']\n\n\ndef parse_path(uri, vfs=None):\n \"\"\"Parse a URI or Apache VFS URL into its parts\n\n Returns: tuple\n (path, archive, scheme)\n \"\"\"\n archive = scheme = None\n path = uri\n if vfs:\n parts = urlparse(vfs)\n scheme = parts.scheme\n archive = parts.path\n if parts.netloc and parts.netloc != 'localhost': # pragma: no cover\n archive = parts.netloc + archive\n else:\n parts = urlparse(path)\n scheme = parts.scheme\n path = parts.path\n if parts.query:\n path += \"?\" + parts.query\n if parts.netloc and parts.netloc != 'localhost':\n path = parts.netloc + path\n # There are certain URI schemes we favor over GDAL's names.\n if scheme in SCHEMES:\n parts = path.split('!')\n path = parts.pop() if parts else None\n archive = parts.pop() if parts else None\n # For filesystem paths.\n elif scheme.lower() in FILE_SCHEMES:\n pass\n # We permit GDAL's idiosyncratic URI-like dataset paths such as\n # 'netcdf':... to fall right through with no parsed archive\n # or scheme.\n else:\n archive = scheme = None\n path = uri\n\n return path, archive, scheme\n\n\ndef vsi_path(path, archive=None, scheme=None):\n \"\"\"Convert a parsed path to a GDAL VSI path.\"\"\"\n # If a VSF and archive file are specified, we convert the path to\n # a GDAL VSI path (see cpl_vsi.h).\n if scheme and scheme.startswith('http'):\n result = \"/vsicurl/{0}://{1}\".format(scheme, path)\n elif scheme and scheme == 's3':\n result = \"/vsis3/{0}\".format(path)\n elif scheme and scheme != 'file':\n if archive:\n result = '/vsi{0}/{1}/{2}'.format(\n scheme, archive, path.lstrip('/'))\n else:\n result = '/vsi{0}/{1}'.format(scheme, path.lstrip('/'))\n else:\n result = path\n return result\n", "path": "rasterio/vfs.py"}]} | 1,370 | 118 |
gh_patches_debug_2624 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-6488 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Possible 500 error when resolving an alert on content that is no longer public
Reported by Sentry. I had trouble understanding how the bug could happen, but I managed to reproduce it (in a perhaps somewhat far-fetched way...).
**How to reproduce?**
1. Log in as `user1`
2. Report a billet (opinion post)
3. Log in as `staff`
4. Open the reported billet's page in two different tabs
5. In one tab, unpublish the billet
6. In the other tab, resolve the alert (do not reload the page just before doing so; the billet is in fact no longer published, and that is the trick)
A 500 error then occurs. It comes from here: https://github.com/zestedesavoir/zds-site/blob/c06671c4901a95c30f31067c09d5e4526fd86575/zds/tutorialv2/views/alerts.py#L88
The content no longer has a public version, hence no public URL, and `content.get_absolute_url_online()` then returns `''`.
Fixing this bug probably involves checking whether the alert has already been resolved or whether the reported content still has a public version: if either condition is not met, a 404 error should be raised.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/tutorialv2/views/alerts.py`
Content:
```
1 from datetime import datetime
2
3 from django.contrib import messages
4 from django.core.exceptions import PermissionDenied
5 from django.contrib.auth.mixins import LoginRequiredMixin
6 from django.db import transaction
7 from django.http import Http404
8 from django.shortcuts import get_object_or_404, redirect
9 from django.template.loader import render_to_string
10 from django.utils.decorators import method_decorator
11 from django.utils.translation import gettext_lazy as _
12 from django.views.generic import FormView
13
14 from zds.tutorialv2.models import TYPE_CHOICES_DICT
15 from zds.tutorialv2.models.database import PublishableContent
16 from zds.utils.models import Alert
17
18
19 class SendContentAlert(LoginRequiredMixin, FormView):
20 http_method_names = ["post"]
21
22 @method_decorator(transaction.atomic)
23 def dispatch(self, *args, **kwargs):
24 return super().dispatch(*args, **kwargs)
25
26 def post(self, request, *args, **kwargs):
27 try:
28 content_pk = int(self.kwargs["pk"])
29 except (KeyError, ValueError):
30 raise Http404("Identifiant manquant ou conversion en entier impossible.")
31 content = get_object_or_404(PublishableContent, pk=content_pk)
32
33 if len(request.POST["signal_text"].strip()) == 0:
34 messages.error(request, _("La raison du signalement ne peut pas être vide."))
35 else:
36 alert = Alert(
37 author=request.user,
38 content=content,
39 scope="CONTENT",
40 text=request.POST["signal_text"],
41 pubdate=datetime.now(),
42 )
43 alert.save()
44
45 human_content_type = TYPE_CHOICES_DICT[content.type].lower()
46 messages.success(self.request, _("Ce {} a bien été signalé aux modérateurs.").format(human_content_type))
47
48 return redirect(content.get_absolute_url_online())
49
50
51 class SolveContentAlert(LoginRequiredMixin, FormView):
52 @method_decorator(transaction.atomic)
53 def dispatch(self, *args, **kwargs):
54 return super().dispatch(*args, **kwargs)
55
56 def post(self, request, *args, **kwargs):
57 if not request.user.has_perm("tutorialv2.change_contentreaction"):
58 raise PermissionDenied
59 try:
60 alert = get_object_or_404(Alert, pk=int(request.POST["alert_pk"]))
61 content = PublishableContent.objects.get(pk=alert.content.id)
62 except (KeyError, ValueError):
63 raise Http404("L'alerte n'existe pas.")
64
65 resolve_reason = ""
66 msg_title = ""
67 msg_content = ""
68 if "text" in request.POST and request.POST["text"]:
69 resolve_reason = request.POST["text"]
70 authors = alert.content.authors.values_list("username", flat=True)
71 authors = ", ".join(authors)
72 msg_title = _("Résolution d'alerte : {0}").format(content.title)
73 msg_content = render_to_string(
74 "tutorialv2/messages/resolve_alert.md",
75 {
76 "content": content,
77 "url": content.get_absolute_url_online(),
78 "name": alert.author.username,
79 "target_name": authors,
80 "modo_name": request.user.username,
81 "message": "\n".join(["> " + line for line in resolve_reason.split("\n")]),
82 "alert_text": "\n".join(["> " + line for line in alert.text.split("\n")]),
83 },
84 )
85 alert.solve(request.user, resolve_reason, msg_title, msg_content)
86
87 messages.success(self.request, _("L'alerte a bien été résolue."))
88 return redirect(content.get_absolute_url_online())
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/tutorialv2/views/alerts.py b/zds/tutorialv2/views/alerts.py
--- a/zds/tutorialv2/views/alerts.py
+++ b/zds/tutorialv2/views/alerts.py
@@ -62,6 +62,9 @@
except (KeyError, ValueError):
raise Http404("L'alerte n'existe pas.")
+ if alert.solved:
+ raise Http404("L'alerte a déjà été résolue.")
+
resolve_reason = ""
msg_title = ""
msg_content = ""
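To make the effect of the new guard concrete without standing up a full Django project, here is a self-contained sketch; `Alert` and `Http404` are stubs, and only the guard itself comes from the patch.
```python
# Stubs standing in for django.http.Http404 and zds.utils.models.Alert.
class Http404(Exception):
    pass

class Alert:
    def __init__(self, solved):
        self.solved = solved

def resolve(alert):
    # Mirrors the patched view: an already-solved alert is rejected up front,
    # so execution never reaches content.get_absolute_url_online() on content
    # that has lost its public version.
    if alert.solved:
        raise Http404("L'alerte a déjà été résolue.")
    return "alert resolved"

assert resolve(Alert(solved=False)) == "alert resolved"
try:
    resolve(Alert(solved=True))
except Http404 as exc:
    print(exc)  # L'alerte a déjà été résolue.
```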
| {"golden_diff": "diff --git a/zds/tutorialv2/views/alerts.py b/zds/tutorialv2/views/alerts.py\n--- a/zds/tutorialv2/views/alerts.py\n+++ b/zds/tutorialv2/views/alerts.py\n@@ -62,6 +62,9 @@\n except (KeyError, ValueError):\n raise Http404(\"L'alerte n'existe pas.\")\n \n+ if alert.solved:\n+ raise Http404(\"L'alerte a d\u00e9j\u00e0 \u00e9t\u00e9 r\u00e9solue.\")\n+\n resolve_reason = \"\"\n msg_title = \"\"\n msg_content = \"\"\n", "issue": "Possible erreur 500 \u00e0 la r\u00e9solution d'une alerte sur un contenu qui n'est plus public\nRapport\u00e9 par Sentry. J'ai eu du mal \u00e0 comprendre comment le bug a pu se produire, mais j'ai r\u00e9ussi \u00e0 le reproduire (d'une fa\u00e7on peut-\u00eatre un peu tir\u00e9e par les cheveux...).\r\n\r\n**Comment reproduire ?**\r\n\r\n1. Se connecter en tant que `user1`\r\n2. Signaler un billet\r\n3. Se connecter en tant que `staff`\r\n4. Ouvrir la page du billet signal\u00e9 dans deux onglets diff\u00e9rents\r\n5. Sur un des onglets, d\u00e9publier le billet\r\n6. Sur l'autre onglet, r\u00e9soudre l'alerte (ne pas recharger la page juste avant, le billet n'est en fait plus publi\u00e9, c'est l\u00e0 qu'est l'astuce)\r\n\r\nUne erreur 500 va alors appara\u00eetre. Elle provient d'ici : https://github.com/zestedesavoir/zds-site/blob/c06671c4901a95c30f31067c09d5e4526fd86575/zds/tutorialv2/views/alerts.py#L88\r\nLe contenu n'a plus de version publique, donc plus d'URL publique, et `content.get_absolute_url_online()` renvoie alors `''`.\r\n\r\nLa correction de ce bug passe sans doute par la v\u00e9rification si l'alerte est d\u00e9j\u00e0 r\u00e9solue ou si le contenu signal\u00e9 a bien une version publique : si l'une de ces conditions n'est pas remplie, une erreur 404 devrait \u00eatre lev\u00e9e.\r\n\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db import transaction\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.loader import render_to_string\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import FormView\n\nfrom zds.tutorialv2.models import TYPE_CHOICES_DICT\nfrom zds.tutorialv2.models.database import PublishableContent\nfrom zds.utils.models import Alert\n\n\nclass SendContentAlert(LoginRequiredMixin, FormView):\n http_method_names = [\"post\"]\n\n @method_decorator(transaction.atomic)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n try:\n content_pk = int(self.kwargs[\"pk\"])\n except (KeyError, ValueError):\n raise Http404(\"Identifiant manquant ou conversion en entier impossible.\")\n content = get_object_or_404(PublishableContent, pk=content_pk)\n\n if len(request.POST[\"signal_text\"].strip()) == 0:\n messages.error(request, _(\"La raison du signalement ne peut pas \u00eatre vide.\"))\n else:\n alert = Alert(\n author=request.user,\n content=content,\n scope=\"CONTENT\",\n text=request.POST[\"signal_text\"],\n pubdate=datetime.now(),\n )\n alert.save()\n\n human_content_type = TYPE_CHOICES_DICT[content.type].lower()\n messages.success(self.request, _(\"Ce {} a bien \u00e9t\u00e9 signal\u00e9 aux mod\u00e9rateurs.\").format(human_content_type))\n\n return redirect(content.get_absolute_url_online())\n\n\nclass 
SolveContentAlert(LoginRequiredMixin, FormView):\n @method_decorator(transaction.atomic)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n if not request.user.has_perm(\"tutorialv2.change_contentreaction\"):\n raise PermissionDenied\n try:\n alert = get_object_or_404(Alert, pk=int(request.POST[\"alert_pk\"]))\n content = PublishableContent.objects.get(pk=alert.content.id)\n except (KeyError, ValueError):\n raise Http404(\"L'alerte n'existe pas.\")\n\n resolve_reason = \"\"\n msg_title = \"\"\n msg_content = \"\"\n if \"text\" in request.POST and request.POST[\"text\"]:\n resolve_reason = request.POST[\"text\"]\n authors = alert.content.authors.values_list(\"username\", flat=True)\n authors = \", \".join(authors)\n msg_title = _(\"R\u00e9solution d'alerte : {0}\").format(content.title)\n msg_content = render_to_string(\n \"tutorialv2/messages/resolve_alert.md\",\n {\n \"content\": content,\n \"url\": content.get_absolute_url_online(),\n \"name\": alert.author.username,\n \"target_name\": authors,\n \"modo_name\": request.user.username,\n \"message\": \"\\n\".join([\"> \" + line for line in resolve_reason.split(\"\\n\")]),\n \"alert_text\": \"\\n\".join([\"> \" + line for line in alert.text.split(\"\\n\")]),\n },\n )\n alert.solve(request.user, resolve_reason, msg_title, msg_content)\n\n messages.success(self.request, _(\"L'alerte a bien \u00e9t\u00e9 r\u00e9solue.\"))\n return redirect(content.get_absolute_url_online())\n", "path": "zds/tutorialv2/views/alerts.py"}], "after_files": [{"content": "from datetime import datetime\n\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db import transaction\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.loader import render_to_string\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import FormView\n\nfrom zds.tutorialv2.models import TYPE_CHOICES_DICT\nfrom zds.tutorialv2.models.database import PublishableContent\nfrom zds.utils.models import Alert\n\n\nclass SendContentAlert(LoginRequiredMixin, FormView):\n http_method_names = [\"post\"]\n\n @method_decorator(transaction.atomic)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n try:\n content_pk = int(self.kwargs[\"pk\"])\n except (KeyError, ValueError):\n raise Http404(\"Identifiant manquant ou conversion en entier impossible.\")\n content = get_object_or_404(PublishableContent, pk=content_pk)\n\n if len(request.POST[\"signal_text\"].strip()) == 0:\n messages.error(request, _(\"La raison du signalement ne peut pas \u00eatre vide.\"))\n else:\n alert = Alert(\n author=request.user,\n content=content,\n scope=\"CONTENT\",\n text=request.POST[\"signal_text\"],\n pubdate=datetime.now(),\n )\n alert.save()\n\n human_content_type = TYPE_CHOICES_DICT[content.type].lower()\n messages.success(self.request, _(\"Ce {} a bien \u00e9t\u00e9 signal\u00e9 aux mod\u00e9rateurs.\").format(human_content_type))\n\n return redirect(content.get_absolute_url_online())\n\n\nclass SolveContentAlert(LoginRequiredMixin, FormView):\n @method_decorator(transaction.atomic)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n if not 
request.user.has_perm(\"tutorialv2.change_contentreaction\"):\n raise PermissionDenied\n try:\n alert = get_object_or_404(Alert, pk=int(request.POST[\"alert_pk\"]))\n content = PublishableContent.objects.get(pk=alert.content.id)\n except (KeyError, ValueError):\n raise Http404(\"L'alerte n'existe pas.\")\n\n if alert.solved:\n raise Http404(\"L'alerte a d\u00e9j\u00e0 \u00e9t\u00e9 r\u00e9solue.\")\n\n resolve_reason = \"\"\n msg_title = \"\"\n msg_content = \"\"\n if \"text\" in request.POST and request.POST[\"text\"]:\n resolve_reason = request.POST[\"text\"]\n authors = alert.content.authors.values_list(\"username\", flat=True)\n authors = \", \".join(authors)\n msg_title = _(\"R\u00e9solution d'alerte : {0}\").format(content.title)\n msg_content = render_to_string(\n \"tutorialv2/messages/resolve_alert.md\",\n {\n \"content\": content,\n \"url\": content.get_absolute_url_online(),\n \"name\": alert.author.username,\n \"target_name\": authors,\n \"modo_name\": request.user.username,\n \"message\": \"\\n\".join([\"> \" + line for line in resolve_reason.split(\"\\n\")]),\n \"alert_text\": \"\\n\".join([\"> \" + line for line in alert.text.split(\"\\n\")]),\n },\n )\n alert.solve(request.user, resolve_reason, msg_title, msg_content)\n\n messages.success(self.request, _(\"L'alerte a bien \u00e9t\u00e9 r\u00e9solue.\"))\n return redirect(content.get_absolute_url_online())\n", "path": "zds/tutorialv2/views/alerts.py"}]} | 1,562 | 127 |
gh_patches_debug_1631 | rasdani/github-patches | git_diff | vyperlang__vyper-3340 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: compiler dislikes `x not in [a, b]` in 0.3.8, whereas it was fine in 0.3.7
### Version Information
* vyper Version (output of `vyper --version`): 0.3.8
* OS: osx
* Python Version (output of `python --version`): 3.10.4
### What's your issue about?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vyper/semantics/environment.py`
Content:
```
1 from typing import Dict
2
3 from vyper.semantics.analysis.base import VarInfo
4 from vyper.semantics.types import AddressT, BytesT, VyperType
5 from vyper.semantics.types.shortcuts import BYTES32_T, UINT256_T
6
7
8 # common properties for environment variables
9 class _EnvType(VyperType):
10 def __eq__(self, other):
11 return self is other
12
13 def __hash__(self):
14 return hash(id(self))
15
16
17 class _Block(_EnvType):
18 _id = "block"
19 _type_members = {
20 "coinbase": AddressT(),
21 "difficulty": UINT256_T,
22 "prevrandao": UINT256_T,
23 "number": UINT256_T,
24 "gaslimit": UINT256_T,
25 "basefee": UINT256_T,
26 "prevhash": BYTES32_T,
27 "timestamp": UINT256_T,
28 }
29
30
31 class _Chain(_EnvType):
32 _id = "chain"
33 _type_members = {"id": UINT256_T}
34
35
36 class _Msg(_EnvType):
37 _id = "msg"
38 _type_members = {"data": BytesT(), "gas": UINT256_T, "sender": AddressT(), "value": UINT256_T}
39
40
41 class _Tx(_EnvType):
42 _id = "tx"
43 _type_members = {"origin": AddressT(), "gasprice": UINT256_T}
44
45
46 CONSTANT_ENVIRONMENT_VARS = {t._id: t for t in (_Block(), _Chain(), _Tx(), _Msg())}
47
48
49 def get_constant_vars() -> Dict:
50 """
51 Get a dictionary of constant environment variables.
52 """
53 result = {}
54 for k, v in CONSTANT_ENVIRONMENT_VARS.items():
55 result[k] = VarInfo(v, is_constant=True)
56
57 return result
58
59
60 # Not sure this is necessary, but add an ad-hoc type for `self` for clarity
61 class _SelfT(AddressT):
62 pass
63
64
65 MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {"self": _SelfT}
66
67
68 def get_mutable_vars() -> Dict:
69 """
70 Get a dictionary of mutable environment variables (those that are
71 modified during the course of contract execution, such as `self`).
72 """
73 return {name: VarInfo(type_()) for name, type_ in MUTABLE_ENVIRONMENT_VARS.items()}
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vyper/semantics/environment.py b/vyper/semantics/environment.py
--- a/vyper/semantics/environment.py
+++ b/vyper/semantics/environment.py
@@ -57,12 +57,7 @@
return result
-# Not sure this is necessary, but add an ad-hoc type for `self` for clarity
-class _SelfT(AddressT):
- pass
-
-
-MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {"self": _SelfT}
+MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {"self": AddressT}
def get_mutable_vars() -> Dict:
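Since the screenshot did not survive extraction, here is a hypothetical minimal reproducer, assuming the failing comparison involved `self` inside an address list; `compile_code` is vyper's public entry point, but this exact contract is an illustration rather than a test from the repository.
```python
from vyper import compile_code

source = """
@external
def is_not_me(a: address, b: address) -> bool:
    return self not in [a, b]
"""

# Before the patch, `self` carried the ad-hoc _SelfT type and the membership
# check against plain addresses was rejected; with `self` typed as AddressT,
# this should compile cleanly.
output = compile_code(source)
print(sorted(output))  # e.g. ['bytecode']
```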
| {"golden_diff": "diff --git a/vyper/semantics/environment.py b/vyper/semantics/environment.py\n--- a/vyper/semantics/environment.py\n+++ b/vyper/semantics/environment.py\n@@ -57,12 +57,7 @@\n return result\n \n \n-# Not sure this is necessary, but add an ad-hoc type for `self` for clarity\n-class _SelfT(AddressT):\n- pass\n-\n-\n-MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {\"self\": _SelfT}\n+MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {\"self\": AddressT}\n \n \n def get_mutable_vars() -> Dict:\n", "issue": "Bug: compiler dislikes `x not in [a, b]` in 0.3.8, whereas it was fine in 0.3.7\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): 0.3.8\r\n* OS: osx\r\n* Python Version (output of `python --version`): 3.10.4\r\n\r\n### What's your issue about?\r\n\r\n<img width=\"705\" alt=\"image\" src=\"https://user-images.githubusercontent.com/11488427/230437774-c3b68030-9319-4169-b344-dbb470002102.png\">\r\n\r\n\n", "before_files": [{"content": "from typing import Dict\n\nfrom vyper.semantics.analysis.base import VarInfo\nfrom vyper.semantics.types import AddressT, BytesT, VyperType\nfrom vyper.semantics.types.shortcuts import BYTES32_T, UINT256_T\n\n\n# common properties for environment variables\nclass _EnvType(VyperType):\n def __eq__(self, other):\n return self is other\n\n def __hash__(self):\n return hash(id(self))\n\n\nclass _Block(_EnvType):\n _id = \"block\"\n _type_members = {\n \"coinbase\": AddressT(),\n \"difficulty\": UINT256_T,\n \"prevrandao\": UINT256_T,\n \"number\": UINT256_T,\n \"gaslimit\": UINT256_T,\n \"basefee\": UINT256_T,\n \"prevhash\": BYTES32_T,\n \"timestamp\": UINT256_T,\n }\n\n\nclass _Chain(_EnvType):\n _id = \"chain\"\n _type_members = {\"id\": UINT256_T}\n\n\nclass _Msg(_EnvType):\n _id = \"msg\"\n _type_members = {\"data\": BytesT(), \"gas\": UINT256_T, \"sender\": AddressT(), \"value\": UINT256_T}\n\n\nclass _Tx(_EnvType):\n _id = \"tx\"\n _type_members = {\"origin\": AddressT(), \"gasprice\": UINT256_T}\n\n\nCONSTANT_ENVIRONMENT_VARS = {t._id: t for t in (_Block(), _Chain(), _Tx(), _Msg())}\n\n\ndef get_constant_vars() -> Dict:\n \"\"\"\n Get a dictionary of constant environment variables.\n \"\"\"\n result = {}\n for k, v in CONSTANT_ENVIRONMENT_VARS.items():\n result[k] = VarInfo(v, is_constant=True)\n\n return result\n\n\n# Not sure this is necessary, but add an ad-hoc type for `self` for clarity\nclass _SelfT(AddressT):\n pass\n\n\nMUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {\"self\": _SelfT}\n\n\ndef get_mutable_vars() -> Dict:\n \"\"\"\n Get a dictionary of mutable environment variables (those that are\n modified during the course of contract execution, such as `self`).\n \"\"\"\n return {name: VarInfo(type_()) for name, type_ in MUTABLE_ENVIRONMENT_VARS.items()}\n", "path": "vyper/semantics/environment.py"}], "after_files": [{"content": "from typing import Dict\n\nfrom vyper.semantics.analysis.base import VarInfo\nfrom vyper.semantics.types import AddressT, BytesT, VyperType\nfrom vyper.semantics.types.shortcuts import BYTES32_T, UINT256_T\n\n\n# common properties for environment variables\nclass _EnvType(VyperType):\n def __eq__(self, other):\n return self is other\n\n def __hash__(self):\n return hash(id(self))\n\n\nclass _Block(_EnvType):\n _id = \"block\"\n _type_members = {\n \"coinbase\": AddressT(),\n \"difficulty\": UINT256_T,\n \"prevrandao\": UINT256_T,\n \"number\": UINT256_T,\n \"gaslimit\": UINT256_T,\n \"basefee\": UINT256_T,\n \"prevhash\": BYTES32_T,\n \"timestamp\": UINT256_T,\n }\n\n\nclass 
_Chain(_EnvType):\n _id = \"chain\"\n _type_members = {\"id\": UINT256_T}\n\n\nclass _Msg(_EnvType):\n _id = \"msg\"\n _type_members = {\"data\": BytesT(), \"gas\": UINT256_T, \"sender\": AddressT(), \"value\": UINT256_T}\n\n\nclass _Tx(_EnvType):\n _id = \"tx\"\n _type_members = {\"origin\": AddressT(), \"gasprice\": UINT256_T}\n\n\nCONSTANT_ENVIRONMENT_VARS = {t._id: t for t in (_Block(), _Chain(), _Tx(), _Msg())}\n\n\ndef get_constant_vars() -> Dict:\n \"\"\"\n Get a dictionary of constant environment variables.\n \"\"\"\n result = {}\n for k, v in CONSTANT_ENVIRONMENT_VARS.items():\n result[k] = VarInfo(v, is_constant=True)\n\n return result\n\n\nMUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {\"self\": AddressT}\n\n\ndef get_mutable_vars() -> Dict:\n \"\"\"\n Get a dictionary of mutable environment variables (those that are\n modified during the course of contract execution, such as `self`).\n \"\"\"\n return {name: VarInfo(type_()) for name, type_ in MUTABLE_ENVIRONMENT_VARS.items()}\n", "path": "vyper/semantics/environment.py"}]} | 1,099 | 143 |
gh_patches_debug_12382 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-1748 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Checkpoint adding "version_" at the start of the logger name
**To reproduce :**
```python
logger = pl.loggers.TensorBoardLogger(
    save_dir='.',
    version='my_name',
    name='lightning_logs',
)
trainer = pl.Trainer(logger=logger, log_gpu_memory='all', max_epochs=10)
```
**Resulting in:**
- /lightning_logs/my_name: where the logs are saved
- /lightning_logs/version_my_name: where the checkpoints are saved
**Possible Explanation:**
It seems like the checkpoint saving adds "version_" to the start of the name even if the name has been given as a parameter:
https://github.com/PyTorchLightning/pytorch-lightning/blob/3e8f2d99a9951bfb5fc67a98614128317913be1d/pytorch_lightning/trainer/callback_config.py#L52-L57
Meanwhile, in the TensorBoard logger, when the name is provided there is no "version_" prefix:
https://github.com/PyTorchLightning/pytorch-lightning/blob/8b82ce09039e75f3fcb77a987c964249e38def3b/pytorch_lightning/loggers/tensorboard.py#L81
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/trainer/callback_config.py`
Content:
```
1 import os
2 from abc import ABC, abstractmethod
3 from typing import Union, List
4
5
6 from pytorch_lightning.callbacks import Callback, ModelCheckpoint, EarlyStopping, ProgressBarBase, ProgressBar
7 from pytorch_lightning.loggers import LightningLoggerBase
8 from pytorch_lightning.utilities.exceptions import MisconfigurationException
9
10
11 class TrainerCallbackConfigMixin(ABC):
12
13 # this is just a summary on variables used in this abstract class,
14 # the proper values/initialisation should be done in child class
15 callbacks: List[Callback]
16 default_root_dir: str
17 logger: Union[LightningLoggerBase, bool]
18 weights_save_path: str
19 ckpt_path: str
20 checkpoint_callback: ModelCheckpoint
21 progress_bar_refresh_rate: int
22 process_position: int
23
24 @property
25 @abstractmethod
26 def slurm_job_id(self) -> int:
27 """Warning: this is just empty shell for code implemented in other class."""
28
29 @abstractmethod
30 def save_checkpoint(self, *args):
31 """Warning: this is just empty shell for code implemented in other class."""
32
33 def configure_checkpoint_callback(self):
34 """
35 Weight path set in this priority:
36 Checkpoint_callback's path (if passed in).
37 User provided weights_saved_path
38 Otherwise use os.getcwd()
39 """
40 ckpt_path = self.default_root_dir
41 if self.checkpoint_callback:
42 # init a default one
43 if self.logger is not None:
44 save_dir = (getattr(self.logger, 'save_dir', None) or
45 getattr(self.logger, '_save_dir', None) or
46 self.default_root_dir)
47
48 # weights_save_path overrides anything
49 if self.weights_save_path is not None:
50 save_dir = self.weights_save_path
51
52 ckpt_path = os.path.join(
53 save_dir,
54 self.logger.name,
55 f'version_{self.logger.version}',
56 "checkpoints"
57 )
58 else:
59 ckpt_path = os.path.join(self.default_root_dir, "checkpoints")
60
61 # when no val step is defined, use 'loss' otherwise 'val_loss'
62 train_step_only = not self.is_overriden('validation_step')
63 monitor_key = 'loss' if train_step_only else 'val_loss'
64
65 if self.checkpoint_callback is True:
66 os.makedirs(ckpt_path, exist_ok=True)
67 self.checkpoint_callback = ModelCheckpoint(
68 filepath=ckpt_path,
69 monitor=monitor_key
70 )
71 # If user specified None in filepath, override with runtime default
72 elif isinstance(self.checkpoint_callback, ModelCheckpoint) \
73 and self.checkpoint_callback.dirpath is None:
74 self.checkpoint_callback.dirpath = ckpt_path
75 self.checkpoint_callback.filename = '{epoch}'
76 os.makedirs(self.checkpoint_callback.dirpath, exist_ok=True)
77 elif self.checkpoint_callback is False:
78 self.checkpoint_callback = None
79
80 self.ckpt_path = ckpt_path
81
82 if self.checkpoint_callback:
83 # set the path for the callbacks
84 self.checkpoint_callback.save_function = self.save_checkpoint
85
86 # if checkpoint callback used, then override the weights path
87 self.weights_save_path = self.checkpoint_callback.dirpath
88
89 # if weights_save_path is still none here, set to current working dir
90 if self.weights_save_path is None:
91 self.weights_save_path = self.default_root_dir
92
93 def configure_early_stopping(self, early_stop_callback):
94 if early_stop_callback is True or None:
95 self.early_stop_callback = EarlyStopping(
96 monitor='val_loss',
97 patience=3,
98 strict=True,
99 verbose=True,
100 mode='min'
101 )
102 self.enable_early_stop = True
103 elif not early_stop_callback:
104 self.early_stop_callback = None
105 self.enable_early_stop = False
106 else:
107 self.early_stop_callback = early_stop_callback
108 self.enable_early_stop = True
109
110 def configure_progress_bar(self):
111 progress_bars = [c for c in self.callbacks if isinstance(c, ProgressBarBase)]
112 if len(progress_bars) > 1:
113 raise MisconfigurationException(
114 'You added multiple progress bar callbacks to the Trainer, but currently only one'
115 ' progress bar is supported.'
116 )
117 elif len(progress_bars) == 1:
118 self.progress_bar_callback = progress_bars[0]
119 elif self.progress_bar_refresh_rate > 0:
120 self.progress_bar_callback = ProgressBar(
121 refresh_rate=self.progress_bar_refresh_rate,
122 process_position=self.process_position,
123 )
124 self.callbacks.append(self.progress_bar_callback)
125 else:
126 self.progress_bar_callback = None
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytorch_lightning/trainer/callback_config.py b/pytorch_lightning/trainer/callback_config.py
--- a/pytorch_lightning/trainer/callback_config.py
+++ b/pytorch_lightning/trainer/callback_config.py
@@ -49,10 +49,12 @@
if self.weights_save_path is not None:
save_dir = self.weights_save_path
+ version = self.logger.version if isinstance(
+ self.logger.version, str) else f'version_{self.logger.version}'
ckpt_path = os.path.join(
save_dir,
self.logger.name,
- f'version_{self.logger.version}',
+ version,
"checkpoints"
)
else:
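A compact sketch of the path logic after this change; the helper mirrors the patched branch instead of calling a real `Trainer`, and the directory values echo the issue's example.
```python
import os

def checkpoint_dir(save_dir, name, version):
    # String versions (e.g. TensorBoardLogger(version='my_name')) are used
    # verbatim; integer versions keep the historical 'version_<n>' prefix.
    version = version if isinstance(version, str) else f'version_{version}'
    return os.path.join(save_dir, name, version, "checkpoints")

# POSIX-style separators assumed in the expected strings below.
assert checkpoint_dir('.', 'lightning_logs', 'my_name') == './lightning_logs/my_name/checkpoints'
assert checkpoint_dir('.', 'lightning_logs', 3) == './lightning_logs/version_3/checkpoints'
```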
| {"golden_diff": "diff --git a/pytorch_lightning/trainer/callback_config.py b/pytorch_lightning/trainer/callback_config.py\n--- a/pytorch_lightning/trainer/callback_config.py\n+++ b/pytorch_lightning/trainer/callback_config.py\n@@ -49,10 +49,12 @@\n if self.weights_save_path is not None:\n save_dir = self.weights_save_path\n \n+ version = self.logger.version if isinstance(\n+ self.logger.version, str) else f'version_{self.logger.version}'\n ckpt_path = os.path.join(\n save_dir,\n self.logger.name,\n- f'version_{self.logger.version}',\n+ version,\n \"checkpoints\"\n )\n else:\n", "issue": "Checkpoint adding \"version_\" at the start of the logger name\n**To reproduce :** \r\n```python\r\nlogger = pl.loggers.TensorBoardLogger(\r\n save_dir='.',\r\n version='my_name'\r\n name='lightning_logs'\r\n )\r\n\r\ntrainer = pl.Trainer(logger=logger, log_gpu_memory='all', max_epochs=10)\r\n```\r\n\r\n**Giving as a result:**\r\n\r\n- /lightning_logs/my_name: Where is saved the logs\r\n- /lightning_logs/version_my_name : Where is saved the checkpoints\r\n\r\n\r\n\r\n\r\n**Possible Explanation:** \r\nIt seems like the checkpoint saving add \"version_\" to the start of the name even if the name have been given as a parameter : \r\n\r\nhttps://github.com/PyTorchLightning/pytorch-lightning/blob/3e8f2d99a9951bfb5fc67a98614128317913be1d/pytorch_lightning/trainer/callback_config.py#L52-L57\r\n\r\nEven if in the Tensorboard Logger if the name is provided there is no \"version_\" prefix :\r\n\r\nhttps://github.com/PyTorchLightning/pytorch-lightning/blob/8b82ce09039e75f3fcb77a987c964249e38def3b/pytorch_lightning/loggers/tensorboard.py#L81\r\n\r\n\r\n\n", "before_files": [{"content": "import os\nfrom abc import ABC, abstractmethod\nfrom typing import Union, List\n\n\nfrom pytorch_lightning.callbacks import Callback, ModelCheckpoint, EarlyStopping, ProgressBarBase, ProgressBar\nfrom pytorch_lightning.loggers import LightningLoggerBase\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\n\nclass TrainerCallbackConfigMixin(ABC):\n\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n callbacks: List[Callback]\n default_root_dir: str\n logger: Union[LightningLoggerBase, bool]\n weights_save_path: str\n ckpt_path: str\n checkpoint_callback: ModelCheckpoint\n progress_bar_refresh_rate: int\n process_position: int\n\n @property\n @abstractmethod\n def slurm_job_id(self) -> int:\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def save_checkpoint(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n def configure_checkpoint_callback(self):\n \"\"\"\n Weight path set in this priority:\n Checkpoint_callback's path (if passed in).\n User provided weights_saved_path\n Otherwise use os.getcwd()\n \"\"\"\n ckpt_path = self.default_root_dir\n if self.checkpoint_callback:\n # init a default one\n if self.logger is not None:\n save_dir = (getattr(self.logger, 'save_dir', None) or\n getattr(self.logger, '_save_dir', None) or\n self.default_root_dir)\n\n # weights_save_path overrides anything\n if self.weights_save_path is not None:\n save_dir = self.weights_save_path\n\n ckpt_path = os.path.join(\n save_dir,\n self.logger.name,\n f'version_{self.logger.version}',\n \"checkpoints\"\n )\n else:\n ckpt_path = os.path.join(self.default_root_dir, \"checkpoints\")\n\n # when no val step is defined, use 'loss' otherwise 
'val_loss'\n train_step_only = not self.is_overriden('validation_step')\n monitor_key = 'loss' if train_step_only else 'val_loss'\n\n if self.checkpoint_callback is True:\n os.makedirs(ckpt_path, exist_ok=True)\n self.checkpoint_callback = ModelCheckpoint(\n filepath=ckpt_path,\n monitor=monitor_key\n )\n # If user specified None in filepath, override with runtime default\n elif isinstance(self.checkpoint_callback, ModelCheckpoint) \\\n and self.checkpoint_callback.dirpath is None:\n self.checkpoint_callback.dirpath = ckpt_path\n self.checkpoint_callback.filename = '{epoch}'\n os.makedirs(self.checkpoint_callback.dirpath, exist_ok=True)\n elif self.checkpoint_callback is False:\n self.checkpoint_callback = None\n\n self.ckpt_path = ckpt_path\n\n if self.checkpoint_callback:\n # set the path for the callbacks\n self.checkpoint_callback.save_function = self.save_checkpoint\n\n # if checkpoint callback used, then override the weights path\n self.weights_save_path = self.checkpoint_callback.dirpath\n\n # if weights_save_path is still none here, set to current working dir\n if self.weights_save_path is None:\n self.weights_save_path = self.default_root_dir\n\n def configure_early_stopping(self, early_stop_callback):\n if early_stop_callback is True or None:\n self.early_stop_callback = EarlyStopping(\n monitor='val_loss',\n patience=3,\n strict=True,\n verbose=True,\n mode='min'\n )\n self.enable_early_stop = True\n elif not early_stop_callback:\n self.early_stop_callback = None\n self.enable_early_stop = False\n else:\n self.early_stop_callback = early_stop_callback\n self.enable_early_stop = True\n\n def configure_progress_bar(self):\n progress_bars = [c for c in self.callbacks if isinstance(c, ProgressBarBase)]\n if len(progress_bars) > 1:\n raise MisconfigurationException(\n 'You added multiple progress bar callbacks to the Trainer, but currently only one'\n ' progress bar is supported.'\n )\n elif len(progress_bars) == 1:\n self.progress_bar_callback = progress_bars[0]\n elif self.progress_bar_refresh_rate > 0:\n self.progress_bar_callback = ProgressBar(\n refresh_rate=self.progress_bar_refresh_rate,\n process_position=self.process_position,\n )\n self.callbacks.append(self.progress_bar_callback)\n else:\n self.progress_bar_callback = None\n", "path": "pytorch_lightning/trainer/callback_config.py"}], "after_files": [{"content": "import os\nfrom abc import ABC, abstractmethod\nfrom typing import Union, List\n\n\nfrom pytorch_lightning.callbacks import Callback, ModelCheckpoint, EarlyStopping, ProgressBarBase, ProgressBar\nfrom pytorch_lightning.loggers import LightningLoggerBase\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\n\nclass TrainerCallbackConfigMixin(ABC):\n\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n callbacks: List[Callback]\n default_root_dir: str\n logger: Union[LightningLoggerBase, bool]\n weights_save_path: str\n ckpt_path: str\n checkpoint_callback: ModelCheckpoint\n progress_bar_refresh_rate: int\n process_position: int\n\n @property\n @abstractmethod\n def slurm_job_id(self) -> int:\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def save_checkpoint(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n def configure_checkpoint_callback(self):\n \"\"\"\n Weight path set in this priority:\n Checkpoint_callback's path (if passed in).\n User 
provided weights_saved_path\n Otherwise use os.getcwd()\n \"\"\"\n ckpt_path = self.default_root_dir\n if self.checkpoint_callback:\n # init a default one\n if self.logger is not None:\n save_dir = (getattr(self.logger, 'save_dir', None) or\n getattr(self.logger, '_save_dir', None) or\n self.default_root_dir)\n\n # weights_save_path overrides anything\n if self.weights_save_path is not None:\n save_dir = self.weights_save_path\n\n version = self.logger.version if isinstance(\n self.logger.version, str) else f'version_{self.logger.version}'\n ckpt_path = os.path.join(\n save_dir,\n self.logger.name,\n version,\n \"checkpoints\"\n )\n else:\n ckpt_path = os.path.join(self.default_root_dir, \"checkpoints\")\n\n # when no val step is defined, use 'loss' otherwise 'val_loss'\n train_step_only = not self.is_overriden('validation_step')\n monitor_key = 'loss' if train_step_only else 'val_loss'\n\n if self.checkpoint_callback is True:\n os.makedirs(ckpt_path, exist_ok=True)\n self.checkpoint_callback = ModelCheckpoint(\n filepath=ckpt_path,\n monitor=monitor_key\n )\n # If user specified None in filepath, override with runtime default\n elif isinstance(self.checkpoint_callback, ModelCheckpoint) \\\n and self.checkpoint_callback.dirpath is None:\n self.checkpoint_callback.dirpath = ckpt_path\n self.checkpoint_callback.filename = '{epoch}'\n os.makedirs(self.checkpoint_callback.dirpath, exist_ok=True)\n elif self.checkpoint_callback is False:\n self.checkpoint_callback = None\n\n self.ckpt_path = ckpt_path\n\n if self.checkpoint_callback:\n # set the path for the callbacks\n self.checkpoint_callback.save_function = self.save_checkpoint\n\n # if checkpoint callback used, then override the weights path\n self.weights_save_path = self.checkpoint_callback.dirpath\n\n # if weights_save_path is still none here, set to current working dir\n if self.weights_save_path is None:\n self.weights_save_path = self.default_root_dir\n\n def configure_early_stopping(self, early_stop_callback):\n if early_stop_callback is True or None:\n self.early_stop_callback = EarlyStopping(\n monitor='val_loss',\n patience=3,\n strict=True,\n verbose=True,\n mode='min'\n )\n self.enable_early_stop = True\n elif not early_stop_callback:\n self.early_stop_callback = None\n self.enable_early_stop = False\n else:\n self.early_stop_callback = early_stop_callback\n self.enable_early_stop = True\n\n def configure_progress_bar(self):\n progress_bars = [c for c in self.callbacks if isinstance(c, ProgressBarBase)]\n if len(progress_bars) > 1:\n raise MisconfigurationException(\n 'You added multiple progress bar callbacks to the Trainer, but currently only one'\n ' progress bar is supported.'\n )\n elif len(progress_bars) == 1:\n self.progress_bar_callback = progress_bars[0]\n elif self.progress_bar_refresh_rate > 0:\n self.progress_bar_callback = ProgressBar(\n refresh_rate=self.progress_bar_refresh_rate,\n process_position=self.process_position,\n )\n self.callbacks.append(self.progress_bar_callback)\n else:\n self.progress_bar_callback = None\n", "path": "pytorch_lightning/trainer/callback_config.py"}]} | 1,827 | 152 |
gh_patches_debug_20094 | rasdani/github-patches | git_diff | Flexget__Flexget-2495 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unhandled error in plugin exists: 'PosixPath' object has no attribute 'walk'
After daemon startup, tasks are run without errors
<!---
Please don't just say "it doesn't crash" or "it works". Explain what the expected result is.
--->
### Actual behaviour:
Tasks are aborted
### Steps to reproduce:
- Step 1: Configure exists plugin
- Step 2: Run flexget
#### Config:
AFAIR, any config using exists plugin
#### Log:
<details>
<summary>(click to expand)</summary>
```
2019-11-13 20:32 CRITICAL task tv_rarbg BUG: Unhandled error in plugin exists: 'PosixPath' object has no attribute 'walk'
Traceback (most recent call last):
File "/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/task.py", line 520, in __run_plugin
result = method(*args, **kwargs)
File "/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/event.py", line 20, in __call__
return self.func(*args, **kwargs)
File "/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/plugins/filter/exists.py", line 41, in on_task_filter
for p in folder.walk(errors='ignore'):
AttributeError: 'PosixPath' object has no attribute 'walk'
```
</details>
### Additional information:
- FlexGet version: 3.0.1
- Python version: 3.7.2
- Installation method: pip
- Using daemon (yes/no): yes
- OS and version: debian unstable
- Link to crash log: N/A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/plugins/filter/exists.py`
Content:
```
1 import logging
2 import platform
3 from pathlib import Path
4
5 from flexget import plugin
6 from flexget.config_schema import one_or_more
7 from flexget.event import event
8
9 log = logging.getLogger('exists')
10
11
12 class FilterExists:
13 """
14 Reject entries that already exist in given path.
15
16 Example::
17
18 exists: /storage/movies/
19 """
20
21 schema = one_or_more({'type': 'string', 'format': 'path'})
22
23 def prepare_config(self, config):
24 # If only a single path is passed turn it into a 1 element list
25 if isinstance(config, str):
26 config = [config]
27 return config
28
29 @plugin.priority(-1)
30 def on_task_filter(self, task, config):
31 if not task.accepted:
32 log.debug('No accepted entries, not scanning for existing.')
33 return
34 log.verbose('Scanning path(s) for existing files.')
35 config = self.prepare_config(config)
36 filenames = {}
37 for folder in config:
38 folder = Path(folder).expanduser()
39 if not folder.exists():
40 raise plugin.PluginWarning('Path %s does not exist' % folder, log)
41 for p in folder.walk(errors='ignore'):
42 key = p.name
43 # windows file system is not case sensitive
44 if platform.system() == 'Windows':
45 key = key.lower()
46 filenames[key] = p
47 for entry in task.accepted:
48 # priority is: filename, location (filename only), title
49 name = Path(entry.get('filename', entry.get('location', entry['title']))).name
50 if platform.system() == 'Windows':
51 name = name.lower()
52 if name in filenames:
53 log.debug('Found %s in %s' % (name, filenames[name]))
54 entry.reject('exists in %s' % filenames[name])
55
56
57 @event('plugin.register')
58 def register_plugin():
59 plugin.register(FilterExists, 'exists', api_ver=2)
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flexget/plugins/filter/exists.py b/flexget/plugins/filter/exists.py
--- a/flexget/plugins/filter/exists.py
+++ b/flexget/plugins/filter/exists.py
@@ -38,12 +38,13 @@
folder = Path(folder).expanduser()
if not folder.exists():
raise plugin.PluginWarning('Path %s does not exist' % folder, log)
- for p in folder.walk(errors='ignore'):
- key = p.name
- # windows file system is not case sensitive
- if platform.system() == 'Windows':
- key = key.lower()
- filenames[key] = p
+ for p in folder.rglob('*'):
+ if p.is_file():
+ key = p.name
+ # windows file system is not case sensitive
+ if platform.system() == 'Windows':
+ key = key.lower()
+ filenames[key] = p
for entry in task.accepted:
# priority is: filename, location (filename only), title
name = Path(entry.get('filename', entry.get('location', entry['title']))).name
| {"golden_diff": "diff --git a/flexget/plugins/filter/exists.py b/flexget/plugins/filter/exists.py\n--- a/flexget/plugins/filter/exists.py\n+++ b/flexget/plugins/filter/exists.py\n@@ -38,12 +38,13 @@\n folder = Path(folder).expanduser()\n if not folder.exists():\n raise plugin.PluginWarning('Path %s does not exist' % folder, log)\n- for p in folder.walk(errors='ignore'):\n- key = p.name\n- # windows file system is not case sensitive\n- if platform.system() == 'Windows':\n- key = key.lower()\n- filenames[key] = p\n+ for p in folder.rglob('*'):\n+ if p.is_file():\n+ key = p.name\n+ # windows file system is not case sensitive\n+ if platform.system() == 'Windows':\n+ key = key.lower()\n+ filenames[key] = p\n for entry in task.accepted:\n # priority is: filename, location (filename only), title\n name = Path(entry.get('filename', entry.get('location', entry['title']))).name\n", "issue": "Unhandled error in plugin exists: 'PosixPath' object has no attribute 'walk'\n<!---\r\nBefore opening an issue, verify:\r\n\r\n- Is this a feature request? Post it on https://feathub.com/Flexget/Flexget\r\n- Did you recently upgrade? Look at the Change Log and Upgrade Actions to make sure that you don't need to make any changes to your config https://flexget.com/ChangeLog https://flexget.com/UpgradeActions\r\n- Are you running FlexGet as a daemon? Stop it completely and then start it again https://flexget.com/CLI/daemon\r\n- Did you search to see if the issue already exists? https://github.com/Flexget/Flexget/issues\r\n- Did you fill out the issue template as completely as possible?\r\n\r\nThe issue template is here because it helps to ensure you submitted all the necessary information the first time, and allows us to more quickly review issues. Please fill it out correctly and do not ignore it, no matter how irrelevant you think it may be. Thanks in advance for your help with this!\r\n--->\r\n\r\n### Expected behaviour:\r\nAfter daemon startup, tasks are run without errors\r\n<!---\r\nPlease don't just say \"it doesn't crash\" or \"it works\". 
Explain what the expected result is.\r\n--->\r\n\r\n### Actual behaviour:\r\nTasks are aborted\r\n### Steps to reproduce:\r\n- Step 1: Configure exists plugin\r\n- Step 2: Run flexget\r\n\r\n#### Config:\r\nAFAIR, any config using exists plugin\r\n \r\n#### Log:\r\n<details>\r\n <summary>(click to expand)</summary>\r\n\r\n```\r\n2019-11-13 20:32 CRITICAL task tv_rarbg BUG: Unhandled error in plugin exists: 'PosixPath' object has no attribute 'walk'\r\nTraceback (most recent call last):\r\n File \"/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/task.py\", line 520, in __run_plugin\r\n result = method(*args, **kwargs)\r\n File \"/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/event.py\", line 20, in __call__\r\n return self.func(*args, **kwargs)\r\n File \"/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/plugins/filter/exists.py\", line 41, in on_task_filter\r\n for p in folder.walk(errors='ignore'):\r\nAttributeError: 'PosixPath' object has no attribute 'walk'\r\n\r\n```\r\n</details>\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 3.0.1\r\n- Python version: 3.7.2\r\n- Installation method: pip\r\n- Using daemon (yes/no): yes\r\n- OS and version: debian unstable\r\n- Link to crash log: N/A\r\n\r\n<!---\r\nIn config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on.\r\n\r\nPlease verify that the following data is present before submitting your issue:\r\n\r\n- Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible.\r\n- Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`).\r\n- FlexGet version (use `flexget -V` to get it).\r\n- Full Python version, for example `2.7.11` (use `python -V` to get it). \r\n- Installation method (pip, git install, etc).\r\n- Whether or not you're running FlexGet as a daemon.\r\n- OS and version.\r\n- Attach crash log if one was generated, in addition to the debug-level log. 
It can be found in the directory with your config file.\r\n--->\r\n\n", "before_files": [{"content": "import logging\nimport platform\nfrom pathlib import Path\n\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.event import event\n\nlog = logging.getLogger('exists')\n\n\nclass FilterExists:\n \"\"\"\n Reject entries that already exist in given path.\n\n Example::\n\n exists: /storage/movies/\n \"\"\"\n\n schema = one_or_more({'type': 'string', 'format': 'path'})\n\n def prepare_config(self, config):\n # If only a single path is passed turn it into a 1 element list\n if isinstance(config, str):\n config = [config]\n return config\n\n @plugin.priority(-1)\n def on_task_filter(self, task, config):\n if not task.accepted:\n log.debug('No accepted entries, not scanning for existing.')\n return\n log.verbose('Scanning path(s) for existing files.')\n config = self.prepare_config(config)\n filenames = {}\n for folder in config:\n folder = Path(folder).expanduser()\n if not folder.exists():\n raise plugin.PluginWarning('Path %s does not exist' % folder, log)\n for p in folder.walk(errors='ignore'):\n key = p.name\n # windows file system is not case sensitive\n if platform.system() == 'Windows':\n key = key.lower()\n filenames[key] = p\n for entry in task.accepted:\n # priority is: filename, location (filename only), title\n name = Path(entry.get('filename', entry.get('location', entry['title']))).name\n if platform.system() == 'Windows':\n name = name.lower()\n if name in filenames:\n log.debug('Found %s in %s' % (name, filenames[name]))\n entry.reject('exists in %s' % filenames[name])\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(FilterExists, 'exists', api_ver=2)\n", "path": "flexget/plugins/filter/exists.py"}], "after_files": [{"content": "import logging\nimport platform\nfrom pathlib import Path\n\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.event import event\n\nlog = logging.getLogger('exists')\n\n\nclass FilterExists:\n \"\"\"\n Reject entries that already exist in given path.\n\n Example::\n\n exists: /storage/movies/\n \"\"\"\n\n schema = one_or_more({'type': 'string', 'format': 'path'})\n\n def prepare_config(self, config):\n # If only a single path is passed turn it into a 1 element list\n if isinstance(config, str):\n config = [config]\n return config\n\n @plugin.priority(-1)\n def on_task_filter(self, task, config):\n if not task.accepted:\n log.debug('No accepted entries, not scanning for existing.')\n return\n log.verbose('Scanning path(s) for existing files.')\n config = self.prepare_config(config)\n filenames = {}\n for folder in config:\n folder = Path(folder).expanduser()\n if not folder.exists():\n raise plugin.PluginWarning('Path %s does not exist' % folder, log)\n for p in folder.rglob('*'):\n if p.is_file():\n key = p.name\n # windows file system is not case sensitive\n if platform.system() == 'Windows':\n key = key.lower()\n filenames[key] = p\n for entry in task.accepted:\n # priority is: filename, location (filename only), title\n name = Path(entry.get('filename', entry.get('location', entry['title']))).name\n if platform.system() == 'Windows':\n name = name.lower()\n if name in filenames:\n log.debug('Found %s in %s' % (name, filenames[name]))\n entry.reject('exists in %s' % filenames[name])\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(FilterExists, 'exists', api_ver=2)\n", "path": "flexget/plugins/filter/exists.py"}]} | 1,607 | 248 |
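A short aside on the pathlib substitution used in the FlexGet patch above. `pathlib.Path` had no `walk()` method on the Python versions FlexGet 3.0 targeted (`Path.walk()` only landed in Python 3.12), so the fix swaps in `rglob('*')` plus an `is_file()` filter. The sketch below is a minimal standalone version of that pattern; the function name and target directory are illustrative, not FlexGet code, and the error-swallowing of the old `walk(errors='ignore')` is deliberately omitted, matching the patch:

```python
import platform
from pathlib import Path


def index_existing_files(folder: str) -> dict:
    """Recursively map file names to paths, mirroring the patched plugin."""
    filenames = {}
    root = Path(folder).expanduser()
    for p in root.rglob("*"):  # pathlib stand-in for path.py's walk()
        if p.is_file():  # rglob also yields directories, so filter them out
            key = p.name
            if platform.system() == "Windows":  # Windows names are case-insensitive
                key = key.lower()
            filenames[key] = p
    return filenames


if __name__ == "__main__":
    # Illustrative target; point this at any directory you can read.
    print(len(index_existing_files("~")), "files indexed")
```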
gh_patches_debug_33506 | rasdani/github-patches | git_diff | optuna__optuna-1285 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`experimental` decorator breaks class documentation.
The `experimental` decorator used on classes breaks documentation. This could be one manifestation, but there is an issue with how the documentation, including type hints, is propagated to the decorated class. This does not apply to free functions.
See https://github.com/optuna/optuna/pull/1265#issuecomment-633195955 for how it may break.
## Expected behavior
Class documentation should not be altered by applying the experimental decorator.
## Steps to reproduce
1. Apply the experimental decorator to a class.
2. Build the documentation (`cd docs && make html`).
3. Open the rendered documentation and note that the class signature is broken.
## Additional context (optional)
- A related issue regarding indentation: https://github.com/optuna/optuna/issues/1213.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optuna/_experimental.py`
Content:
```
1 import functools
2 import inspect
3 from typing import Any
4 from typing import Callable
5 import warnings
6
7 from optuna.exceptions import ExperimentalWarning
8
9
10 # White spaces of each line are necessary to beautifully rendered documentation.
11 # NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require
12 # another four spaces.
13 _EXPERIMENTAL_DOCSTRING_TEMPLATE = """
14
15 .. note::
16 Added in v{ver} as an experimental feature. The interface may change in newer versions
17 without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}.
18 """
19
20
21 def _make_func_spec_str(func: Callable[..., Any]) -> str:
22
23 name = func.__name__
24 argspec = inspect.getfullargspec(func)
25
26 n_defaults = len(argspec.defaults) if argspec.defaults is not None else 0
27 offset = int(len(argspec.args) > 0 and argspec.args[0] == "self")
28
29 if n_defaults > 0:
30 args = ", ".join(argspec.args[offset:-n_defaults])
31 with_default_values = ", ".join(
32 [
33 "{}={}".format(a, d)
34 for a, d in zip(argspec.args[-n_defaults:], argspec.defaults) # type: ignore
35 ]
36 )
37 else:
38 args = ", ".join(argspec.args[offset:])
39 with_default_values = ""
40
41 if len(args) > 0 and len(with_default_values) > 0:
42 args += ", "
43
44 # NOTE(crcrpar): The four spaces are necessary to correctly render documentation.
45 # Different classes or methods require more spaces.
46 str_args_description = "(" + args + with_default_values + ")\n\n "
47 return name + str_args_description
48
49
50 def _validate_version(version: str) -> None:
51
52 if not isinstance(version, str) or len(version.split(".")) != 3:
53 raise ValueError(
54 "Invalid version specification. Must follow `x.y.z` format but `{}` is given".format(
55 version
56 )
57 )
58
59
60 def experimental(version: str, name: str = None) -> Any:
61 """Decorate class or function as experimental.
62
63 Args:
64 version: The first version that supports the target feature.
65 name: The name of the feature. Defaults to the function or class name. Optional.
66 """
67
68 _validate_version(version)
69
70 def _experimental_wrapper(f: Any) -> Any:
71 # f is either func or class.
72
73 def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]:
74
75 docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)
76 if func.__doc__ is None:
77 func.__doc__ = ""
78 func.__doc__ += docstring
79
80 # TODO(crcrpar): Annotate this correctly.
81 @functools.wraps(func)
82 def new_func(*args: Any, **kwargs: Any) -> Any:
83 """Wrapped function."""
84
85 warnings.warn(
86 "{} is experimental (supported from v{}). "
87 "The interface can change in the future.".format(
88 name if name is not None else func.__name__, version
89 ),
90 ExperimentalWarning,
91 )
92
93 return func(*args, **kwargs) # type: ignore
94
95 return new_func
96
97 def _experimental_class(cls: Any) -> Any:
98 """Decorates a class as experimental.
99
100 This decorator is supposed to be applied to the experimental class.
101 """
102
103 _original_init = cls.__init__
104
105 def wrapped_init(self, *args, **kwargs) -> None: # type: ignore
106 warnings.warn(
107 "{} is experimental (supported from v{}). "
108 "The interface can change in the future.".format(
109 name if name is not None else cls.__name__, version
110 ),
111 ExperimentalWarning,
112 )
113
114 _original_init(self, *args, **kwargs)
115
116 cls.__init__ = wrapped_init
117
118 if cls.__doc__ is None:
119 cls.__doc__ = ""
120 cls.__doc__ = (
121 _make_func_spec_str(_original_init)
122 + cls.__doc__
123 + _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)
124 )
125 return cls
126
127 return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)
128
129 return _experimental_wrapper
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/optuna/_experimental.py b/optuna/_experimental.py
--- a/optuna/_experimental.py
+++ b/optuna/_experimental.py
@@ -18,35 +18,6 @@
"""
-def _make_func_spec_str(func: Callable[..., Any]) -> str:
-
- name = func.__name__
- argspec = inspect.getfullargspec(func)
-
- n_defaults = len(argspec.defaults) if argspec.defaults is not None else 0
- offset = int(len(argspec.args) > 0 and argspec.args[0] == "self")
-
- if n_defaults > 0:
- args = ", ".join(argspec.args[offset:-n_defaults])
- with_default_values = ", ".join(
- [
- "{}={}".format(a, d)
- for a, d in zip(argspec.args[-n_defaults:], argspec.defaults) # type: ignore
- ]
- )
- else:
- args = ", ".join(argspec.args[offset:])
- with_default_values = ""
-
- if len(args) > 0 and len(with_default_values) > 0:
- args += ", "
-
- # NOTE(crcrpar): The four spaces are necessary to correctly render documentation.
- # Different classes or methods require more spaces.
- str_args_description = "(" + args + with_default_values + ")\n\n "
- return name + str_args_description
-
-
def _validate_version(version: str) -> None:
if not isinstance(version, str) or len(version.split(".")) != 3:
@@ -102,6 +73,7 @@
_original_init = cls.__init__
+ @functools.wraps(_original_init)
def wrapped_init(self, *args, **kwargs) -> None: # type: ignore
warnings.warn(
"{} is experimental (supported from v{}). "
@@ -117,11 +89,7 @@
if cls.__doc__ is None:
cls.__doc__ = ""
- cls.__doc__ = (
- _make_func_spec_str(_original_init)
- + cls.__doc__
- + _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)
- )
+ cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)
return cls
return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)
| {"golden_diff": "diff --git a/optuna/_experimental.py b/optuna/_experimental.py\n--- a/optuna/_experimental.py\n+++ b/optuna/_experimental.py\n@@ -18,35 +18,6 @@\n \"\"\"\n \n \n-def _make_func_spec_str(func: Callable[..., Any]) -> str:\n-\n- name = func.__name__\n- argspec = inspect.getfullargspec(func)\n-\n- n_defaults = len(argspec.defaults) if argspec.defaults is not None else 0\n- offset = int(len(argspec.args) > 0 and argspec.args[0] == \"self\")\n-\n- if n_defaults > 0:\n- args = \", \".join(argspec.args[offset:-n_defaults])\n- with_default_values = \", \".join(\n- [\n- \"{}={}\".format(a, d)\n- for a, d in zip(argspec.args[-n_defaults:], argspec.defaults) # type: ignore\n- ]\n- )\n- else:\n- args = \", \".join(argspec.args[offset:])\n- with_default_values = \"\"\n-\n- if len(args) > 0 and len(with_default_values) > 0:\n- args += \", \"\n-\n- # NOTE(crcrpar): The four spaces are necessary to correctly render documentation.\n- # Different classes or methods require more spaces.\n- str_args_description = \"(\" + args + with_default_values + \")\\n\\n \"\n- return name + str_args_description\n-\n-\n def _validate_version(version: str) -> None:\n \n if not isinstance(version, str) or len(version.split(\".\")) != 3:\n@@ -102,6 +73,7 @@\n \n _original_init = cls.__init__\n \n+ @functools.wraps(_original_init)\n def wrapped_init(self, *args, **kwargs) -> None: # type: ignore\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n@@ -117,11 +89,7 @@\n \n if cls.__doc__ is None:\n cls.__doc__ = \"\"\n- cls.__doc__ = (\n- _make_func_spec_str(_original_init)\n- + cls.__doc__\n- + _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n- )\n+ cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n return cls\n \n return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)\n", "issue": "`experimental` decorator breaks class documentation.\nThe `experimental` decorator used on classes break documentation. This could be one manifestation but there is an issue with how the documentation including type hints are propagated to the decorated class. This does not apply for free functions.\r\n\r\nSee https://github.com/optuna/optuna/pull/1265#issuecomment-633195955 for how it may break.\r\n\r\n## Expected behavior\r\n\r\nClass documentation should not be altered by applying the experimental decorator.\r\n\r\n## Steps to reproduce\r\n\r\n1. Apply the experimental decorator to a class.\r\n1. Build the document (`cd docs && make html`) \r\n1. Open the rendered documentation and note that the class signatures is broken.\r\n\r\n## Additional context (optional)\r\n\r\n- An issue regarding the indentation https://github.com/optuna/optuna/issues/1213.\r\n\n", "before_files": [{"content": "import functools\nimport inspect\nfrom typing import Any\nfrom typing import Callable\nimport warnings\n\nfrom optuna.exceptions import ExperimentalWarning\n\n\n# White spaces of each line are necessary to beautifully rendered documentation.\n# NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require\n# another four spaces.\n_EXPERIMENTAL_DOCSTRING_TEMPLATE = \"\"\"\n\n .. note::\n Added in v{ver} as an experimental feature. The interface may change in newer versions\n without prior notice. 
See https://github.com/optuna/optuna/releases/tag/v{ver}.\n\"\"\"\n\n\ndef _make_func_spec_str(func: Callable[..., Any]) -> str:\n\n name = func.__name__\n argspec = inspect.getfullargspec(func)\n\n n_defaults = len(argspec.defaults) if argspec.defaults is not None else 0\n offset = int(len(argspec.args) > 0 and argspec.args[0] == \"self\")\n\n if n_defaults > 0:\n args = \", \".join(argspec.args[offset:-n_defaults])\n with_default_values = \", \".join(\n [\n \"{}={}\".format(a, d)\n for a, d in zip(argspec.args[-n_defaults:], argspec.defaults) # type: ignore\n ]\n )\n else:\n args = \", \".join(argspec.args[offset:])\n with_default_values = \"\"\n\n if len(args) > 0 and len(with_default_values) > 0:\n args += \", \"\n\n # NOTE(crcrpar): The four spaces are necessary to correctly render documentation.\n # Different classes or methods require more spaces.\n str_args_description = \"(\" + args + with_default_values + \")\\n\\n \"\n return name + str_args_description\n\n\ndef _validate_version(version: str) -> None:\n\n if not isinstance(version, str) or len(version.split(\".\")) != 3:\n raise ValueError(\n \"Invalid version specification. Must follow `x.y.z` format but `{}` is given\".format(\n version\n )\n )\n\n\ndef experimental(version: str, name: str = None) -> Any:\n \"\"\"Decorate class or function as experimental.\n\n Args:\n version: The first version that supports the target feature.\n name: The name of the feature. Defaults to the function or class name. Optional.\n \"\"\"\n\n _validate_version(version)\n\n def _experimental_wrapper(f: Any) -> Any:\n # f is either func or class.\n\n def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]:\n\n docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n if func.__doc__ is None:\n func.__doc__ = \"\"\n func.__doc__ += docstring\n\n # TODO(crcrpar): Annotate this correctly.\n @functools.wraps(func)\n def new_func(*args: Any, **kwargs: Any) -> Any:\n \"\"\"Wrapped function.\"\"\"\n\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n name if name is not None else func.__name__, version\n ),\n ExperimentalWarning,\n )\n\n return func(*args, **kwargs) # type: ignore\n\n return new_func\n\n def _experimental_class(cls: Any) -> Any:\n \"\"\"Decorates a class as experimental.\n\n This decorator is supposed to be applied to the experimental class.\n \"\"\"\n\n _original_init = cls.__init__\n\n def wrapped_init(self, *args, **kwargs) -> None: # type: ignore\n warnings.warn(\n \"{} is experimental (supported from v{}). 
\"\n \"The interface can change in the future.\".format(\n name if name is not None else cls.__name__, version\n ),\n ExperimentalWarning,\n )\n\n _original_init(self, *args, **kwargs)\n\n cls.__init__ = wrapped_init\n\n if cls.__doc__ is None:\n cls.__doc__ = \"\"\n cls.__doc__ = (\n _make_func_spec_str(_original_init)\n + cls.__doc__\n + _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n )\n return cls\n\n return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)\n\n return _experimental_wrapper\n", "path": "optuna/_experimental.py"}], "after_files": [{"content": "import functools\nimport inspect\nfrom typing import Any\nfrom typing import Callable\nimport warnings\n\nfrom optuna.exceptions import ExperimentalWarning\n\n\n# White spaces of each line are necessary to beautifully rendered documentation.\n# NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require\n# another four spaces.\n_EXPERIMENTAL_DOCSTRING_TEMPLATE = \"\"\"\n\n .. note::\n Added in v{ver} as an experimental feature. The interface may change in newer versions\n without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}.\n\"\"\"\n\n\ndef _validate_version(version: str) -> None:\n\n if not isinstance(version, str) or len(version.split(\".\")) != 3:\n raise ValueError(\n \"Invalid version specification. Must follow `x.y.z` format but `{}` is given\".format(\n version\n )\n )\n\n\ndef experimental(version: str, name: str = None) -> Any:\n \"\"\"Decorate class or function as experimental.\n\n Args:\n version: The first version that supports the target feature.\n name: The name of the feature. Defaults to the function or class name. Optional.\n \"\"\"\n\n _validate_version(version)\n\n def _experimental_wrapper(f: Any) -> Any:\n # f is either func or class.\n\n def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]:\n\n docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n if func.__doc__ is None:\n func.__doc__ = \"\"\n func.__doc__ += docstring\n\n # TODO(crcrpar): Annotate this correctly.\n @functools.wraps(func)\n def new_func(*args: Any, **kwargs: Any) -> Any:\n \"\"\"Wrapped function.\"\"\"\n\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n name if name is not None else func.__name__, version\n ),\n ExperimentalWarning,\n )\n\n return func(*args, **kwargs) # type: ignore\n\n return new_func\n\n def _experimental_class(cls: Any) -> Any:\n \"\"\"Decorates a class as experimental.\n\n This decorator is supposed to be applied to the experimental class.\n \"\"\"\n\n _original_init = cls.__init__\n\n @functools.wraps(_original_init)\n def wrapped_init(self, *args, **kwargs) -> None: # type: ignore\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n name if name is not None else cls.__name__, version\n ),\n ExperimentalWarning,\n )\n\n _original_init(self, *args, **kwargs)\n\n cls.__init__ = wrapped_init\n\n if cls.__doc__ is None:\n cls.__doc__ = \"\"\n cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n return cls\n\n return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)\n\n return _experimental_wrapper\n", "path": "optuna/_experimental.py"}]} | 1,662 | 540 |
gh_patches_debug_25202 | rasdani/github-patches | git_diff | opsdroid__opsdroid-12 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Connectors should fork
When a connector is started, it should fork into its own process, because connectors block to accept messages from their source.
This requires #5 to enable persistent memory between connector processes.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/core.py`
Content:
```
1 """Core components of OpsDroid."""
2
3 import logging
4 import sys
5 import weakref
6 from opsdroid.helper import match
7 from opsdroid.memory import Memory
8
9
10 class OpsDroid():
11 """Root object for opsdroid."""
12
13 instances = []
14
15 def __init__(self):
16 """Start opsdroid."""
17 self.bot_name = 'opsdroid'
18 self.sys_status = 0
19 self.connectors = []
20 self.skills = []
21 self.memory = Memory()
22 logging.info("Created main opsdroid object")
23
24 def __enter__(self):
25 """Add self to existing instances."""
26 if len(self.__class__.instances) == 0:
27 self.__class__.instances.append(weakref.proxy(self))
28 else:
29 self.critical("opsdroid has already been started", 1)
30 return self
31
32 def __exit__(self, exc_type, exc_value, traceback):
33 """Remove self from existing instances."""
34 self.__class__.instances = []
35
36 def exit(self):
37 """Exit application."""
38 logging.info("Exiting application with return code " +
39 str(self.sys_status))
40 sys.exit(self.sys_status)
41
42 def critical(self, error, code):
43 """Exit due to unrecoverable error."""
44 self.sys_status = code
45 logging.critical(error)
46 print("Error: " + error)
47 self.exit()
48
49 def start_connectors(self, connectors):
50 """Start the connectors."""
51 if len(connectors) == 0:
52 self.critical("All connectors failed to load", 1)
53 for connector_module in connectors:
54 for name, cls in connector_module["module"].__dict__.items():
55 if isinstance(cls, type) and "Connector" in name:
56 connector_module["config"]["bot-name"] = self.bot_name
57 connector = cls(connector_module["config"])
58 self.connectors.append(connector)
59 connector.connect(self)
60
61 def start_databases(self, databases):
62 """Start the databases."""
63 if len(databases) == 0:
64 logging.warning("All databases failed to load")
65 for database_module in databases:
66 for name, cls in database_module["module"].__dict__.items():
67 if isinstance(cls, type) and "Database" in name:
68 logging.debug("Adding database: " + name)
69 database = cls(database_module["config"])
70 self.memory.databases.append(database)
71 database.connect()
72
73 def load_regex_skill(self, regex, skill):
74 """Load skills."""
75 self.skills.append({"regex": regex, "skill": skill})
76
77 def parse(self, message):
78 """Parse a string against all skills."""
79 if message.text.strip() != "":
80 logging.debug("Parsing input: " + message.text)
81 for skill in self.skills:
82 if "regex" in skill:
83 regex = match(skill["regex"], message.text)
84 if regex:
85 message.regex = regex
86 skill["skill"](self, message)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/core.py b/opsdroid/core.py
--- a/opsdroid/core.py
+++ b/opsdroid/core.py
@@ -3,6 +3,7 @@
import logging
import sys
import weakref
+from multiprocessing import Process
from opsdroid.helper import match
from opsdroid.memory import Memory
@@ -17,6 +18,7 @@
self.bot_name = 'opsdroid'
self.sys_status = 0
self.connectors = []
+ self.connector_jobs = []
self.skills = []
self.memory = Memory()
logging.info("Created main opsdroid object")
@@ -56,7 +58,11 @@
connector_module["config"]["bot-name"] = self.bot_name
connector = cls(connector_module["config"])
self.connectors.append(connector)
- connector.connect(self)
+ job = Process(target=connector.connect, args=(self,))
+ job.start()
+ self.connector_jobs.append(job)
+ for job in self.connector_jobs:
+ job.join()
def start_databases(self, databases):
"""Start the databases."""
| {"golden_diff": "diff --git a/opsdroid/core.py b/opsdroid/core.py\n--- a/opsdroid/core.py\n+++ b/opsdroid/core.py\n@@ -3,6 +3,7 @@\n import logging\n import sys\n import weakref\n+from multiprocessing import Process\n from opsdroid.helper import match\n from opsdroid.memory import Memory\n \n@@ -17,6 +18,7 @@\n self.bot_name = 'opsdroid'\n self.sys_status = 0\n self.connectors = []\n+ self.connector_jobs = []\n self.skills = []\n self.memory = Memory()\n logging.info(\"Created main opsdroid object\")\n@@ -56,7 +58,11 @@\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n- connector.connect(self)\n+ job = Process(target=connector.connect, args=(self,))\n+ job.start()\n+ self.connector_jobs.append(job)\n+ for job in self.connector_jobs:\n+ job.join()\n \n def start_databases(self, databases):\n \"\"\"Start the databases.\"\"\"\n", "issue": "Connectors should fork\nWhen a connector is started it should fork into its own process. This is because connectors block to accept messages from their source.\n\nThis requires #5 to enable persistent memory between connector processes.\n\n", "before_files": [{"content": "\"\"\"Core components of OpsDroid.\"\"\"\n\nimport logging\nimport sys\nimport weakref\nfrom opsdroid.helper import match\nfrom opsdroid.memory import Memory\n\n\nclass OpsDroid():\n \"\"\"Root object for opsdroid.\"\"\"\n\n instances = []\n\n def __init__(self):\n \"\"\"Start opsdroid.\"\"\"\n self.bot_name = 'opsdroid'\n self.sys_status = 0\n self.connectors = []\n self.skills = []\n self.memory = Memory()\n logging.info(\"Created main opsdroid object\")\n\n def __enter__(self):\n \"\"\"Add self to existing instances.\"\"\"\n if len(self.__class__.instances) == 0:\n self.__class__.instances.append(weakref.proxy(self))\n else:\n self.critical(\"opsdroid has already been started\", 1)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Remove self from existing instances.\"\"\"\n self.__class__.instances = []\n\n def exit(self):\n \"\"\"Exit application.\"\"\"\n logging.info(\"Exiting application with return code \" +\n str(self.sys_status))\n sys.exit(self.sys_status)\n\n def critical(self, error, code):\n \"\"\"Exit due to unrecoverable error.\"\"\"\n self.sys_status = code\n logging.critical(error)\n print(\"Error: \" + error)\n self.exit()\n\n def start_connectors(self, connectors):\n \"\"\"Start the connectors.\"\"\"\n if len(connectors) == 0:\n self.critical(\"All connectors failed to load\", 1)\n for connector_module in connectors:\n for name, cls in connector_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Connector\" in name:\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n connector.connect(self)\n\n def start_databases(self, databases):\n \"\"\"Start the databases.\"\"\"\n if len(databases) == 0:\n logging.warning(\"All databases failed to load\")\n for database_module in databases:\n for name, cls in database_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Database\" in name:\n logging.debug(\"Adding database: \" + name)\n database = cls(database_module[\"config\"])\n self.memory.databases.append(database)\n database.connect()\n\n def load_regex_skill(self, regex, skill):\n \"\"\"Load skills.\"\"\"\n self.skills.append({\"regex\": regex, \"skill\": skill})\n\n def parse(self, message):\n \"\"\"Parse a string against 
all skills.\"\"\"\n if message.text.strip() != \"\":\n logging.debug(\"Parsing input: \" + message.text)\n for skill in self.skills:\n if \"regex\" in skill:\n regex = match(skill[\"regex\"], message.text)\n if regex:\n message.regex = regex\n skill[\"skill\"](self, message)\n", "path": "opsdroid/core.py"}], "after_files": [{"content": "\"\"\"Core components of OpsDroid.\"\"\"\n\nimport logging\nimport sys\nimport weakref\nfrom multiprocessing import Process\nfrom opsdroid.helper import match\nfrom opsdroid.memory import Memory\n\n\nclass OpsDroid():\n \"\"\"Root object for opsdroid.\"\"\"\n\n instances = []\n\n def __init__(self):\n \"\"\"Start opsdroid.\"\"\"\n self.bot_name = 'opsdroid'\n self.sys_status = 0\n self.connectors = []\n self.connector_jobs = []\n self.skills = []\n self.memory = Memory()\n logging.info(\"Created main opsdroid object\")\n\n def __enter__(self):\n \"\"\"Add self to existing instances.\"\"\"\n if len(self.__class__.instances) == 0:\n self.__class__.instances.append(weakref.proxy(self))\n else:\n self.critical(\"opsdroid has already been started\", 1)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Remove self from existing instances.\"\"\"\n self.__class__.instances = []\n\n def exit(self):\n \"\"\"Exit application.\"\"\"\n logging.info(\"Exiting application with return code \" +\n str(self.sys_status))\n sys.exit(self.sys_status)\n\n def critical(self, error, code):\n \"\"\"Exit due to unrecoverable error.\"\"\"\n self.sys_status = code\n logging.critical(error)\n print(\"Error: \" + error)\n self.exit()\n\n def start_connectors(self, connectors):\n \"\"\"Start the connectors.\"\"\"\n if len(connectors) == 0:\n self.critical(\"All connectors failed to load\", 1)\n for connector_module in connectors:\n for name, cls in connector_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Connector\" in name:\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n job = Process(target=connector.connect, args=(self,))\n job.start()\n self.connector_jobs.append(job)\n for job in self.connector_jobs:\n job.join()\n\n def start_databases(self, databases):\n \"\"\"Start the databases.\"\"\"\n if len(databases) == 0:\n logging.warning(\"All databases failed to load\")\n for database_module in databases:\n for name, cls in database_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Database\" in name:\n logging.debug(\"Adding database: \" + name)\n database = cls(database_module[\"config\"])\n self.memory.databases.append(database)\n database.connect()\n\n def load_regex_skill(self, regex, skill):\n \"\"\"Load skills.\"\"\"\n self.skills.append({\"regex\": regex, \"skill\": skill})\n\n def parse(self, message):\n \"\"\"Parse a string against all skills.\"\"\"\n if message.text.strip() != \"\":\n logging.debug(\"Parsing input: \" + message.text)\n for skill in self.skills:\n if \"regex\" in skill:\n regex = match(skill[\"regex\"], message.text)\n if regex:\n message.regex = regex\n skill[\"skill\"](self, message)\n", "path": "opsdroid/core.py"}]} | 1,093 | 251 |
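The opsdroid patch above is the standard start-then-join pattern from `multiprocessing`: each blocking connector loop runs in its own process, and the parent joins them all so it does not exit while connectors are alive. A toy version follows; the connector names and the sleep loop are placeholders for a real blocking message source. As the issue notes, forked processes share no state, which is why a shared-memory layer (issue #5 there) is a prerequisite:

```python
import time
from multiprocessing import Process


def connect(name: str) -> None:
    """Stand-in for a connector's blocking connect() loop."""
    for _ in range(3):
        print(f"{name}: waiting for messages")
        time.sleep(0.5)


if __name__ == "__main__":
    jobs = []
    for name in ("shell", "slack"):  # hypothetical connector names
        job = Process(target=connect, args=(name,))
        job.start()  # the blocking loop now runs in its own process
        jobs.append(job)
    for job in jobs:
        job.join()  # keep the parent alive until every connector exits
    print("all connectors finished")
```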
gh_patches_debug_31638 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5766 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Azure Function App Slots - Ensure web app redirects all HTTP traffic to HTTPS in Azure Function App Slots
**Describe the issue**
It seems that there are no checks that ensure that the following resources only allow HTTPS:
- azurerm_function_app_slot
- azurerm_linux_function_app_slot
- azurerm_windows_function_app_slot
**Examples**
````hcl
resource "azurerm_function_app_slot" "example" {
name = "test-azure-functions_slot"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
app_service_plan_id = azurerm_app_service_plan.example.id
function_app_name = azurerm_function_app.example.name
storage_account_name = azurerm_storage_account.example.name
storage_account_access_key = azurerm_storage_account.example.primary_access_key
https_only = true
}
resource "azurerm_linux_function_app_slot" "example" {
name = "example-linux-function-app-slot"
function_app_id = azurerm_linux_function_app.example.id
storage_account_name = azurerm_storage_account.example.name
site_config {
require_https = true
}
}
resource "azurerm_windows_function_app" "example" {
name = "example-windows-function-app"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
storage_account_name = azurerm_storage_account.example.name
service_plan_id = azurerm_service_plan.example.id
site_config {
require_https = true
}
}
````
**Version (please complete the following information):**
- N/A
**Additional context**
N/A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py`
Content:
```
1 from checkov.common.models.enums import CheckCategories
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3
4
5 class FunctionAppsAccessibleOverHttps(BaseResourceValueCheck):
6 def __init__(self):
7 name = "Ensure that Function apps is only accessible over HTTPS"
8 id = "CKV_AZURE_70"
9 supported_resources = ['azurerm_function_app']
10 categories = [CheckCategories.NETWORKING]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def get_inspected_key(self):
14 return 'https_only'
15
16
17 check = FunctionAppsAccessibleOverHttps()
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py b/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py
--- a/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py
+++ b/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py
@@ -1,17 +1,44 @@
-from checkov.common.models.enums import CheckCategories
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
+from __future__ import annotations
+from typing import Any
-class FunctionAppsAccessibleOverHttps(BaseResourceValueCheck):
- def __init__(self):
+from checkov.common.models.enums import CheckCategories, CheckResult
+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
+
+
+class FunctionAppsAccessibleOverHttps(BaseResourceCheck):
+
+ def __init__(self) -> None:
name = "Ensure that Function apps is only accessible over HTTPS"
id = "CKV_AZURE_70"
- supported_resources = ['azurerm_function_app']
+ supported_resources = ['azurerm_function_app', 'azurerm_linux_function_app', 'azurerm_windows_function_app',
+ 'azurerm_function_app_slot', 'azurerm_linux_function_app_slot',
+ 'azurerm_windows_function_app_slot']
categories = [CheckCategories.NETWORKING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def get_inspected_key(self):
- return 'https_only'
+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
+ # default=false for https_only
+ if 'https_only' not in conf.keys():
+ return CheckResult.FAILED
+
+ https_only = conf.get('https_only')[0]
+ if not https_only:
+ return CheckResult.FAILED
+
+ # relevant for linux/windows resources
+ if 'auth_settings_v2' in conf.keys():
+ auth_settings_v2 = conf['auth_settings_v2'][0]
+
+ # default=true for require_https
+ if 'require_https' not in auth_settings_v2.keys():
+ return CheckResult.PASSED
+
+ require_https = auth_settings_v2.get('require_https')[0]
+ if not require_https:
+ return CheckResult.FAILED
+
+ return CheckResult.PASSED
check = FunctionAppsAccessibleOverHttps()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py b/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py\n--- a/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py\n+++ b/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py\n@@ -1,17 +1,44 @@\n-from checkov.common.models.enums import CheckCategories\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n+from __future__ import annotations\n \n+from typing import Any\n \n-class FunctionAppsAccessibleOverHttps(BaseResourceValueCheck):\n- def __init__(self):\n+from checkov.common.models.enums import CheckCategories, CheckResult\n+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n+\n+\n+class FunctionAppsAccessibleOverHttps(BaseResourceCheck):\n+\n+ def __init__(self) -> None:\n name = \"Ensure that Function apps is only accessible over HTTPS\"\n id = \"CKV_AZURE_70\"\n- supported_resources = ['azurerm_function_app']\n+ supported_resources = ['azurerm_function_app', 'azurerm_linux_function_app', 'azurerm_windows_function_app',\n+ 'azurerm_function_app_slot', 'azurerm_linux_function_app_slot',\n+ 'azurerm_windows_function_app_slot']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def get_inspected_key(self):\n- return 'https_only'\n+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n+ # default=false for https_only\n+ if 'https_only' not in conf.keys():\n+ return CheckResult.FAILED\n+\n+ https_only = conf.get('https_only')[0]\n+ if not https_only:\n+ return CheckResult.FAILED\n+\n+ # relevant for linux/windows resources\n+ if 'auth_settings_v2' in conf.keys():\n+ auth_settings_v2 = conf['auth_settings_v2'][0]\n+\n+ # default=true for require_https\n+ if 'require_https' not in auth_settings_v2.keys():\n+ return CheckResult.PASSED\n+\n+ require_https = auth_settings_v2.get('require_https')[0]\n+ if not require_https:\n+ return CheckResult.FAILED\n+\n+ return CheckResult.PASSED\n \n \n check = FunctionAppsAccessibleOverHttps()\n", "issue": "Azure Function App Slots - Ensure web app redirects all HTTP traffic to HTTPS in Azure Function App Slots\n**Describe the issue**\r\nIt seems that there are no checks that ensure that the following resource only allows HTTPS:\r\n\r\n- azurerm_function_app_slot\r\n- azurerm_linux_function_app_slot\r\n- azurerm_windows_function_app_slot\r\n\r\n**Examples**\r\n\r\n````hcl\r\nresource \"azurerm_function_app_slot\" \"example\" {\r\n name = \"test-azure-functions_slot\"\r\n location = azurerm_resource_group.example.location\r\n resource_group_name = azurerm_resource_group.example.name\r\n app_service_plan_id = azurerm_app_service_plan.example.id\r\n function_app_name = azurerm_function_app.example.name\r\n storage_account_name = azurerm_storage_account.example.name\r\n storage_account_access_key = azurerm_storage_account.example.primary_access_key\r\n https_only = true\r\n}\r\n\r\nresource \"azurerm_linux_function_app_slot\" \"example\" {\r\n name = \"example-linux-function-app-slot\"\r\n function_app_id = azurerm_linux_function_app.example.id\r\n storage_account_name = azurerm_storage_account.example.name\r\n\r\n site_config {\r\n require_https = true \r\n }\r\n}\r\n\r\nresource \"azurerm_windows_function_app\" \"example\" {\r\n name = \"example-windows-function-app\"\r\n resource_group_name = 
azurerm_resource_group.example.name\r\n location = azurerm_resource_group.example.location\r\n storage_account_name = azurerm_storage_account.example.name\r\n service_plan_id = azurerm_service_plan.example.id\r\n\r\n site_config {\r\n require_https = true \r\n }\r\n}\r\n\r\n````\r\n\r\n**Version (please complete the following information):**\r\n - N/A\r\n \r\n**Additional context**\r\n\r\nN/A\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass FunctionAppsAccessibleOverHttps(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that Function apps is only accessible over HTTPS\"\n id = \"CKV_AZURE_70\"\n supported_resources = ['azurerm_function_app']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'https_only'\n\n\ncheck = FunctionAppsAccessibleOverHttps()\n", "path": "checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass FunctionAppsAccessibleOverHttps(BaseResourceCheck):\n\n def __init__(self) -> None:\n name = \"Ensure that Function apps is only accessible over HTTPS\"\n id = \"CKV_AZURE_70\"\n supported_resources = ['azurerm_function_app', 'azurerm_linux_function_app', 'azurerm_windows_function_app',\n 'azurerm_function_app_slot', 'azurerm_linux_function_app_slot',\n 'azurerm_windows_function_app_slot']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n # default=false for https_only\n if 'https_only' not in conf.keys():\n return CheckResult.FAILED\n\n https_only = conf.get('https_only')[0]\n if not https_only:\n return CheckResult.FAILED\n\n # relevant for linux/windows resources\n if 'auth_settings_v2' in conf.keys():\n auth_settings_v2 = conf['auth_settings_v2'][0]\n\n # default=true for require_https\n if 'require_https' not in auth_settings_v2.keys():\n return CheckResult.PASSED\n\n require_https = auth_settings_v2.get('require_https')[0]\n if not require_https:\n return CheckResult.FAILED\n\n return CheckResult.PASSED\n\n\ncheck = FunctionAppsAccessibleOverHttps()\n", "path": "checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py"}]} | 815 | 555 |
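Two defaults drive the control flow of the patched checkov check above: `https_only` defaults to false, so its absence fails the check, while `require_https` under `auth_settings_v2` defaults to true, so its absence passes. The sketch below replays that decision table over the single-element-list shape checkov's parsed HCL uses; `enforces_https` is an illustrative name, not a checkov API:

```python
def enforces_https(conf: dict) -> bool:
    """Mirror the pass/fail logic of the patched check."""
    if "https_only" not in conf:  # attribute defaults to false -> fail
        return False
    if not conf["https_only"][0]:
        return False
    if "auth_settings_v2" in conf:
        auth = conf["auth_settings_v2"][0]
        if "require_https" in auth and not auth["require_https"][0]:
            return False  # explicitly disabled -> fail
    return True  # https_only set; require_https absent or true


# Parsed HCL wraps every attribute value in a one-element list:
print(enforces_https({"https_only": [True]}))                              # True
print(enforces_https({}))                                                  # False
print(enforces_https({"https_only": [True],
                      "auth_settings_v2": [{"require_https": [False]}]}))  # False
```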
gh_patches_debug_2348 | rasdani/github-patches | git_diff | cornellius-gp__gpytorch-2285 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Docs] Bernoulli likelihoods
# 📚 Documentation/Examples
In the documentation for [Bernoulli likelihoods](https://docs.gpytorch.ai/en/stable/likelihoods.html), since the labels take values in {0, 1}, the likelihood should be
$$p(Y=y | f) = \Phi((2y-1)f)$$
instead of the currently displayed
$$p(Y=y|f) = \Phi(yf).$$
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gpytorch/likelihoods/bernoulli_likelihood.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import warnings
4
5 import torch
6
7 from ..distributions import base_distributions
8 from ..functions import log_normal_cdf
9 from .likelihood import _OneDimensionalLikelihood
10
11
12 class BernoulliLikelihood(_OneDimensionalLikelihood):
13 r"""
14 Implements the Bernoulli likelihood used for GP classification, using
15 Probit regression (i.e., the latent function is warped to be in [0,1]
16 using the standard Normal CDF :math:`\Phi(x)`). Given the identity
17 :math:`\Phi(-x) = 1-\Phi(x)`, we can write the likelihood compactly as:
18
19 .. math::
20 \begin{equation*}
21 p(Y=y|f)=\Phi(yf)
22 \end{equation*}
23 """
24
25 def forward(self, function_samples, **kwargs):
26 output_probs = base_distributions.Normal(0, 1).cdf(function_samples)
27 return base_distributions.Bernoulli(probs=output_probs)
28
29 def log_marginal(self, observations, function_dist, *args, **kwargs):
30 marginal = self.marginal(function_dist, *args, **kwargs)
31 return marginal.log_prob(observations)
32
33 def marginal(self, function_dist, **kwargs):
34 mean = function_dist.mean
35 var = function_dist.variance
36 link = mean.div(torch.sqrt(1 + var))
37 output_probs = base_distributions.Normal(0, 1).cdf(link)
38 return base_distributions.Bernoulli(probs=output_probs)
39
40 def expected_log_prob(self, observations, function_dist, *params, **kwargs):
41 if torch.any(observations.eq(-1)):
42 # Remove after 1.0
43 warnings.warn(
44 "BernoulliLikelihood.expected_log_prob expects observations with labels in {0, 1}. "
45 "Observations with labels in {-1, 1} are deprecated.",
46 DeprecationWarning,
47 )
48 else:
49 observations = observations.mul(2).sub(1)
50 # Custom function here so we can use log_normal_cdf rather than Normal.cdf
51 # This is going to be less prone to overflow errors
52 log_prob_lambda = lambda function_samples: log_normal_cdf(function_samples.mul(observations))
53 log_prob = self.quadrature(log_prob_lambda, function_dist)
54 return log_prob
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gpytorch/likelihoods/bernoulli_likelihood.py b/gpytorch/likelihoods/bernoulli_likelihood.py
--- a/gpytorch/likelihoods/bernoulli_likelihood.py
+++ b/gpytorch/likelihoods/bernoulli_likelihood.py
@@ -18,8 +18,11 @@
.. math::
\begin{equation*}
- p(Y=y|f)=\Phi(yf)
+ p(Y=y|f)=\Phi((2y - 1)f)
\end{equation*}
+
+ .. note::
+ The labels should take values in {0, 1}.
"""
def forward(self, function_samples, **kwargs):
| {"golden_diff": "diff --git a/gpytorch/likelihoods/bernoulli_likelihood.py b/gpytorch/likelihoods/bernoulli_likelihood.py\n--- a/gpytorch/likelihoods/bernoulli_likelihood.py\n+++ b/gpytorch/likelihoods/bernoulli_likelihood.py\n@@ -18,8 +18,11 @@\n \n .. math::\n \\begin{equation*}\n- p(Y=y|f)=\\Phi(yf)\n+ p(Y=y|f)=\\Phi((2y - 1)f)\n \\end{equation*}\n+\n+ .. note::\n+ The labels should take values in {0, 1}.\n \"\"\"\n \n def forward(self, function_samples, **kwargs):\n", "issue": "[Docs] Bernoulli likelihoods \n# \ud83d\udcda Documentation/Examples\r\n\r\nIn the document for [Bernoulli likelihoods](https://docs.gpytorch.ai/en/stable/likelihoods.html), since the labels take value in {0, 1}, the likelihood should be \r\n\r\n$$p(Y=y | f) = \\Phi ((2y-1)f)$$\r\n\r\ninstead of the currently displayed\r\n\r\n$$p(Y=y|f) = \\Phi(yf).$$\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport warnings\n\nimport torch\n\nfrom ..distributions import base_distributions\nfrom ..functions import log_normal_cdf\nfrom .likelihood import _OneDimensionalLikelihood\n\n\nclass BernoulliLikelihood(_OneDimensionalLikelihood):\n r\"\"\"\n Implements the Bernoulli likelihood used for GP classification, using\n Probit regression (i.e., the latent function is warped to be in [0,1]\n using the standard Normal CDF :math:`\\Phi(x)`). Given the identity\n :math:`\\Phi(-x) = 1-\\Phi(x)`, we can write the likelihood compactly as:\n\n .. math::\n \\begin{equation*}\n p(Y=y|f)=\\Phi(yf)\n \\end{equation*}\n \"\"\"\n\n def forward(self, function_samples, **kwargs):\n output_probs = base_distributions.Normal(0, 1).cdf(function_samples)\n return base_distributions.Bernoulli(probs=output_probs)\n\n def log_marginal(self, observations, function_dist, *args, **kwargs):\n marginal = self.marginal(function_dist, *args, **kwargs)\n return marginal.log_prob(observations)\n\n def marginal(self, function_dist, **kwargs):\n mean = function_dist.mean\n var = function_dist.variance\n link = mean.div(torch.sqrt(1 + var))\n output_probs = base_distributions.Normal(0, 1).cdf(link)\n return base_distributions.Bernoulli(probs=output_probs)\n\n def expected_log_prob(self, observations, function_dist, *params, **kwargs):\n if torch.any(observations.eq(-1)):\n # Remove after 1.0\n warnings.warn(\n \"BernoulliLikelihood.expected_log_prob expects observations with labels in {0, 1}. \"\n \"Observations with labels in {-1, 1} are deprecated.\",\n DeprecationWarning,\n )\n else:\n observations = observations.mul(2).sub(1)\n # Custom function here so we can use log_normal_cdf rather than Normal.cdf\n # This is going to be less prone to overflow errors\n log_prob_lambda = lambda function_samples: log_normal_cdf(function_samples.mul(observations))\n log_prob = self.quadrature(log_prob_lambda, function_dist)\n return log_prob\n", "path": "gpytorch/likelihoods/bernoulli_likelihood.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport warnings\n\nimport torch\n\nfrom ..distributions import base_distributions\nfrom ..functions import log_normal_cdf\nfrom .likelihood import _OneDimensionalLikelihood\n\n\nclass BernoulliLikelihood(_OneDimensionalLikelihood):\n r\"\"\"\n Implements the Bernoulli likelihood used for GP classification, using\n Probit regression (i.e., the latent function is warped to be in [0,1]\n using the standard Normal CDF :math:`\\Phi(x)`). Given the identity\n :math:`\\Phi(-x) = 1-\\Phi(x)`, we can write the likelihood compactly as:\n\n .. 
math::\n \\begin{equation*}\n p(Y=y|f)=\\Phi((2y - 1)f)\n \\end{equation*}\n\n .. note::\n The labels should take values in {0, 1}.\n \"\"\"\n\n def forward(self, function_samples, **kwargs):\n output_probs = base_distributions.Normal(0, 1).cdf(function_samples)\n return base_distributions.Bernoulli(probs=output_probs)\n\n def log_marginal(self, observations, function_dist, *args, **kwargs):\n marginal = self.marginal(function_dist, *args, **kwargs)\n return marginal.log_prob(observations)\n\n def marginal(self, function_dist, **kwargs):\n mean = function_dist.mean\n var = function_dist.variance\n link = mean.div(torch.sqrt(1 + var))\n output_probs = base_distributions.Normal(0, 1).cdf(link)\n return base_distributions.Bernoulli(probs=output_probs)\n\n def expected_log_prob(self, observations, function_dist, *params, **kwargs):\n if torch.any(observations.eq(-1)):\n # Remove after 1.0\n warnings.warn(\n \"BernoulliLikelihood.expected_log_prob expects observations with labels in {0, 1}. \"\n \"Observations with labels in {-1, 1} are deprecated.\",\n DeprecationWarning,\n )\n else:\n observations = observations.mul(2).sub(1)\n # Custom function here so we can use log_normal_cdf rather than Normal.cdf\n # This is going to be less prone to overflow errors\n log_prob_lambda = lambda function_samples: log_normal_cdf(function_samples.mul(observations))\n log_prob = self.quadrature(log_prob_lambda, function_dist)\n return log_prob\n", "path": "gpytorch/likelihoods/bernoulli_likelihood.py"}]} | 978 | 159 |
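The corrected formula in the record above is the standard probit likelihood restated for {0, 1} labels: probit classification is conventionally written for labels in {-1, +1} as p(Y=y|f) = Φ(yf), and the affine map 2y - 1 carries {0, 1} onto {-1, +1} — the same mapping `expected_log_prob` applies through `observations.mul(2).sub(1)`. A minimal numeric sanity check of the patched formula, assuming only `scipy` (which is not part of the instance itself):

```python
from scipy.stats import norm

def bernoulli_probit(y, f):
    # p(Y=y | f) for labels y in {0, 1}; 2y - 1 maps {0, 1} onto {-1, +1}
    return norm.cdf((2 * y - 1) * f)

f = 0.7
# Phi(-x) = 1 - Phi(x) guarantees the two class probabilities sum to one,
# which is what lets the likelihood be written in this compact one-line form.
assert abs(bernoulli_probit(0, f) + bernoulli_probit(1, f) - 1.0) < 1e-12
```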
gh_patches_debug_1415 | rasdani/github-patches | git_diff | bokeh__bokeh-1434 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tools get lost on Grid Plots
JS logic error prevents all tools from showing up in the toolbar. (cf. comment in #1342)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/plotting/file/grid.py`
Content:
```
1 import numpy as np
2
3 from bokeh.plotting import *
4
5 N = 50
6
7 x = np.linspace(0, 4*np.pi, N)
8 y = np.sin(x)
9
10 TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
11
12 l = figure(title="line", tools=TOOLS)
13 l.line(x,y, line_width=3, color="gold")
14
15 aw = figure(title="annular wedge", tools=TOOLS)
16 aw.annular_wedge(x, y, 10, 20, 0.6, 4.1, color="navy", alpha=0.5,
17 inner_radius_units="screen", outer_radius_units="screen")
18
19 bez = figure(title="bezier", tools=TOOLS)
20 bez.bezier(x, y, x+0.4, y, x+0.1, y+0.2, x-0.1, y-0.2,
21 line_width=2, color="olive")
22
23 q = figure(title="quad", tools=TOOLS)
24 q.quad(x, x-0.2, y, y-0.2, color="tomato", alpha=0.4)
25
26 p = gridplot([[l,aw],[bez,q]])
27
28 output_file("grid.html", title="grid.py example")
29 show(p)
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/plotting/file/grid.py b/examples/plotting/file/grid.py
--- a/examples/plotting/file/grid.py
+++ b/examples/plotting/file/grid.py
@@ -7,7 +7,7 @@
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
-TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
+TOOLS = "pan,wheel_zoom,box_zoom,reset,save,crosshair"
l = figure(title="line", tools=TOOLS)
l.line(x,y, line_width=3, color="gold")
| {"golden_diff": "diff --git a/examples/plotting/file/grid.py b/examples/plotting/file/grid.py\n--- a/examples/plotting/file/grid.py\n+++ b/examples/plotting/file/grid.py\n@@ -7,7 +7,7 @@\n x = np.linspace(0, 4*np.pi, N)\n y = np.sin(x)\n \n-TOOLS = \"pan,wheel_zoom,box_zoom,reset,save\"\n+TOOLS = \"pan,wheel_zoom,box_zoom,reset,save,crosshair\"\n \n l = figure(title=\"line\", tools=TOOLS)\n l.line(x,y, line_width=3, color=\"gold\")\n", "issue": "Tools get lost on Grid Plots\nJS logic error prevents all tools from showing up in the toolbar. (cf. comment in #1342)\n\n", "before_files": [{"content": "import numpy as np\n\nfrom bokeh.plotting import *\n\nN = 50\n\nx = np.linspace(0, 4*np.pi, N)\ny = np.sin(x)\n\nTOOLS = \"pan,wheel_zoom,box_zoom,reset,save\"\n\nl = figure(title=\"line\", tools=TOOLS)\nl.line(x,y, line_width=3, color=\"gold\")\n\naw = figure(title=\"annular wedge\", tools=TOOLS)\naw.annular_wedge(x, y, 10, 20, 0.6, 4.1, color=\"navy\", alpha=0.5,\n inner_radius_units=\"screen\", outer_radius_units=\"screen\")\n\nbez = figure(title=\"bezier\", tools=TOOLS)\nbez.bezier(x, y, x+0.4, y, x+0.1, y+0.2, x-0.1, y-0.2,\n line_width=2, color=\"olive\")\n\nq = figure(title=\"quad\", tools=TOOLS)\nq.quad(x, x-0.2, y, y-0.2, color=\"tomato\", alpha=0.4)\n\np = gridplot([[l,aw],[bez,q]])\n\noutput_file(\"grid.html\", title=\"grid.py example\")\nshow(p)\n", "path": "examples/plotting/file/grid.py"}], "after_files": [{"content": "import numpy as np\n\nfrom bokeh.plotting import *\n\nN = 50\n\nx = np.linspace(0, 4*np.pi, N)\ny = np.sin(x)\n\nTOOLS = \"pan,wheel_zoom,box_zoom,reset,save,crosshair\"\n\nl = figure(title=\"line\", tools=TOOLS)\nl.line(x,y, line_width=3, color=\"gold\")\n\naw = figure(title=\"annular wedge\", tools=TOOLS)\naw.annular_wedge(x, y, 10, 20, 0.6, 4.1, color=\"navy\", alpha=0.5,\n inner_radius_units=\"screen\", outer_radius_units=\"screen\")\n\nbez = figure(title=\"bezier\", tools=TOOLS)\nbez.bezier(x, y, x+0.4, y, x+0.1, y+0.2, x-0.1, y-0.2,\n line_width=2, color=\"olive\")\n\nq = figure(title=\"quad\", tools=TOOLS)\nq.quad(x, x-0.2, y, y-0.2, color=\"tomato\", alpha=0.4)\n\np = gridplot([[l,aw],[bez,q]])\n\noutput_file(\"grid.html\", title=\"grid.py example\")\nshow(p)\n", "path": "examples/plotting/file/grid.py"}]} | 628 | 132 |
gh_patches_debug_2061 | rasdani/github-patches | git_diff | plotly__dash-565 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New version of dash_renderer is not automatically installed with Dash 0.36.0
Deploying apps on Dash Deployment Server results in `dash-renderer` not being updated if it is already installed (even if that version is `0.16.x` and the Dash version is specified as `0.36.0`. This causes an `Error loading dependencies`, as `dash-renderer` attempts to attach event handlers to Dash events, which don't exist any more.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import io
2 from setuptools import setup, find_packages
3
4 main_ns = {}
5 exec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used
6
7 setup(
8 name='dash',
9 version=main_ns['__version__'],
10 author='chris p',
11 author_email='[email protected]',
12 packages=find_packages(exclude=['tests*']),
13 include_package_data=True,
14 license='MIT',
15 description=('A Python framework for building reactive web-apps. '
16 'Developed by Plotly.'),
17 long_description=io.open('README.md', encoding='utf-8').read(),
18 long_description_content_type='text/markdown',
19 install_requires=[
20 'Flask>=0.12',
21 'flask-compress',
22 'plotly',
23 'dash_renderer',
24 ],
25 entry_points={
26 'console_scripts': [
27 'dash-generate-components ='
28 ' dash.development.component_generator:cli'
29 ]
30 },
31 url='https://plot.ly/dash',
32 classifiers=[
33 'Development Status :: 5 - Production/Stable',
34 'Environment :: Web Environment',
35 'Framework :: Flask',
36 'Intended Audience :: Developers',
37 'Intended Audience :: Education',
38 'Intended Audience :: Financial and Insurance Industry',
39 'Intended Audience :: Healthcare Industry',
40 'Intended Audience :: Manufacturing',
41 'Intended Audience :: Science/Research',
42 'License :: OSI Approved :: MIT License',
43 'Programming Language :: Python',
44 'Programming Language :: Python :: 2',
45 'Programming Language :: Python :: 2.7',
46 'Programming Language :: Python :: 3',
47 'Programming Language :: Python :: 3.3',
48 'Programming Language :: Python :: 3.4',
49 'Programming Language :: Python :: 3.5',
50 'Programming Language :: Python :: 3.6',
51 'Programming Language :: Python :: 3.7',
52 'Topic :: Database :: Front-Ends',
53 'Topic :: Office/Business :: Financial :: Spreadsheet',
54 'Topic :: Scientific/Engineering :: Visualization',
55 'Topic :: Software Development :: Libraries :: Application Frameworks',
56 'Topic :: Software Development :: Widget Sets'
57 ]
58 )
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,10 @@
'Flask>=0.12',
'flask-compress',
'plotly',
- 'dash_renderer',
+ 'dash_renderer==0.17.0',
+ 'dash-core-components==0.43.0',
+ 'dash-html-components==0.13.5',
+ 'dash-table==3.3.0'
],
entry_points={
'console_scripts': [
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,10 @@\n 'Flask>=0.12',\n 'flask-compress',\n 'plotly',\n- 'dash_renderer',\n+ 'dash_renderer==0.17.0',\n+ 'dash-core-components==0.43.0',\n+ 'dash-html-components==0.13.5',\n+ 'dash-table==3.3.0'\n ],\n entry_points={\n 'console_scripts': [\n", "issue": "New version of dash_renderer is not automatically installed with Dash 0.36.0\nDeploying apps on Dash Deployment Server results in `dash-renderer` not being updated if it is already installed (even if that version is `0.16.x` and the Dash version is specified as `0.36.0`. This causes an `Error loading dependencies`, as `dash-renderer` attempts to attach event handlers to Dash events, which don't exist any more. \n", "before_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used\n\nsetup(\n name='dash',\n version=main_ns['__version__'],\n author='chris p',\n author_email='[email protected]',\n packages=find_packages(exclude=['tests*']),\n include_package_data=True,\n license='MIT',\n description=('A Python framework for building reactive web-apps. '\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n long_description_content_type='text/markdown',\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n 'plotly',\n 'dash_renderer',\n ],\n entry_points={\n 'console_scripts': [\n 'dash-generate-components ='\n ' dash.development.component_generator:cli'\n ]\n },\n url='https://plot.ly/dash',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Flask',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Financial and Insurance Industry',\n 'Intended Audience :: Healthcare Industry',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Database :: Front-Ends',\n 'Topic :: Office/Business :: Financial :: Spreadsheet',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Widget Sets'\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used\n\nsetup(\n name='dash',\n version=main_ns['__version__'],\n author='chris p',\n author_email='[email protected]',\n packages=find_packages(exclude=['tests*']),\n include_package_data=True,\n license='MIT',\n description=('A Python framework for building reactive web-apps. 
'\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n long_description_content_type='text/markdown',\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n 'plotly',\n 'dash_renderer==0.17.0',\n 'dash-core-components==0.43.0',\n 'dash-html-components==0.13.5',\n 'dash-table==3.3.0'\n ],\n entry_points={\n 'console_scripts': [\n 'dash-generate-components ='\n ' dash.development.component_generator:cli'\n ]\n },\n url='https://plot.ly/dash',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Flask',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Financial and Insurance Industry',\n 'Intended Audience :: Healthcare Industry',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Database :: Front-Ends',\n 'Topic :: Office/Business :: Financial :: Spreadsheet',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Widget Sets'\n ]\n)\n", "path": "setup.py"}]} | 932 | 126 |
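The failure mode behind this patch is version skew between the `dash` Python package and its front-end companions: an unpinned `dash_renderer` already present in the build environment is left untouched, so a 0.16.x renderer keeps trying to attach handlers for Dash events that 0.36.0 removed. Pinning exact versions in `install_requires` forces pip to resolve a matching set. A deployment-side guard with the same effect, assuming a conventional pip-based build (versions copied from the patch; `dash==0.36.0` is the release named in the issue), is to pin the same set in `requirements.txt`:

```
dash==0.36.0
dash-renderer==0.17.0
dash-core-components==0.43.0
dash-html-components==0.13.5
dash-table==3.3.0
```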
gh_patches_debug_10715 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2711 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Custom stylesheets for Akvo pages missing
The custom CSS that can be uploaded to an Akvo page site is currently not served.
This is due to the link to it accidentally being removed when the main stylesheet for RSR was supplemented with a second sheet for the new Results.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rsr/context_processors.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Akvo RSR is covered by the GNU Affero General Public License.
4
5 See more details in the license.txt file located at the root folder of the
6 Akvo RSR module. For additional details on the GNU license please see
7 < http://www.gnu.org/licenses/agpl.html >.
8 """
9
10 import django
11
12 from django.conf import settings
13 from django.core.exceptions import DisallowedHost
14 from django.contrib.sites.models import get_current_site
15
16
17 def extra_context(request, protocol="http"):
18 """Add information to the request context."""
19 try:
20 current_site = get_current_site(request)
21 except DisallowedHost:
22 current_site = None
23
24 django_version = django.get_version()
25 debug = getattr(settings, 'DEBUG', False)
26 deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown')
27 deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown')
28 deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')
29 deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')
30
31 return dict(
32 current_site=current_site,
33 django_version=django_version,
34 debug=debug,
35 deploy_tag=deploy_tag,
36 deploy_branch=deploy_branch,
37 deploy_commit_id=deploy_commit_id,
38 deploy_commit_full_id=deploy_commit_full_id
39 )
40
41
42 def get_current_path_without_lang(request):
43 """Return current path without lang."""
44 path = request.get_full_path()
45 path_bits = path.split('/')
46 path = '/'.join(path_bits[2:])
47 return {'current_path_without_lang': path}
48
49
50 def extra_pages_context(request):
51 """Add context information of an RSR Page."""
52 if request.rsr_page:
53 page = request.rsr_page
54 return {
55 'rsr_page': page,
56 'favicon': page.favicon,
57 'logo': page.logo,
58 'organisation': page.organisation,
59 'return_url': page.return_url,
60 'return_url_text': page.custom_return_url_text,
61 'stylesheet': page.stylesheet,
62 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),
63 'domain_url': '//{}'.format(settings.RSR_DOMAIN),
64 'no_facebook': not page.facebook_button,
65 'facebook_app_id': page.facebook_app_id,
66 'no_twitter': not page.twitter_button,
67 }
68
69 return {}
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rsr/context_processors.py b/akvo/rsr/context_processors.py
--- a/akvo/rsr/context_processors.py
+++ b/akvo/rsr/context_processors.py
@@ -58,7 +58,7 @@
'organisation': page.organisation,
'return_url': page.return_url,
'return_url_text': page.custom_return_url_text,
- 'stylesheet': page.stylesheet,
+ 'page_stylesheet': page.stylesheet,
'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),
'domain_url': '//{}'.format(settings.RSR_DOMAIN),
'no_facebook': not page.facebook_button,
| {"golden_diff": "diff --git a/akvo/rsr/context_processors.py b/akvo/rsr/context_processors.py\n--- a/akvo/rsr/context_processors.py\n+++ b/akvo/rsr/context_processors.py\n@@ -58,7 +58,7 @@\n 'organisation': page.organisation,\n 'return_url': page.return_url,\n 'return_url_text': page.custom_return_url_text,\n- 'stylesheet': page.stylesheet,\n+ 'page_stylesheet': page.stylesheet,\n 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),\n 'domain_url': '//{}'.format(settings.RSR_DOMAIN),\n 'no_facebook': not page.facebook_button,\n", "issue": "Custom stylesheets for Akvo pages missing\nThe custom CSS that can be uploaded to an Akvo page site is currently not served.\r\n\r\nThis is due to the link to it accidentally being removed when the main stylesheet for RSR was supplemented with a second sheet for the new Results.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nAkvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please see\n< http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nimport django\n\nfrom django.conf import settings\nfrom django.core.exceptions import DisallowedHost\nfrom django.contrib.sites.models import get_current_site\n\n\ndef extra_context(request, protocol=\"http\"):\n \"\"\"Add information to the request context.\"\"\"\n try:\n current_site = get_current_site(request)\n except DisallowedHost:\n current_site = None\n\n django_version = django.get_version()\n debug = getattr(settings, 'DEBUG', False)\n deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown')\n deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown')\n deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')\n deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')\n\n return dict(\n current_site=current_site,\n django_version=django_version,\n debug=debug,\n deploy_tag=deploy_tag,\n deploy_branch=deploy_branch,\n deploy_commit_id=deploy_commit_id,\n deploy_commit_full_id=deploy_commit_full_id\n )\n\n\ndef get_current_path_without_lang(request):\n \"\"\"Return current path without lang.\"\"\"\n path = request.get_full_path()\n path_bits = path.split('/')\n path = '/'.join(path_bits[2:])\n return {'current_path_without_lang': path}\n\n\ndef extra_pages_context(request):\n \"\"\"Add context information of an RSR Page.\"\"\"\n if request.rsr_page:\n page = request.rsr_page\n return {\n 'rsr_page': page,\n 'favicon': page.favicon,\n 'logo': page.logo,\n 'organisation': page.organisation,\n 'return_url': page.return_url,\n 'return_url_text': page.custom_return_url_text,\n 'stylesheet': page.stylesheet,\n 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),\n 'domain_url': '//{}'.format(settings.RSR_DOMAIN),\n 'no_facebook': not page.facebook_button,\n 'facebook_app_id': page.facebook_app_id,\n 'no_twitter': not page.twitter_button,\n }\n\n return {}\n", "path": "akvo/rsr/context_processors.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nAkvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. 
For additional details on the GNU license please see\n< http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nimport django\n\nfrom django.conf import settings\nfrom django.core.exceptions import DisallowedHost\nfrom django.contrib.sites.models import get_current_site\n\n\ndef extra_context(request, protocol=\"http\"):\n \"\"\"Add information to the request context.\"\"\"\n try:\n current_site = get_current_site(request)\n except DisallowedHost:\n current_site = None\n\n django_version = django.get_version()\n debug = getattr(settings, 'DEBUG', False)\n deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown')\n deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown')\n deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')\n deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')\n\n return dict(\n current_site=current_site,\n django_version=django_version,\n debug=debug,\n deploy_tag=deploy_tag,\n deploy_branch=deploy_branch,\n deploy_commit_id=deploy_commit_id,\n deploy_commit_full_id=deploy_commit_full_id\n )\n\n\ndef get_current_path_without_lang(request):\n \"\"\"Return current path without lang.\"\"\"\n path = request.get_full_path()\n path_bits = path.split('/')\n path = '/'.join(path_bits[2:])\n return {'current_path_without_lang': path}\n\n\ndef extra_pages_context(request):\n \"\"\"Add context information of an RSR Page.\"\"\"\n if request.rsr_page:\n page = request.rsr_page\n return {\n 'rsr_page': page,\n 'favicon': page.favicon,\n 'logo': page.logo,\n 'organisation': page.organisation,\n 'return_url': page.return_url,\n 'return_url_text': page.custom_return_url_text,\n 'page_stylesheet': page.stylesheet,\n 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),\n 'domain_url': '//{}'.format(settings.RSR_DOMAIN),\n 'no_facebook': not page.facebook_button,\n 'facebook_app_id': page.facebook_app_id,\n 'no_twitter': not page.twitter_button,\n }\n\n return {}\n", "path": "akvo/rsr/context_processors.py"}]} | 956 | 145 |
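Note that this patch only renames the context key from `stylesheet` to `page_stylesheet`; for the uploaded CSS to be served again, the base template must emit a link tag for the new key, and that template change is not shown in the instance. A hypothetical sketch of the consuming side, assuming a Django template and that the uploaded stylesheet is a file field exposing a `.url` attribute (the usual pattern for uploaded files):

```
{% if page_stylesheet %}
  <link rel="stylesheet" href="{{ page_stylesheet.url }}">
{% endif %}
```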
gh_patches_debug_129 | rasdani/github-patches | git_diff | librosa__librosa-1839 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
librosa 0.10.2 is not compatible with matplotlib <3.5
***BEFORE POSTING A BUG REPORT*** Please look through [existing issues (both open and closed)](https://github.com/librosa/librosa/issues?q=is%3Aissue) to see if it's already been reported or fixed!
**librosa 0.10.2 is not imcompatible with matplotlib.colormap**
When I try to user librosa.display, it reports the following error: cannot import name 'colormaps' from 'matplotlib'

**error code**
<!--
Example:
```
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
-->
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Software versions***

**Additional context**
I have tried to change the version of matplotlib, but it does not work. And the versions I have tried are: 2.0.0, 3.0.0
librosa 0.10.2 is not compatible with matplotlib <3.5
***BEFORE POSTING A BUG REPORT*** Please look through [existing issues (both open and closed)](https://github.com/librosa/librosa/issues?q=is%3Aissue) to see if it's already been reported or fixed!
**librosa 0.10.2 is not imcompatible with matplotlib.colormap**
When I try to user librosa.display, it reports the following error: cannot import name 'colormaps' from 'matplotlib'

**error code**
<!--
Example:
```
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
-->
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Software versions***

**Additional context**
I have tried to change the version of matplotlib, but it does not work. And the versions I have tried are: 2.0.0, 3.0.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `librosa/version.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """Version info"""
4
5 import sys
6 import importlib
7
8 short_version = "0.10"
9 version = "0.10.2"
10
11
12 def __get_mod_version(modname):
13 try:
14 if modname in sys.modules:
15 mod = sys.modules[modname]
16 else:
17 mod = importlib.import_module(modname)
18 try:
19 return mod.__version__
20 except AttributeError:
21 return "installed, no version number available"
22
23 except ImportError:
24 return None
25
26
27 def show_versions() -> None:
28 """Return the version information for all librosa dependencies."""
29 core_deps = [
30 "audioread",
31 "numpy",
32 "scipy",
33 "sklearn",
34 "joblib",
35 "decorator",
36 "numba",
37 "soundfile",
38 "pooch",
39 "soxr",
40 "typing_extensions",
41 "lazy_loader",
42 "msgpack",
43 ]
44
45 extra_deps = [
46 "numpydoc",
47 "sphinx",
48 "sphinx_rtd_theme",
49 "matplotlib",
50 "sphinx_multiversion",
51 "sphinx_gallery",
52 "mir_eval",
53 "ipython",
54 "sphinxcontrib.rsvgconverter",
55 "pytest",
56 "pytest_mpl",
57 "pytest_cov",
58 "samplerate",
59 "resampy",
60 "presets",
61 "packaging",
62 ]
63
64 print("INSTALLED VERSIONS")
65 print("------------------")
66 print(f"python: {sys.version}\n")
67 print(f"librosa: {version}\n")
68 for dep in core_deps:
69 print("{}: {}".format(dep, __get_mod_version(dep)))
70 print("")
71 for dep in extra_deps:
72 print("{}: {}".format(dep, __get_mod_version(dep)))
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/librosa/version.py b/librosa/version.py
--- a/librosa/version.py
+++ b/librosa/version.py
@@ -6,7 +6,7 @@
import importlib
short_version = "0.10"
-version = "0.10.2"
+version = "0.10.2.post1"
def __get_mod_version(modname):
| {"golden_diff": "diff --git a/librosa/version.py b/librosa/version.py\n--- a/librosa/version.py\n+++ b/librosa/version.py\n@@ -6,7 +6,7 @@\n import importlib\n \n short_version = \"0.10\"\n-version = \"0.10.2\"\n+version = \"0.10.2.post1\"\n \n \n def __get_mod_version(modname):\n", "issue": "librosa 0.10.2 is not compatible with matplotlib <3.5\n***BEFORE POSTING A BUG REPORT*** Please look through [existing issues (both open and closed)](https://github.com/librosa/librosa/issues?q=is%3Aissue) to see if it's already been reported or fixed!\r\n\r\n\r\n**librosa 0.10.2 is not imcompatible with matplotlib.colormap**\r\n When I try to user librosa.display, it reports the following error: cannot import name 'colormaps' from 'matplotlib'\r\n\r\n\r\n\r\n\r\n**error code**\r\n<!--\r\nExample:\r\n```\r\nimport librosa.display\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n-->\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Software versions***\r\n\r\n\r\n\r\n**Additional context**\r\nI have tried to change the version of matplotlib, but it does not work. And the versions I have tried are: 2.0.0, 3.0.0\r\n\nlibrosa 0.10.2 is not compatible with matplotlib <3.5\n***BEFORE POSTING A BUG REPORT*** Please look through [existing issues (both open and closed)](https://github.com/librosa/librosa/issues?q=is%3Aissue) to see if it's already been reported or fixed!\r\n\r\n\r\n**librosa 0.10.2 is not imcompatible with matplotlib.colormap**\r\n When I try to user librosa.display, it reports the following error: cannot import name 'colormaps' from 'matplotlib'\r\n\r\n\r\n\r\n\r\n**error code**\r\n<!--\r\nExample:\r\n```\r\nimport librosa.display\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n-->\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Software versions***\r\n\r\n\r\n\r\n**Additional context**\r\nI have tried to change the version of matplotlib, but it does not work. 
And the versions I have tried are: 2.0.0, 3.0.0\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Version info\"\"\"\n\nimport sys\nimport importlib\n\nshort_version = \"0.10\"\nversion = \"0.10.2\"\n\n\ndef __get_mod_version(modname):\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n try:\n return mod.__version__\n except AttributeError:\n return \"installed, no version number available\"\n\n except ImportError:\n return None\n\n\ndef show_versions() -> None:\n \"\"\"Return the version information for all librosa dependencies.\"\"\"\n core_deps = [\n \"audioread\",\n \"numpy\",\n \"scipy\",\n \"sklearn\",\n \"joblib\",\n \"decorator\",\n \"numba\",\n \"soundfile\",\n \"pooch\",\n \"soxr\",\n \"typing_extensions\",\n \"lazy_loader\",\n \"msgpack\",\n ]\n\n extra_deps = [\n \"numpydoc\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"matplotlib\",\n \"sphinx_multiversion\",\n \"sphinx_gallery\",\n \"mir_eval\",\n \"ipython\",\n \"sphinxcontrib.rsvgconverter\",\n \"pytest\",\n \"pytest_mpl\",\n \"pytest_cov\",\n \"samplerate\",\n \"resampy\",\n \"presets\",\n \"packaging\",\n ]\n\n print(\"INSTALLED VERSIONS\")\n print(\"------------------\")\n print(f\"python: {sys.version}\\n\")\n print(f\"librosa: {version}\\n\")\n for dep in core_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n print(\"\")\n for dep in extra_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n", "path": "librosa/version.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Version info\"\"\"\n\nimport sys\nimport importlib\n\nshort_version = \"0.10\"\nversion = \"0.10.2.post1\"\n\n\ndef __get_mod_version(modname):\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n try:\n return mod.__version__\n except AttributeError:\n return \"installed, no version number available\"\n\n except ImportError:\n return None\n\n\ndef show_versions() -> None:\n \"\"\"Return the version information for all librosa dependencies.\"\"\"\n core_deps = [\n \"audioread\",\n \"numpy\",\n \"scipy\",\n \"sklearn\",\n \"joblib\",\n \"decorator\",\n \"numba\",\n \"soundfile\",\n \"pooch\",\n \"soxr\",\n \"typing_extensions\",\n \"lazy_loader\",\n \"msgpack\",\n ]\n\n extra_deps = [\n \"numpydoc\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"matplotlib\",\n \"sphinx_multiversion\",\n \"sphinx_gallery\",\n \"mir_eval\",\n \"ipython\",\n \"sphinxcontrib.rsvgconverter\",\n \"pytest\",\n \"pytest_mpl\",\n \"pytest_cov\",\n \"samplerate\",\n \"resampy\",\n \"presets\",\n \"packaging\",\n ]\n\n print(\"INSTALLED VERSIONS\")\n print(\"------------------\")\n print(f\"python: {sys.version}\\n\")\n print(f\"librosa: {version}\\n\")\n for dep in core_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n print(\"\")\n for dep in extra_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n", "path": "librosa/version.py"}]} | 1,473 | 87 |
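The traceback quoted in the issue points at `matplotlib.colormaps`, a registry that only exists on matplotlib >= 3.5 — hence the issue title, and why the reporter's attempts with matplotlib 2.0.0 and 3.0.0 both failed. The visible patch only bumps the version string to `0.10.2.post1`, consistent with a post-release carrying corrected dependency metadata (the metadata file itself is not shown here). For code that has to run on both sides of the 3.5 boundary, a version-tolerant colormap lookup can be sketched with only public matplotlib APIs:

```python
import matplotlib

def get_colormap(name):
    """Resolve a named colormap on both pre- and post-3.5 matplotlib."""
    try:
        return matplotlib.colormaps[name]  # matplotlib >= 3.5: the ColormapRegistry
    except AttributeError:
        from matplotlib.cm import get_cmap  # older releases (deprecated later on)
        return get_cmap(name)

cmap = get_colormap("magma")
```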