problem_id (string, lengths 18–22) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, lengths 13–58) | prompt (string, lengths 1.1k–25.4k) | golden_diff (string, lengths 145–5.13k) | verification_info (string, lengths 582–39.1k) | num_tokens (int64, 271–4.1k) | num_tokens_diff (int64, 47–1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_63306 | rasdani/github-patches | git_diff | scikit-hep__pyhf-363 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
consolidation: add simplemodls to __all__
# Description
It would be nice if the snippet in the README could be shorter:
right now this is needed
```
import pyhf
import pyhf.simplemodels
pdf = pyhf.simplemodels.hepdata_like(signal_data=[12.0], bkg_data=[50.0], bkg_uncerts=[3.0])
CLs_obs = pyhf.utils.hypotest(1.0, [51] + pdf.config.auxdata, pdf)
```
whereas if we pre-import `simplemodels` it could be
```
import pyhf
pdf = pyhf.simplemodels.hepdata_like(signal_data=[12.0], bkg_data=[50.0], bkg_uncerts=[3.0])
CLs_obs = pyhf.utils.hypotest(1.0, [51] + pdf.config.auxdata, pdf)
```
since `simplemodels.py` doesn't add much code, i don't think it would slow down things a lot
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyhf/__init__.py`
Content:
```
1 from .tensor import BackendRetriever as tensor
2 from .optimize import OptimizerRetriever as optimize
3 from .version import __version__
4 from . import events
5
6 tensorlib = tensor.numpy_backend()
7 default_backend = tensorlib
8 optimizer = optimize.scipy_optimizer()
9 default_optimizer = optimizer
10
11
12 def get_backend():
13 """
14 Get the current backend and the associated optimizer
15
16 Example:
17 >>> import pyhf
18 >>> pyhf.get_backend()
19 (<pyhf.tensor.numpy_backend.numpy_backend object at 0x...>, <pyhf.optimize.opt_scipy.scipy_optimizer object at 0x...>)
20
21 Returns:
22 backend, optimizer
23 """
24 global tensorlib
25 global optimizer
26 return tensorlib, optimizer
27
28
29 @events.register('change_backend')
30 def set_backend(backend, custom_optimizer=None):
31 """
32 Set the backend and the associated optimizer
33
34 Example:
35 >>> import pyhf
36 >>> import tensorflow as tf
37 >>> pyhf.set_backend(pyhf.tensor.tensorflow_backend(session=tf.Session()))
38
39 Args:
40 backend: One of the supported pyhf backends: NumPy,
41 TensorFlow, PyTorch, and MXNet
42
43 Returns:
44 None
45 """
46 global tensorlib
47 global optimizer
48
49 # need to determine if the tensorlib changed or the optimizer changed for events
50 tensorlib_changed = bool(backend.name != tensorlib.name)
51 optimizer_changed = False
52
53 if backend.name == 'tensorflow':
54 new_optimizer = (
55 custom_optimizer if custom_optimizer else optimize.tflow_optimizer(backend)
56 )
57 if tensorlib.name == 'tensorflow':
58 tensorlib_changed |= bool(backend.session != tensorlib.session)
59 elif backend.name == 'pytorch':
60 new_optimizer = (
61 custom_optimizer
62 if custom_optimizer
63 else optimize.pytorch_optimizer(tensorlib=backend)
64 )
65 # TODO: Add support for mxnet_optimizer()
66 # elif tensorlib.name == 'mxnet':
67 # new_optimizer = custom_optimizer if custom_optimizer else mxnet_optimizer()
68 else:
69 new_optimizer = (
70 custom_optimizer if custom_optimizer else optimize.scipy_optimizer()
71 )
72
73 optimizer_changed = bool(optimizer != new_optimizer)
74 # set new backend
75 tensorlib = backend
76 optimizer = new_optimizer
77 # trigger events
78 if tensorlib_changed:
79 events.trigger("tensorlib_changed")()
80 if optimizer_changed:
81 events.trigger("optimizer_changed")()
82
83
84 from .pdf import Model
85
86 __all__ = ['Model', 'utils', 'modifiers', '__version__']
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyhf/__init__.py b/pyhf/__init__.py
--- a/pyhf/__init__.py
+++ b/pyhf/__init__.py
@@ -82,5 +82,6 @@
from .pdf import Model
+from . import simplemodels
-__all__ = ['Model', 'utils', 'modifiers', '__version__']
+__all__ = ['Model', 'utils', 'modifiers', 'simplemodels', '__version__']
| {"golden_diff": "diff --git a/pyhf/__init__.py b/pyhf/__init__.py\n--- a/pyhf/__init__.py\n+++ b/pyhf/__init__.py\n@@ -82,5 +82,6 @@\n \n \n from .pdf import Model\n+from . import simplemodels\n \n-__all__ = ['Model', 'utils', 'modifiers', '__version__']\n+__all__ = ['Model', 'utils', 'modifiers', 'simplemodels', '__version__']\n", "issue": "consolidation: add simplemodls to __all__\n# Description\r\n\r\nIt would be nice if the snippet in the README could be shorter:\r\n\r\nright now this is needed\r\n```\r\nimport pyhf\r\nimport pyhf.simplemodels\r\npdf = pyhf.simplemodels.hepdata_like(signal_data=[12.0], bkg_data=[50.0], bkg_uncerts=[3.0])\r\nCLs_obs = pyhf.utils.hypotest(1.0, [51] + pdf.config.auxdata, pdf)\r\n```\r\n\r\nwhereas if we pre-import `simplemodels` it could be \r\n```\r\nimport pyhf\r\npdf = pyhf.simplemodels.hepdata_like(signal_data=[12.0], bkg_data=[50.0], bkg_uncerts=[3.0])\r\nCLs_obs = pyhf.utils.hypotest(1.0, [51] + pdf.config.auxdata, pdf)\r\n```\r\n\r\nsince `simplemodels.py` doesn't add much code, i don't think it would slow down things a lot\n", "before_files": [{"content": "from .tensor import BackendRetriever as tensor\nfrom .optimize import OptimizerRetriever as optimize\nfrom .version import __version__\nfrom . import events\n\ntensorlib = tensor.numpy_backend()\ndefault_backend = tensorlib\noptimizer = optimize.scipy_optimizer()\ndefault_optimizer = optimizer\n\n\ndef get_backend():\n \"\"\"\n Get the current backend and the associated optimizer\n\n Example:\n >>> import pyhf\n >>> pyhf.get_backend()\n (<pyhf.tensor.numpy_backend.numpy_backend object at 0x...>, <pyhf.optimize.opt_scipy.scipy_optimizer object at 0x...>)\n\n Returns:\n backend, optimizer\n \"\"\"\n global tensorlib\n global optimizer\n return tensorlib, optimizer\n\n\[email protected]('change_backend')\ndef set_backend(backend, custom_optimizer=None):\n \"\"\"\n Set the backend and the associated optimizer\n\n Example:\n >>> import pyhf\n >>> import tensorflow as tf\n >>> pyhf.set_backend(pyhf.tensor.tensorflow_backend(session=tf.Session()))\n\n Args:\n backend: One of the supported pyhf backends: NumPy,\n TensorFlow, PyTorch, and MXNet\n\n Returns:\n None\n \"\"\"\n global tensorlib\n global optimizer\n\n # need to determine if the tensorlib changed or the optimizer changed for events\n tensorlib_changed = bool(backend.name != tensorlib.name)\n optimizer_changed = False\n\n if backend.name == 'tensorflow':\n new_optimizer = (\n custom_optimizer if custom_optimizer else optimize.tflow_optimizer(backend)\n )\n if tensorlib.name == 'tensorflow':\n tensorlib_changed |= bool(backend.session != tensorlib.session)\n elif backend.name == 'pytorch':\n new_optimizer = (\n custom_optimizer\n if custom_optimizer\n else optimize.pytorch_optimizer(tensorlib=backend)\n )\n # TODO: Add support for mxnet_optimizer()\n # elif tensorlib.name == 'mxnet':\n # new_optimizer = custom_optimizer if custom_optimizer else mxnet_optimizer()\n else:\n new_optimizer = (\n custom_optimizer if custom_optimizer else optimize.scipy_optimizer()\n )\n\n optimizer_changed = bool(optimizer != new_optimizer)\n # set new backend\n tensorlib = backend\n optimizer = new_optimizer\n # trigger events\n if tensorlib_changed:\n events.trigger(\"tensorlib_changed\")()\n if optimizer_changed:\n events.trigger(\"optimizer_changed\")()\n\n\nfrom .pdf import Model\n\n__all__ = ['Model', 'utils', 'modifiers', '__version__']\n", "path": "pyhf/__init__.py"}], "after_files": [{"content": "from .tensor import BackendRetriever as tensor\nfrom 
.optimize import OptimizerRetriever as optimize\nfrom .version import __version__\nfrom . import events\n\ntensorlib = tensor.numpy_backend()\ndefault_backend = tensorlib\noptimizer = optimize.scipy_optimizer()\ndefault_optimizer = optimizer\n\n\ndef get_backend():\n \"\"\"\n Get the current backend and the associated optimizer\n\n Example:\n >>> import pyhf\n >>> pyhf.get_backend()\n (<pyhf.tensor.numpy_backend.numpy_backend object at 0x...>, <pyhf.optimize.opt_scipy.scipy_optimizer object at 0x...>)\n\n Returns:\n backend, optimizer\n \"\"\"\n global tensorlib\n global optimizer\n return tensorlib, optimizer\n\n\[email protected]('change_backend')\ndef set_backend(backend, custom_optimizer=None):\n \"\"\"\n Set the backend and the associated optimizer\n\n Example:\n >>> import pyhf\n >>> import tensorflow as tf\n >>> pyhf.set_backend(pyhf.tensor.tensorflow_backend(session=tf.Session()))\n\n Args:\n backend: One of the supported pyhf backends: NumPy,\n TensorFlow, PyTorch, and MXNet\n\n Returns:\n None\n \"\"\"\n global tensorlib\n global optimizer\n\n # need to determine if the tensorlib changed or the optimizer changed for events\n tensorlib_changed = bool(backend.name != tensorlib.name)\n optimizer_changed = False\n\n if backend.name == 'tensorflow':\n new_optimizer = (\n custom_optimizer if custom_optimizer else optimize.tflow_optimizer(backend)\n )\n if tensorlib.name == 'tensorflow':\n tensorlib_changed |= bool(backend.session != tensorlib.session)\n elif backend.name == 'pytorch':\n new_optimizer = (\n custom_optimizer\n if custom_optimizer\n else optimize.pytorch_optimizer(tensorlib=backend)\n )\n # TODO: Add support for mxnet_optimizer()\n # elif tensorlib.name == 'mxnet':\n # new_optimizer = custom_optimizer if custom_optimizer else mxnet_optimizer()\n else:\n new_optimizer = (\n custom_optimizer if custom_optimizer else optimize.scipy_optimizer()\n )\n\n optimizer_changed = bool(optimizer != new_optimizer)\n # set new backend\n tensorlib = backend\n optimizer = new_optimizer\n # trigger events\n if tensorlib_changed:\n events.trigger(\"tensorlib_changed\")()\n if optimizer_changed:\n events.trigger(\"optimizer_changed\")()\n\n\nfrom .pdf import Model\nfrom . import simplemodels\n\n__all__ = ['Model', 'utils', 'modifiers', 'simplemodels', '__version__']\n", "path": "pyhf/__init__.py"}]} | 1,190 | 102 |
gh_patches_debug_27047 | rasdani/github-patches | git_diff | encode__starlette-761 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Background tasks exception handler
Currently, if a background task faces an exception, it isn't handled and there's no way to handle it (rather than wrapping the task)
My suggestion is to be able to add exception handler for background tasks, as there is for requests.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/middleware/errors.py`
Content:
```
1 import asyncio
2 import html
3 import inspect
4 import traceback
5 import typing
6
7 from starlette.concurrency import run_in_threadpool
8 from starlette.requests import Request
9 from starlette.responses import HTMLResponse, PlainTextResponse, Response
10 from starlette.types import ASGIApp, Message, Receive, Scope, Send
11
12 STYLES = """
13 p {
14 color: #211c1c;
15 }
16 .traceback-container {
17 border: 1px solid #038BB8;
18 }
19 .traceback-title {
20 background-color: #038BB8;
21 color: lemonchiffon;
22 padding: 12px;
23 font-size: 20px;
24 margin-top: 0px;
25 }
26 .frame-line {
27 padding-left: 10px;
28 font-family: monospace;
29 }
30 .frame-filename {
31 font-family: monospace;
32 }
33 .center-line {
34 background-color: #038BB8;
35 color: #f9f6e1;
36 padding: 5px 0px 5px 5px;
37 }
38 .lineno {
39 margin-right: 5px;
40 }
41 .frame-title {
42 font-weight: unset;
43 padding: 10px 10px 10px 10px;
44 background-color: #E4F4FD;
45 margin-right: 10px;
46 color: #191f21;
47 font-size: 17px;
48 border: 1px solid #c7dce8;
49 }
50 .collapse-btn {
51 float: right;
52 padding: 0px 5px 1px 5px;
53 border: solid 1px #96aebb;
54 cursor: pointer;
55 }
56 .collapsed {
57 display: none;
58 }
59 .source-code {
60 font-family: courier;
61 font-size: small;
62 padding-bottom: 10px;
63 }
64 """
65
66 JS = """
67 <script type="text/javascript">
68 function collapse(element){
69 const frameId = element.getAttribute("data-frame-id");
70 const frame = document.getElementById(frameId);
71
72 if (frame.classList.contains("collapsed")){
73 element.innerHTML = "‒";
74 frame.classList.remove("collapsed");
75 } else {
76 element.innerHTML = "+";
77 frame.classList.add("collapsed");
78 }
79 }
80 </script>
81 """
82
83 TEMPLATE = """
84 <html>
85 <head>
86 <style type='text/css'>
87 {styles}
88 </style>
89 <title>Starlette Debugger</title>
90 </head>
91 <body>
92 <h1>500 Server Error</h1>
93 <h2>{error}</h2>
94 <div class="traceback-container">
95 <p class="traceback-title">Traceback</p>
96 <div>{exc_html}</div>
97 </div>
98 {js}
99 </body>
100 </html>
101 """
102
103 FRAME_TEMPLATE = """
104 <div>
105 <p class="frame-title">File <span class="frame-filename">{frame_filename}</span>,
106 line <i>{frame_lineno}</i>,
107 in <b>{frame_name}</b>
108 <span class="collapse-btn" data-frame-id="{frame_filename}-{frame_lineno}" onclick="collapse(this)">{collapse_button}</span>
109 </p>
110 <div id="{frame_filename}-{frame_lineno}" class="source-code {collapsed}">{code_context}</div>
111 </div>
112 """ # noqa: E501
113
114 LINE = """
115 <p><span class="frame-line">
116 <span class="lineno">{lineno}.</span> {line}</span></p>
117 """
118
119 CENTER_LINE = """
120 <p class="center-line"><span class="frame-line center-line">
121 <span class="lineno">{lineno}.</span> {line}</span></p>
122 """
123
124
125 class ServerErrorMiddleware:
126 """
127 Handles returning 500 responses when a server error occurs.
128
129 If 'debug' is set, then traceback responses will be returned,
130 otherwise the designated 'handler' will be called.
131
132 This middleware class should generally be used to wrap *everything*
133 else up, so that unhandled exceptions anywhere in the stack
134 always result in an appropriate 500 response.
135 """
136
137 def __init__(
138 self, app: ASGIApp, handler: typing.Callable = None, debug: bool = False
139 ) -> None:
140 self.app = app
141 self.handler = handler
142 self.debug = debug
143
144 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
145 if scope["type"] != "http":
146 await self.app(scope, receive, send)
147 return
148
149 response_started = False
150
151 async def _send(message: Message) -> None:
152 nonlocal response_started, send
153
154 if message["type"] == "http.response.start":
155 response_started = True
156 await send(message)
157
158 try:
159 await self.app(scope, receive, _send)
160 except Exception as exc:
161 if not response_started:
162 request = Request(scope)
163 if self.debug:
164 # In debug mode, return traceback responses.
165 response = self.debug_response(request, exc)
166 elif self.handler is None:
167 # Use our default 500 error handler.
168 response = self.error_response(request, exc)
169 else:
170 # Use an installed 500 error handler.
171 if asyncio.iscoroutinefunction(self.handler):
172 response = await self.handler(request, exc)
173 else:
174 response = await run_in_threadpool(self.handler, request, exc)
175
176 await response(scope, receive, send)
177
178 # We always continue to raise the exception.
179 # This allows servers to log the error, or allows test clients
180 # to optionally raise the error within the test case.
181 raise exc
182
183 def format_line(
184 self, index: int, line: str, frame_lineno: int, frame_index: int
185 ) -> str:
186 values = {
187 # HTML escape - line could contain < or >
188 "line": html.escape(line).replace(" ", " "),
189 "lineno": (frame_lineno - frame_index) + index,
190 }
191
192 if index != frame_index:
193 return LINE.format(**values)
194 return CENTER_LINE.format(**values)
195
196 def generate_frame_html(self, frame: inspect.FrameInfo, is_collapsed: bool) -> str:
197 code_context = "".join(
198 self.format_line(index, line, frame.lineno, frame.index) # type: ignore
199 for index, line in enumerate(frame.code_context or [])
200 )
201
202 values = {
203 # HTML escape - filename could contain < or >, especially if it's a virtual
204 # file e.g. <stdin> in the REPL
205 "frame_filename": html.escape(frame.filename),
206 "frame_lineno": frame.lineno,
207 # HTML escape - if you try very hard it's possible to name a function with <
208 # or >
209 "frame_name": html.escape(frame.function),
210 "code_context": code_context,
211 "collapsed": "collapsed" if is_collapsed else "",
212 "collapse_button": "+" if is_collapsed else "‒",
213 }
214 return FRAME_TEMPLATE.format(**values)
215
216 def generate_html(self, exc: Exception, limit: int = 7) -> str:
217 traceback_obj = traceback.TracebackException.from_exception(
218 exc, capture_locals=True
219 )
220
221 exc_html = ""
222 is_collapsed = False
223 exc_traceback = exc.__traceback__
224 if exc_traceback is not None:
225 frames = inspect.getinnerframes(exc_traceback, limit)
226 for frame in reversed(frames):
227 exc_html += self.generate_frame_html(frame, is_collapsed)
228 is_collapsed = True
229
230 # escape error class and text
231 error = (
232 f"{html.escape(traceback_obj.exc_type.__name__)}: "
233 f"{html.escape(str(traceback_obj))}"
234 )
235
236 return TEMPLATE.format(styles=STYLES, js=JS, error=error, exc_html=exc_html)
237
238 def generate_plain_text(self, exc: Exception) -> str:
239 return "".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
240
241 def debug_response(self, request: Request, exc: Exception) -> Response:
242 accept = request.headers.get("accept", "")
243
244 if "text/html" in accept:
245 content = self.generate_html(exc)
246 return HTMLResponse(content, status_code=500)
247 content = self.generate_plain_text(exc)
248 return PlainTextResponse(content, status_code=500)
249
250 def error_response(self, request: Request, exc: Exception) -> Response:
251 return PlainTextResponse("Internal Server Error", status_code=500)
252
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/middleware/errors.py b/starlette/middleware/errors.py
--- a/starlette/middleware/errors.py
+++ b/starlette/middleware/errors.py
@@ -158,21 +158,21 @@
try:
await self.app(scope, receive, _send)
except Exception as exc:
- if not response_started:
- request = Request(scope)
- if self.debug:
- # In debug mode, return traceback responses.
- response = self.debug_response(request, exc)
- elif self.handler is None:
- # Use our default 500 error handler.
- response = self.error_response(request, exc)
+ request = Request(scope)
+ if self.debug:
+ # In debug mode, return traceback responses.
+ response = self.debug_response(request, exc)
+ elif self.handler is None:
+ # Use our default 500 error handler.
+ response = self.error_response(request, exc)
+ else:
+ # Use an installed 500 error handler.
+ if asyncio.iscoroutinefunction(self.handler):
+ response = await self.handler(request, exc)
else:
- # Use an installed 500 error handler.
- if asyncio.iscoroutinefunction(self.handler):
- response = await self.handler(request, exc)
- else:
- response = await run_in_threadpool(self.handler, request, exc)
+ response = await run_in_threadpool(self.handler, request, exc)
+ if not response_started:
await response(scope, receive, send)
# We always continue to raise the exception.
| {"golden_diff": "diff --git a/starlette/middleware/errors.py b/starlette/middleware/errors.py\n--- a/starlette/middleware/errors.py\n+++ b/starlette/middleware/errors.py\n@@ -158,21 +158,21 @@\n try:\n await self.app(scope, receive, _send)\n except Exception as exc:\n- if not response_started:\n- request = Request(scope)\n- if self.debug:\n- # In debug mode, return traceback responses.\n- response = self.debug_response(request, exc)\n- elif self.handler is None:\n- # Use our default 500 error handler.\n- response = self.error_response(request, exc)\n+ request = Request(scope)\n+ if self.debug:\n+ # In debug mode, return traceback responses.\n+ response = self.debug_response(request, exc)\n+ elif self.handler is None:\n+ # Use our default 500 error handler.\n+ response = self.error_response(request, exc)\n+ else:\n+ # Use an installed 500 error handler.\n+ if asyncio.iscoroutinefunction(self.handler):\n+ response = await self.handler(request, exc)\n else:\n- # Use an installed 500 error handler.\n- if asyncio.iscoroutinefunction(self.handler):\n- response = await self.handler(request, exc)\n- else:\n- response = await run_in_threadpool(self.handler, request, exc)\n+ response = await run_in_threadpool(self.handler, request, exc)\n \n+ if not response_started:\n await response(scope, receive, send)\n \n # We always continue to raise the exception.\n", "issue": "Background tasks exception handler\nCurrently, if a background task faces an exception, it isn't handled and there's no way to handle it (rather than wrapping the task)\r\nMy suggestion is to be able to add exception handler for background tasks, as there is for requests.\n", "before_files": [{"content": "import asyncio\nimport html\nimport inspect\nimport traceback\nimport typing\n\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.requests import Request\nfrom starlette.responses import HTMLResponse, PlainTextResponse, Response\nfrom starlette.types import ASGIApp, Message, Receive, Scope, Send\n\nSTYLES = \"\"\"\np {\n color: #211c1c;\n}\n.traceback-container {\n border: 1px solid #038BB8;\n}\n.traceback-title {\n background-color: #038BB8;\n color: lemonchiffon;\n padding: 12px;\n font-size: 20px;\n margin-top: 0px;\n}\n.frame-line {\n padding-left: 10px;\n font-family: monospace;\n}\n.frame-filename {\n font-family: monospace;\n}\n.center-line {\n background-color: #038BB8;\n color: #f9f6e1;\n padding: 5px 0px 5px 5px;\n}\n.lineno {\n margin-right: 5px;\n}\n.frame-title {\n font-weight: unset;\n padding: 10px 10px 10px 10px;\n background-color: #E4F4FD;\n margin-right: 10px;\n color: #191f21;\n font-size: 17px;\n border: 1px solid #c7dce8;\n}\n.collapse-btn {\n float: right;\n padding: 0px 5px 1px 5px;\n border: solid 1px #96aebb;\n cursor: pointer;\n}\n.collapsed {\n display: none;\n}\n.source-code {\n font-family: courier;\n font-size: small;\n padding-bottom: 10px;\n}\n\"\"\"\n\nJS = \"\"\"\n<script type=\"text/javascript\">\n function collapse(element){\n const frameId = element.getAttribute(\"data-frame-id\");\n const frame = document.getElementById(frameId);\n\n if (frame.classList.contains(\"collapsed\")){\n element.innerHTML = \"‒\";\n frame.classList.remove(\"collapsed\");\n } else {\n element.innerHTML = \"+\";\n frame.classList.add(\"collapsed\");\n }\n }\n</script>\n\"\"\"\n\nTEMPLATE = \"\"\"\n<html>\n <head>\n <style type='text/css'>\n {styles}\n </style>\n <title>Starlette Debugger</title>\n </head>\n <body>\n <h1>500 Server Error</h1>\n <h2>{error}</h2>\n <div class=\"traceback-container\">\n <p 
class=\"traceback-title\">Traceback</p>\n <div>{exc_html}</div>\n </div>\n {js}\n </body>\n</html>\n\"\"\"\n\nFRAME_TEMPLATE = \"\"\"\n<div>\n <p class=\"frame-title\">File <span class=\"frame-filename\">{frame_filename}</span>,\n line <i>{frame_lineno}</i>,\n in <b>{frame_name}</b>\n <span class=\"collapse-btn\" data-frame-id=\"{frame_filename}-{frame_lineno}\" onclick=\"collapse(this)\">{collapse_button}</span>\n </p>\n <div id=\"{frame_filename}-{frame_lineno}\" class=\"source-code {collapsed}\">{code_context}</div>\n</div>\n\"\"\" # noqa: E501\n\nLINE = \"\"\"\n<p><span class=\"frame-line\">\n<span class=\"lineno\">{lineno}.</span> {line}</span></p>\n\"\"\"\n\nCENTER_LINE = \"\"\"\n<p class=\"center-line\"><span class=\"frame-line center-line\">\n<span class=\"lineno\">{lineno}.</span> {line}</span></p>\n\"\"\"\n\n\nclass ServerErrorMiddleware:\n \"\"\"\n Handles returning 500 responses when a server error occurs.\n\n If 'debug' is set, then traceback responses will be returned,\n otherwise the designated 'handler' will be called.\n\n This middleware class should generally be used to wrap *everything*\n else up, so that unhandled exceptions anywhere in the stack\n always result in an appropriate 500 response.\n \"\"\"\n\n def __init__(\n self, app: ASGIApp, handler: typing.Callable = None, debug: bool = False\n ) -> None:\n self.app = app\n self.handler = handler\n self.debug = debug\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] != \"http\":\n await self.app(scope, receive, send)\n return\n\n response_started = False\n\n async def _send(message: Message) -> None:\n nonlocal response_started, send\n\n if message[\"type\"] == \"http.response.start\":\n response_started = True\n await send(message)\n\n try:\n await self.app(scope, receive, _send)\n except Exception as exc:\n if not response_started:\n request = Request(scope)\n if self.debug:\n # In debug mode, return traceback responses.\n response = self.debug_response(request, exc)\n elif self.handler is None:\n # Use our default 500 error handler.\n response = self.error_response(request, exc)\n else:\n # Use an installed 500 error handler.\n if asyncio.iscoroutinefunction(self.handler):\n response = await self.handler(request, exc)\n else:\n response = await run_in_threadpool(self.handler, request, exc)\n\n await response(scope, receive, send)\n\n # We always continue to raise the exception.\n # This allows servers to log the error, or allows test clients\n # to optionally raise the error within the test case.\n raise exc\n\n def format_line(\n self, index: int, line: str, frame_lineno: int, frame_index: int\n ) -> str:\n values = {\n # HTML escape - line could contain < or >\n \"line\": html.escape(line).replace(\" \", \" \"),\n \"lineno\": (frame_lineno - frame_index) + index,\n }\n\n if index != frame_index:\n return LINE.format(**values)\n return CENTER_LINE.format(**values)\n\n def generate_frame_html(self, frame: inspect.FrameInfo, is_collapsed: bool) -> str:\n code_context = \"\".join(\n self.format_line(index, line, frame.lineno, frame.index) # type: ignore\n for index, line in enumerate(frame.code_context or [])\n )\n\n values = {\n # HTML escape - filename could contain < or >, especially if it's a virtual\n # file e.g. 
<stdin> in the REPL\n \"frame_filename\": html.escape(frame.filename),\n \"frame_lineno\": frame.lineno,\n # HTML escape - if you try very hard it's possible to name a function with <\n # or >\n \"frame_name\": html.escape(frame.function),\n \"code_context\": code_context,\n \"collapsed\": \"collapsed\" if is_collapsed else \"\",\n \"collapse_button\": \"+\" if is_collapsed else \"‒\",\n }\n return FRAME_TEMPLATE.format(**values)\n\n def generate_html(self, exc: Exception, limit: int = 7) -> str:\n traceback_obj = traceback.TracebackException.from_exception(\n exc, capture_locals=True\n )\n\n exc_html = \"\"\n is_collapsed = False\n exc_traceback = exc.__traceback__\n if exc_traceback is not None:\n frames = inspect.getinnerframes(exc_traceback, limit)\n for frame in reversed(frames):\n exc_html += self.generate_frame_html(frame, is_collapsed)\n is_collapsed = True\n\n # escape error class and text\n error = (\n f\"{html.escape(traceback_obj.exc_type.__name__)}: \"\n f\"{html.escape(str(traceback_obj))}\"\n )\n\n return TEMPLATE.format(styles=STYLES, js=JS, error=error, exc_html=exc_html)\n\n def generate_plain_text(self, exc: Exception) -> str:\n return \"\".join(traceback.format_exception(type(exc), exc, exc.__traceback__))\n\n def debug_response(self, request: Request, exc: Exception) -> Response:\n accept = request.headers.get(\"accept\", \"\")\n\n if \"text/html\" in accept:\n content = self.generate_html(exc)\n return HTMLResponse(content, status_code=500)\n content = self.generate_plain_text(exc)\n return PlainTextResponse(content, status_code=500)\n\n def error_response(self, request: Request, exc: Exception) -> Response:\n return PlainTextResponse(\"Internal Server Error\", status_code=500)\n", "path": "starlette/middleware/errors.py"}], "after_files": [{"content": "import asyncio\nimport html\nimport inspect\nimport traceback\nimport typing\n\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.requests import Request\nfrom starlette.responses import HTMLResponse, PlainTextResponse, Response\nfrom starlette.types import ASGIApp, Message, Receive, Scope, Send\n\nSTYLES = \"\"\"\np {\n color: #211c1c;\n}\n.traceback-container {\n border: 1px solid #038BB8;\n}\n.traceback-title {\n background-color: #038BB8;\n color: lemonchiffon;\n padding: 12px;\n font-size: 20px;\n margin-top: 0px;\n}\n.frame-line {\n padding-left: 10px;\n font-family: monospace;\n}\n.frame-filename {\n font-family: monospace;\n}\n.center-line {\n background-color: #038BB8;\n color: #f9f6e1;\n padding: 5px 0px 5px 5px;\n}\n.lineno {\n margin-right: 5px;\n}\n.frame-title {\n font-weight: unset;\n padding: 10px 10px 10px 10px;\n background-color: #E4F4FD;\n margin-right: 10px;\n color: #191f21;\n font-size: 17px;\n border: 1px solid #c7dce8;\n}\n.collapse-btn {\n float: right;\n padding: 0px 5px 1px 5px;\n border: solid 1px #96aebb;\n cursor: pointer;\n}\n.collapsed {\n display: none;\n}\n.source-code {\n font-family: courier;\n font-size: small;\n padding-bottom: 10px;\n}\n\"\"\"\n\nJS = \"\"\"\n<script type=\"text/javascript\">\n function collapse(element){\n const frameId = element.getAttribute(\"data-frame-id\");\n const frame = document.getElementById(frameId);\n\n if (frame.classList.contains(\"collapsed\")){\n element.innerHTML = \"‒\";\n frame.classList.remove(\"collapsed\");\n } else {\n element.innerHTML = \"+\";\n frame.classList.add(\"collapsed\");\n }\n }\n</script>\n\"\"\"\n\nTEMPLATE = \"\"\"\n<html>\n <head>\n <style type='text/css'>\n {styles}\n </style>\n <title>Starlette 
Debugger</title>\n </head>\n <body>\n <h1>500 Server Error</h1>\n <h2>{error}</h2>\n <div class=\"traceback-container\">\n <p class=\"traceback-title\">Traceback</p>\n <div>{exc_html}</div>\n </div>\n {js}\n </body>\n</html>\n\"\"\"\n\nFRAME_TEMPLATE = \"\"\"\n<div>\n <p class=\"frame-title\">File <span class=\"frame-filename\">{frame_filename}</span>,\n line <i>{frame_lineno}</i>,\n in <b>{frame_name}</b>\n <span class=\"collapse-btn\" data-frame-id=\"{frame_filename}-{frame_lineno}\" onclick=\"collapse(this)\">{collapse_button}</span>\n </p>\n <div id=\"{frame_filename}-{frame_lineno}\" class=\"source-code {collapsed}\">{code_context}</div>\n</div>\n\"\"\" # noqa: E501\n\nLINE = \"\"\"\n<p><span class=\"frame-line\">\n<span class=\"lineno\">{lineno}.</span> {line}</span></p>\n\"\"\"\n\nCENTER_LINE = \"\"\"\n<p class=\"center-line\"><span class=\"frame-line center-line\">\n<span class=\"lineno\">{lineno}.</span> {line}</span></p>\n\"\"\"\n\n\nclass ServerErrorMiddleware:\n \"\"\"\n Handles returning 500 responses when a server error occurs.\n\n If 'debug' is set, then traceback responses will be returned,\n otherwise the designated 'handler' will be called.\n\n This middleware class should generally be used to wrap *everything*\n else up, so that unhandled exceptions anywhere in the stack\n always result in an appropriate 500 response.\n \"\"\"\n\n def __init__(\n self, app: ASGIApp, handler: typing.Callable = None, debug: bool = False\n ) -> None:\n self.app = app\n self.handler = handler\n self.debug = debug\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] != \"http\":\n await self.app(scope, receive, send)\n return\n\n response_started = False\n\n async def _send(message: Message) -> None:\n nonlocal response_started, send\n\n if message[\"type\"] == \"http.response.start\":\n response_started = True\n await send(message)\n\n try:\n await self.app(scope, receive, _send)\n except Exception as exc:\n request = Request(scope)\n if self.debug:\n # In debug mode, return traceback responses.\n response = self.debug_response(request, exc)\n elif self.handler is None:\n # Use our default 500 error handler.\n response = self.error_response(request, exc)\n else:\n # Use an installed 500 error handler.\n if asyncio.iscoroutinefunction(self.handler):\n response = await self.handler(request, exc)\n else:\n response = await run_in_threadpool(self.handler, request, exc)\n\n if not response_started:\n await response(scope, receive, send)\n\n # We always continue to raise the exception.\n # This allows servers to log the error, or allows test clients\n # to optionally raise the error within the test case.\n raise exc\n\n def format_line(\n self, index: int, line: str, frame_lineno: int, frame_index: int\n ) -> str:\n values = {\n # HTML escape - line could contain < or >\n \"line\": html.escape(line).replace(\" \", \" \"),\n \"lineno\": (frame_lineno - frame_index) + index,\n }\n\n if index != frame_index:\n return LINE.format(**values)\n return CENTER_LINE.format(**values)\n\n def generate_frame_html(self, frame: inspect.FrameInfo, is_collapsed: bool) -> str:\n code_context = \"\".join(\n self.format_line(index, line, frame.lineno, frame.index) # type: ignore\n for index, line in enumerate(frame.code_context or [])\n )\n\n values = {\n # HTML escape - filename could contain < or >, especially if it's a virtual\n # file e.g. 
<stdin> in the REPL\n \"frame_filename\": html.escape(frame.filename),\n \"frame_lineno\": frame.lineno,\n # HTML escape - if you try very hard it's possible to name a function with <\n # or >\n \"frame_name\": html.escape(frame.function),\n \"code_context\": code_context,\n \"collapsed\": \"collapsed\" if is_collapsed else \"\",\n \"collapse_button\": \"+\" if is_collapsed else \"‒\",\n }\n return FRAME_TEMPLATE.format(**values)\n\n def generate_html(self, exc: Exception, limit: int = 7) -> str:\n traceback_obj = traceback.TracebackException.from_exception(\n exc, capture_locals=True\n )\n\n exc_html = \"\"\n is_collapsed = False\n exc_traceback = exc.__traceback__\n if exc_traceback is not None:\n frames = inspect.getinnerframes(exc_traceback, limit)\n for frame in reversed(frames):\n exc_html += self.generate_frame_html(frame, is_collapsed)\n is_collapsed = True\n\n # escape error class and text\n error = (\n f\"{html.escape(traceback_obj.exc_type.__name__)}: \"\n f\"{html.escape(str(traceback_obj))}\"\n )\n\n return TEMPLATE.format(styles=STYLES, js=JS, error=error, exc_html=exc_html)\n\n def generate_plain_text(self, exc: Exception) -> str:\n return \"\".join(traceback.format_exception(type(exc), exc, exc.__traceback__))\n\n def debug_response(self, request: Request, exc: Exception) -> Response:\n accept = request.headers.get(\"accept\", \"\")\n\n if \"text/html\" in accept:\n content = self.generate_html(exc)\n return HTMLResponse(content, status_code=500)\n content = self.generate_plain_text(exc)\n return PlainTextResponse(content, status_code=500)\n\n def error_response(self, request: Request, exc: Exception) -> Response:\n return PlainTextResponse(\"Internal Server Error\", status_code=500)\n", "path": "starlette/middleware/errors.py"}]} | 2,834 | 354 |
gh_patches_debug_27994 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1319 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot specify custom Python shebang for hook executable
I am working on a shared Python environment with `python3.6` as executable and I want the shebang to point to the correct executable `#!/usr/bin/env python3.6`. However, even though I specify
```
default_language_version:
python: python3.6
```
or `language_version: python3.6` in `hooks`, it always picks `#!/usr/bin/env python` as the shebang even though there is no `python` executable. Is there a way to specify a custom shebang?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands/install_uninstall.py`
Content:
```
1 import itertools
2 import logging
3 import os.path
4 import shutil
5 import sys
6 from typing import Optional
7 from typing import Sequence
8 from typing import Tuple
9
10 from pre_commit import git
11 from pre_commit import output
12 from pre_commit.clientlib import load_config
13 from pre_commit.repository import all_hooks
14 from pre_commit.repository import install_hook_envs
15 from pre_commit.store import Store
16 from pre_commit.util import make_executable
17 from pre_commit.util import resource_text
18
19
20 logger = logging.getLogger(__name__)
21
22 # This is used to identify the hook file we install
23 PRIOR_HASHES = (
24 '4d9958c90bc262f47553e2c073f14cfe',
25 'd8ee923c46731b42cd95cc869add4062',
26 '49fd668cb42069aa1b6048464be5d395',
27 '79f09a650522a87b0da915d0d983b2de',
28 'e358c9dae00eac5d06b38dfdb1e33a8c',
29 )
30 CURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'
31 TEMPLATE_START = '# start templated\n'
32 TEMPLATE_END = '# end templated\n'
33
34
35 def _hook_paths(
36 hook_type: str,
37 git_dir: Optional[str] = None,
38 ) -> Tuple[str, str]:
39 git_dir = git_dir if git_dir is not None else git.get_git_dir()
40 pth = os.path.join(git_dir, 'hooks', hook_type)
41 return pth, f'{pth}.legacy'
42
43
44 def is_our_script(filename: str) -> bool:
45 if not os.path.exists(filename): # pragma: windows no cover (symlink)
46 return False
47 with open(filename) as f:
48 contents = f.read()
49 return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)
50
51
52 def shebang() -> str:
53 if sys.platform == 'win32':
54 py = 'python'
55 else:
56 # Homebrew/homebrew-core#35825: be more timid about appropriate `PATH`
57 path_choices = [p for p in os.defpath.split(os.pathsep) if p]
58 exe_choices = [
59 f'python{sys.version_info[0]}.{sys.version_info[1]}',
60 f'python{sys.version_info[0]}',
61 ]
62 for path, exe in itertools.product(path_choices, exe_choices):
63 if os.access(os.path.join(path, exe), os.X_OK):
64 py = exe
65 break
66 else:
67 py = 'python'
68 return f'#!/usr/bin/env {py}'
69
70
71 def _install_hook_script(
72 config_file: str,
73 hook_type: str,
74 overwrite: bool = False,
75 skip_on_missing_config: bool = False,
76 git_dir: Optional[str] = None,
77 ) -> None:
78 hook_path, legacy_path = _hook_paths(hook_type, git_dir=git_dir)
79
80 os.makedirs(os.path.dirname(hook_path), exist_ok=True)
81
82 # If we have an existing hook, move it to pre-commit.legacy
83 if os.path.lexists(hook_path) and not is_our_script(hook_path):
84 shutil.move(hook_path, legacy_path)
85
86 # If we specify overwrite, we simply delete the legacy file
87 if overwrite and os.path.exists(legacy_path):
88 os.remove(legacy_path)
89 elif os.path.exists(legacy_path):
90 output.write_line(
91 f'Running in migration mode with existing hooks at {legacy_path}\n'
92 f'Use -f to use only pre-commit.',
93 )
94
95 args = ['hook-impl', f'--config={config_file}', f'--hook-type={hook_type}']
96 if skip_on_missing_config:
97 args.append('--skip-on-missing-config')
98 params = {'INSTALL_PYTHON': sys.executable, 'ARGS': args}
99
100 with open(hook_path, 'w') as hook_file:
101 contents = resource_text('hook-tmpl')
102 before, rest = contents.split(TEMPLATE_START)
103 to_template, after = rest.split(TEMPLATE_END)
104
105 before = before.replace('#!/usr/bin/env python3', shebang())
106
107 hook_file.write(before + TEMPLATE_START)
108 for line in to_template.splitlines():
109 var = line.split()[0]
110 hook_file.write(f'{var} = {params[var]!r}\n')
111 hook_file.write(TEMPLATE_END + after)
112 make_executable(hook_path)
113
114 output.write_line(f'pre-commit installed at {hook_path}')
115
116
117 def install(
118 config_file: str,
119 store: Store,
120 hook_types: Sequence[str],
121 overwrite: bool = False,
122 hooks: bool = False,
123 skip_on_missing_config: bool = False,
124 git_dir: Optional[str] = None,
125 ) -> int:
126 if git_dir is None and git.has_core_hookpaths_set():
127 logger.error(
128 'Cowardly refusing to install hooks with `core.hooksPath` set.\n'
129 'hint: `git config --unset-all core.hooksPath`',
130 )
131 return 1
132
133 for hook_type in hook_types:
134 _install_hook_script(
135 config_file, hook_type,
136 overwrite=overwrite,
137 skip_on_missing_config=skip_on_missing_config,
138 git_dir=git_dir,
139 )
140
141 if hooks:
142 install_hooks(config_file, store)
143
144 return 0
145
146
147 def install_hooks(config_file: str, store: Store) -> int:
148 install_hook_envs(all_hooks(load_config(config_file), store), store)
149 return 0
150
151
152 def _uninstall_hook_script(hook_type: str) -> None:
153 hook_path, legacy_path = _hook_paths(hook_type)
154
155 # If our file doesn't exist or it isn't ours, gtfo.
156 if not os.path.exists(hook_path) or not is_our_script(hook_path):
157 return
158
159 os.remove(hook_path)
160 output.write_line(f'{hook_type} uninstalled')
161
162 if os.path.exists(legacy_path):
163 os.rename(legacy_path, hook_path)
164 output.write_line(f'Restored previous hooks to {hook_path}')
165
166
167 def uninstall(hook_types: Sequence[str]) -> int:
168 for hook_type in hook_types:
169 _uninstall_hook_script(hook_type)
170 return 0
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py
--- a/pre_commit/commands/install_uninstall.py
+++ b/pre_commit/commands/install_uninstall.py
@@ -30,6 +30,10 @@
CURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'
TEMPLATE_START = '# start templated\n'
TEMPLATE_END = '# end templated\n'
+# Homebrew/homebrew-core#35825: be more timid about appropriate `PATH`
+# #1312 os.defpath is too restrictive on BSD
+POSIX_SEARCH_PATH = ('/usr/local/bin', '/usr/bin', '/bin')
+SYS_EXE = os.path.basename(os.path.realpath(sys.executable))
def _hook_paths(
@@ -51,20 +55,21 @@
def shebang() -> str:
if sys.platform == 'win32':
- py = 'python'
+ py = SYS_EXE
else:
- # Homebrew/homebrew-core#35825: be more timid about appropriate `PATH`
- path_choices = [p for p in os.defpath.split(os.pathsep) if p]
exe_choices = [
f'python{sys.version_info[0]}.{sys.version_info[1]}',
f'python{sys.version_info[0]}',
]
- for path, exe in itertools.product(path_choices, exe_choices):
+ # avoid searching for bare `python` as it's likely to be python 2
+ if SYS_EXE != 'python':
+ exe_choices.append(SYS_EXE)
+ for path, exe in itertools.product(POSIX_SEARCH_PATH, exe_choices):
if os.access(os.path.join(path, exe), os.X_OK):
py = exe
break
else:
- py = 'python'
+ py = SYS_EXE
return f'#!/usr/bin/env {py}'
| {"golden_diff": "diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py\n--- a/pre_commit/commands/install_uninstall.py\n+++ b/pre_commit/commands/install_uninstall.py\n@@ -30,6 +30,10 @@\n CURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'\n TEMPLATE_START = '# start templated\\n'\n TEMPLATE_END = '# end templated\\n'\n+# Homebrew/homebrew-core#35825: be more timid about appropriate `PATH`\n+# #1312 os.defpath is too restrictive on BSD\n+POSIX_SEARCH_PATH = ('/usr/local/bin', '/usr/bin', '/bin')\n+SYS_EXE = os.path.basename(os.path.realpath(sys.executable))\n \n \n def _hook_paths(\n@@ -51,20 +55,21 @@\n \n def shebang() -> str:\n if sys.platform == 'win32':\n- py = 'python'\n+ py = SYS_EXE\n else:\n- # Homebrew/homebrew-core#35825: be more timid about appropriate `PATH`\n- path_choices = [p for p in os.defpath.split(os.pathsep) if p]\n exe_choices = [\n f'python{sys.version_info[0]}.{sys.version_info[1]}',\n f'python{sys.version_info[0]}',\n ]\n- for path, exe in itertools.product(path_choices, exe_choices):\n+ # avoid searching for bare `python` as it's likely to be python 2\n+ if SYS_EXE != 'python':\n+ exe_choices.append(SYS_EXE)\n+ for path, exe in itertools.product(POSIX_SEARCH_PATH, exe_choices):\n if os.access(os.path.join(path, exe), os.X_OK):\n py = exe\n break\n else:\n- py = 'python'\n+ py = SYS_EXE\n return f'#!/usr/bin/env {py}'\n", "issue": "Cannot specify custom Python shebang for hook executable\nI am working on a shared Python environment with `python3.6` as executable and I want the shebang to point to the correct executable `#!/usr/bin/env python3.6`. However, even though I specify \r\n```\r\ndefault_language_version:\r\n python: python3.6\r\n```\r\nor `language_version: python3.6` in `hooks`, it always picks `#!/usr/bin/env python` as the shebang even though there is no `python` executable. 
Is there a way to specify a custom shebang?\n", "before_files": [{"content": "import itertools\nimport logging\nimport os.path\nimport shutil\nimport sys\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.repository import all_hooks\nfrom pre_commit.repository import install_hook_envs\nfrom pre_commit.store import Store\nfrom pre_commit.util import make_executable\nfrom pre_commit.util import resource_text\n\n\nlogger = logging.getLogger(__name__)\n\n# This is used to identify the hook file we install\nPRIOR_HASHES = (\n '4d9958c90bc262f47553e2c073f14cfe',\n 'd8ee923c46731b42cd95cc869add4062',\n '49fd668cb42069aa1b6048464be5d395',\n '79f09a650522a87b0da915d0d983b2de',\n 'e358c9dae00eac5d06b38dfdb1e33a8c',\n)\nCURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'\nTEMPLATE_START = '# start templated\\n'\nTEMPLATE_END = '# end templated\\n'\n\n\ndef _hook_paths(\n hook_type: str,\n git_dir: Optional[str] = None,\n) -> Tuple[str, str]:\n git_dir = git_dir if git_dir is not None else git.get_git_dir()\n pth = os.path.join(git_dir, 'hooks', hook_type)\n return pth, f'{pth}.legacy'\n\n\ndef is_our_script(filename: str) -> bool:\n if not os.path.exists(filename): # pragma: windows no cover (symlink)\n return False\n with open(filename) as f:\n contents = f.read()\n return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)\n\n\ndef shebang() -> str:\n if sys.platform == 'win32':\n py = 'python'\n else:\n # Homebrew/homebrew-core#35825: be more timid about appropriate `PATH`\n path_choices = [p for p in os.defpath.split(os.pathsep) if p]\n exe_choices = [\n f'python{sys.version_info[0]}.{sys.version_info[1]}',\n f'python{sys.version_info[0]}',\n ]\n for path, exe in itertools.product(path_choices, exe_choices):\n if os.access(os.path.join(path, exe), os.X_OK):\n py = exe\n break\n else:\n py = 'python'\n return f'#!/usr/bin/env {py}'\n\n\ndef _install_hook_script(\n config_file: str,\n hook_type: str,\n overwrite: bool = False,\n skip_on_missing_config: bool = False,\n git_dir: Optional[str] = None,\n) -> None:\n hook_path, legacy_path = _hook_paths(hook_type, git_dir=git_dir)\n\n os.makedirs(os.path.dirname(hook_path), exist_ok=True)\n\n # If we have an existing hook, move it to pre-commit.legacy\n if os.path.lexists(hook_path) and not is_our_script(hook_path):\n shutil.move(hook_path, legacy_path)\n\n # If we specify overwrite, we simply delete the legacy file\n if overwrite and os.path.exists(legacy_path):\n os.remove(legacy_path)\n elif os.path.exists(legacy_path):\n output.write_line(\n f'Running in migration mode with existing hooks at {legacy_path}\\n'\n f'Use -f to use only pre-commit.',\n )\n\n args = ['hook-impl', f'--config={config_file}', f'--hook-type={hook_type}']\n if skip_on_missing_config:\n args.append('--skip-on-missing-config')\n params = {'INSTALL_PYTHON': sys.executable, 'ARGS': args}\n\n with open(hook_path, 'w') as hook_file:\n contents = resource_text('hook-tmpl')\n before, rest = contents.split(TEMPLATE_START)\n to_template, after = rest.split(TEMPLATE_END)\n\n before = before.replace('#!/usr/bin/env python3', shebang())\n\n hook_file.write(before + TEMPLATE_START)\n for line in to_template.splitlines():\n var = line.split()[0]\n hook_file.write(f'{var} = {params[var]!r}\\n')\n hook_file.write(TEMPLATE_END + after)\n make_executable(hook_path)\n\n output.write_line(f'pre-commit installed at {hook_path}')\n\n\ndef 
install(\n config_file: str,\n store: Store,\n hook_types: Sequence[str],\n overwrite: bool = False,\n hooks: bool = False,\n skip_on_missing_config: bool = False,\n git_dir: Optional[str] = None,\n) -> int:\n if git_dir is None and git.has_core_hookpaths_set():\n logger.error(\n 'Cowardly refusing to install hooks with `core.hooksPath` set.\\n'\n 'hint: `git config --unset-all core.hooksPath`',\n )\n return 1\n\n for hook_type in hook_types:\n _install_hook_script(\n config_file, hook_type,\n overwrite=overwrite,\n skip_on_missing_config=skip_on_missing_config,\n git_dir=git_dir,\n )\n\n if hooks:\n install_hooks(config_file, store)\n\n return 0\n\n\ndef install_hooks(config_file: str, store: Store) -> int:\n install_hook_envs(all_hooks(load_config(config_file), store), store)\n return 0\n\n\ndef _uninstall_hook_script(hook_type: str) -> None:\n hook_path, legacy_path = _hook_paths(hook_type)\n\n # If our file doesn't exist or it isn't ours, gtfo.\n if not os.path.exists(hook_path) or not is_our_script(hook_path):\n return\n\n os.remove(hook_path)\n output.write_line(f'{hook_type} uninstalled')\n\n if os.path.exists(legacy_path):\n os.rename(legacy_path, hook_path)\n output.write_line(f'Restored previous hooks to {hook_path}')\n\n\ndef uninstall(hook_types: Sequence[str]) -> int:\n for hook_type in hook_types:\n _uninstall_hook_script(hook_type)\n return 0\n", "path": "pre_commit/commands/install_uninstall.py"}], "after_files": [{"content": "import itertools\nimport logging\nimport os.path\nimport shutil\nimport sys\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.repository import all_hooks\nfrom pre_commit.repository import install_hook_envs\nfrom pre_commit.store import Store\nfrom pre_commit.util import make_executable\nfrom pre_commit.util import resource_text\n\n\nlogger = logging.getLogger(__name__)\n\n# This is used to identify the hook file we install\nPRIOR_HASHES = (\n '4d9958c90bc262f47553e2c073f14cfe',\n 'd8ee923c46731b42cd95cc869add4062',\n '49fd668cb42069aa1b6048464be5d395',\n '79f09a650522a87b0da915d0d983b2de',\n 'e358c9dae00eac5d06b38dfdb1e33a8c',\n)\nCURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'\nTEMPLATE_START = '# start templated\\n'\nTEMPLATE_END = '# end templated\\n'\n# Homebrew/homebrew-core#35825: be more timid about appropriate `PATH`\n# #1312 os.defpath is too restrictive on BSD\nPOSIX_SEARCH_PATH = ('/usr/local/bin', '/usr/bin', '/bin')\nSYS_EXE = os.path.basename(os.path.realpath(sys.executable))\n\n\ndef _hook_paths(\n hook_type: str,\n git_dir: Optional[str] = None,\n) -> Tuple[str, str]:\n git_dir = git_dir if git_dir is not None else git.get_git_dir()\n pth = os.path.join(git_dir, 'hooks', hook_type)\n return pth, f'{pth}.legacy'\n\n\ndef is_our_script(filename: str) -> bool:\n if not os.path.exists(filename): # pragma: windows no cover (symlink)\n return False\n with open(filename) as f:\n contents = f.read()\n return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)\n\n\ndef shebang() -> str:\n if sys.platform == 'win32':\n py = SYS_EXE\n else:\n exe_choices = [\n f'python{sys.version_info[0]}.{sys.version_info[1]}',\n f'python{sys.version_info[0]}',\n ]\n # avoid searching for bare `python` as it's likely to be python 2\n if SYS_EXE != 'python':\n exe_choices.append(SYS_EXE)\n for path, exe in itertools.product(POSIX_SEARCH_PATH, exe_choices):\n if os.access(os.path.join(path, exe), 
os.X_OK):\n py = exe\n break\n else:\n py = SYS_EXE\n return f'#!/usr/bin/env {py}'\n\n\ndef _install_hook_script(\n config_file: str,\n hook_type: str,\n overwrite: bool = False,\n skip_on_missing_config: bool = False,\n git_dir: Optional[str] = None,\n) -> None:\n hook_path, legacy_path = _hook_paths(hook_type, git_dir=git_dir)\n\n os.makedirs(os.path.dirname(hook_path), exist_ok=True)\n\n # If we have an existing hook, move it to pre-commit.legacy\n if os.path.lexists(hook_path) and not is_our_script(hook_path):\n shutil.move(hook_path, legacy_path)\n\n # If we specify overwrite, we simply delete the legacy file\n if overwrite and os.path.exists(legacy_path):\n os.remove(legacy_path)\n elif os.path.exists(legacy_path):\n output.write_line(\n f'Running in migration mode with existing hooks at {legacy_path}\\n'\n f'Use -f to use only pre-commit.',\n )\n\n args = ['hook-impl', f'--config={config_file}', f'--hook-type={hook_type}']\n if skip_on_missing_config:\n args.append('--skip-on-missing-config')\n params = {'INSTALL_PYTHON': sys.executable, 'ARGS': args}\n\n with open(hook_path, 'w') as hook_file:\n contents = resource_text('hook-tmpl')\n before, rest = contents.split(TEMPLATE_START)\n to_template, after = rest.split(TEMPLATE_END)\n\n before = before.replace('#!/usr/bin/env python3', shebang())\n\n hook_file.write(before + TEMPLATE_START)\n for line in to_template.splitlines():\n var = line.split()[0]\n hook_file.write(f'{var} = {params[var]!r}\\n')\n hook_file.write(TEMPLATE_END + after)\n make_executable(hook_path)\n\n output.write_line(f'pre-commit installed at {hook_path}')\n\n\ndef install(\n config_file: str,\n store: Store,\n hook_types: Sequence[str],\n overwrite: bool = False,\n hooks: bool = False,\n skip_on_missing_config: bool = False,\n git_dir: Optional[str] = None,\n) -> int:\n if git_dir is None and git.has_core_hookpaths_set():\n logger.error(\n 'Cowardly refusing to install hooks with `core.hooksPath` set.\\n'\n 'hint: `git config --unset-all core.hooksPath`',\n )\n return 1\n\n for hook_type in hook_types:\n _install_hook_script(\n config_file, hook_type,\n overwrite=overwrite,\n skip_on_missing_config=skip_on_missing_config,\n git_dir=git_dir,\n )\n\n if hooks:\n install_hooks(config_file, store)\n\n return 0\n\n\ndef install_hooks(config_file: str, store: Store) -> int:\n install_hook_envs(all_hooks(load_config(config_file), store), store)\n return 0\n\n\ndef _uninstall_hook_script(hook_type: str) -> None:\n hook_path, legacy_path = _hook_paths(hook_type)\n\n # If our file doesn't exist or it isn't ours, gtfo.\n if not os.path.exists(hook_path) or not is_our_script(hook_path):\n return\n\n os.remove(hook_path)\n output.write_line(f'{hook_type} uninstalled')\n\n if os.path.exists(legacy_path):\n os.rename(legacy_path, hook_path)\n output.write_line(f'Restored previous hooks to {hook_path}')\n\n\ndef uninstall(hook_types: Sequence[str]) -> int:\n for hook_type in hook_types:\n _uninstall_hook_script(hook_type)\n return 0\n", "path": "pre_commit/commands/install_uninstall.py"}]} | 2,236 | 439 |
gh_patches_debug_3535 | rasdani/github-patches | git_diff | scikit-image__scikit-image-1206 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
canny edge detection throws AttributeError exception
I was trying out http://scikit-image.org/docs/dev/auto_examples/plot_canny.html
And the following lines of code:
```python
# Generate noisy image of a square
im = np.zeros((128, 128))
im[32:-32, 32:-32] = 1

im = ndimage.rotate(im, 15, mode='constant')
im = ndimage.gaussian_filter(im, 4)
im += 0.2 * np.random.random(im.shape)

# Compute the Canny filter for two values of sigma
edges1 = filter.canny(im)
```
produced this:
```
Traceback (most recent call last):
  File "<stdin>", line 2, in <module>
  File "/home/abc/virtualenvs/activepython/lib/python2.7/site-packages/scikit_image-0.11dev-py2.7-linux-x86_64.egg/skimage/_shared/utils.py", line 46, in __call__
    msg = 'Call to deprecated function `%s`.' % func.__name__
AttributeError: 'numpy.ndarray' object has no attribute '__name__'
```
Any guess? Seems I'm using the latest version of scikit-image.
--- END ISSUE ---
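The traceback is the classic signature of a parameterized class decorator applied bare. A minimal sketch of that failure mode (illustrative names only, not skimage's actual implementation, though `skimage._shared.utils.deprecated` has this general shape):
```python
import functools
import warnings


class deprecated:
    """Sketch of a decorator meant to be used as @deprecated('new.location')."""

    def __init__(self, alt_func=None):
        self.alt_func = alt_func  # intended to be a string naming the replacement

    def __call__(self, func):
        # Expects the function being decorated; anything else lacks __name__.
        msg = 'Call to deprecated function `%s`.' % func.__name__

        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            warnings.warn(msg, category=DeprecationWarning)
            return func(*args, **kwargs)

        return wrapped


@deprecated  # applied bare: the function itself is stored as alt_func...
def canny(image):
    return image

# ...so `canny` is now a `deprecated` instance, and calling `canny(im)` runs
# __call__(im), where `im.__name__` raises AttributeError for an ndarray --
# exactly the traceback above. `@deprecated('skimage.feature.canny')` avoids it.
```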
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/filter/__init__.py`
Content:
```
1 from .lpi_filter import inverse, wiener, LPIFilter2D
2 from ._gaussian import gaussian_filter
3 from .edges import (sobel, hsobel, vsobel, scharr, hscharr, vscharr, prewitt,
4 hprewitt, vprewitt, roberts, roberts_positive_diagonal,
5 roberts_negative_diagonal)
6 from ._rank_order import rank_order
7 from ._gabor import gabor_kernel, gabor_filter
8 from .thresholding import (threshold_adaptive, threshold_otsu, threshold_yen,
9 threshold_isodata)
10 from . import rank
11
12 from skimage._shared.utils import deprecated
13 from skimage import restoration
14 denoise_bilateral = deprecated('skimage.restoration.denoise_bilateral')\
15 (restoration.denoise_bilateral)
16 denoise_tv_bregman = deprecated('skimage.restoration.denoise_tv_bregman')\
17 (restoration.denoise_tv_bregman)
18 denoise_tv_chambolle = deprecated('skimage.restoration.denoise_tv_chambolle')\
19 (restoration.denoise_tv_chambolle)
20
21 # Backward compatibility v<0.11
22 @deprecated
23 def canny(*args, **kwargs):
24 # Hack to avoid circular import
25 from skimage.feature._canny import canny as canny_
26 return canny_(*args, **kwargs)
27
28
29 __all__ = ['inverse',
30 'wiener',
31 'LPIFilter2D',
32 'gaussian_filter',
33 'canny',
34 'sobel',
35 'hsobel',
36 'vsobel',
37 'scharr',
38 'hscharr',
39 'vscharr',
40 'prewitt',
41 'hprewitt',
42 'vprewitt',
43 'roberts',
44 'roberts_positive_diagonal',
45 'roberts_negative_diagonal',
46 'denoise_tv_chambolle',
47 'denoise_bilateral',
48 'denoise_tv_bregman',
49 'rank_order',
50 'gabor_kernel',
51 'gabor_filter',
52 'threshold_adaptive',
53 'threshold_otsu',
54 'threshold_yen',
55 'threshold_isodata',
56 'rank']
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/filter/__init__.py b/skimage/filter/__init__.py
--- a/skimage/filter/__init__.py
+++ b/skimage/filter/__init__.py
@@ -19,7 +19,7 @@
(restoration.denoise_tv_chambolle)
# Backward compatibility v<0.11
-@deprecated
+@deprecated('skimage.feature.canny')
def canny(*args, **kwargs):
# Hack to avoid circular import
from skimage.feature._canny import canny as canny_
| {"golden_diff": "diff --git a/skimage/filter/__init__.py b/skimage/filter/__init__.py\n--- a/skimage/filter/__init__.py\n+++ b/skimage/filter/__init__.py\n@@ -19,7 +19,7 @@\n (restoration.denoise_tv_chambolle)\n \n # Backward compatibility v<0.11\n-@deprecated\n+@deprecated('skimage.feature.canny')\n def canny(*args, **kwargs):\n # Hack to avoid circular import\n from skimage.feature._canny import canny as canny_\n", "issue": "canny edge detection throws AttributeError exception\nI was trying out http://scikit-image.org/docs/dev/auto_examples/plot_canny.html \n\nAnd the following lines of code:\n# Generate noisy image of a square\n\nim = np.zeros((128, 128))\nim[32:-32, 32:-32] = 1\n\nim = ndimage.rotate(im, 15, mode='constant')\nim = ndimage.gaussian_filter(im, 4)\nim += 0.2 \\* np.random.random(im.shape)\n# Compute the Canny filter for two values of sigma\n\nedges1 = filter.canny(im)\n\n produced this:\n\nTraceback (most recent call last):\n File \"<stdin>\", line 2, in <module>\n File \"/home/abc/virtualenvs/activepython/lib/python2.7/site-packages/scikit_image-0.11dev-py2.7-linux-x86_64.egg/skimage/_shared/utils.py\", line 46, in __call__\n msg = 'Call to deprecated function `%s`.' % func.**name**\nAttributeError: 'numpy.ndarray' object has no attribute '**name**'\n\nAny guess? Seems I'm using the latest version of scikit-image. \n\n", "before_files": [{"content": "from .lpi_filter import inverse, wiener, LPIFilter2D\nfrom ._gaussian import gaussian_filter\nfrom .edges import (sobel, hsobel, vsobel, scharr, hscharr, vscharr, prewitt,\n hprewitt, vprewitt, roberts, roberts_positive_diagonal,\n roberts_negative_diagonal)\nfrom ._rank_order import rank_order\nfrom ._gabor import gabor_kernel, gabor_filter\nfrom .thresholding import (threshold_adaptive, threshold_otsu, threshold_yen,\n threshold_isodata)\nfrom . import rank\n\nfrom skimage._shared.utils import deprecated\nfrom skimage import restoration\ndenoise_bilateral = deprecated('skimage.restoration.denoise_bilateral')\\\n (restoration.denoise_bilateral)\ndenoise_tv_bregman = deprecated('skimage.restoration.denoise_tv_bregman')\\\n (restoration.denoise_tv_bregman)\ndenoise_tv_chambolle = deprecated('skimage.restoration.denoise_tv_chambolle')\\\n (restoration.denoise_tv_chambolle)\n\n# Backward compatibility v<0.11\n@deprecated\ndef canny(*args, **kwargs):\n # Hack to avoid circular import\n from skimage.feature._canny import canny as canny_\n return canny_(*args, **kwargs)\n\n\n__all__ = ['inverse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian_filter',\n 'canny',\n 'sobel',\n 'hsobel',\n 'vsobel',\n 'scharr',\n 'hscharr',\n 'vscharr',\n 'prewitt',\n 'hprewitt',\n 'vprewitt',\n 'roberts',\n 'roberts_positive_diagonal',\n 'roberts_negative_diagonal',\n 'denoise_tv_chambolle',\n 'denoise_bilateral',\n 'denoise_tv_bregman',\n 'rank_order',\n 'gabor_kernel',\n 'gabor_filter',\n 'threshold_adaptive',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'rank']\n", "path": "skimage/filter/__init__.py"}], "after_files": [{"content": "from .lpi_filter import inverse, wiener, LPIFilter2D\nfrom ._gaussian import gaussian_filter\nfrom .edges import (sobel, hsobel, vsobel, scharr, hscharr, vscharr, prewitt,\n hprewitt, vprewitt, roberts, roberts_positive_diagonal,\n roberts_negative_diagonal)\nfrom ._rank_order import rank_order\nfrom ._gabor import gabor_kernel, gabor_filter\nfrom .thresholding import (threshold_adaptive, threshold_otsu, threshold_yen,\n threshold_isodata)\nfrom . 
import rank\n\nfrom skimage._shared.utils import deprecated\nfrom skimage import restoration\ndenoise_bilateral = deprecated('skimage.restoration.denoise_bilateral')\\\n (restoration.denoise_bilateral)\ndenoise_tv_bregman = deprecated('skimage.restoration.denoise_tv_bregman')\\\n (restoration.denoise_tv_bregman)\ndenoise_tv_chambolle = deprecated('skimage.restoration.denoise_tv_chambolle')\\\n (restoration.denoise_tv_chambolle)\n\n# Backward compatibility v<0.11\n@deprecated('skimage.feature.canny')\ndef canny(*args, **kwargs):\n # Hack to avoid circular import\n from skimage.feature._canny import canny as canny_\n return canny_(*args, **kwargs)\n\n\n__all__ = ['inverse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian_filter',\n 'canny',\n 'sobel',\n 'hsobel',\n 'vsobel',\n 'scharr',\n 'hscharr',\n 'vscharr',\n 'prewitt',\n 'hprewitt',\n 'vprewitt',\n 'roberts',\n 'roberts_positive_diagonal',\n 'roberts_negative_diagonal',\n 'denoise_tv_chambolle',\n 'denoise_bilateral',\n 'denoise_tv_bregman',\n 'rank_order',\n 'gabor_kernel',\n 'gabor_filter',\n 'threshold_adaptive',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'rank']\n", "path": "skimage/filter/__init__.py"}]} | 1,114 | 125 |
gh_patches_debug_4015 | rasdani/github-patches | git_diff | rasterio__rasterio-914 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
show_hist() fails with nans
I am getting an `UnboundLocalError: local variable 'ymin' referenced before assignment` when I am trying to use the show_hist() function in the plotting module.
```
import rasterio
from rasterio.plot import show_hist
from rasterio.plot import show
import matplotlib.pyplot as plt
fname = r"C:\workspace\GLCM\output\glcm_rasters\2014_05\10\R02028_10_asm.tif"
src = rasterio.open(fname)
fig, (axrgb, axhist) = plt.subplots(1, 2, figsize=(14,7))
show(src, ax=axrgb)
show_hist(src, bins=50, histtype='bar', lw=0.0, stacked=False, alpha=0.3, ax=axhist)
plt.show()
```
[here](https://drive.google.com/file/d/0B17WT3pXaqrYcUVQbXRVYkxqeXc/view?usp=sharing) is the offending file.
conda info:
```
platform : win-64
conda version : 4.1.11
conda-env version : 2.5.2
conda-build version : 1.20.0
python version : 2.7.11.final.0
requests version : 2.9.1
root environment : c:\Anaconda2 (writable)
default environment : c:\Anaconda2
envs directories : c:\Anaconda2\envs
package cache : c:\Anaconda2\pkgs
channel URLs : https://repo.continuum.io/pkgs/free/win-64/
https://repo.continuum.io/pkgs/free/noarch/
https://repo.continuum.io/pkgs/pro/win-64/
https://repo.continuum.io/pkgs/pro/noarch/
https://repo.continuum.io/pkgs/msys2/win-64/
https://repo.continuum.io/pkgs/msys2/noarch/
config file : None
offline mode : False
is foreign system : False
```
Package info:
```
rasterio 0.36.0 <pip>
matplotlib 1.5.1 np110py27_0
```
--- END ISSUE ---
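The mechanism behind the `ymin` error: `show_hist` derives the plot range from `arr.min(), arr.max()`, and both propagate NaN, so matplotlib's histogram never assigns its internal `ymin`/`ymax`. A tiny sketch of the difference (plain NumPy, not rasterio code):
```python
import numpy as np

arr = np.array([[0.1, 0.5],
                [np.nan, 0.9]])

print(arr.min(), arr.max())            # nan nan -> unusable as a histogram range
print(np.nanmin(arr), np.nanmax(arr))  # 0.1 0.9 -> what the plot actually needs
```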
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/plot.py`
Content:
```
1 """Implementations of various common operations.
2
3 Including `show()` for displaying an array or with matplotlib.
4 Most can handle a numpy array or `rasterio.Band()`.
5 Primarily supports `$ rio insp`.
6 """
7
8 from __future__ import absolute_import
9
10 import logging
11 import warnings
12
13 import numpy as np
14
15 import rasterio
16 from rasterio.io import DatasetReader
17 from rasterio.transform import guard_transform
18 from rasterio.compat import zip_longest
19
20 logger = logging.getLogger(__name__)
21
22
23 def get_plt():
24 """import matplotlib.pyplot
25 raise import error if matplotlib is not installed
26 """
27 try:
28 import matplotlib.pyplot as plt
29 return plt
30 except (ImportError, RuntimeError): # pragma: no cover
31 msg = "Could not import matplotlib\n"
32 msg += "matplotlib required for plotting functions"
33 raise ImportError(msg)
34
35
36 def show(source, with_bounds=True, contour=False, contour_label_kws=None,
37 ax=None, title=None, transform=None, **kwargs):
38 """Display a raster or raster band using matplotlib.
39
40 Parameters
41 ----------
42 source : array-like in raster axis order,
43 or (raster dataset, bidx) tuple,
44 or raster dataset,
45 If the tuple (raster dataset, bidx),
46 selects band `bidx` from raster. If raster dataset display the rgb image
47 as defined in the colorinterp metadata, or default to first band.
48 with_bounds : bool (opt)
49 Whether to change the image extent to the spatial bounds of the image,
50 rather than pixel coordinates. Only works when source is
51 (raster dataset, bidx) or raster dataset.
52 contour : bool (opt)
53 Whether to plot the raster data as contours
54 contour_label_kws : dictionary (opt)
55 Keyword arguments for labeling the contours,
56 empty dictionary for no labels.
57 ax : matplotlib axis (opt)
58 Axis to plot on, otherwise uses current axis.
59 title : str, optional
60 Title for the figure.
61 transform : Affine, optional
62 Defines the affine transform if source is an array
63 **kwargs : key, value pairings optional
64 These will be passed to the matplotlib imshow or contour method
65 depending on contour argument.
66 See full lists at:
67 http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.imshow
68 or
69 http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.contour
70
71 Returns
72 -------
73 ax : matplotlib Axes
74 Axes with plot.
75 """
76 plt = get_plt()
77
78 if isinstance(source, tuple):
79 arr = source[0].read(source[1])
80 if with_bounds:
81 kwargs['extent'] = plotting_extent(source[0])
82 elif isinstance(source, DatasetReader):
83 if source.count == 1:
84 arr = source.read(1, masked=True)
85 else:
86 try:
87 source_colorinterp = {source.colorinterp(n): n for n in source.indexes}
88 colorinterp = rasterio.enums.ColorInterp
89 rgb_indexes = [source_colorinterp[ci] for ci in
90 (colorinterp.red, colorinterp.green, colorinterp.blue)]
91 arr = source.read(rgb_indexes, masked=True)
92 arr = reshape_as_image(arr)
93
94 if with_bounds:
95 kwargs['extent'] = plotting_extent(source)
96 except KeyError:
97 arr = source.read(1, masked=True)
98 else:
99 # The source is a numpy array reshape it to image if it has 3+ bands
100 source = np.ma.squeeze(source)
101 if len(source.shape) >= 3:
102 arr = reshape_as_image(source)
103 else:
104 arr = source
105 if transform and with_bounds:
106 kwargs['extent'] = plotting_extent(arr, transform)
107
108 show = False
109 if not ax:
110 show = True
111 ax = plt.gca()
112
113 if contour:
114 if 'cmap' not in kwargs:
115 kwargs['colors'] = kwargs.get('colors', 'red')
116 kwargs['linewidths'] = kwargs.get('linewidths', 1.5)
117 kwargs['alpha'] = kwargs.get('alpha', 0.8)
118
119 C = ax.contour(arr, origin='upper', **kwargs)
120 if contour_label_kws is None:
121 # no explicit label kws passed use defaults
122 contour_label_kws = dict(fontsize=8,
123 inline=True)
124 if contour_label_kws:
125 ax.clabel(C, **contour_label_kws)
126 else:
127 ax.imshow(arr, **kwargs)
128 if title:
129 ax.set_title(title, fontweight='bold')
130
131 if show:
132 plt.show()
133
134 return ax
135
136
137 def plotting_extent(source, transform=None):
138 """Returns an extent in the format needed
139 for matplotlib's imshow (left, right, bottom, top)
140 instead of rasterio's bounds (left, bottom, top, right)
141
142 Parameters
143 ----------
144 source : raster dataset or array in image order (see reshape_as_image)
145 transform: Affine, required if source is array
146 """
147 if hasattr(source, 'bounds'):
148 extent = (source.bounds.left, source.bounds.right,
149 source.bounds.bottom, source.bounds.top)
150 elif not transform:
151 raise ValueError(
152 "transform is required if source is an array")
153 else:
154 transform = guard_transform(transform)
155 rows, cols = source.shape[0:2]
156 left, top = transform * (0, 0)
157 right, bottom = transform * (cols, rows)
158 extent = (left, right, bottom, top)
159
160 return extent
161
162
163 def reshape_as_image(arr):
164 """Returns the source array reshaped into the order
165 expected by image processing and visualization software
166 (matplotlib, scikit-image, etc)
167 by swapping the axes order from (bands, rows, columns)
168 to (rows, columns, bands)
169
170 Parameters
171 ----------
172 source : array-like in a of format (bands, rows, columns)
173 """
174 # swap the axes order from (bands, rows, columns) to (rows, columns, bands)
175 im = np.ma.transpose(arr, [1,2,0])
176 return im
177
178
179
180 def reshape_as_raster(arr):
181 """Returns the array in a raster order
182 by swapping the axes order from (rows, columns, bands)
183 to (bands, rows, columns)
184
185 Parameters
186 ----------
187 arr : array-like in the image form of (rows, columns, bands)
188 """
189 # swap the axes order from (rows, columns, bands) to (bands, rows, columns)
190 im = np.transpose(arr, [2,0,1])
191 return im
192
193
194 def show_hist(source, bins=10, masked=True, title='Histogram', ax=None, **kwargs):
195 """Easily display a histogram with matplotlib.
196
197 Parameters
198 ----------
199 source : np.array or DatasetReader, rasterio.Band or tuple(dataset, bidx)
200 Input data to display. The first three arrays in multi-dimensional
201 arrays are plotted as red, green, and blue.
202 bins : int, optional
203 Compute histogram across N bins.
204 masked : bool, optional
205 When working with a `rasterio.Band()` object, specifies if the data
206 should be masked on read.
207 title : str, optional
208 Title for the figure.
209 ax : matplotlib axes (opt)
210 The raster will be added to this axes if passed.
211 **kwargs : optional keyword arguments
212 These will be passed to the matplotlib hist method. See full list at:
213 http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.hist
214 """
215 plt = get_plt()
216
217 if isinstance(source, DatasetReader):
218 arr = source.read(masked=masked)
219 elif isinstance(source, (tuple, rasterio.Band)):
220 arr = source[0].read(source[1], masked=masked)
221 else:
222 arr = source
223
224 # The histogram is computed individually for each 'band' in the array
225 # so we need the overall min/max to constrain the plot
226 rng = arr.min(), arr.max()
227
228 if len(arr.shape) is 2:
229 arr = np.expand_dims(arr.flatten(), 0).T
230 colors = ['gold']
231 else:
232 arr = arr.reshape(arr.shape[0], -1).T
233 colors = ['red', 'green', 'blue', 'violet', 'gold', 'saddlebrown']
234
235 # The goal is to provide a curated set of colors for working with
236 # smaller datasets and let matplotlib define additional colors when
237 # working with larger datasets.
238 if arr.shape[-1] > len(colors):
239 n = arr.shape[-1] - len(colors)
240 colors.extend(np.ndarray.tolist(plt.get_cmap('Accent')(np.linspace(0, 1, n))))
241 else:
242 colors = colors[:arr.shape[-1]]
243
244 # If a rasterio.Band() is given make sure the proper index is displayed
245 # in the legend.
246 if isinstance(source, (tuple, rasterio.Band)):
247 labels = [str(source[1])]
248 else:
249 labels = (str(i + 1) for i in range(len(arr)))
250
251 if ax:
252 show = False
253 else:
254 show = True
255 ax = plt.gca()
256
257 fig = ax.get_figure()
258
259 ax.hist(arr,
260 bins=bins,
261 color=colors,
262 label=labels,
263 range=rng,
264 **kwargs)
265
266 ax.legend(loc="upper right")
267 ax.set_title(title, fontweight='bold')
268 ax.grid(True)
269 ax.set_xlabel('DN')
270 ax.set_ylabel('Frequency')
271 if show:
272 plt.show()
273
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rasterio/plot.py b/rasterio/plot.py
--- a/rasterio/plot.py
+++ b/rasterio/plot.py
@@ -223,7 +223,7 @@
# The histogram is computed individually for each 'band' in the array
# so we need the overall min/max to constrain the plot
- rng = arr.min(), arr.max()
+ rng = np.nanmin(arr), np.nanmax(arr)
if len(arr.shape) is 2:
arr = np.expand_dims(arr.flatten(), 0).T
| {"golden_diff": "diff --git a/rasterio/plot.py b/rasterio/plot.py\n--- a/rasterio/plot.py\n+++ b/rasterio/plot.py\n@@ -223,7 +223,7 @@\n \n # The histogram is computed individually for each 'band' in the array\n # so we need the overall min/max to constrain the plot\n- rng = arr.min(), arr.max()\n+ rng = np.nanmin(arr), np.nanmax(arr)\n \n if len(arr.shape) is 2:\n arr = np.expand_dims(arr.flatten(), 0).T\n", "issue": "show_hist() fails with nans\nI am getting an `UnboundLocalError: local variable 'ymin' referenced before assignment` when I am trying to use the show_hist() function in the plotting module. \n\n```\nimport rasterio\nfrom rasterio.plot import show_hist\nfrom rasterio.plot import show\nimport matplotlib.pyplot as plt\n\nfname = r\"C:\\workspace\\GLCM\\output\\glcm_rasters\\2014_05\\10\\R02028_10_asm.tif\"\nsrc = rasterio.open(fname)\n\nfig, (axrgb, axhist) = plt.subplots(1, 2, figsize=(14,7))\nshow(src, ax=axrgb)\nshow_hist(src, bins=50, histtype='bar', lw=0.0, stacked=False, alpha=0.3, ax=axhist)\n plt.show()\n```\n\n[here](https://drive.google.com/file/d/0B17WT3pXaqrYcUVQbXRVYkxqeXc/view?usp=sharing) is the offending file.\n\nconda info:\n\n```\n platform : win-64\n conda version : 4.1.11\n conda-env version : 2.5.2\n conda-build version : 1.20.0\n python version : 2.7.11.final.0\n requests version : 2.9.1\n root environment : c:\\Anaconda2 (writable)\n default environment : c:\\Anaconda2\n envs directories : c:\\Anaconda2\\envs\n package cache : c:\\Anaconda2\\pkgs\n channel URLs : https://repo.continuum.io/pkgs/free/win-64/\n https://repo.continuum.io/pkgs/free/noarch/\n https://repo.continuum.io/pkgs/pro/win-64/\n https://repo.continuum.io/pkgs/pro/noarch/\n https://repo.continuum.io/pkgs/msys2/win-64/\n https://repo.continuum.io/pkgs/msys2/noarch/\n config file : None\n offline mode : False\n is foreign system : False\n```\n\nPackage info:\n\n```\nrasterio 0.36.0 <pip>\nmatplotlib 1.5.1 np110py27_0\n```\n\n", "before_files": [{"content": "\"\"\"Implementations of various common operations.\n\nIncluding `show()` for displaying an array or with matplotlib.\nMost can handle a numpy array or `rasterio.Band()`.\nPrimarily supports `$ rio insp`.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\nimport warnings\n\nimport numpy as np\n\nimport rasterio\nfrom rasterio.io import DatasetReader\nfrom rasterio.transform import guard_transform\nfrom rasterio.compat import zip_longest\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_plt():\n \"\"\"import matplotlib.pyplot\n raise import error if matplotlib is not installed\n \"\"\"\n try:\n import matplotlib.pyplot as plt\n return plt\n except (ImportError, RuntimeError): # pragma: no cover\n msg = \"Could not import matplotlib\\n\"\n msg += \"matplotlib required for plotting functions\"\n raise ImportError(msg)\n\n\ndef show(source, with_bounds=True, contour=False, contour_label_kws=None,\n ax=None, title=None, transform=None, **kwargs):\n \"\"\"Display a raster or raster band using matplotlib.\n\n Parameters\n ----------\n source : array-like in raster axis order,\n or (raster dataset, bidx) tuple,\n or raster dataset,\n If the tuple (raster dataset, bidx),\n selects band `bidx` from raster. If raster dataset display the rgb image\n as defined in the colorinterp metadata, or default to first band.\n with_bounds : bool (opt)\n Whether to change the image extent to the spatial bounds of the image,\n rather than pixel coordinates. 
Only works when source is\n (raster dataset, bidx) or raster dataset.\n contour : bool (opt)\n Whether to plot the raster data as contours\n contour_label_kws : dictionary (opt)\n Keyword arguments for labeling the contours,\n empty dictionary for no labels.\n ax : matplotlib axis (opt)\n Axis to plot on, otherwise uses current axis.\n title : str, optional\n Title for the figure.\n transform : Affine, optional\n Defines the affine transform if source is an array\n **kwargs : key, value pairings optional\n These will be passed to the matplotlib imshow or contour method\n depending on contour argument.\n See full lists at:\n http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.imshow\n or\n http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.contour\n\n Returns\n -------\n ax : matplotlib Axes\n Axes with plot.\n \"\"\"\n plt = get_plt()\n\n if isinstance(source, tuple):\n arr = source[0].read(source[1])\n if with_bounds:\n kwargs['extent'] = plotting_extent(source[0])\n elif isinstance(source, DatasetReader):\n if source.count == 1:\n arr = source.read(1, masked=True)\n else:\n try:\n source_colorinterp = {source.colorinterp(n): n for n in source.indexes}\n colorinterp = rasterio.enums.ColorInterp\n rgb_indexes = [source_colorinterp[ci] for ci in\n (colorinterp.red, colorinterp.green, colorinterp.blue)]\n arr = source.read(rgb_indexes, masked=True)\n arr = reshape_as_image(arr)\n\n if with_bounds:\n kwargs['extent'] = plotting_extent(source)\n except KeyError:\n arr = source.read(1, masked=True)\n else:\n # The source is a numpy array reshape it to image if it has 3+ bands\n source = np.ma.squeeze(source)\n if len(source.shape) >= 3:\n arr = reshape_as_image(source)\n else:\n arr = source\n if transform and with_bounds:\n kwargs['extent'] = plotting_extent(arr, transform)\n\n show = False\n if not ax:\n show = True\n ax = plt.gca()\n\n if contour:\n if 'cmap' not in kwargs:\n kwargs['colors'] = kwargs.get('colors', 'red')\n kwargs['linewidths'] = kwargs.get('linewidths', 1.5)\n kwargs['alpha'] = kwargs.get('alpha', 0.8)\n\n C = ax.contour(arr, origin='upper', **kwargs)\n if contour_label_kws is None:\n # no explicit label kws passed use defaults\n contour_label_kws = dict(fontsize=8,\n inline=True)\n if contour_label_kws:\n ax.clabel(C, **contour_label_kws)\n else:\n ax.imshow(arr, **kwargs)\n if title:\n ax.set_title(title, fontweight='bold')\n\n if show:\n plt.show()\n\n return ax\n\n\ndef plotting_extent(source, transform=None):\n \"\"\"Returns an extent in the format needed\n for matplotlib's imshow (left, right, bottom, top)\n instead of rasterio's bounds (left, bottom, top, right)\n\n Parameters\n ----------\n source : raster dataset or array in image order (see reshape_as_image)\n transform: Affine, required if source is array\n \"\"\"\n if hasattr(source, 'bounds'):\n extent = (source.bounds.left, source.bounds.right,\n source.bounds.bottom, source.bounds.top)\n elif not transform:\n raise ValueError(\n \"transform is required if source is an array\")\n else:\n transform = guard_transform(transform)\n rows, cols = source.shape[0:2]\n left, top = transform * (0, 0)\n right, bottom = transform * (cols, rows)\n extent = (left, right, bottom, top)\n\n return extent\n\n\ndef reshape_as_image(arr):\n \"\"\"Returns the source array reshaped into the order\n expected by image processing and visualization software\n (matplotlib, scikit-image, etc)\n by swapping the axes order from (bands, rows, columns)\n to (rows, columns, bands)\n\n 
Parameters\n ----------\n source : array-like in a of format (bands, rows, columns)\n \"\"\"\n # swap the axes order from (bands, rows, columns) to (rows, columns, bands)\n im = np.ma.transpose(arr, [1,2,0])\n return im\n\n\n\ndef reshape_as_raster(arr):\n \"\"\"Returns the array in a raster order\n by swapping the axes order from (rows, columns, bands)\n to (bands, rows, columns)\n\n Parameters\n ----------\n arr : array-like in the image form of (rows, columns, bands)\n \"\"\"\n # swap the axes order from (rows, columns, bands) to (bands, rows, columns)\n im = np.transpose(arr, [2,0,1])\n return im\n\n\ndef show_hist(source, bins=10, masked=True, title='Histogram', ax=None, **kwargs):\n \"\"\"Easily display a histogram with matplotlib.\n\n Parameters\n ----------\n source : np.array or DatasetReader, rasterio.Band or tuple(dataset, bidx)\n Input data to display. The first three arrays in multi-dimensional\n arrays are plotted as red, green, and blue.\n bins : int, optional\n Compute histogram across N bins.\n masked : bool, optional\n When working with a `rasterio.Band()` object, specifies if the data\n should be masked on read.\n title : str, optional\n Title for the figure.\n ax : matplotlib axes (opt)\n The raster will be added to this axes if passed.\n **kwargs : optional keyword arguments\n These will be passed to the matplotlib hist method. See full list at:\n http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.hist\n \"\"\"\n plt = get_plt()\n\n if isinstance(source, DatasetReader):\n arr = source.read(masked=masked)\n elif isinstance(source, (tuple, rasterio.Band)):\n arr = source[0].read(source[1], masked=masked)\n else:\n arr = source\n\n # The histogram is computed individually for each 'band' in the array\n # so we need the overall min/max to constrain the plot\n rng = arr.min(), arr.max()\n\n if len(arr.shape) is 2:\n arr = np.expand_dims(arr.flatten(), 0).T\n colors = ['gold']\n else:\n arr = arr.reshape(arr.shape[0], -1).T\n colors = ['red', 'green', 'blue', 'violet', 'gold', 'saddlebrown']\n\n # The goal is to provide a curated set of colors for working with\n # smaller datasets and let matplotlib define additional colors when\n # working with larger datasets.\n if arr.shape[-1] > len(colors):\n n = arr.shape[-1] - len(colors)\n colors.extend(np.ndarray.tolist(plt.get_cmap('Accent')(np.linspace(0, 1, n))))\n else:\n colors = colors[:arr.shape[-1]]\n\n # If a rasterio.Band() is given make sure the proper index is displayed\n # in the legend.\n if isinstance(source, (tuple, rasterio.Band)):\n labels = [str(source[1])]\n else:\n labels = (str(i + 1) for i in range(len(arr)))\n\n if ax:\n show = False\n else:\n show = True\n ax = plt.gca()\n\n fig = ax.get_figure()\n\n ax.hist(arr,\n bins=bins,\n color=colors,\n label=labels,\n range=rng,\n **kwargs)\n\n ax.legend(loc=\"upper right\")\n ax.set_title(title, fontweight='bold')\n ax.grid(True)\n ax.set_xlabel('DN')\n ax.set_ylabel('Frequency')\n if show:\n plt.show()\n", "path": "rasterio/plot.py"}], "after_files": [{"content": "\"\"\"Implementations of various common operations.\n\nIncluding `show()` for displaying an array or with matplotlib.\nMost can handle a numpy array or `rasterio.Band()`.\nPrimarily supports `$ rio insp`.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\nimport warnings\n\nimport numpy as np\n\nimport rasterio\nfrom rasterio.io import DatasetReader\nfrom rasterio.transform import guard_transform\nfrom rasterio.compat import zip_longest\n\nlogger = 
logging.getLogger(__name__)\n\n\ndef get_plt():\n \"\"\"import matplotlib.pyplot\n raise import error if matplotlib is not installed\n \"\"\"\n try:\n import matplotlib.pyplot as plt\n return plt\n except (ImportError, RuntimeError): # pragma: no cover\n msg = \"Could not import matplotlib\\n\"\n msg += \"matplotlib required for plotting functions\"\n raise ImportError(msg)\n\n\ndef show(source, with_bounds=True, contour=False, contour_label_kws=None,\n ax=None, title=None, transform=None, **kwargs):\n \"\"\"Display a raster or raster band using matplotlib.\n\n Parameters\n ----------\n source : array-like in raster axis order,\n or (raster dataset, bidx) tuple,\n or raster dataset,\n If the tuple (raster dataset, bidx),\n selects band `bidx` from raster. If raster dataset display the rgb image\n as defined in the colorinterp metadata, or default to first band.\n with_bounds : bool (opt)\n Whether to change the image extent to the spatial bounds of the image,\n rather than pixel coordinates. Only works when source is\n (raster dataset, bidx) or raster dataset.\n contour : bool (opt)\n Whether to plot the raster data as contours\n contour_label_kws : dictionary (opt)\n Keyword arguments for labeling the contours,\n empty dictionary for no labels.\n ax : matplotlib axis (opt)\n Axis to plot on, otherwise uses current axis.\n title : str, optional\n Title for the figure.\n transform : Affine, optional\n Defines the affine transform if source is an array\n **kwargs : key, value pairings optional\n These will be passed to the matplotlib imshow or contour method\n depending on contour argument.\n See full lists at:\n http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.imshow\n or\n http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.contour\n\n Returns\n -------\n ax : matplotlib Axes\n Axes with plot.\n \"\"\"\n plt = get_plt()\n\n if isinstance(source, tuple):\n arr = source[0].read(source[1])\n if with_bounds:\n kwargs['extent'] = plotting_extent(source[0])\n elif isinstance(source, DatasetReader):\n if source.count == 1:\n arr = source.read(1, masked=True)\n else:\n try:\n source_colorinterp = {source.colorinterp(n): n for n in source.indexes}\n colorinterp = rasterio.enums.ColorInterp\n rgb_indexes = [source_colorinterp[ci] for ci in\n (colorinterp.red, colorinterp.green, colorinterp.blue)]\n arr = source.read(rgb_indexes, masked=True)\n arr = reshape_as_image(arr)\n\n if with_bounds:\n kwargs['extent'] = plotting_extent(source)\n except KeyError:\n arr = source.read(1, masked=True)\n else:\n # The source is a numpy array reshape it to image if it has 3+ bands\n source = np.ma.squeeze(source)\n if len(source.shape) >= 3:\n arr = reshape_as_image(source)\n else:\n arr = source\n if transform and with_bounds:\n kwargs['extent'] = plotting_extent(arr, transform)\n\n show = False\n if not ax:\n show = True\n ax = plt.gca()\n\n if contour:\n if 'cmap' not in kwargs:\n kwargs['colors'] = kwargs.get('colors', 'red')\n kwargs['linewidths'] = kwargs.get('linewidths', 1.5)\n kwargs['alpha'] = kwargs.get('alpha', 0.8)\n\n C = ax.contour(arr, origin='upper', **kwargs)\n if contour_label_kws is None:\n # no explicit label kws passed use defaults\n contour_label_kws = dict(fontsize=8,\n inline=True)\n if contour_label_kws:\n ax.clabel(C, **contour_label_kws)\n else:\n ax.imshow(arr, **kwargs)\n if title:\n ax.set_title(title, fontweight='bold')\n\n if show:\n plt.show()\n\n return ax\n\n\ndef plotting_extent(source, transform=None):\n \"\"\"Returns an 
extent in the format needed\n for matplotlib's imshow (left, right, bottom, top)\n instead of rasterio's bounds (left, bottom, top, right)\n\n Parameters\n ----------\n source : raster dataset or array in image order (see reshape_as_image)\n transform: Affine, required if source is array\n \"\"\"\n if hasattr(source, 'bounds'):\n extent = (source.bounds.left, source.bounds.right,\n source.bounds.bottom, source.bounds.top)\n elif not transform:\n raise ValueError(\n \"transform is required if source is an array\")\n else:\n transform = guard_transform(transform)\n rows, cols = source.shape[0:2]\n left, top = transform * (0, 0)\n right, bottom = transform * (cols, rows)\n extent = (left, right, bottom, top)\n\n return extent\n\n\ndef reshape_as_image(arr):\n \"\"\"Returns the source array reshaped into the order\n expected by image processing and visualization software\n (matplotlib, scikit-image, etc)\n by swapping the axes order from (bands, rows, columns)\n to (rows, columns, bands)\n\n Parameters\n ----------\n source : array-like in a of format (bands, rows, columns)\n \"\"\"\n # swap the axes order from (bands, rows, columns) to (rows, columns, bands)\n im = np.ma.transpose(arr, [1,2,0])\n return im\n\n\n\ndef reshape_as_raster(arr):\n \"\"\"Returns the array in a raster order\n by swapping the axes order from (rows, columns, bands)\n to (bands, rows, columns)\n\n Parameters\n ----------\n arr : array-like in the image form of (rows, columns, bands)\n \"\"\"\n # swap the axes order from (rows, columns, bands) to (bands, rows, columns)\n im = np.transpose(arr, [2,0,1])\n return im\n\n\ndef show_hist(source, bins=10, masked=True, title='Histogram', ax=None, **kwargs):\n \"\"\"Easily display a histogram with matplotlib.\n\n Parameters\n ----------\n source : np.array or DatasetReader, rasterio.Band or tuple(dataset, bidx)\n Input data to display. The first three arrays in multi-dimensional\n arrays are plotted as red, green, and blue.\n bins : int, optional\n Compute histogram across N bins.\n masked : bool, optional\n When working with a `rasterio.Band()` object, specifies if the data\n should be masked on read.\n title : str, optional\n Title for the figure.\n ax : matplotlib axes (opt)\n The raster will be added to this axes if passed.\n **kwargs : optional keyword arguments\n These will be passed to the matplotlib hist method. 
See full list at:\n http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.hist\n \"\"\"\n plt = get_plt()\n\n if isinstance(source, DatasetReader):\n arr = source.read(masked=masked)\n elif isinstance(source, (tuple, rasterio.Band)):\n arr = source[0].read(source[1], masked=masked)\n else:\n arr = source\n\n # The histogram is computed individually for each 'band' in the array\n # so we need the overall min/max to constrain the plot\n rng = np.nanmin(arr), np.nanmax(arr)\n\n if len(arr.shape) is 2:\n arr = np.expand_dims(arr.flatten(), 0).T\n colors = ['gold']\n else:\n arr = arr.reshape(arr.shape[0], -1).T\n colors = ['red', 'green', 'blue', 'violet', 'gold', 'saddlebrown']\n\n # The goal is to provide a curated set of colors for working with\n # smaller datasets and let matplotlib define additional colors when\n # working with larger datasets.\n if arr.shape[-1] > len(colors):\n n = arr.shape[-1] - len(colors)\n colors.extend(np.ndarray.tolist(plt.get_cmap('Accent')(np.linspace(0, 1, n))))\n else:\n colors = colors[:arr.shape[-1]]\n\n # If a rasterio.Band() is given make sure the proper index is displayed\n # in the legend.\n if isinstance(source, (tuple, rasterio.Band)):\n labels = [str(source[1])]\n else:\n labels = (str(i + 1) for i in range(len(arr)))\n\n if ax:\n show = False\n else:\n show = True\n ax = plt.gca()\n\n fig = ax.get_figure()\n\n ax.hist(arr,\n bins=bins,\n color=colors,\n label=labels,\n range=rng,\n **kwargs)\n\n ax.legend(loc=\"upper right\")\n ax.set_title(title, fontweight='bold')\n ax.grid(True)\n ax.set_xlabel('DN')\n ax.set_ylabel('Frequency')\n if show:\n plt.show()\n", "path": "rasterio/plot.py"}]} | 3,604 | 130 |
gh_patches_debug_6217 | rasdani/github-patches | git_diff | apache__airflow-39345 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
webserver static file caching is disabled above 2.3.0 version.
### Apache Airflow version
Other Airflow 2 version (please specify below)
### If "Other Airflow 2 version" selected, which one?
2.3.x ~ 2.9.x
### What happened?
Recently, I migrated Airflow from version 2.2.0 to 2.9.0 and Python from 3.8 to 3.12,
and I experienced very long page load times in the Airflow web dashboard.
In Airflow 2.2.0 the dashboard loaded almost instantly, but in 2.9.0 it took almost 10 seconds.
## Airflow 2.2.0
<img width="960" alt="image" src="https://github.com/apache/airflow/assets/37045096/1d648049-424c-44a1-a505-24fe213f43a7">
## Airflow 2.9.0
<img width="960" alt="image" src="https://github.com/apache/airflow/assets/37045096/ecb58aca-93e9-483f-8e14-d0dc3ce3df6f">
### What you think should happen instead?
I think there is no reason for slow page load times simply because static file caching is not being used.
And I figured out the root cause: the Airflow webserver's static file caching is disabled because Flask was bumped from 1.x to 2.x.
In Flask, there is a configuration option for the cache-control expiry time of static files:
it is `SEND_FILE_MAX_AGE_DEFAULT`.
In Flask 1.1.x, the default value of `SEND_FILE_MAX_AGE_DEFAULT` is 12 hours.
https://flask.palletsprojects.com/en/1.1.x/api/#flask.Flask.send_file_max_age_default

After Flask 2.0.x, the default value of `SEND_FILE_MAX_AGE_DEFAULT` changed to None.
https://flask.palletsprojects.com/en/2.0.x/api/#flask.Flask.send_file_max_age_default

After Airflow 2.3.0, the Flask version was upgraded to 2.0.x.
When I added the Flask configuration in app.py,
static file caching worked as expected.


An example is here: https://github.com/VertexToEdge/airflow-docker-compose
### How to reproduce
I followed the setup instructions at https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html
1. Setup Airflow 2.9.0 with docker-compose
``` bash
curl -LfO 'https://airflow.apache.org/docs/apache-airflow/2.9.0/docker-compose.yaml'
AIRFLOW_UID=$(id -u) docker compose up
```
2. Access the Airflow webserver with a web browser and refresh the page.
### Operating System
Ubuntu 22.04.3 LTS
### Versions of Apache Airflow Providers
_No response_
### Deployment
Docker-Compose
### Deployment details
_No response_
### Anything else?
I am a newbie in Airflow.
I read the Airflow installation guide and configuration reference, but I'm not sure if there is a better way to do this without changing the code.
Is there any way to configure static file caching? Please let me know.
Thanks for reading. Have a good day.
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
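The behaviour described above can be reproduced outside Airflow with a bare Flask app; a minimal sketch (assuming Flask >= 2.0 is installed, option name per the Flask docs linked in the issue):
```python
from datetime import timedelta

from flask import Flask

app = Flask(__name__)

# Flask >= 2.0 defaults SEND_FILE_MAX_AGE_DEFAULT to None, so static responses
# carry no max-age and browsers tend to revalidate them on every page load.
# Setting it explicitly restores the pre-2.0 behaviour of a 12-hour cache.
app.config["SEND_FILE_MAX_AGE_DEFAULT"] = timedelta(hours=12)
```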
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/www/app.py`
Content:
```
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 from __future__ import annotations
19
20 import warnings
21 from datetime import timedelta
22
23 from flask import Flask
24 from flask_appbuilder import SQLA
25 from flask_wtf.csrf import CSRFProtect
26 from markupsafe import Markup
27 from sqlalchemy.engine.url import make_url
28
29 from airflow import settings
30 from airflow.api_internal.internal_api_call import InternalApiConfig
31 from airflow.configuration import conf
32 from airflow.exceptions import AirflowConfigException, RemovedInAirflow3Warning
33 from airflow.logging_config import configure_logging
34 from airflow.models import import_all_models
35 from airflow.settings import _ENABLE_AIP_44
36 from airflow.utils.json import AirflowJsonProvider
37 from airflow.www.extensions.init_appbuilder import init_appbuilder
38 from airflow.www.extensions.init_appbuilder_links import init_appbuilder_links
39 from airflow.www.extensions.init_auth_manager import get_auth_manager
40 from airflow.www.extensions.init_cache import init_cache
41 from airflow.www.extensions.init_dagbag import init_dagbag
42 from airflow.www.extensions.init_jinja_globals import init_jinja_globals
43 from airflow.www.extensions.init_manifest_files import configure_manifest_files
44 from airflow.www.extensions.init_robots import init_robots
45 from airflow.www.extensions.init_security import (
46 init_api_experimental_auth,
47 init_check_user_active,
48 init_xframe_protection,
49 )
50 from airflow.www.extensions.init_session import init_airflow_session_interface
51 from airflow.www.extensions.init_views import (
52 init_api_auth_provider,
53 init_api_connexion,
54 init_api_error_handlers,
55 init_api_experimental,
56 init_api_internal,
57 init_appbuilder_views,
58 init_error_handlers,
59 init_flash_views,
60 init_plugins,
61 )
62 from airflow.www.extensions.init_wsgi_middlewares import init_wsgi_middleware
63
64 app: Flask | None = None
65
66 # Initializes at the module level, so plugins can access it.
67 # See: /docs/plugins.rst
68 csrf = CSRFProtect()
69
70
71 def create_app(config=None, testing=False):
72 """Create a new instance of Airflow WWW app."""
73 flask_app = Flask(__name__)
74 flask_app.secret_key = conf.get("webserver", "SECRET_KEY")
75
76 flask_app.config["PERMANENT_SESSION_LIFETIME"] = timedelta(minutes=settings.get_session_lifetime_config())
77
78 flask_app.config["MAX_CONTENT_LENGTH"] = conf.getfloat("webserver", "allowed_payload_size") * 1024 * 1024
79
80 webserver_config = conf.get_mandatory_value("webserver", "config_file")
81 # Enable customizations in webserver_config.py to be applied via Flask.current_app.
82 with flask_app.app_context():
83 flask_app.config.from_pyfile(webserver_config, silent=True)
84
85 flask_app.config["TESTING"] = testing
86 flask_app.config["SQLALCHEMY_DATABASE_URI"] = conf.get("database", "SQL_ALCHEMY_CONN")
87
88 instance_name = conf.get(section="webserver", key="instance_name", fallback="Airflow")
89 require_confirmation_dag_change = conf.getboolean(
90 section="webserver", key="require_confirmation_dag_change", fallback=False
91 )
92 instance_name_has_markup = conf.getboolean(
93 section="webserver", key="instance_name_has_markup", fallback=False
94 )
95 if instance_name_has_markup:
96 instance_name = Markup(instance_name).striptags()
97
98 flask_app.config["APP_NAME"] = instance_name
99 flask_app.config["REQUIRE_CONFIRMATION_DAG_CHANGE"] = require_confirmation_dag_change
100
101 url = make_url(flask_app.config["SQLALCHEMY_DATABASE_URI"])
102 if url.drivername == "sqlite" and url.database and not url.database.startswith("/"):
103 raise AirflowConfigException(
104 f'Cannot use relative path: `{conf.get("database", "SQL_ALCHEMY_CONN")}` to connect to sqlite. '
105 "Please use absolute path such as `sqlite:////tmp/airflow.db`."
106 )
107
108 flask_app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
109
110 flask_app.config["SESSION_COOKIE_HTTPONLY"] = True
111 flask_app.config["SESSION_COOKIE_SECURE"] = conf.getboolean("webserver", "COOKIE_SECURE")
112
113 cookie_samesite_config = conf.get("webserver", "COOKIE_SAMESITE")
114 if cookie_samesite_config == "":
115 warnings.warn(
116 "Old deprecated value found for `cookie_samesite` option in `[webserver]` section. "
117 "Using `Lax` instead. Change the value to `Lax` in airflow.cfg to remove this warning.",
118 RemovedInAirflow3Warning,
119 stacklevel=2,
120 )
121 cookie_samesite_config = "Lax"
122 flask_app.config["SESSION_COOKIE_SAMESITE"] = cookie_samesite_config
123
124 if config:
125 flask_app.config.from_mapping(config)
126
127 if "SQLALCHEMY_ENGINE_OPTIONS" not in flask_app.config:
128 flask_app.config["SQLALCHEMY_ENGINE_OPTIONS"] = settings.prepare_engine_args()
129
130 # Configure the JSON encoder used by `|tojson` filter from Flask
131 flask_app.json_provider_class = AirflowJsonProvider
132 flask_app.json = AirflowJsonProvider(flask_app)
133
134 InternalApiConfig.force_database_direct_access()
135
136 csrf.init_app(flask_app)
137
138 init_wsgi_middleware(flask_app)
139
140 db = SQLA()
141 db.session = settings.Session
142 db.init_app(flask_app)
143
144 init_dagbag(flask_app)
145
146 init_api_experimental_auth(flask_app)
147
148 init_robots(flask_app)
149
150 init_cache(flask_app)
151
152 init_flash_views(flask_app)
153
154 configure_logging()
155 configure_manifest_files(flask_app)
156
157 import_all_models()
158
159 with flask_app.app_context():
160 init_appbuilder(flask_app)
161
162 init_appbuilder_views(flask_app)
163 init_appbuilder_links(flask_app)
164 init_plugins(flask_app)
165 init_error_handlers(flask_app)
166 init_api_connexion(flask_app)
167 if conf.getboolean("webserver", "run_internal_api", fallback=False):
168 if not _ENABLE_AIP_44:
169 raise RuntimeError("The AIP_44 is not enabled so you cannot use it.")
170 init_api_internal(flask_app)
171 init_api_experimental(flask_app)
172 init_api_auth_provider(flask_app)
173 init_api_error_handlers(flask_app) # needs to be after all api inits to let them add their path first
174
175 get_auth_manager().init()
176
177 init_jinja_globals(flask_app)
178 init_xframe_protection(flask_app)
179 init_airflow_session_interface(flask_app)
180 init_check_user_active(flask_app)
181 return flask_app
182
183
184 def cached_app(config=None, testing=False):
185 """Return cached instance of Airflow WWW app."""
186 global app
187 if not app:
188 app = create_app(config=config, testing=testing)
189 return app
190
191
192 def purge_cached_app():
193 """Remove the cached version of the app in global state."""
194 global app
195 app = None
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/airflow/www/app.py b/airflow/www/app.py
--- a/airflow/www/app.py
+++ b/airflow/www/app.py
@@ -121,6 +121,10 @@
cookie_samesite_config = "Lax"
flask_app.config["SESSION_COOKIE_SAMESITE"] = cookie_samesite_config
+ # Above Flask 2.0.x, default value of SEND_FILE_MAX_AGE_DEFAULT changed 12 hours to None.
+ # for static file caching, it needs to set value explicitly.
+ flask_app.config["SEND_FILE_MAX_AGE_DEFAULT"] = timedelta(seconds=43200)
+
if config:
flask_app.config.from_mapping(config)
| {"golden_diff": "diff --git a/airflow/www/app.py b/airflow/www/app.py\n--- a/airflow/www/app.py\n+++ b/airflow/www/app.py\n@@ -121,6 +121,10 @@\n cookie_samesite_config = \"Lax\"\n flask_app.config[\"SESSION_COOKIE_SAMESITE\"] = cookie_samesite_config\n \n+ # Above Flask 2.0.x, default value of SEND_FILE_MAX_AGE_DEFAULT changed 12 hours to None.\n+ # for static file caching, it needs to set value explicitly.\n+ flask_app.config[\"SEND_FILE_MAX_AGE_DEFAULT\"] = timedelta(seconds=43200)\n+\n if config:\n flask_app.config.from_mapping(config)\n", "issue": "webserver static file caching is disabled above 2.3.0 version.\n### Apache Airflow version\n\nOther Airflow 2 version (please specify below)\n\n### If \"Other Airflow 2 version\" selected, which one?\n\n2.3.x ~ 2.9.x\n\n### What happened?\n\nRecently, I migrated airflow version 2.2.0 to 2.9.0 and python 3.8 to 3.12.\r\n\r\nand i experienced too much load time in airflow web dashboard.\r\nin airflow 2.2.0, airflow dashboard loaded almost instantly. but in 2.9.0, it tooks almost 10 seconds.\r\n\r\n## Airflow 2.2.0\r\n<img width=\"960\" alt=\"image\" src=\"https://github.com/apache/airflow/assets/37045096/1d648049-424c-44a1-a505-24fe213f43a7\">\r\n\r\n## Airflow 2.9.0\r\n<img width=\"960\" alt=\"image\" src=\"https://github.com/apache/airflow/assets/37045096/ecb58aca-93e9-483f-8e14-d0dc3ce3df6f\">\r\n\r\n\n\n### What you think should happen instead?\n\nI think there is no reason to make slow page loading time because of not using static file caching.\r\n\r\nAnd I figured out the root cause. the problem is airflow webserver's caching is disabled because of bumping up Flask version 1.x to 2.x.\r\n\r\nIn Flask, there is Configuration Option for static file's Expire time for cache-control.\r\nit is `SEND_FILE_MAX_AGE_DEFAULT`.\r\n\r\nIn Flask 1.1.x, the default value of `SEND_FILE_MAX_AGE_DEFAULT` is 12 hours.\r\nhttps://flask.palletsprojects.com/en/1.1.x/api/#flask.Flask.send_file_max_age_default\r\n\r\n\r\nAfter Flask 2.0.x, the default value of `SEND_FILE_MAX_AGE_DEFAULT` is changed to None.\r\nhttps://flask.palletsprojects.com/en/2.0.x/api/#flask.Flask.send_file_max_age_default\r\n\r\n\r\nafter Airflow 2.3.0, Flask version is upgraded to 2.0.x.\r\n\r\nWhen i added flask configuration in app.py\r\nstatic file caching works well.\r\n\r\n\r\nexample is here. https://github.com/VertexToEdge/airflow-docker-compose\r\n\r\n\n\n### How to reproduce\n\nI followed setup instruction in https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html\r\n1. Setup Airflow 2.9.0 with docker-compose\r\n``` bash\r\ncurl -LfO 'https://airflow.apache.org/docs/apache-airflow/2.9.0/docker-compose.yaml'\r\nAIRFLOW_UID=$(id -u) docker compose up\r\n```\r\n\r\n2. access airflow webserver with webbrowser. and refresh the page.\r\n\r\n\n\n### Operating System\n\nUbuntu 22.04.3 LTS\n\n### Versions of Apache Airflow Providers\n\n_No response_\n\n### Deployment\n\nDocker-Compose\n\n### Deployment details\n\n_No response_\n\n### Anything else?\n\nI am newbie in Airflow.\r\nI read airflow installation guide and configruation reference. but I'm not sure if there is a better way to do this without changing the code.\r\nIs there any way to configure static file caching? please just talk to me.\r\n\r\nthanks for reading. 
have a good day.\n\n### Are you willing to submit PR?\n\n- [X] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nimport warnings\nfrom datetime import timedelta\n\nfrom flask import Flask\nfrom flask_appbuilder import SQLA\nfrom flask_wtf.csrf import CSRFProtect\nfrom markupsafe import Markup\nfrom sqlalchemy.engine.url import make_url\n\nfrom airflow import settings\nfrom airflow.api_internal.internal_api_call import InternalApiConfig\nfrom airflow.configuration import conf\nfrom airflow.exceptions import AirflowConfigException, RemovedInAirflow3Warning\nfrom airflow.logging_config import configure_logging\nfrom airflow.models import import_all_models\nfrom airflow.settings import _ENABLE_AIP_44\nfrom airflow.utils.json import AirflowJsonProvider\nfrom airflow.www.extensions.init_appbuilder import init_appbuilder\nfrom airflow.www.extensions.init_appbuilder_links import init_appbuilder_links\nfrom airflow.www.extensions.init_auth_manager import get_auth_manager\nfrom airflow.www.extensions.init_cache import init_cache\nfrom airflow.www.extensions.init_dagbag import init_dagbag\nfrom airflow.www.extensions.init_jinja_globals import init_jinja_globals\nfrom airflow.www.extensions.init_manifest_files import configure_manifest_files\nfrom airflow.www.extensions.init_robots import init_robots\nfrom airflow.www.extensions.init_security import (\n init_api_experimental_auth,\n init_check_user_active,\n init_xframe_protection,\n)\nfrom airflow.www.extensions.init_session import init_airflow_session_interface\nfrom airflow.www.extensions.init_views import (\n init_api_auth_provider,\n init_api_connexion,\n init_api_error_handlers,\n init_api_experimental,\n init_api_internal,\n init_appbuilder_views,\n init_error_handlers,\n init_flash_views,\n init_plugins,\n)\nfrom airflow.www.extensions.init_wsgi_middlewares import init_wsgi_middleware\n\napp: Flask | None = None\n\n# Initializes at the module level, so plugins can access it.\n# See: /docs/plugins.rst\ncsrf = CSRFProtect()\n\n\ndef create_app(config=None, testing=False):\n \"\"\"Create a new instance of Airflow WWW app.\"\"\"\n flask_app = Flask(__name__)\n flask_app.secret_key = conf.get(\"webserver\", \"SECRET_KEY\")\n\n flask_app.config[\"PERMANENT_SESSION_LIFETIME\"] = timedelta(minutes=settings.get_session_lifetime_config())\n\n flask_app.config[\"MAX_CONTENT_LENGTH\"] = conf.getfloat(\"webserver\", \"allowed_payload_size\") * 1024 * 1024\n\n webserver_config = conf.get_mandatory_value(\"webserver\", \"config_file\")\n # Enable customizations in 
webserver_config.py to be applied via Flask.current_app.\n with flask_app.app_context():\n flask_app.config.from_pyfile(webserver_config, silent=True)\n\n flask_app.config[\"TESTING\"] = testing\n flask_app.config[\"SQLALCHEMY_DATABASE_URI\"] = conf.get(\"database\", \"SQL_ALCHEMY_CONN\")\n\n instance_name = conf.get(section=\"webserver\", key=\"instance_name\", fallback=\"Airflow\")\n require_confirmation_dag_change = conf.getboolean(\n section=\"webserver\", key=\"require_confirmation_dag_change\", fallback=False\n )\n instance_name_has_markup = conf.getboolean(\n section=\"webserver\", key=\"instance_name_has_markup\", fallback=False\n )\n if instance_name_has_markup:\n instance_name = Markup(instance_name).striptags()\n\n flask_app.config[\"APP_NAME\"] = instance_name\n flask_app.config[\"REQUIRE_CONFIRMATION_DAG_CHANGE\"] = require_confirmation_dag_change\n\n url = make_url(flask_app.config[\"SQLALCHEMY_DATABASE_URI\"])\n if url.drivername == \"sqlite\" and url.database and not url.database.startswith(\"/\"):\n raise AirflowConfigException(\n f'Cannot use relative path: `{conf.get(\"database\", \"SQL_ALCHEMY_CONN\")}` to connect to sqlite. '\n \"Please use absolute path such as `sqlite:////tmp/airflow.db`.\"\n )\n\n flask_app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n flask_app.config[\"SESSION_COOKIE_HTTPONLY\"] = True\n flask_app.config[\"SESSION_COOKIE_SECURE\"] = conf.getboolean(\"webserver\", \"COOKIE_SECURE\")\n\n cookie_samesite_config = conf.get(\"webserver\", \"COOKIE_SAMESITE\")\n if cookie_samesite_config == \"\":\n warnings.warn(\n \"Old deprecated value found for `cookie_samesite` option in `[webserver]` section. \"\n \"Using `Lax` instead. Change the value to `Lax` in airflow.cfg to remove this warning.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n cookie_samesite_config = \"Lax\"\n flask_app.config[\"SESSION_COOKIE_SAMESITE\"] = cookie_samesite_config\n\n if config:\n flask_app.config.from_mapping(config)\n\n if \"SQLALCHEMY_ENGINE_OPTIONS\" not in flask_app.config:\n flask_app.config[\"SQLALCHEMY_ENGINE_OPTIONS\"] = settings.prepare_engine_args()\n\n # Configure the JSON encoder used by `|tojson` filter from Flask\n flask_app.json_provider_class = AirflowJsonProvider\n flask_app.json = AirflowJsonProvider(flask_app)\n\n InternalApiConfig.force_database_direct_access()\n\n csrf.init_app(flask_app)\n\n init_wsgi_middleware(flask_app)\n\n db = SQLA()\n db.session = settings.Session\n db.init_app(flask_app)\n\n init_dagbag(flask_app)\n\n init_api_experimental_auth(flask_app)\n\n init_robots(flask_app)\n\n init_cache(flask_app)\n\n init_flash_views(flask_app)\n\n configure_logging()\n configure_manifest_files(flask_app)\n\n import_all_models()\n\n with flask_app.app_context():\n init_appbuilder(flask_app)\n\n init_appbuilder_views(flask_app)\n init_appbuilder_links(flask_app)\n init_plugins(flask_app)\n init_error_handlers(flask_app)\n init_api_connexion(flask_app)\n if conf.getboolean(\"webserver\", \"run_internal_api\", fallback=False):\n if not _ENABLE_AIP_44:\n raise RuntimeError(\"The AIP_44 is not enabled so you cannot use it.\")\n init_api_internal(flask_app)\n init_api_experimental(flask_app)\n init_api_auth_provider(flask_app)\n init_api_error_handlers(flask_app) # needs to be after all api inits to let them add their path first\n\n get_auth_manager().init()\n\n init_jinja_globals(flask_app)\n init_xframe_protection(flask_app)\n init_airflow_session_interface(flask_app)\n init_check_user_active(flask_app)\n return flask_app\n\n\ndef 
cached_app(config=None, testing=False):\n \"\"\"Return cached instance of Airflow WWW app.\"\"\"\n global app\n if not app:\n app = create_app(config=config, testing=testing)\n return app\n\n\ndef purge_cached_app():\n \"\"\"Remove the cached version of the app in global state.\"\"\"\n global app\n app = None\n", "path": "airflow/www/app.py"}], "after_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nimport warnings\nfrom datetime import timedelta\n\nfrom flask import Flask\nfrom flask_appbuilder import SQLA\nfrom flask_wtf.csrf import CSRFProtect\nfrom markupsafe import Markup\nfrom sqlalchemy.engine.url import make_url\n\nfrom airflow import settings\nfrom airflow.api_internal.internal_api_call import InternalApiConfig\nfrom airflow.configuration import conf\nfrom airflow.exceptions import AirflowConfigException, RemovedInAirflow3Warning\nfrom airflow.logging_config import configure_logging\nfrom airflow.models import import_all_models\nfrom airflow.settings import _ENABLE_AIP_44\nfrom airflow.utils.json import AirflowJsonProvider\nfrom airflow.www.extensions.init_appbuilder import init_appbuilder\nfrom airflow.www.extensions.init_appbuilder_links import init_appbuilder_links\nfrom airflow.www.extensions.init_auth_manager import get_auth_manager\nfrom airflow.www.extensions.init_cache import init_cache\nfrom airflow.www.extensions.init_dagbag import init_dagbag\nfrom airflow.www.extensions.init_jinja_globals import init_jinja_globals\nfrom airflow.www.extensions.init_manifest_files import configure_manifest_files\nfrom airflow.www.extensions.init_robots import init_robots\nfrom airflow.www.extensions.init_security import (\n init_api_experimental_auth,\n init_check_user_active,\n init_xframe_protection,\n)\nfrom airflow.www.extensions.init_session import init_airflow_session_interface\nfrom airflow.www.extensions.init_views import (\n init_api_auth_provider,\n init_api_connexion,\n init_api_error_handlers,\n init_api_experimental,\n init_api_internal,\n init_appbuilder_views,\n init_error_handlers,\n init_flash_views,\n init_plugins,\n)\nfrom airflow.www.extensions.init_wsgi_middlewares import init_wsgi_middleware\n\napp: Flask | None = None\n\n# Initializes at the module level, so plugins can access it.\n# See: /docs/plugins.rst\ncsrf = CSRFProtect()\n\n\ndef create_app(config=None, testing=False):\n \"\"\"Create a new instance of Airflow WWW app.\"\"\"\n flask_app = Flask(__name__)\n flask_app.secret_key = conf.get(\"webserver\", \"SECRET_KEY\")\n\n flask_app.config[\"PERMANENT_SESSION_LIFETIME\"] = timedelta(minutes=settings.get_session_lifetime_config())\n\n flask_app.config[\"MAX_CONTENT_LENGTH\"] = conf.getfloat(\"webserver\", \"allowed_payload_size\") * 1024 * 1024\n\n 
webserver_config = conf.get_mandatory_value(\"webserver\", \"config_file\")\n # Enable customizations in webserver_config.py to be applied via Flask.current_app.\n with flask_app.app_context():\n flask_app.config.from_pyfile(webserver_config, silent=True)\n\n flask_app.config[\"TESTING\"] = testing\n flask_app.config[\"SQLALCHEMY_DATABASE_URI\"] = conf.get(\"database\", \"SQL_ALCHEMY_CONN\")\n\n instance_name = conf.get(section=\"webserver\", key=\"instance_name\", fallback=\"Airflow\")\n require_confirmation_dag_change = conf.getboolean(\n section=\"webserver\", key=\"require_confirmation_dag_change\", fallback=False\n )\n instance_name_has_markup = conf.getboolean(\n section=\"webserver\", key=\"instance_name_has_markup\", fallback=False\n )\n if instance_name_has_markup:\n instance_name = Markup(instance_name).striptags()\n\n flask_app.config[\"APP_NAME\"] = instance_name\n flask_app.config[\"REQUIRE_CONFIRMATION_DAG_CHANGE\"] = require_confirmation_dag_change\n\n url = make_url(flask_app.config[\"SQLALCHEMY_DATABASE_URI\"])\n if url.drivername == \"sqlite\" and url.database and not url.database.startswith(\"/\"):\n raise AirflowConfigException(\n f'Cannot use relative path: `{conf.get(\"database\", \"SQL_ALCHEMY_CONN\")}` to connect to sqlite. '\n \"Please use absolute path such as `sqlite:////tmp/airflow.db`.\"\n )\n\n flask_app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n flask_app.config[\"SESSION_COOKIE_HTTPONLY\"] = True\n flask_app.config[\"SESSION_COOKIE_SECURE\"] = conf.getboolean(\"webserver\", \"COOKIE_SECURE\")\n\n cookie_samesite_config = conf.get(\"webserver\", \"COOKIE_SAMESITE\")\n if cookie_samesite_config == \"\":\n warnings.warn(\n \"Old deprecated value found for `cookie_samesite` option in `[webserver]` section. \"\n \"Using `Lax` instead. 
Change the value to `Lax` in airflow.cfg to remove this warning.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n cookie_samesite_config = \"Lax\"\n flask_app.config[\"SESSION_COOKIE_SAMESITE\"] = cookie_samesite_config\n\n # Above Flask 2.0.x, default value of SEND_FILE_MAX_AGE_DEFAULT changed 12 hours to None.\n # for static file caching, it needs to set value explicitly.\n flask_app.config[\"SEND_FILE_MAX_AGE_DEFAULT\"] = timedelta(seconds=43200)\n\n if config:\n flask_app.config.from_mapping(config)\n\n if \"SQLALCHEMY_ENGINE_OPTIONS\" not in flask_app.config:\n flask_app.config[\"SQLALCHEMY_ENGINE_OPTIONS\"] = settings.prepare_engine_args()\n\n # Configure the JSON encoder used by `|tojson` filter from Flask\n flask_app.json_provider_class = AirflowJsonProvider\n flask_app.json = AirflowJsonProvider(flask_app)\n\n InternalApiConfig.force_database_direct_access()\n\n csrf.init_app(flask_app)\n\n init_wsgi_middleware(flask_app)\n\n db = SQLA()\n db.session = settings.Session\n db.init_app(flask_app)\n\n init_dagbag(flask_app)\n\n init_api_experimental_auth(flask_app)\n\n init_robots(flask_app)\n\n init_cache(flask_app)\n\n init_flash_views(flask_app)\n\n configure_logging()\n configure_manifest_files(flask_app)\n\n import_all_models()\n\n with flask_app.app_context():\n init_appbuilder(flask_app)\n\n init_appbuilder_views(flask_app)\n init_appbuilder_links(flask_app)\n init_plugins(flask_app)\n init_error_handlers(flask_app)\n init_api_connexion(flask_app)\n if conf.getboolean(\"webserver\", \"run_internal_api\", fallback=False):\n if not _ENABLE_AIP_44:\n raise RuntimeError(\"The AIP_44 is not enabled so you cannot use it.\")\n init_api_internal(flask_app)\n init_api_experimental(flask_app)\n init_api_auth_provider(flask_app)\n init_api_error_handlers(flask_app) # needs to be after all api inits to let them add their path first\n\n get_auth_manager().init()\n\n init_jinja_globals(flask_app)\n init_xframe_protection(flask_app)\n init_airflow_session_interface(flask_app)\n init_check_user_active(flask_app)\n return flask_app\n\n\ndef cached_app(config=None, testing=False):\n \"\"\"Return cached instance of Airflow WWW app.\"\"\"\n global app\n if not app:\n app = create_app(config=config, testing=testing)\n return app\n\n\ndef purge_cached_app():\n \"\"\"Remove the cached version of the app in global state.\"\"\"\n global app\n app = None\n", "path": "airflow/www/app.py"}]} | 3,421 | 159 |
gh_patches_debug_31778 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-1326 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add MultiPlotItem to docs
I noticed it's missing while reviewing #1324. Also, [MultiPlotWidget](https://pyqtgraph.readthedocs.io/en/latest/widgets/multiplotwidget.html) should link to it and indicate that it wraps the item's methods. `MultiPlotItem.plot` could also use a docstring.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/widgets/MultiPlotWidget.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 MultiPlotWidget.py - Convenience class--GraphicsView widget displaying a MultiPlotItem
4 Copyright 2010 Luke Campagnola
5 Distributed under MIT/X11 license. See license.txt for more information.
6 """
7 from ..Qt import QtCore
8 from .GraphicsView import GraphicsView
9 from ..graphicsItems import MultiPlotItem as MultiPlotItem
10
11 __all__ = ['MultiPlotWidget']
12 class MultiPlotWidget(GraphicsView):
13 """Widget implementing a graphicsView with a single MultiPlotItem inside."""
14 def __init__(self, parent=None):
15 self.minPlotHeight = 50
16 self.mPlotItem = MultiPlotItem.MultiPlotItem()
17 GraphicsView.__init__(self, parent)
18 self.enableMouse(False)
19 self.setCentralItem(self.mPlotItem)
20 ## Explicitly wrap methods from mPlotItem
21 #for m in ['setData']:
22 #setattr(self, m, getattr(self.mPlotItem, m))
23 self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
24 self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
25
26 def __getattr__(self, attr): ## implicitly wrap methods from plotItem
27 if hasattr(self.mPlotItem, attr):
28 m = getattr(self.mPlotItem, attr)
29 if hasattr(m, '__call__'):
30 return m
31 raise AttributeError(attr)
32
33 def setMinimumPlotHeight(self, min):
34 """Set the minimum height for each sub-plot displayed.
35
36 If the total height of all plots is greater than the height of the
37 widget, then a scroll bar will appear to provide access to the entire
38 set of plots.
39
40 Added in version 0.9.9
41 """
42 self.minPlotHeight = min
43 self.resizeEvent(None)
44
45 def widgetGroupInterface(self):
46 return (None, MultiPlotWidget.saveState, MultiPlotWidget.restoreState)
47
48 def saveState(self):
49 return {}
50 #return self.plotItem.saveState()
51
52 def restoreState(self, state):
53 pass
54 #return self.plotItem.restoreState(state)
55
56 def close(self):
57 self.mPlotItem.close()
58 self.mPlotItem = None
59 self.setParent(None)
60 GraphicsView.close(self)
61
62 def setRange(self, *args, **kwds):
63 GraphicsView.setRange(self, *args, **kwds)
64 if self.centralWidget is not None:
65 r = self.range
66 minHeight = len(self.mPlotItem.plots) * self.minPlotHeight
67 if r.height() < minHeight:
68 r.setHeight(minHeight)
69 r.setWidth(r.width() - self.verticalScrollBar().width())
70 self.centralWidget.setGeometry(r)
71
72 def resizeEvent(self, ev):
73 if self.closed:
74 return
75 if self.autoPixelRange:
76 self.range = QtCore.QRectF(0, 0, self.size().width(), self.size().height())
77 MultiPlotWidget.setRange(self, self.range, padding=0, disableAutoPixel=False) ## we do this because some subclasses like to redefine setRange in an incompatible way.
78 self.updateMatrix()
79
```
Path: `pyqtgraph/graphicsItems/MultiPlotItem.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 MultiPlotItem.py - Graphics item used for displaying an array of PlotItems
4 Copyright 2010 Luke Campagnola
5 Distributed under MIT/X11 license. See license.txt for more information.
6 """
7 from numpy import ndarray
8 from . import GraphicsLayout
9 from ..metaarray import *
10
11
12 __all__ = ['MultiPlotItem']
13 class MultiPlotItem(GraphicsLayout.GraphicsLayout):
14 """
15 Automatically generates a grid of plots from a multi-dimensional array
16 """
17 def __init__(self, *args, **kwds):
18 GraphicsLayout.GraphicsLayout.__init__(self, *args, **kwds)
19 self.plots = []
20
21
22 def plot(self, data, **plotArgs):
23 #self.layout.clear()
24
25 if hasattr(data, 'implements') and data.implements('MetaArray'):
26 if data.ndim != 2:
27 raise Exception("MultiPlot currently only accepts 2D MetaArray.")
28 ic = data.infoCopy()
29 ax = 0
30 for i in [0, 1]:
31 if 'cols' in ic[i]:
32 ax = i
33 break
34 #print "Plotting using axis %d as columns (%d plots)" % (ax, data.shape[ax])
35 for i in range(data.shape[ax]):
36 pi = self.addPlot()
37 self.nextRow()
38 sl = [slice(None)] * 2
39 sl[ax] = i
40 pi.plot(data[tuple(sl)], **plotArgs)
41 #self.layout.addItem(pi, i, 0)
42 self.plots.append((pi, i, 0))
43 info = ic[ax]['cols'][i]
44 title = info.get('title', info.get('name', None))
45 units = info.get('units', None)
46 pi.setLabel('left', text=title, units=units)
47 info = ic[1-ax]
48 title = info.get('title', info.get('name', None))
49 units = info.get('units', None)
50 pi.setLabel('bottom', text=title, units=units)
51 else:
52 raise Exception("Data type %s not (yet?) supported for MultiPlot." % type(data))
53
54 def close(self):
55 for p in self.plots:
56 p[0].close()
57 self.plots = None
58 self.clear()
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyqtgraph/graphicsItems/MultiPlotItem.py b/pyqtgraph/graphicsItems/MultiPlotItem.py
--- a/pyqtgraph/graphicsItems/MultiPlotItem.py
+++ b/pyqtgraph/graphicsItems/MultiPlotItem.py
@@ -4,22 +4,33 @@
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
"""
-from numpy import ndarray
from . import GraphicsLayout
from ..metaarray import *
-
__all__ = ['MultiPlotItem']
+
+
class MultiPlotItem(GraphicsLayout.GraphicsLayout):
"""
- Automatically generates a grid of plots from a multi-dimensional array
+ :class:`~pyqtgraph.GraphicsLayout` that automatically generates a grid of
+ plots from a MetaArray.
+
+ .. seealso:: :class:`~pyqtgraph.MultiPlotWidget`: Widget containing a MultiPlotItem
"""
+
def __init__(self, *args, **kwds):
GraphicsLayout.GraphicsLayout.__init__(self, *args, **kwds)
self.plots = []
-
def plot(self, data, **plotArgs):
+ """Plot the data from a MetaArray with each array column as a separate
+ :class:`~pyqtgraph.PlotItem`.
+
+ Axis labels are automatically extracted from the array info.
+
+ ``plotArgs`` are passed to :meth:`PlotItem.plot
+ <pyqtgraph.PlotItem.plot>`.
+ """
#self.layout.clear()
if hasattr(data, 'implements') and data.implements('MetaArray'):
diff --git a/pyqtgraph/widgets/MultiPlotWidget.py b/pyqtgraph/widgets/MultiPlotWidget.py
--- a/pyqtgraph/widgets/MultiPlotWidget.py
+++ b/pyqtgraph/widgets/MultiPlotWidget.py
@@ -10,7 +10,8 @@
__all__ = ['MultiPlotWidget']
class MultiPlotWidget(GraphicsView):
- """Widget implementing a graphicsView with a single MultiPlotItem inside."""
+ """Widget implementing a :class:`~pyqtgraph.GraphicsView` with a single
+ :class:`~pyqtgraph.MultiPlotItem` inside."""
def __init__(self, parent=None):
self.minPlotHeight = 50
self.mPlotItem = MultiPlotItem.MultiPlotItem()
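
For orientation, a minimal usage sketch of the behaviour the new docstrings describe — illustrative only and not part of the original record; the `MetaArray` axis metadata below is a made-up example:

```python
# Hypothetical example: MultiPlotWidget forwards plot() to its internal
# MultiPlotItem, which creates one PlotItem per column of a 2D MetaArray.
import numpy as np
import pyqtgraph as pg
from pyqtgraph.metaarray import MetaArray

app = pg.mkQApp()

data = MetaArray(
    np.random.normal(size=(2, 100)),
    info=[
        {"name": "Signal", "cols": [{"name": "ch0", "units": "V"}, {"name": "ch1", "units": "V"}]},
        {"name": "Time", "units": "s"},
    ],
)

w = pg.MultiPlotWidget()
w.plot(data)  # one PlotItem per column; axis labels come from the array info
w.show()
app.exec_()
```
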
| {"golden_diff": "diff --git a/pyqtgraph/graphicsItems/MultiPlotItem.py b/pyqtgraph/graphicsItems/MultiPlotItem.py\n--- a/pyqtgraph/graphicsItems/MultiPlotItem.py\n+++ b/pyqtgraph/graphicsItems/MultiPlotItem.py\n@@ -4,22 +4,33 @@\n Copyright 2010 Luke Campagnola\n Distributed under MIT/X11 license. See license.txt for more information.\n \"\"\"\n-from numpy import ndarray\n from . import GraphicsLayout\n from ..metaarray import *\n \n-\n __all__ = ['MultiPlotItem']\n+\n+\n class MultiPlotItem(GraphicsLayout.GraphicsLayout):\n \"\"\"\n- Automatically generates a grid of plots from a multi-dimensional array\n+ :class:`~pyqtgraph.GraphicsLayout` that automatically generates a grid of\n+ plots from a MetaArray.\n+\n+ .. seealso:: :class:`~pyqtgraph.MultiPlotWidget`: Widget containing a MultiPlotItem\n \"\"\"\n+\n def __init__(self, *args, **kwds):\n GraphicsLayout.GraphicsLayout.__init__(self, *args, **kwds)\n self.plots = []\n \n-\n def plot(self, data, **plotArgs):\n+ \"\"\"Plot the data from a MetaArray with each array column as a separate\n+ :class:`~pyqtgraph.PlotItem`.\n+\n+ Axis labels are automatically extracted from the array info.\n+\n+ ``plotArgs`` are passed to :meth:`PlotItem.plot\n+ <pyqtgraph.PlotItem.plot>`.\n+ \"\"\"\n #self.layout.clear()\n \n if hasattr(data, 'implements') and data.implements('MetaArray'):\ndiff --git a/pyqtgraph/widgets/MultiPlotWidget.py b/pyqtgraph/widgets/MultiPlotWidget.py\n--- a/pyqtgraph/widgets/MultiPlotWidget.py\n+++ b/pyqtgraph/widgets/MultiPlotWidget.py\n@@ -10,7 +10,8 @@\n \n __all__ = ['MultiPlotWidget']\n class MultiPlotWidget(GraphicsView):\n- \"\"\"Widget implementing a graphicsView with a single MultiPlotItem inside.\"\"\"\n+ \"\"\"Widget implementing a :class:`~pyqtgraph.GraphicsView` with a single\n+ :class:`~pyqtgraph.MultiPlotItem` inside.\"\"\"\n def __init__(self, parent=None):\n self.minPlotHeight = 50\n self.mPlotItem = MultiPlotItem.MultiPlotItem()\n", "issue": "Add MultiPlotItem to docs\nI noticed it's missing in reviewing #1324. Also [MultiPlotWidget](https://pyqtgraph.readthedocs.io/en/latest/widgets/multiplotwidget.html) should link to it and indicate it wraps the item's methods. `MultiPlotItem.plot` could also use a docstring.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nMultiPlotWidget.py - Convenience class--GraphicsView widget displaying a MultiPlotItem\nCopyright 2010 Luke Campagnola\nDistributed under MIT/X11 license. See license.txt for more information.\n\"\"\"\nfrom ..Qt import QtCore\nfrom .GraphicsView import GraphicsView\nfrom ..graphicsItems import MultiPlotItem as MultiPlotItem\n\n__all__ = ['MultiPlotWidget']\nclass MultiPlotWidget(GraphicsView):\n \"\"\"Widget implementing a graphicsView with a single MultiPlotItem inside.\"\"\"\n def __init__(self, parent=None):\n self.minPlotHeight = 50\n self.mPlotItem = MultiPlotItem.MultiPlotItem()\n GraphicsView.__init__(self, parent)\n self.enableMouse(False)\n self.setCentralItem(self.mPlotItem)\n ## Explicitly wrap methods from mPlotItem\n #for m in ['setData']:\n #setattr(self, m, getattr(self.mPlotItem, m))\n self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)\n self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)\n \n def __getattr__(self, attr): ## implicitly wrap methods from plotItem\n if hasattr(self.mPlotItem, attr):\n m = getattr(self.mPlotItem, attr)\n if hasattr(m, '__call__'):\n return m\n raise AttributeError(attr)\n\n def setMinimumPlotHeight(self, min):\n \"\"\"Set the minimum height for each sub-plot displayed. 
\n \n If the total height of all plots is greater than the height of the \n widget, then a scroll bar will appear to provide access to the entire\n set of plots.\n \n Added in version 0.9.9\n \"\"\"\n self.minPlotHeight = min\n self.resizeEvent(None)\n\n def widgetGroupInterface(self):\n return (None, MultiPlotWidget.saveState, MultiPlotWidget.restoreState)\n\n def saveState(self):\n return {}\n #return self.plotItem.saveState()\n \n def restoreState(self, state):\n pass\n #return self.plotItem.restoreState(state)\n\n def close(self):\n self.mPlotItem.close()\n self.mPlotItem = None\n self.setParent(None)\n GraphicsView.close(self)\n\n def setRange(self, *args, **kwds):\n GraphicsView.setRange(self, *args, **kwds)\n if self.centralWidget is not None:\n r = self.range\n minHeight = len(self.mPlotItem.plots) * self.minPlotHeight\n if r.height() < minHeight:\n r.setHeight(minHeight)\n r.setWidth(r.width() - self.verticalScrollBar().width())\n self.centralWidget.setGeometry(r)\n\n def resizeEvent(self, ev):\n if self.closed:\n return\n if self.autoPixelRange:\n self.range = QtCore.QRectF(0, 0, self.size().width(), self.size().height())\n MultiPlotWidget.setRange(self, self.range, padding=0, disableAutoPixel=False) ## we do this because some subclasses like to redefine setRange in an incompatible way.\n self.updateMatrix()\n", "path": "pyqtgraph/widgets/MultiPlotWidget.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"\nMultiPlotItem.py - Graphics item used for displaying an array of PlotItems\nCopyright 2010 Luke Campagnola\nDistributed under MIT/X11 license. See license.txt for more information.\n\"\"\"\nfrom numpy import ndarray\nfrom . import GraphicsLayout\nfrom ..metaarray import *\n\n\n__all__ = ['MultiPlotItem']\nclass MultiPlotItem(GraphicsLayout.GraphicsLayout):\n \"\"\"\n Automatically generates a grid of plots from a multi-dimensional array\n \"\"\"\n def __init__(self, *args, **kwds):\n GraphicsLayout.GraphicsLayout.__init__(self, *args, **kwds)\n self.plots = []\n\n\n def plot(self, data, **plotArgs):\n #self.layout.clear()\n\n if hasattr(data, 'implements') and data.implements('MetaArray'):\n if data.ndim != 2:\n raise Exception(\"MultiPlot currently only accepts 2D MetaArray.\")\n ic = data.infoCopy()\n ax = 0\n for i in [0, 1]:\n if 'cols' in ic[i]:\n ax = i\n break\n #print \"Plotting using axis %d as columns (%d plots)\" % (ax, data.shape[ax])\n for i in range(data.shape[ax]):\n pi = self.addPlot()\n self.nextRow()\n sl = [slice(None)] * 2\n sl[ax] = i\n pi.plot(data[tuple(sl)], **plotArgs)\n #self.layout.addItem(pi, i, 0)\n self.plots.append((pi, i, 0))\n info = ic[ax]['cols'][i]\n title = info.get('title', info.get('name', None))\n units = info.get('units', None)\n pi.setLabel('left', text=title, units=units)\n info = ic[1-ax]\n title = info.get('title', info.get('name', None))\n units = info.get('units', None)\n pi.setLabel('bottom', text=title, units=units)\n else:\n raise Exception(\"Data type %s not (yet?) supported for MultiPlot.\" % type(data))\n\n def close(self):\n for p in self.plots:\n p[0].close()\n self.plots = None\n self.clear()\n", "path": "pyqtgraph/graphicsItems/MultiPlotItem.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nMultiPlotWidget.py - Convenience class--GraphicsView widget displaying a MultiPlotItem\nCopyright 2010 Luke Campagnola\nDistributed under MIT/X11 license. 
See license.txt for more information.\n\"\"\"\nfrom ..Qt import QtCore\nfrom .GraphicsView import GraphicsView\nfrom ..graphicsItems import MultiPlotItem as MultiPlotItem\n\n__all__ = ['MultiPlotWidget']\nclass MultiPlotWidget(GraphicsView):\n \"\"\"Widget implementing a :class:`~pyqtgraph.GraphicsView` with a single\n :class:`~pyqtgraph.MultiPlotItem` inside.\"\"\"\n def __init__(self, parent=None):\n self.minPlotHeight = 50\n self.mPlotItem = MultiPlotItem.MultiPlotItem()\n GraphicsView.__init__(self, parent)\n self.enableMouse(False)\n self.setCentralItem(self.mPlotItem)\n ## Explicitly wrap methods from mPlotItem\n #for m in ['setData']:\n #setattr(self, m, getattr(self.mPlotItem, m))\n self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)\n self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)\n \n def __getattr__(self, attr): ## implicitly wrap methods from plotItem\n if hasattr(self.mPlotItem, attr):\n m = getattr(self.mPlotItem, attr)\n if hasattr(m, '__call__'):\n return m\n raise AttributeError(attr)\n\n def setMinimumPlotHeight(self, min):\n \"\"\"Set the minimum height for each sub-plot displayed. \n \n If the total height of all plots is greater than the height of the \n widget, then a scroll bar will appear to provide access to the entire\n set of plots.\n \n Added in version 0.9.9\n \"\"\"\n self.minPlotHeight = min\n self.resizeEvent(None)\n\n def widgetGroupInterface(self):\n return (None, MultiPlotWidget.saveState, MultiPlotWidget.restoreState)\n\n def saveState(self):\n return {}\n #return self.plotItem.saveState()\n \n def restoreState(self, state):\n pass\n #return self.plotItem.restoreState(state)\n\n def close(self):\n self.mPlotItem.close()\n self.mPlotItem = None\n self.setParent(None)\n GraphicsView.close(self)\n\n def setRange(self, *args, **kwds):\n GraphicsView.setRange(self, *args, **kwds)\n if self.centralWidget is not None:\n r = self.range\n minHeight = len(self.mPlotItem.plots) * self.minPlotHeight\n if r.height() < minHeight:\n r.setHeight(minHeight)\n r.setWidth(r.width() - self.verticalScrollBar().width())\n self.centralWidget.setGeometry(r)\n\n def resizeEvent(self, ev):\n if self.closed:\n return\n if self.autoPixelRange:\n self.range = QtCore.QRectF(0, 0, self.size().width(), self.size().height())\n MultiPlotWidget.setRange(self, self.range, padding=0, disableAutoPixel=False) ## we do this because some subclasses like to redefine setRange in an incompatible way.\n self.updateMatrix()\n", "path": "pyqtgraph/widgets/MultiPlotWidget.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"\nMultiPlotItem.py - Graphics item used for displaying an array of PlotItems\nCopyright 2010 Luke Campagnola\nDistributed under MIT/X11 license. See license.txt for more information.\n\"\"\"\nfrom . import GraphicsLayout\nfrom ..metaarray import *\n\n__all__ = ['MultiPlotItem']\n\n\nclass MultiPlotItem(GraphicsLayout.GraphicsLayout):\n \"\"\"\n :class:`~pyqtgraph.GraphicsLayout` that automatically generates a grid of\n plots from a MetaArray.\n\n .. 
seealso:: :class:`~pyqtgraph.MultiPlotWidget`: Widget containing a MultiPlotItem\n \"\"\"\n\n def __init__(self, *args, **kwds):\n GraphicsLayout.GraphicsLayout.__init__(self, *args, **kwds)\n self.plots = []\n\n def plot(self, data, **plotArgs):\n \"\"\"Plot the data from a MetaArray with each array column as a separate\n :class:`~pyqtgraph.PlotItem`.\n\n Axis labels are automatically extracted from the array info.\n\n ``plotArgs`` are passed to :meth:`PlotItem.plot\n <pyqtgraph.PlotItem.plot>`.\n \"\"\"\n #self.layout.clear()\n\n if hasattr(data, 'implements') and data.implements('MetaArray'):\n if data.ndim != 2:\n raise Exception(\"MultiPlot currently only accepts 2D MetaArray.\")\n ic = data.infoCopy()\n ax = 0\n for i in [0, 1]:\n if 'cols' in ic[i]:\n ax = i\n break\n #print \"Plotting using axis %d as columns (%d plots)\" % (ax, data.shape[ax])\n for i in range(data.shape[ax]):\n pi = self.addPlot()\n self.nextRow()\n sl = [slice(None)] * 2\n sl[ax] = i\n pi.plot(data[tuple(sl)], **plotArgs)\n #self.layout.addItem(pi, i, 0)\n self.plots.append((pi, i, 0))\n info = ic[ax]['cols'][i]\n title = info.get('title', info.get('name', None))\n units = info.get('units', None)\n pi.setLabel('left', text=title, units=units)\n info = ic[1-ax]\n title = info.get('title', info.get('name', None))\n units = info.get('units', None)\n pi.setLabel('bottom', text=title, units=units)\n else:\n raise Exception(\"Data type %s not (yet?) supported for MultiPlot.\" % type(data))\n\n def close(self):\n for p in self.plots:\n p[0].close()\n self.plots = None\n self.clear()\n", "path": "pyqtgraph/graphicsItems/MultiPlotItem.py"}]} | 1,776 | 511 |
gh_patches_debug_33409 | rasdani/github-patches | git_diff | bridgecrewio__checkov-4614 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
False positive for tests in bicep: CKV_AZURE_4, CKV_AZURE_5, CKV_AZURE_7, CKV_AZURE_8
**Describe the issue**
Checkov reports false positives for CKV_AZURE_4, CKV_AZURE_5, CKV_AZURE_7 and CKV_AZURE_8 on the AKS cluster defined in the Bicep template below, even though the corresponding settings are configured in the template.
**Examples**
Here is the Bicep code (shortened for brevity):
```
resource aks 'Microsoft.ContainerService/managedClusters@2022-11-01' = {
  properties: {
    enableRBAC: true
    addonProfiles: {
      omsAgent: {
        enabled: true
        config: {
          logAnalyticsWorkspaceResourceId: logAnalyticsWorkspaceId
        }
      }
      azurePolicy: {
        enabled: true
        config: {
          version: 'v2'
        }
      }
      kubeDashboard: {
        enabled: false
      }
    }
    networkProfile: {
      networkPlugin: 'azure'
      networkPolicy: 'azure'
    }
  }
}
```
**Version (please complete the following information):**
- checkov v2.1.244
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/arm/checks/resource/AKSLoggingEnabled.py`
Content:
```
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.arm.base_resource_check import BaseResourceCheck
3
4
5 class AKSLoggingEnabled(BaseResourceCheck):
6 def __init__(self):
7 # apiVersion 2017-08-03 = Fail - No addonProfiles option to configure
8 name = "Ensure AKS logging to Azure Monitoring is Configured"
9 id = "CKV_AZURE_4"
10 supported_resources = ['Microsoft.ContainerService/managedClusters']
11 categories = [CheckCategories.KUBERNETES]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def scan_resource_conf(self, conf):
15 if conf.get("apiVersion"):
16 if conf["apiVersion"] == "2017-08-31":
17 # No addonProfiles option to configure
18 return CheckResult.FAILED
19
20 properties = conf.get("properties")
21 if isinstance(properties, dict):
22 addon_profiles = properties.get("addonProfiles")
23 if isinstance(addon_profiles, dict):
24 omsagent = addon_profiles.get("omsagent")
25 if isinstance(omsagent, dict) and omsagent.get("enabled"):
26 return CheckResult.PASSED
27
28 return CheckResult.FAILED
29
30
31 check = AKSLoggingEnabled()
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/arm/checks/resource/AKSLoggingEnabled.py b/checkov/arm/checks/resource/AKSLoggingEnabled.py
--- a/checkov/arm/checks/resource/AKSLoggingEnabled.py
+++ b/checkov/arm/checks/resource/AKSLoggingEnabled.py
@@ -1,18 +1,22 @@
+from __future__ import annotations
+
+from typing import Any
+
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.arm.base_resource_check import BaseResourceCheck
class AKSLoggingEnabled(BaseResourceCheck):
- def __init__(self):
+ def __init__(self) -> None:
# apiVersion 2017-08-03 = Fail - No addonProfiles option to configure
name = "Ensure AKS logging to Azure Monitoring is Configured"
id = "CKV_AZURE_4"
- supported_resources = ['Microsoft.ContainerService/managedClusters']
- categories = [CheckCategories.KUBERNETES]
+ supported_resources = ("Microsoft.ContainerService/managedClusters",)
+ categories = (CheckCategories.KUBERNETES,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def scan_resource_conf(self, conf):
- if conf.get("apiVersion"):
+ def scan_resource_conf(self, conf: dict[str, Any]) -> CheckResult:
+ if "apiVersion" in conf:
if conf["apiVersion"] == "2017-08-31":
# No addonProfiles option to configure
return CheckResult.FAILED
@@ -22,6 +26,10 @@
addon_profiles = properties.get("addonProfiles")
if isinstance(addon_profiles, dict):
omsagent = addon_profiles.get("omsagent")
+ if not omsagent:
+ # it can be written in lowercase or camelCase
+ omsagent = addon_profiles.get("omsAgent")
+
if isinstance(omsagent, dict) and omsagent.get("enabled"):
return CheckResult.PASSED
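
For orientation, a minimal sketch (not part of the original record) of how the patched check evaluates a resource whose add-on profile key is camelCase, as Bicep emits it; the workspace id is a placeholder:

```python
from checkov.arm.checks.resource.AKSLoggingEnabled import check
from checkov.common.models.enums import CheckResult

# camelCase "omsAgent" used to fall through to CheckResult.FAILED before the fix
conf = {
    "apiVersion": "2022-11-01",
    "type": "Microsoft.ContainerService/managedClusters",
    "properties": {
        "addonProfiles": {
            "omsAgent": {
                "enabled": True,
                "config": {"logAnalyticsWorkspaceResourceId": "<workspace-id>"},
            }
        }
    },
}

assert check.scan_resource_conf(conf) == CheckResult.PASSED
```
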
| {"golden_diff": "diff --git a/checkov/arm/checks/resource/AKSLoggingEnabled.py b/checkov/arm/checks/resource/AKSLoggingEnabled.py\n--- a/checkov/arm/checks/resource/AKSLoggingEnabled.py\n+++ b/checkov/arm/checks/resource/AKSLoggingEnabled.py\n@@ -1,18 +1,22 @@\n+from __future__ import annotations\n+\n+from typing import Any\n+\n from checkov.common.models.enums import CheckResult, CheckCategories\n from checkov.arm.base_resource_check import BaseResourceCheck\n \n \n class AKSLoggingEnabled(BaseResourceCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n # apiVersion 2017-08-03 = Fail - No addonProfiles option to configure\n name = \"Ensure AKS logging to Azure Monitoring is Configured\"\n id = \"CKV_AZURE_4\"\n- supported_resources = ['Microsoft.ContainerService/managedClusters']\n- categories = [CheckCategories.KUBERNETES]\n+ supported_resources = (\"Microsoft.ContainerService/managedClusters\",)\n+ categories = (CheckCategories.KUBERNETES,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def scan_resource_conf(self, conf):\n- if conf.get(\"apiVersion\"):\n+ def scan_resource_conf(self, conf: dict[str, Any]) -> CheckResult:\n+ if \"apiVersion\" in conf:\n if conf[\"apiVersion\"] == \"2017-08-31\":\n # No addonProfiles option to configure\n return CheckResult.FAILED\n@@ -22,6 +26,10 @@\n addon_profiles = properties.get(\"addonProfiles\")\n if isinstance(addon_profiles, dict):\n omsagent = addon_profiles.get(\"omsagent\")\n+ if not omsagent:\n+ # it can be written in lowercase or camelCase\n+ omsagent = addon_profiles.get(\"omsAgent\")\n+\n if isinstance(omsagent, dict) and omsagent.get(\"enabled\"):\n return CheckResult.PASSED\n", "issue": "False positive for tests in bicep: CKV_AZURE_4, CKV_AZURE_5, CKV_AZURE_7, CKV_AZURE_8\n**Describe the issue**\r\nfalse positive for tests CKV_AZURE_4, CKV_AZURE_5, CKV_AZURE_7, CKV_AZURE_8\r\n\r\n\r\n**Examples**\r\nHere is the bicep code\r\n Code made shorter.\r\n```\r\nresource aks 'Microsoft.ContainerService/managedClusters@2022-11-01' = {\r\n properties: {\r\n enableRBAC: true\r\n }\r\n addonProfiles: {\r\n omsAgent: {\r\n enabled: true\r\n config: {\r\n logAnalyticsWorkspaceResourceId: logAnalyticsWorkspaceId\r\n }\r\n }\r\n azurePolicy: {\r\n enabled: true\r\n config: {\r\n version: 'v2'\r\n }\r\n }\r\n kubeDashboard: {\r\n enabled: false\r\n }\r\n }\r\n networkProfile: {\r\n networkPlugin: 'azure'\r\n networkPolicy: 'azure'\r\n }\r\n }\r\n}\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - checkov v2.1.244\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.arm.base_resource_check import BaseResourceCheck\n\n\nclass AKSLoggingEnabled(BaseResourceCheck):\n def __init__(self):\n # apiVersion 2017-08-03 = Fail - No addonProfiles option to configure\n name = \"Ensure AKS logging to Azure Monitoring is Configured\"\n id = \"CKV_AZURE_4\"\n supported_resources = ['Microsoft.ContainerService/managedClusters']\n categories = [CheckCategories.KUBERNETES]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if conf.get(\"apiVersion\"):\n if conf[\"apiVersion\"] == \"2017-08-31\":\n # No addonProfiles option to configure\n return CheckResult.FAILED\n\n properties = conf.get(\"properties\")\n if isinstance(properties, dict):\n addon_profiles = properties.get(\"addonProfiles\")\n if 
isinstance(addon_profiles, dict):\n omsagent = addon_profiles.get(\"omsagent\")\n if isinstance(omsagent, dict) and omsagent.get(\"enabled\"):\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = AKSLoggingEnabled()\n", "path": "checkov/arm/checks/resource/AKSLoggingEnabled.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.arm.base_resource_check import BaseResourceCheck\n\n\nclass AKSLoggingEnabled(BaseResourceCheck):\n def __init__(self) -> None:\n # apiVersion 2017-08-03 = Fail - No addonProfiles option to configure\n name = \"Ensure AKS logging to Azure Monitoring is Configured\"\n id = \"CKV_AZURE_4\"\n supported_resources = (\"Microsoft.ContainerService/managedClusters\",)\n categories = (CheckCategories.KUBERNETES,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, Any]) -> CheckResult:\n if \"apiVersion\" in conf:\n if conf[\"apiVersion\"] == \"2017-08-31\":\n # No addonProfiles option to configure\n return CheckResult.FAILED\n\n properties = conf.get(\"properties\")\n if isinstance(properties, dict):\n addon_profiles = properties.get(\"addonProfiles\")\n if isinstance(addon_profiles, dict):\n omsagent = addon_profiles.get(\"omsagent\")\n if not omsagent:\n # it can be written in lowercase or camelCase\n omsagent = addon_profiles.get(\"omsAgent\")\n\n if isinstance(omsagent, dict) and omsagent.get(\"enabled\"):\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = AKSLoggingEnabled()\n", "path": "checkov/arm/checks/resource/AKSLoggingEnabled.py"}]} | 865 | 454 |
gh_patches_debug_18581 | rasdani/github-patches | git_diff | scikit-image__scikit-image-219 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
message about unit tests not available
If scikits-image is installed without nose, a message about unit tests not being available is printed on import, which is a little bit distracting to the casual end-user.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/__init__.py`
Content:
```
1 """Image Processing SciKit (Toolbox for SciPy)
2
3 ``scikits-image`` (a.k.a. ``skimage``) is a collection of algorithms for image
4 processing and computer vision.
5
6 The main package of ``skimage`` only provides a few utilities for converting
7 between image data types; for most features, you need to import one of the
8 following subpackages:
9
10 Subpackages
11 -----------
12 color
13 Color space conversion.
14 data
15 Test images and example data.
16 draw
17 Image drawing primitives (lines, text, etc.).
18 exposure
19 Image intensity adjustment (e.g., histogram equalization).
20 feature
21 Feature detection (e.g. texture analysis, corners, etc.).
22 filter
23 Sharpening, edge finding, denoising, etc.
24 graph
25 Graph-theoretic operations, e.g. dynamic programming (shortest paths).
26 io
27 Reading, saving, and displaying images and video.
28 measure
29 Measurement of image properties, e.g., similarity and contours.
30 morphology
31 Morphological operations, e.g. opening or skeletonization.
32 segmentation
33 Splitting an image into self-similar regions.
34 transform
35 Geometric and other transforms, e.g. rotation or the Radon transform.
36 util
37 Generic utilities.
38
39 Utility Functions
40 -----------------
41 get_log
42 Returns the ``skimage`` log. Use this to print debug output.
43 img_as_float
44 Convert an image to floating point format, with values in [0, 1].
45 img_as_uint
46 Convert an image to unsigned integer format, with values in [0, 65535].
47 img_as_int
48 Convert an image to signed integer format, with values in [-32768, 32767].
49 img_as_ubyte
50 Convert an image to unsigned byte format, with values in [0, 255].
51
52 """
53
54 import os.path as _osp
55
56 pkg_dir = _osp.abspath(_osp.dirname(__file__))
57 data_dir = _osp.join(pkg_dir, 'data')
58
59 try:
60 from .version import version as __version__
61 except ImportError:
62 __version__ = "unbuilt-dev"
63
64
65 def _setup_test(verbose=False):
66 import functools
67
68 args = ['', '--exe', '-w', pkg_dir]
69 if verbose:
70 args.extend(['-v', '-s'])
71
72 try:
73 import nose as _nose
74 except ImportError:
75 print("Could not load nose. Unit tests not available.")
76 return None
77 else:
78 f = functools.partial(_nose.run, 'skimage', argv=args)
79 f.__doc__ = 'Invoke the skimage test suite.'
80 return f
81
82 test = _setup_test()
83 if test is None:
84 try:
85 del test
86 except NameError:
87 pass
88
89 test_verbose = _setup_test(verbose=True)
90 if test_verbose is None:
91 try:
92 del test
93 except NameError:
94 pass
95
96
97 def get_log(name=None):
98 """Return a console logger.
99
100 Output may be sent to the logger using the `debug`, `info`, `warning`,
101 `error` and `critical` methods.
102
103 Parameters
104 ----------
105 name : str
106 Name of the log.
107
108 References
109 ----------
110 .. [1] Logging facility for Python,
111 http://docs.python.org/library/logging.html
112
113 """
114 import logging
115
116 if name is None:
117 name = 'skimage'
118 else:
119 name = 'skimage.' + name
120
121 log = logging.getLogger(name)
122 return log
123
124
125 def _setup_log():
126 """Configure root logger.
127
128 """
129 import logging
130 import sys
131
132 log = logging.getLogger()
133
134 try:
135 handler = logging.StreamHandler(stream=sys.stdout)
136 except TypeError:
137 handler = logging.StreamHandler(strm=sys.stdout)
138
139 formatter = logging.Formatter(
140 '%(name)s: %(levelname)s: %(message)s'
141 )
142 handler.setFormatter(formatter)
143
144 log.addHandler(handler)
145 log.setLevel(logging.WARNING)
146
147 _setup_log()
148
149 from .util.dtype import *
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/__init__.py b/skimage/__init__.py
--- a/skimage/__init__.py
+++ b/skimage/__init__.py
@@ -72,26 +72,20 @@
try:
import nose as _nose
except ImportError:
- print("Could not load nose. Unit tests not available.")
- return None
+ def broken_test_func():
+ """This would invoke the skimage test suite, but nose couldn't be
+ imported so the test suite can not run.
+ """
+ raise ImportError("Could not load nose. Unit tests not available.")
+ return broken_test_func
else:
f = functools.partial(_nose.run, 'skimage', argv=args)
f.__doc__ = 'Invoke the skimage test suite.'
return f
-test = _setup_test()
-if test is None:
- try:
- del test
- except NameError:
- pass
+test = _setup_test()
test_verbose = _setup_test(verbose=True)
-if test_verbose is None:
- try:
- del test
- except NameError:
- pass
def get_log(name=None):
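
For orientation, a short sketch (not part of the original record) of the behaviour after this patch, assuming nose is not installed:

```python
import skimage            # no "Could not load nose" message at import time any more

try:
    skimage.test()        # the stub raises only when the test suite is actually requested
except ImportError as err:
    print(err)            # "Could not load nose. Unit tests not available."
```
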
| {"golden_diff": "diff --git a/skimage/__init__.py b/skimage/__init__.py\n--- a/skimage/__init__.py\n+++ b/skimage/__init__.py\n@@ -72,26 +72,20 @@\n try:\n import nose as _nose\n except ImportError:\n- print(\"Could not load nose. Unit tests not available.\")\n- return None\n+ def broken_test_func():\n+ \"\"\"This would invoke the skimage test suite, but nose couldn't be\n+ imported so the test suite can not run.\n+ \"\"\"\n+ raise ImportError(\"Could not load nose. Unit tests not available.\")\n+ return broken_test_func\n else:\n f = functools.partial(_nose.run, 'skimage', argv=args)\n f.__doc__ = 'Invoke the skimage test suite.'\n return f\n \n-test = _setup_test()\n-if test is None:\n- try:\n- del test\n- except NameError:\n- pass\n \n+test = _setup_test()\n test_verbose = _setup_test(verbose=True)\n-if test_verbose is None:\n- try:\n- del test\n- except NameError:\n- pass\n \n \n def get_log(name=None):\n", "issue": "message about unit tests not available\nIf scikits-image is installed without nose, a message is printed about unit tests not being available is printed on import but it is a little bit distracting to the casual end-user.\n\n", "before_files": [{"content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikits-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Image drawing primitives (lines, text, etc.).\nexposure\n Image intensity adjustment (e.g., histogram equalization).\nfeature\n Feature detection (e.g. texture analysis, corners, etc.).\nfilter\n Sharpening, edge finding, denoising, etc.\ngraph\n Graph-theoretic operations, e.g. dynamic programming (shortest paths).\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g. opening or skeletonization.\nsegmentation\n Splitting an image into self-similar regions.\ntransform\n Geometric and other transforms, e.g. rotation or the Radon transform.\nutil\n Generic utilities.\n\nUtility Functions\n-----------------\nget_log\n Returns the ``skimage`` log. Use this to print debug output.\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\n\n\"\"\"\n\nimport os.path as _osp\n\npkg_dir = _osp.abspath(_osp.dirname(__file__))\ndata_dir = _osp.join(pkg_dir, 'data')\n\ntry:\n from .version import version as __version__\nexcept ImportError:\n __version__ = \"unbuilt-dev\"\n\n\ndef _setup_test(verbose=False):\n import functools\n\n args = ['', '--exe', '-w', pkg_dir]\n if verbose:\n args.extend(['-v', '-s'])\n\n try:\n import nose as _nose\n except ImportError:\n print(\"Could not load nose. 
Unit tests not available.\")\n return None\n else:\n f = functools.partial(_nose.run, 'skimage', argv=args)\n f.__doc__ = 'Invoke the skimage test suite.'\n return f\n\ntest = _setup_test()\nif test is None:\n try:\n del test\n except NameError:\n pass\n\ntest_verbose = _setup_test(verbose=True)\nif test_verbose is None:\n try:\n del test\n except NameError:\n pass\n\n\ndef get_log(name=None):\n \"\"\"Return a console logger.\n\n Output may be sent to the logger using the `debug`, `info`, `warning`,\n `error` and `critical` methods.\n\n Parameters\n ----------\n name : str\n Name of the log.\n\n References\n ----------\n .. [1] Logging facility for Python,\n http://docs.python.org/library/logging.html\n\n \"\"\"\n import logging\n\n if name is None:\n name = 'skimage'\n else:\n name = 'skimage.' + name\n\n log = logging.getLogger(name)\n return log\n\n\ndef _setup_log():\n \"\"\"Configure root logger.\n\n \"\"\"\n import logging\n import sys\n\n log = logging.getLogger()\n\n try:\n handler = logging.StreamHandler(stream=sys.stdout)\n except TypeError:\n handler = logging.StreamHandler(strm=sys.stdout)\n\n formatter = logging.Formatter(\n '%(name)s: %(levelname)s: %(message)s'\n )\n handler.setFormatter(formatter)\n\n log.addHandler(handler)\n log.setLevel(logging.WARNING)\n\n_setup_log()\n\nfrom .util.dtype import *\n", "path": "skimage/__init__.py"}], "after_files": [{"content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikits-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Image drawing primitives (lines, text, etc.).\nexposure\n Image intensity adjustment (e.g., histogram equalization).\nfeature\n Feature detection (e.g. texture analysis, corners, etc.).\nfilter\n Sharpening, edge finding, denoising, etc.\ngraph\n Graph-theoretic operations, e.g. dynamic programming (shortest paths).\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g. opening or skeletonization.\nsegmentation\n Splitting an image into self-similar regions.\ntransform\n Geometric and other transforms, e.g. rotation or the Radon transform.\nutil\n Generic utilities.\n\nUtility Functions\n-----------------\nget_log\n Returns the ``skimage`` log. 
Use this to print debug output.\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\n\n\"\"\"\n\nimport os.path as _osp\n\npkg_dir = _osp.abspath(_osp.dirname(__file__))\ndata_dir = _osp.join(pkg_dir, 'data')\n\ntry:\n from .version import version as __version__\nexcept ImportError:\n __version__ = \"unbuilt-dev\"\n\n\ndef _setup_test(verbose=False):\n import functools\n\n args = ['', '--exe', '-w', pkg_dir]\n if verbose:\n args.extend(['-v', '-s'])\n\n try:\n import nose as _nose\n except ImportError:\n def broken_test_func():\n \"\"\"This would invoke the skimage test suite, but nose couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load nose. Unit tests not available.\")\n return broken_test_func\n else:\n f = functools.partial(_nose.run, 'skimage', argv=args)\n f.__doc__ = 'Invoke the skimage test suite.'\n return f\n\n\ntest = _setup_test()\ntest_verbose = _setup_test(verbose=True)\n\n\ndef get_log(name=None):\n \"\"\"Return a console logger.\n\n Output may be sent to the logger using the `debug`, `info`, `warning`,\n `error` and `critical` methods.\n\n Parameters\n ----------\n name : str\n Name of the log.\n\n References\n ----------\n .. [1] Logging facility for Python,\n http://docs.python.org/library/logging.html\n\n \"\"\"\n import logging\n\n if name is None:\n name = 'skimage'\n else:\n name = 'skimage.' + name\n\n log = logging.getLogger(name)\n return log\n\n\ndef _setup_log():\n \"\"\"Configure root logger.\n\n \"\"\"\n import logging\n import sys\n\n log = logging.getLogger()\n\n try:\n handler = logging.StreamHandler(stream=sys.stdout)\n except TypeError:\n handler = logging.StreamHandler(strm=sys.stdout)\n\n formatter = logging.Formatter(\n '%(name)s: %(levelname)s: %(message)s'\n )\n handler.setFormatter(formatter)\n\n log.addHandler(handler)\n log.setLevel(logging.WARNING)\n\n_setup_log()\n\nfrom .util.dtype import *\n", "path": "skimage/__init__.py"}]} | 1,516 | 265 |
gh_patches_debug_18589 | rasdani/github-patches | git_diff | ethereum__web3.py-1763 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upgrade Parity version in test infrastructure
### What was wrong?
Our Parity tests run on version 2.3.5. According to a recent OpenEthereum [blog post](https://medium.com/openethereum/vision-for-openethereum-ex-parity-client-eb7b11f6eef8), the latest stable recommendation is 2.5.13.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `web3/middleware/validation.py`
Content:
```
1 from typing import (
2 TYPE_CHECKING,
3 Any,
4 Callable,
5 )
6
7 from eth_utils.curried import (
8 apply_formatter_at_index,
9 apply_formatter_if,
10 apply_formatters_to_dict,
11 is_null,
12 )
13 from eth_utils.toolz import (
14 complement,
15 compose,
16 curry,
17 dissoc,
18 )
19 from hexbytes import (
20 HexBytes,
21 )
22
23 from web3._utils.rpc_abi import (
24 RPC,
25 )
26 from web3.exceptions import (
27 ExtraDataLengthError,
28 ValidationError,
29 )
30 from web3.middleware.formatting import (
31 construct_web3_formatting_middleware,
32 )
33 from web3.types import (
34 FormattersDict,
35 TxParams,
36 )
37
38 if TYPE_CHECKING:
39 from web3 import Web3 # noqa: F401
40
41 MAX_EXTRADATA_LENGTH = 32
42
43 is_not_null = complement(is_null)
44
45
46 @curry
47 def validate_chain_id(web3: "Web3", chain_id: int) -> int:
48 if int(chain_id) == web3.eth.chainId:
49 return chain_id
50 else:
51 raise ValidationError(
52 "The transaction declared chain ID %r, "
53 "but the connected node is on %r" % (
54 chain_id,
55 web3.eth.chainId,
56 )
57 )
58
59
60 def check_extradata_length(val: Any) -> Any:
61 if not isinstance(val, (str, int, bytes)):
62 return val
63 result = HexBytes(val)
64 if len(result) > MAX_EXTRADATA_LENGTH:
65 raise ExtraDataLengthError(
66 "The field extraData is %d bytes, but should be %d. "
67 "It is quite likely that you are connected to a POA chain. "
68 "Refer to "
69 "http://web3py.readthedocs.io/en/stable/middleware.html#geth-style-proof-of-authority "
70 "for more details. The full extraData is: %r" % (
71 len(result), MAX_EXTRADATA_LENGTH, result
72 )
73 )
74 return val
75
76
77 def transaction_normalizer(transaction: TxParams) -> TxParams:
78 return dissoc(transaction, 'chainId')
79
80
81 def transaction_param_validator(web3: "Web3") -> Callable[..., Any]:
82 transactions_params_validators = {
83 "chainId": apply_formatter_if(
84 # Bypass `validate_chain_id` if chainId can't be determined
85 lambda _: is_not_null(web3.eth.chainId),
86 validate_chain_id(web3),
87 ),
88 }
89 return apply_formatter_at_index(
90 apply_formatters_to_dict(transactions_params_validators),
91 0
92 )
93
94
95 BLOCK_VALIDATORS = {
96 'extraData': check_extradata_length,
97 }
98
99
100 block_validator = apply_formatter_if(
101 is_not_null,
102 apply_formatters_to_dict(BLOCK_VALIDATORS)
103 )
104
105
106 @curry
107 def chain_id_validator(web3: "Web3") -> Callable[..., Any]:
108 return compose(
109 apply_formatter_at_index(transaction_normalizer, 0),
110 transaction_param_validator(web3)
111 )
112
113
114 def build_validators_with_web3(w3: "Web3") -> FormattersDict:
115 return dict(
116 request_formatters={
117 RPC.eth_sendTransaction: chain_id_validator(w3),
118 RPC.eth_estimateGas: chain_id_validator(w3),
119 RPC.eth_call: chain_id_validator(w3),
120 },
121 result_formatters={
122 RPC.eth_getBlockByHash: block_validator,
123 RPC.eth_getBlockByNumber: block_validator,
124 },
125 )
126
127
128 validation_middleware = construct_web3_formatting_middleware(build_validators_with_web3)
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/web3/middleware/validation.py b/web3/middleware/validation.py
--- a/web3/middleware/validation.py
+++ b/web3/middleware/validation.py
@@ -9,6 +9,7 @@
apply_formatter_if,
apply_formatters_to_dict,
is_null,
+ is_string,
)
from eth_utils.toolz import (
complement,
@@ -20,6 +21,9 @@
HexBytes,
)
+from web3._utils.formatters import (
+ hex_to_integer,
+)
from web3._utils.rpc_abi import (
RPC,
)
@@ -42,10 +46,12 @@
is_not_null = complement(is_null)
+to_integer_if_hex = apply_formatter_if(is_string, hex_to_integer)
+
@curry
def validate_chain_id(web3: "Web3", chain_id: int) -> int:
- if int(chain_id) == web3.eth.chainId:
+ if to_integer_if_hex(chain_id) == web3.eth.chainId:
return chain_id
else:
raise ValidationError(
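
For orientation, a rough, untested sketch (not part of the original record) of what the change enables — a hex-encoded `chainId` no longer breaks the validation middleware. It assumes an eth-tester backed `Web3` instance with funded default accounts:

```python
from web3 import Web3

w3 = Web3(Web3.EthereumTesterProvider())

tx = {
    "from": w3.eth.accounts[0],
    "to": w3.eth.accounts[1],
    "value": 1,
    # hex string instead of an int; int("0x...") used to raise inside validate_chain_id
    "chainId": hex(w3.eth.chainId),
}
w3.eth.sendTransaction(tx)  # accepted once to_integer_if_hex() normalises the value
```
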
| {"golden_diff": "diff --git a/web3/middleware/validation.py b/web3/middleware/validation.py\n--- a/web3/middleware/validation.py\n+++ b/web3/middleware/validation.py\n@@ -9,6 +9,7 @@\n apply_formatter_if,\n apply_formatters_to_dict,\n is_null,\n+ is_string,\n )\n from eth_utils.toolz import (\n complement,\n@@ -20,6 +21,9 @@\n HexBytes,\n )\n \n+from web3._utils.formatters import (\n+ hex_to_integer,\n+)\n from web3._utils.rpc_abi import (\n RPC,\n )\n@@ -42,10 +46,12 @@\n \n is_not_null = complement(is_null)\n \n+to_integer_if_hex = apply_formatter_if(is_string, hex_to_integer)\n+\n \n @curry\n def validate_chain_id(web3: \"Web3\", chain_id: int) -> int:\n- if int(chain_id) == web3.eth.chainId:\n+ if to_integer_if_hex(chain_id) == web3.eth.chainId:\n return chain_id\n else:\n raise ValidationError(\n", "issue": "Upgrade Parity version in test infrastructure\n### What was wrong?\r\n\r\nOur Parity tests run on version 2.3.5. According to a recent OpenEthereum [blog post](https://medium.com/openethereum/vision-for-openethereum-ex-parity-client-eb7b11f6eef8), latest stable recommendation is 2.5.13.\n", "before_files": [{"content": "from typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n)\n\nfrom eth_utils.curried import (\n apply_formatter_at_index,\n apply_formatter_if,\n apply_formatters_to_dict,\n is_null,\n)\nfrom eth_utils.toolz import (\n complement,\n compose,\n curry,\n dissoc,\n)\nfrom hexbytes import (\n HexBytes,\n)\n\nfrom web3._utils.rpc_abi import (\n RPC,\n)\nfrom web3.exceptions import (\n ExtraDataLengthError,\n ValidationError,\n)\nfrom web3.middleware.formatting import (\n construct_web3_formatting_middleware,\n)\nfrom web3.types import (\n FormattersDict,\n TxParams,\n)\n\nif TYPE_CHECKING:\n from web3 import Web3 # noqa: F401\n\nMAX_EXTRADATA_LENGTH = 32\n\nis_not_null = complement(is_null)\n\n\n@curry\ndef validate_chain_id(web3: \"Web3\", chain_id: int) -> int:\n if int(chain_id) == web3.eth.chainId:\n return chain_id\n else:\n raise ValidationError(\n \"The transaction declared chain ID %r, \"\n \"but the connected node is on %r\" % (\n chain_id,\n web3.eth.chainId,\n )\n )\n\n\ndef check_extradata_length(val: Any) -> Any:\n if not isinstance(val, (str, int, bytes)):\n return val\n result = HexBytes(val)\n if len(result) > MAX_EXTRADATA_LENGTH:\n raise ExtraDataLengthError(\n \"The field extraData is %d bytes, but should be %d. \"\n \"It is quite likely that you are connected to a POA chain. \"\n \"Refer to \"\n \"http://web3py.readthedocs.io/en/stable/middleware.html#geth-style-proof-of-authority \"\n \"for more details. 
The full extraData is: %r\" % (\n len(result), MAX_EXTRADATA_LENGTH, result\n )\n )\n return val\n\n\ndef transaction_normalizer(transaction: TxParams) -> TxParams:\n return dissoc(transaction, 'chainId')\n\n\ndef transaction_param_validator(web3: \"Web3\") -> Callable[..., Any]:\n transactions_params_validators = {\n \"chainId\": apply_formatter_if(\n # Bypass `validate_chain_id` if chainId can't be determined\n lambda _: is_not_null(web3.eth.chainId),\n validate_chain_id(web3),\n ),\n }\n return apply_formatter_at_index(\n apply_formatters_to_dict(transactions_params_validators),\n 0\n )\n\n\nBLOCK_VALIDATORS = {\n 'extraData': check_extradata_length,\n}\n\n\nblock_validator = apply_formatter_if(\n is_not_null,\n apply_formatters_to_dict(BLOCK_VALIDATORS)\n)\n\n\n@curry\ndef chain_id_validator(web3: \"Web3\") -> Callable[..., Any]:\n return compose(\n apply_formatter_at_index(transaction_normalizer, 0),\n transaction_param_validator(web3)\n )\n\n\ndef build_validators_with_web3(w3: \"Web3\") -> FormattersDict:\n return dict(\n request_formatters={\n RPC.eth_sendTransaction: chain_id_validator(w3),\n RPC.eth_estimateGas: chain_id_validator(w3),\n RPC.eth_call: chain_id_validator(w3),\n },\n result_formatters={\n RPC.eth_getBlockByHash: block_validator,\n RPC.eth_getBlockByNumber: block_validator,\n },\n )\n\n\nvalidation_middleware = construct_web3_formatting_middleware(build_validators_with_web3)\n", "path": "web3/middleware/validation.py"}], "after_files": [{"content": "from typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n)\n\nfrom eth_utils.curried import (\n apply_formatter_at_index,\n apply_formatter_if,\n apply_formatters_to_dict,\n is_null,\n is_string,\n)\nfrom eth_utils.toolz import (\n complement,\n compose,\n curry,\n dissoc,\n)\nfrom hexbytes import (\n HexBytes,\n)\n\nfrom web3._utils.formatters import (\n hex_to_integer,\n)\nfrom web3._utils.rpc_abi import (\n RPC,\n)\nfrom web3.exceptions import (\n ExtraDataLengthError,\n ValidationError,\n)\nfrom web3.middleware.formatting import (\n construct_web3_formatting_middleware,\n)\nfrom web3.types import (\n FormattersDict,\n TxParams,\n)\n\nif TYPE_CHECKING:\n from web3 import Web3 # noqa: F401\n\nMAX_EXTRADATA_LENGTH = 32\n\nis_not_null = complement(is_null)\n\nto_integer_if_hex = apply_formatter_if(is_string, hex_to_integer)\n\n\n@curry\ndef validate_chain_id(web3: \"Web3\", chain_id: int) -> int:\n if to_integer_if_hex(chain_id) == web3.eth.chainId:\n return chain_id\n else:\n raise ValidationError(\n \"The transaction declared chain ID %r, \"\n \"but the connected node is on %r\" % (\n chain_id,\n web3.eth.chainId,\n )\n )\n\n\ndef check_extradata_length(val: Any) -> Any:\n if not isinstance(val, (str, int, bytes)):\n return val\n result = HexBytes(val)\n if len(result) > MAX_EXTRADATA_LENGTH:\n raise ExtraDataLengthError(\n \"The field extraData is %d bytes, but should be %d. \"\n \"It is quite likely that you are connected to a POA chain. \"\n \"Refer to \"\n \"http://web3py.readthedocs.io/en/stable/middleware.html#geth-style-proof-of-authority \"\n \"for more details. 
The full extraData is: %r\" % (\n len(result), MAX_EXTRADATA_LENGTH, result\n )\n )\n return val\n\n\ndef transaction_normalizer(transaction: TxParams) -> TxParams:\n return dissoc(transaction, 'chainId')\n\n\ndef transaction_param_validator(web3: \"Web3\") -> Callable[..., Any]:\n transactions_params_validators = {\n \"chainId\": apply_formatter_if(\n # Bypass `validate_chain_id` if chainId can't be determined\n lambda _: is_not_null(web3.eth.chainId),\n validate_chain_id(web3),\n ),\n }\n return apply_formatter_at_index(\n apply_formatters_to_dict(transactions_params_validators),\n 0\n )\n\n\nBLOCK_VALIDATORS = {\n 'extraData': check_extradata_length,\n}\n\n\nblock_validator = apply_formatter_if(\n is_not_null,\n apply_formatters_to_dict(BLOCK_VALIDATORS)\n)\n\n\n@curry\ndef chain_id_validator(web3: \"Web3\") -> Callable[..., Any]:\n return compose(\n apply_formatter_at_index(transaction_normalizer, 0),\n transaction_param_validator(web3)\n )\n\n\ndef build_validators_with_web3(w3: \"Web3\") -> FormattersDict:\n return dict(\n request_formatters={\n RPC.eth_sendTransaction: chain_id_validator(w3),\n RPC.eth_estimateGas: chain_id_validator(w3),\n RPC.eth_call: chain_id_validator(w3),\n },\n result_formatters={\n RPC.eth_getBlockByHash: block_validator,\n RPC.eth_getBlockByNumber: block_validator,\n },\n )\n\n\nvalidation_middleware = construct_web3_formatting_middleware(build_validators_with_web3)\n", "path": "web3/middleware/validation.py"}]} | 1,386 | 236 |
gh_patches_debug_10520 | rasdani/github-patches | git_diff | spyder-ide__spyder-4456 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add more annotation keywords to the Editor (e.g. BUG, OPTIMIZE, etc)
Code annotation analysis makes a blue check sign in the editor, which is a useful feature to find TODOs or FIXMEs.
But could it add more words? On the net, and more importantly in the [PEP 350 code-tag index](https://www.python.org/dev/peps/pep-0350/#mnemonics), we can find many other tags, e.g. BUG, HACK, OPTIMIZE, etc.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spyder/utils/codeanalysis.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 """
8 Source code analysis utilities
9 """
10
11 import sys
12 import re
13 import os
14 import tempfile
15 import traceback
16
17 # Local import
18 from spyder.config.base import _, DEBUG
19 from spyder.utils import programs, encoding
20 from spyder.py3compat import to_text_string, to_binary_string, PY3
21 from spyder import dependencies
22 DEBUG_EDITOR = DEBUG >= 3
23
24 #==============================================================================
25 # Pyflakes/pep8 code analysis
26 #==============================================================================
27 TASKS_PATTERN = r"(^|#)[ ]*(TODO|FIXME|XXX|HINT|TIP|@todo)([^#]*)"
28
29 #TODO: this is a test for the following function
30 def find_tasks(source_code):
31 """Find tasks in source code (TODO, FIXME, XXX, ...)"""
32 results = []
33 for line, text in enumerate(source_code.splitlines()):
34 for todo in re.findall(TASKS_PATTERN, text):
35 results.append((todo[-1].strip().capitalize(), line+1))
36 return results
37
38
39 def check_with_pyflakes(source_code, filename=None):
40 """Check source code with pyflakes
41 Returns an empty list if pyflakes is not installed"""
42 try:
43 if filename is None:
44 filename = '<string>'
45 try:
46 source_code += '\n'
47 except TypeError:
48 # Python 3
49 source_code += to_binary_string('\n')
50
51 import _ast
52 from pyflakes.checker import Checker
53 # First, compile into an AST and handle syntax errors.
54 try:
55 tree = compile(source_code, filename, "exec", _ast.PyCF_ONLY_AST)
56 except SyntaxError as value:
57 # If there's an encoding problem with the file, the text is None.
58 if value.text is None:
59 results = []
60 else:
61 results = [(value.args[0], value.lineno)]
62 except (ValueError, TypeError):
63 # Example of ValueError: file contains invalid \x escape character
64 # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=674797)
65 # Example of TypeError: file contains null character
66 # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=674796)
67 results = []
68 else:
69 # Okay, it's syntactically valid. Now check it.
70 w = Checker(tree, filename)
71 w.messages.sort(key=lambda x: x.lineno)
72 results = []
73 coding = encoding.get_coding(source_code)
74 lines = source_code.splitlines()
75 for warning in w.messages:
76 if 'analysis:ignore' not in \
77 to_text_string(lines[warning.lineno-1], coding):
78 results.append((warning.message % warning.message_args,
79 warning.lineno))
80 except Exception:
81 # Never return None to avoid lock in spyder/widgets/editor.py
82 # See Issue 1547
83 results = []
84 if DEBUG_EDITOR:
85 traceback.print_exc() # Print exception in internal console
86 return results
87
88 # Required version:
89 # Why 0.5 (Python2)? Because it's based on _ast (thread-safe)
90 PYFLAKES_REQVER = '>=0.6.0' if PY3 else '>=0.5.0'
91 dependencies.add("pyflakes", _("Real-time code analysis on the Editor"),
92 required_version=PYFLAKES_REQVER)
93
94 PYCODESTYLE_REQVER = '>=2.3'
95 dependencies.add("pycodestyle", _("Real-time code style analysis on the Editor"),
96 required_version=PYCODESTYLE_REQVER)
97
98
99 def is_pyflakes_installed():
100 """Return True if pyflakes required version is installed"""
101 return programs.is_module_installed('pyflakes', PYFLAKES_REQVER)
102
103
104 def get_checker_executable(name):
105 """Return checker executable in the form of a list of arguments
106 for subprocess.Popen"""
107 if programs.is_program_installed(name):
108 # Checker is properly installed
109 return [name]
110 else:
111 path1 = programs.python_script_exists(package=None,
112 module=name+'_script')
113 path2 = programs.python_script_exists(package=None, module=name)
114 if path1 is not None: # checker_script.py is available
115 # Checker script is available but has not been installed
116 # (this may work with pyflakes)
117 return [sys.executable, path1]
118 elif path2 is not None: # checker.py is available
119 # Checker package is available but its script has not been
120 # installed (this works with pycodestyle but not with pyflakes)
121 return [sys.executable, path2]
122
123
124 def check(args, source_code, filename=None, options=None):
125 """Check source code with checker defined with *args* (list)
126 Returns an empty list if checker is not installed"""
127 if args is None:
128 return []
129 if options is not None:
130 args += options
131 if any(['pyflakes' in arg for arg in args]):
132 # Pyflakes requires an ending new line (pycodestyle don't! -- see Issue 1123)
133 # Note: this code is not used right now as it is faster to invoke
134 # pyflakes in current Python interpreter (see `check_with_pyflakes`
135 # function above) than calling it through a subprocess
136 source_code += '\n'
137 if filename is None:
138 # Creating a temporary file because file does not exist yet
139 # or is not up-to-date
140 tempfd = tempfile.NamedTemporaryFile(suffix=".py", delete=False)
141 tempfd.write(source_code)
142 tempfd.close()
143 args.append(tempfd.name)
144 else:
145 args.append(filename)
146 cmd = args[0]
147 cmdargs = args[1:]
148 proc = programs.run_program(cmd, cmdargs)
149 output = proc.communicate()[0].strip().decode().splitlines()
150 if filename is None:
151 os.unlink(tempfd.name)
152 results = []
153 coding = encoding.get_coding(source_code)
154 lines = source_code.splitlines()
155 for line in output:
156 lineno = int(re.search(r'(\:[\d]+\:)', line).group()[1:-1])
157 try:
158 text = to_text_string(lines[lineno-1], coding)
159 except TypeError:
160 text = to_text_string(lines[lineno-1])
161 if 'analysis:ignore' not in text:
162 message = line[line.find(': ')+2:]
163 results.append((message, lineno))
164 return results
165
166
167 def check_with_pep8(source_code, filename=None):
168 """Check source code with pycodestyle"""
169 try:
170 args = get_checker_executable('pycodestyle')
171 results = check(args, source_code, filename=filename, options=['-r'])
172 except Exception:
173 # Never return None to avoid lock in spyder/widgets/editor.py
174 # See Issue 1547
175 results = []
176 if DEBUG_EDITOR:
177 traceback.print_exc() # Print exception in internal console
178 return results
179
180
181 if __name__ == '__main__':
182 # fname = __file__
183 fname = os.path.join(os.path.dirname(__file__),
184 os.pardir, os.pardir, 'bootstrap.py')
185 code = open(fname).read()
186 check_results = check_with_pyflakes(code, fname)+\
187 check_with_pep8(code, fname)+find_tasks(code)
188 # check_results = check_with_pep8(code, fname)
189 for message, line in check_results:
190 sys.stdout.write("Message: %s -- Line: %s\n" % (message, line))
191
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/spyder/utils/codeanalysis.py b/spyder/utils/codeanalysis.py
--- a/spyder/utils/codeanalysis.py
+++ b/spyder/utils/codeanalysis.py
@@ -24,7 +24,8 @@
#==============================================================================
# Pyflakes/pep8 code analysis
#==============================================================================
-TASKS_PATTERN = r"(^|#)[ ]*(TODO|FIXME|XXX|HINT|TIP|@todo)([^#]*)"
+TASKS_PATTERN = r"(^|#)[ ]*(TODO|FIXME|XXX|HINT|TIP|@todo|" \
+ r"HACK|BUG|OPTIMIZE|!!!|\?\?\?)([^#]*)"
#TODO: this is a test for the following function
def find_tasks(source_code):
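The patch above only widens the `TASKS_PATTERN` alternation, so the existing `find_tasks` helper also reports HACK, BUG, OPTIMIZE, `!!!` and `???` comments. A quick, self-contained check of the extended pattern (the sample source lines are invented for illustration):

```python
import re

# Extended pattern from the diff above, written as one concatenated raw string.
TASKS_PATTERN = (r"(^|#)[ ]*(TODO|FIXME|XXX|HINT|TIP|@todo|"
                 r"HACK|BUG|OPTIMIZE|!!!|\?\?\?)([^#]*)")


def find_tasks(source_code):
    """Find tasks in source code (TODO, FIXME, BUG, ...)."""
    results = []
    for line, text in enumerate(source_code.splitlines()):
        for todo in re.findall(TASKS_PATTERN, text):
            results.append((todo[-1].strip().capitalize(), line + 1))
    return results


sample = "x = 1  # BUG off by one\ny = 2  # OPTIMIZE the inner loop\n"
print(find_tasks(sample))  # [('Off by one', 1), ('The inner loop', 2)]
```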
| {"golden_diff": "diff --git a/spyder/utils/codeanalysis.py b/spyder/utils/codeanalysis.py\n--- a/spyder/utils/codeanalysis.py\n+++ b/spyder/utils/codeanalysis.py\n@@ -24,7 +24,8 @@\n #==============================================================================\r\n # Pyflakes/pep8 code analysis\r\n #==============================================================================\r\n-TASKS_PATTERN = r\"(^|#)[ ]*(TODO|FIXME|XXX|HINT|TIP|@todo)([^#]*)\"\r\n+TASKS_PATTERN = r\"(^|#)[ ]*(TODO|FIXME|XXX|HINT|TIP|@todo|\" \\\r\n+ r\"HACK|BUG|OPTIMIZE|!!!|\\?\\?\\?)([^#]*)\"\r\n \r\n #TODO: this is a test for the following function\r\n def find_tasks(source_code):\n", "issue": "Add more annotation keywords to the Editor (e.g. BUG, OPTIMIZE, etc)\nCode annotation analysis makes a blue check sign in the editor, which is a useful feature to find TODOs or FIXMEs.\r\n\r\nBut could it add more words? On the net, and more importantly in [pep8 index](https://www.python.org/dev/peps/pep-0350/#mnemonics), we can find many other tags, eg BUG, HACK, OPTIMIZE, etc.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\r\n#\r\n# Copyright \u00a9 Spyder Project Contributors\r\n# Licensed under the terms of the MIT License\r\n# (see spyder/__init__.py for details)\r\n\r\n\"\"\"\r\nSource code analysis utilities\r\n\"\"\"\r\n\r\nimport sys\r\nimport re\r\nimport os\r\nimport tempfile\r\nimport traceback\r\n\r\n# Local import\r\nfrom spyder.config.base import _, DEBUG\r\nfrom spyder.utils import programs, encoding\r\nfrom spyder.py3compat import to_text_string, to_binary_string, PY3\r\nfrom spyder import dependencies\r\nDEBUG_EDITOR = DEBUG >= 3\r\n\r\n#==============================================================================\r\n# Pyflakes/pep8 code analysis\r\n#==============================================================================\r\nTASKS_PATTERN = r\"(^|#)[ ]*(TODO|FIXME|XXX|HINT|TIP|@todo)([^#]*)\"\r\n\r\n#TODO: this is a test for the following function\r\ndef find_tasks(source_code):\r\n \"\"\"Find tasks in source code (TODO, FIXME, XXX, ...)\"\"\"\r\n results = []\r\n for line, text in enumerate(source_code.splitlines()):\r\n for todo in re.findall(TASKS_PATTERN, text):\r\n results.append((todo[-1].strip().capitalize(), line+1))\r\n return results\r\n\r\n\r\ndef check_with_pyflakes(source_code, filename=None):\r\n \"\"\"Check source code with pyflakes\r\n Returns an empty list if pyflakes is not installed\"\"\"\r\n try:\r\n if filename is None:\r\n filename = '<string>'\r\n try:\r\n source_code += '\\n'\r\n except TypeError:\r\n # Python 3\r\n source_code += to_binary_string('\\n')\r\n \r\n import _ast\r\n from pyflakes.checker import Checker\r\n # First, compile into an AST and handle syntax errors.\r\n try:\r\n tree = compile(source_code, filename, \"exec\", _ast.PyCF_ONLY_AST)\r\n except SyntaxError as value:\r\n # If there's an encoding problem with the file, the text is None.\r\n if value.text is None:\r\n results = []\r\n else:\r\n results = [(value.args[0], value.lineno)]\r\n except (ValueError, TypeError):\r\n # Example of ValueError: file contains invalid \\x escape character\r\n # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=674797)\r\n # Example of TypeError: file contains null character\r\n # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=674796)\r\n results = []\r\n else:\r\n # Okay, it's syntactically valid. 
Now check it.\r\n w = Checker(tree, filename)\r\n w.messages.sort(key=lambda x: x.lineno)\r\n results = []\r\n coding = encoding.get_coding(source_code)\r\n lines = source_code.splitlines()\r\n for warning in w.messages:\r\n if 'analysis:ignore' not in \\\r\n to_text_string(lines[warning.lineno-1], coding):\r\n results.append((warning.message % warning.message_args,\r\n warning.lineno))\r\n except Exception:\r\n # Never return None to avoid lock in spyder/widgets/editor.py\r\n # See Issue 1547\r\n results = []\r\n if DEBUG_EDITOR:\r\n traceback.print_exc() # Print exception in internal console\r\n return results\r\n\r\n# Required version:\r\n# Why 0.5 (Python2)? Because it's based on _ast (thread-safe)\r\nPYFLAKES_REQVER = '>=0.6.0' if PY3 else '>=0.5.0'\r\ndependencies.add(\"pyflakes\", _(\"Real-time code analysis on the Editor\"),\r\n required_version=PYFLAKES_REQVER)\r\n\r\nPYCODESTYLE_REQVER = '>=2.3'\r\ndependencies.add(\"pycodestyle\", _(\"Real-time code style analysis on the Editor\"),\r\n required_version=PYCODESTYLE_REQVER)\r\n\r\n\r\ndef is_pyflakes_installed():\r\n \"\"\"Return True if pyflakes required version is installed\"\"\"\r\n return programs.is_module_installed('pyflakes', PYFLAKES_REQVER)\r\n\r\n\r\ndef get_checker_executable(name):\r\n \"\"\"Return checker executable in the form of a list of arguments\r\n for subprocess.Popen\"\"\"\r\n if programs.is_program_installed(name):\r\n # Checker is properly installed\r\n return [name]\r\n else:\r\n path1 = programs.python_script_exists(package=None,\r\n module=name+'_script')\r\n path2 = programs.python_script_exists(package=None, module=name)\r\n if path1 is not None: # checker_script.py is available\r\n # Checker script is available but has not been installed\r\n # (this may work with pyflakes)\r\n return [sys.executable, path1]\r\n elif path2 is not None: # checker.py is available\r\n # Checker package is available but its script has not been\r\n # installed (this works with pycodestyle but not with pyflakes)\r\n return [sys.executable, path2]\r\n\r\n\r\ndef check(args, source_code, filename=None, options=None):\r\n \"\"\"Check source code with checker defined with *args* (list)\r\n Returns an empty list if checker is not installed\"\"\"\r\n if args is None:\r\n return []\r\n if options is not None:\r\n args += options\r\n if any(['pyflakes' in arg for arg in args]):\r\n # Pyflakes requires an ending new line (pycodestyle don't! 
-- see Issue 1123)\r\n # Note: this code is not used right now as it is faster to invoke \r\n # pyflakes in current Python interpreter (see `check_with_pyflakes` \r\n # function above) than calling it through a subprocess\r\n source_code += '\\n'\r\n if filename is None:\r\n # Creating a temporary file because file does not exist yet \r\n # or is not up-to-date\r\n tempfd = tempfile.NamedTemporaryFile(suffix=\".py\", delete=False)\r\n tempfd.write(source_code)\r\n tempfd.close()\r\n args.append(tempfd.name)\r\n else:\r\n args.append(filename)\r\n cmd = args[0]\r\n cmdargs = args[1:]\r\n proc = programs.run_program(cmd, cmdargs)\r\n output = proc.communicate()[0].strip().decode().splitlines()\r\n if filename is None:\r\n os.unlink(tempfd.name)\r\n results = []\r\n coding = encoding.get_coding(source_code)\r\n lines = source_code.splitlines()\r\n for line in output:\r\n lineno = int(re.search(r'(\\:[\\d]+\\:)', line).group()[1:-1])\r\n try:\r\n text = to_text_string(lines[lineno-1], coding)\r\n except TypeError:\r\n text = to_text_string(lines[lineno-1])\r\n if 'analysis:ignore' not in text:\r\n message = line[line.find(': ')+2:]\r\n results.append((message, lineno))\r\n return results\r\n\r\n\r\ndef check_with_pep8(source_code, filename=None):\r\n \"\"\"Check source code with pycodestyle\"\"\"\r\n try:\r\n args = get_checker_executable('pycodestyle')\r\n results = check(args, source_code, filename=filename, options=['-r'])\r\n except Exception:\r\n # Never return None to avoid lock in spyder/widgets/editor.py\r\n # See Issue 1547\r\n results = []\r\n if DEBUG_EDITOR:\r\n traceback.print_exc() # Print exception in internal console\r\n return results\r\n\r\n\r\nif __name__ == '__main__':\r\n# fname = __file__\r\n fname = os.path.join(os.path.dirname(__file__),\r\n os.pardir, os.pardir, 'bootstrap.py')\r\n code = open(fname).read()\r\n check_results = check_with_pyflakes(code, fname)+\\\r\n check_with_pep8(code, fname)+find_tasks(code)\r\n# check_results = check_with_pep8(code, fname)\r\n for message, line in check_results:\r\n sys.stdout.write(\"Message: %s -- Line: %s\\n\" % (message, line))\r\n", "path": "spyder/utils/codeanalysis.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\r\n#\r\n# Copyright \u00a9 Spyder Project Contributors\r\n# Licensed under the terms of the MIT License\r\n# (see spyder/__init__.py for details)\r\n\r\n\"\"\"\r\nSource code analysis utilities\r\n\"\"\"\r\n\r\nimport sys\r\nimport re\r\nimport os\r\nimport tempfile\r\nimport traceback\r\n\r\n# Local import\r\nfrom spyder.config.base import _, DEBUG\r\nfrom spyder.utils import programs, encoding\r\nfrom spyder.py3compat import to_text_string, to_binary_string, PY3\r\nfrom spyder import dependencies\r\nDEBUG_EDITOR = DEBUG >= 3\r\n\r\n#==============================================================================\r\n# Pyflakes/pep8 code analysis\r\n#==============================================================================\r\nTASKS_PATTERN = r\"(^|#)[ ]*(TODO|FIXME|XXX|HINT|TIP|@todo|\" \\\r\n r\"HACK|BUG|OPTIMIZE|!!!|\\?\\?\\?)([^#]*)\"\r\n\r\n#TODO: this is a test for the following function\r\ndef find_tasks(source_code):\r\n \"\"\"Find tasks in source code (TODO, FIXME, XXX, ...)\"\"\"\r\n results = []\r\n for line, text in enumerate(source_code.splitlines()):\r\n for todo in re.findall(TASKS_PATTERN, text):\r\n results.append((todo[-1].strip().capitalize(), line+1))\r\n return results\r\n\r\n\r\ndef check_with_pyflakes(source_code, filename=None):\r\n \"\"\"Check source code with pyflakes\r\n 
Returns an empty list if pyflakes is not installed\"\"\"\r\n try:\r\n if filename is None:\r\n filename = '<string>'\r\n try:\r\n source_code += '\\n'\r\n except TypeError:\r\n # Python 3\r\n source_code += to_binary_string('\\n')\r\n \r\n import _ast\r\n from pyflakes.checker import Checker\r\n # First, compile into an AST and handle syntax errors.\r\n try:\r\n tree = compile(source_code, filename, \"exec\", _ast.PyCF_ONLY_AST)\r\n except SyntaxError as value:\r\n # If there's an encoding problem with the file, the text is None.\r\n if value.text is None:\r\n results = []\r\n else:\r\n results = [(value.args[0], value.lineno)]\r\n except (ValueError, TypeError):\r\n # Example of ValueError: file contains invalid \\x escape character\r\n # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=674797)\r\n # Example of TypeError: file contains null character\r\n # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=674796)\r\n results = []\r\n else:\r\n # Okay, it's syntactically valid. Now check it.\r\n w = Checker(tree, filename)\r\n w.messages.sort(key=lambda x: x.lineno)\r\n results = []\r\n coding = encoding.get_coding(source_code)\r\n lines = source_code.splitlines()\r\n for warning in w.messages:\r\n if 'analysis:ignore' not in \\\r\n to_text_string(lines[warning.lineno-1], coding):\r\n results.append((warning.message % warning.message_args,\r\n warning.lineno))\r\n except Exception:\r\n # Never return None to avoid lock in spyder/widgets/editor.py\r\n # See Issue 1547\r\n results = []\r\n if DEBUG_EDITOR:\r\n traceback.print_exc() # Print exception in internal console\r\n return results\r\n\r\n# Required version:\r\n# Why 0.5 (Python2)? Because it's based on _ast (thread-safe)\r\nPYFLAKES_REQVER = '>=0.6.0' if PY3 else '>=0.5.0'\r\ndependencies.add(\"pyflakes\", _(\"Real-time code analysis on the Editor\"),\r\n required_version=PYFLAKES_REQVER)\r\n\r\nPYCODESTYLE_REQVER = '>=2.3'\r\ndependencies.add(\"pycodestyle\", _(\"Real-time code style analysis on the Editor\"),\r\n required_version=PYCODESTYLE_REQVER)\r\n\r\n\r\ndef is_pyflakes_installed():\r\n \"\"\"Return True if pyflakes required version is installed\"\"\"\r\n return programs.is_module_installed('pyflakes', PYFLAKES_REQVER)\r\n\r\n\r\ndef get_checker_executable(name):\r\n \"\"\"Return checker executable in the form of a list of arguments\r\n for subprocess.Popen\"\"\"\r\n if programs.is_program_installed(name):\r\n # Checker is properly installed\r\n return [name]\r\n else:\r\n path1 = programs.python_script_exists(package=None,\r\n module=name+'_script')\r\n path2 = programs.python_script_exists(package=None, module=name)\r\n if path1 is not None: # checker_script.py is available\r\n # Checker script is available but has not been installed\r\n # (this may work with pyflakes)\r\n return [sys.executable, path1]\r\n elif path2 is not None: # checker.py is available\r\n # Checker package is available but its script has not been\r\n # installed (this works with pycodestyle but not with pyflakes)\r\n return [sys.executable, path2]\r\n\r\n\r\ndef check(args, source_code, filename=None, options=None):\r\n \"\"\"Check source code with checker defined with *args* (list)\r\n Returns an empty list if checker is not installed\"\"\"\r\n if args is None:\r\n return []\r\n if options is not None:\r\n args += options\r\n if any(['pyflakes' in arg for arg in args]):\r\n # Pyflakes requires an ending new line (pycodestyle don't! 
-- see Issue 1123)\r\n # Note: this code is not used right now as it is faster to invoke \r\n # pyflakes in current Python interpreter (see `check_with_pyflakes` \r\n # function above) than calling it through a subprocess\r\n source_code += '\\n'\r\n if filename is None:\r\n # Creating a temporary file because file does not exist yet \r\n # or is not up-to-date\r\n tempfd = tempfile.NamedTemporaryFile(suffix=\".py\", delete=False)\r\n tempfd.write(source_code)\r\n tempfd.close()\r\n args.append(tempfd.name)\r\n else:\r\n args.append(filename)\r\n cmd = args[0]\r\n cmdargs = args[1:]\r\n proc = programs.run_program(cmd, cmdargs)\r\n output = proc.communicate()[0].strip().decode().splitlines()\r\n if filename is None:\r\n os.unlink(tempfd.name)\r\n results = []\r\n coding = encoding.get_coding(source_code)\r\n lines = source_code.splitlines()\r\n for line in output:\r\n lineno = int(re.search(r'(\\:[\\d]+\\:)', line).group()[1:-1])\r\n try:\r\n text = to_text_string(lines[lineno-1], coding)\r\n except TypeError:\r\n text = to_text_string(lines[lineno-1])\r\n if 'analysis:ignore' not in text:\r\n message = line[line.find(': ')+2:]\r\n results.append((message, lineno))\r\n return results\r\n\r\n\r\ndef check_with_pep8(source_code, filename=None):\r\n \"\"\"Check source code with pycodestyle\"\"\"\r\n try:\r\n args = get_checker_executable('pycodestyle')\r\n results = check(args, source_code, filename=filename, options=['-r'])\r\n except Exception:\r\n # Never return None to avoid lock in spyder/widgets/editor.py\r\n # See Issue 1547\r\n results = []\r\n if DEBUG_EDITOR:\r\n traceback.print_exc() # Print exception in internal console\r\n return results\r\n\r\n\r\nif __name__ == '__main__':\r\n# fname = __file__\r\n fname = os.path.join(os.path.dirname(__file__),\r\n os.pardir, os.pardir, 'bootstrap.py')\r\n code = open(fname).read()\r\n check_results = check_with_pyflakes(code, fname)+\\\r\n check_with_pep8(code, fname)+find_tasks(code)\r\n# check_results = check_with_pep8(code, fname)\r\n for message, line in check_results:\r\n sys.stdout.write(\"Message: %s -- Line: %s\\n\" % (message, line))\r\n", "path": "spyder/utils/codeanalysis.py"}]} | 2,511 | 170 |
gh_patches_debug_23409 | rasdani/github-patches | git_diff | medtagger__MedTagger-467 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enable E2E tests automation on Travis CI
## Current Behavior
We've got E2E tests but they can only be run on a local setup.
## Expected Behavior
E2E tests should run after each commit merged to master and on a schedule (e.g. each day at midnight).
## Tasks to do
- [ ] Prepare separate Docker-Compose configuration for E2E Tests
- [ ] Automate setup and tests in similar fashion that it is currently done in Backend functional tests
- [ ] Add E2E Tests to Travis CI configuration
- [ ] Update documentation for testing
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/scripts/sync_configuration.py`
Content:
```
1 """Script for MedTagger's configuration synchronization."""
2 import logging.config
3 from typing import Dict
4
5 import yaml
6 from sqlalchemy.exc import IntegrityError
7
8 from medtagger.database.models import Task
9 from medtagger.definitions import LabelTool
10 from medtagger.repositories import (
11 datasets as DatasetsRepository,
12 tasks as TasksRepository,
13 label_tags as LabelTagsRepository,
14 )
15 from medtagger.types import TaskID
16
17 logging.config.fileConfig('logging.conf')
18 logger = logging.getLogger(__name__)
19
20 CONFIGURATION_FILE_NAME = '.medtagger.yml'
21
22
23 def sync_configuration(configuration: Dict) -> None:
24 """Synchronize configuration file with database entries.
25
26 :param configuration: content of YAML configuration file
27 """
28 logger.info('Running Configuration Synchronization...')
29 _sync_datasets(configuration)
30 _sync_tasks(configuration)
31
32
33 def _sync_datasets(configuration: Dict) -> None:
34 """Synchronize Datasets from configuration file with database entries.
35
36 Example DataSets in the configuration file:
37 ```
38 datasets:
39 - name: Kidneys
40 key: KIDNEYS
41 tasks:
42 - KIDNEYS_SEGMENTATION
43 ```
44
45 :param configuration: content of YAML configuration file
46 """
47 datasets = configuration.get('datasets', []) or []
48 configuration_datasets_keys = {dataset['key'] for dataset in datasets}
49 database_datasets_keys = {dataset.key for dataset in DatasetsRepository.get_all_datasets(include_disabled=True)}
50
51 datasets_to_add = configuration_datasets_keys - database_datasets_keys
52 datasets_to_disable = database_datasets_keys - configuration_datasets_keys
53 datasets_to_enable = database_datasets_keys & configuration_datasets_keys
54
55 for dataset_key in datasets_to_add:
56 dataset = next(dataset for dataset in datasets if dataset['key'] == dataset_key)
57 DatasetsRepository.add_new_dataset(dataset['key'], dataset['name'])
58 logger.info('New DataSet added: %s', dataset['key'])
59
60 for dataset_key in datasets_to_enable:
61 dataset = next(dataset for dataset in datasets if dataset['key'] == dataset_key)
62 DatasetsRepository.enable(dataset['key'])
63 DatasetsRepository.update(dataset['key'], dataset['name'])
64 logger.info('DataSet enabled: %s', dataset['key'])
65
66 for dataset_key in datasets_to_disable:
67 DatasetsRepository.disable(dataset_key)
68 logger.info('DataSet disabled: %s', dataset_key)
69
70
71 def _sync_tasks(configuration: Dict) -> None:
72 """Synchronize Tasks from configuration file with database entries.
73
74 Example Tasks in the configuration file:
75 ```
76 tasks:
77 - key: KIDNEYS_SEGMENTATION
78 name: Kidneys segmentation
79 image_path: assets/icon/kidneys_dataset_icon.svg
80 tags:
81 - key: LEFT_KIDNEY
82 name: Left Kidney
83 tools:
84 - CHAIN
85 - key: RIGHT_KIDNEY
86 name: Right Kidney
87 tools:
88 - CHAIN
89 ```
90
91 :param configuration: content of YAML configuration file
92 """
93 datasets = configuration.get('datasets', []) or []
94 tasks = configuration.get('tasks', []) or []
95 configuration_tasks_keys = {task['key'] for task in tasks}
96 database_tasks_keys = {task.key for task in Task.query.all()}
97
98 tasks_to_add = configuration_tasks_keys - database_tasks_keys
99 tasks_to_disable = database_tasks_keys - configuration_tasks_keys
100 tasks_to_enable = database_tasks_keys & configuration_tasks_keys
101
102 # Add all new Tasks that haven't ever been in the DB
103 for task_key in tasks_to_add:
104 task = next(task for task in tasks if task['key'] == task_key)
105 datasets_keys = [dataset['key'] for dataset in datasets if task['key'] in dataset['tasks']]
106 TasksRepository.add_task(task['key'], task['name'], task['image_path'], datasets_keys, [])
107 _sync_label_tags_in_task(configuration, task_key)
108 logger.info('New Task added: %s', task['key'])
109
110 # Enable back all Tasks that were previously commented-out or removed from configuration file
111 for task_key in tasks_to_enable:
112 TasksRepository.enable(task_key)
113 _sync_label_tags_in_task(configuration, task_key)
114 task = next(task for task in tasks if task['key'] == task_key)
115 datasets_keys = [dataset['key'] for dataset in datasets if task['key'] in dataset['tasks']]
116 TasksRepository.update(task_key, task['name'], task['image_path'], datasets_keys)
117 logger.info('Task enabled: %s', task_key)
118
119 # Disable all Tasks that exists in the DB but are missing in configuration file
120 for task_key in tasks_to_disable:
121 TasksRepository.disable(task_key)
122 task = TasksRepository.get_task_by_key(task_key)
123 for tag in task.available_tags:
124 LabelTagsRepository.disable(tag.key)
125 logger.info('LabelTag disabled: %s', tag.key)
126 logger.info('Task disabled: %s', task_key)
127
128
129 def _sync_label_tags_in_task(configuration: Dict, task_key: str) -> None:
130 """Synchronize Label Tags in given Task based on configuration file and database entries.
131
132 :param configuration: content of YAML configuration file
133 :param task_key: key for the Task that should be synchronized
134 """
135 db_task = TasksRepository.get_task_by_key(task_key)
136 configuration_task = next(task for task in configuration['tasks'] if task['key'] == task_key)
137 configuration_tags_keys = {tag['key'] for tag in configuration_task['tags']}
138 database_tags_keys = {tag.key for tag in db_task.available_tags}
139
140 tags_to_add = configuration_tags_keys - database_tags_keys
141 tags_to_disable = database_tags_keys - configuration_tags_keys
142 tags_to_enable = database_tags_keys & configuration_tags_keys
143
144 for tag_key in tags_to_add:
145 tag = next(tag for tag in configuration_task['tags'] if tag_key == tag['key'])
146 _add_label_tag(tag, db_task.id)
147 logger.info('New LabelTag added: %s', tag_key)
148
149 for tag_key in tags_to_disable:
150 LabelTagsRepository.disable(tag_key)
151 logger.info('LabelTag disabled: %s', tag_key)
152
153 for tag_key in tags_to_enable:
154 tag = next(tag for tag in configuration_task['tags'] if tag_key == tag['key'])
155 tools = [LabelTool[tool] for tool in tag['tools']]
156 LabelTagsRepository.enable(tag_key)
157 LabelTagsRepository.update(tag_key, tag['name'], tools)
158 logger.info('LabelTag enabled: %s', tag_key)
159
160
161 def _add_label_tag(tag: Dict, db_task_id: TaskID) -> None:
162 """Add Label Tag or reuse previously created one.
163
164 :param tag: configuration of a Label Tag
165 :param db_task_id: TaskID which should be connected with this Label Tag
166 """
167 tools = [LabelTool[tool] for tool in tag['tools']]
168 try:
169 LabelTagsRepository.add_new_tag(tag['key'], tag['name'], tools, db_task_id)
170 except IntegrityError:
171 # Such Label Tag could be previously used in another Task
172 logger.warning('Reusing previously existing Label Tag (%s)! This may cause data inconsistency! '
173 'Make sure you know what you are doing and clear database entries if necessary!',
174 tag['key'])
175 LabelTagsRepository.update(tag['key'], tag['name'], tools, db_task_id)
176 LabelTagsRepository.enable(tag['key'])
177
178
179 def run() -> None:
180 """Entry point for this script."""
181 try:
182 with open(CONFIGURATION_FILE_NAME) as config_file:
183 configuration = yaml.load(config_file)
184 sync_configuration(configuration)
185 except yaml.YAMLError:
186 logger.exception('Invalid MedTagger configuration file format.')
187
188
189 if __name__ == '__main__':
190 run()
191
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/scripts/sync_configuration.py b/backend/scripts/sync_configuration.py
--- a/backend/scripts/sync_configuration.py
+++ b/backend/scripts/sync_configuration.py
@@ -1,4 +1,5 @@
"""Script for MedTagger's configuration synchronization."""
+import argparse
import logging.config
from typing import Dict
@@ -17,7 +18,7 @@
logging.config.fileConfig('logging.conf')
logger = logging.getLogger(__name__)
-CONFIGURATION_FILE_NAME = '.medtagger.yml'
+DEFAULT_CONFIGURATION_FILE_NAME = '.medtagger.yml'
def sync_configuration(configuration: Dict) -> None:
@@ -178,8 +179,12 @@
def run() -> None:
"""Entry point for this script."""
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--configuration', type=str, default=DEFAULT_CONFIGURATION_FILE_NAME)
+ arguments = parser.parse_args()
+
try:
- with open(CONFIGURATION_FILE_NAME) as config_file:
+ with open(arguments.configuration) as config_file:
configuration = yaml.load(config_file)
sync_configuration(configuration)
except yaml.YAMLError:
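The patch above lets the synchronization script take an explicit `--configuration` path instead of always reading `.medtagger.yml`, which is what a separate Docker-Compose/E2E setup needs. A trimmed sketch of the same argparse pattern (the E2E file name is only an example, and `yaml.safe_load` stands in for the original `yaml.load` call):

```python
import argparse

import yaml

DEFAULT_CONFIGURATION_FILE_NAME = '.medtagger.yml'


def run() -> None:
    """Entry point: read the YAML file given on the command line."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--configuration', type=str, default=DEFAULT_CONFIGURATION_FILE_NAME)
    arguments = parser.parse_args()

    with open(arguments.configuration) as config_file:
        configuration = yaml.safe_load(config_file)
    print(configuration)  # stand-in for sync_configuration(configuration)


if __name__ == '__main__':
    run()  # e.g. python sync_configuration.py --configuration .medtagger.e2e.yml
```

A CI job can then point the script at a test-only YAML file without touching the default configuration used for development.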
| {"golden_diff": "diff --git a/backend/scripts/sync_configuration.py b/backend/scripts/sync_configuration.py\n--- a/backend/scripts/sync_configuration.py\n+++ b/backend/scripts/sync_configuration.py\n@@ -1,4 +1,5 @@\n \"\"\"Script for MedTagger's configuration synchronization.\"\"\"\n+import argparse\n import logging.config\n from typing import Dict\n \n@@ -17,7 +18,7 @@\n logging.config.fileConfig('logging.conf')\n logger = logging.getLogger(__name__)\n \n-CONFIGURATION_FILE_NAME = '.medtagger.yml'\n+DEFAULT_CONFIGURATION_FILE_NAME = '.medtagger.yml'\n \n \n def sync_configuration(configuration: Dict) -> None:\n@@ -178,8 +179,12 @@\n \n def run() -> None:\n \"\"\"Entry point for this script.\"\"\"\n+ parser = argparse.ArgumentParser()\n+ parser.add_argument('--configuration', type=str, default=DEFAULT_CONFIGURATION_FILE_NAME)\n+ arguments = parser.parse_args()\n+\n try:\n- with open(CONFIGURATION_FILE_NAME) as config_file:\n+ with open(arguments.configuration) as config_file:\n configuration = yaml.load(config_file)\n sync_configuration(configuration)\n except yaml.YAMLError:\n", "issue": "Enable E2E tests automation on Travis CI\n## Current Behavior\r\n\r\nWe've got E2E tests but they can only be run on a local setup.\r\n\r\n## Expected Behavior\r\n\r\nE2E tests should run after each commit merge to master and on schedule (eg. each day at midnight).\r\n\r\n## Tasks to do\r\n\r\n- [ ] Prepare separate Docker-Compose configuration for E2E Tests\r\n- [ ] Automate setup and tests in similar fashion that it is currently done in Backend functional tests\r\n- [ ] Add E2E Tests to Travis CI configuration\r\n- [ ] Update documentation for testing\r\n\n", "before_files": [{"content": "\"\"\"Script for MedTagger's configuration synchronization.\"\"\"\nimport logging.config\nfrom typing import Dict\n\nimport yaml\nfrom sqlalchemy.exc import IntegrityError\n\nfrom medtagger.database.models import Task\nfrom medtagger.definitions import LabelTool\nfrom medtagger.repositories import (\n datasets as DatasetsRepository,\n tasks as TasksRepository,\n label_tags as LabelTagsRepository,\n)\nfrom medtagger.types import TaskID\n\nlogging.config.fileConfig('logging.conf')\nlogger = logging.getLogger(__name__)\n\nCONFIGURATION_FILE_NAME = '.medtagger.yml'\n\n\ndef sync_configuration(configuration: Dict) -> None:\n \"\"\"Synchronize configuration file with database entries.\n\n :param configuration: content of YAML configuration file\n \"\"\"\n logger.info('Running Configuration Synchronization...')\n _sync_datasets(configuration)\n _sync_tasks(configuration)\n\n\ndef _sync_datasets(configuration: Dict) -> None:\n \"\"\"Synchronize Datasets from configuration file with database entries.\n\n Example DataSets in the configuration file:\n ```\n datasets:\n - name: Kidneys\n key: KIDNEYS\n tasks:\n - KIDNEYS_SEGMENTATION\n ```\n\n :param configuration: content of YAML configuration file\n \"\"\"\n datasets = configuration.get('datasets', []) or []\n configuration_datasets_keys = {dataset['key'] for dataset in datasets}\n database_datasets_keys = {dataset.key for dataset in DatasetsRepository.get_all_datasets(include_disabled=True)}\n\n datasets_to_add = configuration_datasets_keys - database_datasets_keys\n datasets_to_disable = database_datasets_keys - configuration_datasets_keys\n datasets_to_enable = database_datasets_keys & configuration_datasets_keys\n\n for dataset_key in datasets_to_add:\n dataset = next(dataset for dataset in datasets if dataset['key'] == dataset_key)\n 
DatasetsRepository.add_new_dataset(dataset['key'], dataset['name'])\n logger.info('New DataSet added: %s', dataset['key'])\n\n for dataset_key in datasets_to_enable:\n dataset = next(dataset for dataset in datasets if dataset['key'] == dataset_key)\n DatasetsRepository.enable(dataset['key'])\n DatasetsRepository.update(dataset['key'], dataset['name'])\n logger.info('DataSet enabled: %s', dataset['key'])\n\n for dataset_key in datasets_to_disable:\n DatasetsRepository.disable(dataset_key)\n logger.info('DataSet disabled: %s', dataset_key)\n\n\ndef _sync_tasks(configuration: Dict) -> None:\n \"\"\"Synchronize Tasks from configuration file with database entries.\n\n Example Tasks in the configuration file:\n ```\n tasks:\n - key: KIDNEYS_SEGMENTATION\n name: Kidneys segmentation\n image_path: assets/icon/kidneys_dataset_icon.svg\n tags:\n - key: LEFT_KIDNEY\n name: Left Kidney\n tools:\n - CHAIN\n - key: RIGHT_KIDNEY\n name: Right Kidney\n tools:\n - CHAIN\n ```\n\n :param configuration: content of YAML configuration file\n \"\"\"\n datasets = configuration.get('datasets', []) or []\n tasks = configuration.get('tasks', []) or []\n configuration_tasks_keys = {task['key'] for task in tasks}\n database_tasks_keys = {task.key for task in Task.query.all()}\n\n tasks_to_add = configuration_tasks_keys - database_tasks_keys\n tasks_to_disable = database_tasks_keys - configuration_tasks_keys\n tasks_to_enable = database_tasks_keys & configuration_tasks_keys\n\n # Add all new Tasks that haven't ever been in the DB\n for task_key in tasks_to_add:\n task = next(task for task in tasks if task['key'] == task_key)\n datasets_keys = [dataset['key'] for dataset in datasets if task['key'] in dataset['tasks']]\n TasksRepository.add_task(task['key'], task['name'], task['image_path'], datasets_keys, [])\n _sync_label_tags_in_task(configuration, task_key)\n logger.info('New Task added: %s', task['key'])\n\n # Enable back all Tasks that were previously commented-out or removed from configuration file\n for task_key in tasks_to_enable:\n TasksRepository.enable(task_key)\n _sync_label_tags_in_task(configuration, task_key)\n task = next(task for task in tasks if task['key'] == task_key)\n datasets_keys = [dataset['key'] for dataset in datasets if task['key'] in dataset['tasks']]\n TasksRepository.update(task_key, task['name'], task['image_path'], datasets_keys)\n logger.info('Task enabled: %s', task_key)\n\n # Disable all Tasks that exists in the DB but are missing in configuration file\n for task_key in tasks_to_disable:\n TasksRepository.disable(task_key)\n task = TasksRepository.get_task_by_key(task_key)\n for tag in task.available_tags:\n LabelTagsRepository.disable(tag.key)\n logger.info('LabelTag disabled: %s', tag.key)\n logger.info('Task disabled: %s', task_key)\n\n\ndef _sync_label_tags_in_task(configuration: Dict, task_key: str) -> None:\n \"\"\"Synchronize Label Tags in given Task based on configuration file and database entries.\n\n :param configuration: content of YAML configuration file\n :param task_key: key for the Task that should be synchronized\n \"\"\"\n db_task = TasksRepository.get_task_by_key(task_key)\n configuration_task = next(task for task in configuration['tasks'] if task['key'] == task_key)\n configuration_tags_keys = {tag['key'] for tag in configuration_task['tags']}\n database_tags_keys = {tag.key for tag in db_task.available_tags}\n\n tags_to_add = configuration_tags_keys - database_tags_keys\n tags_to_disable = database_tags_keys - configuration_tags_keys\n tags_to_enable = 
database_tags_keys & configuration_tags_keys\n\n for tag_key in tags_to_add:\n tag = next(tag for tag in configuration_task['tags'] if tag_key == tag['key'])\n _add_label_tag(tag, db_task.id)\n logger.info('New LabelTag added: %s', tag_key)\n\n for tag_key in tags_to_disable:\n LabelTagsRepository.disable(tag_key)\n logger.info('LabelTag disabled: %s', tag_key)\n\n for tag_key in tags_to_enable:\n tag = next(tag for tag in configuration_task['tags'] if tag_key == tag['key'])\n tools = [LabelTool[tool] for tool in tag['tools']]\n LabelTagsRepository.enable(tag_key)\n LabelTagsRepository.update(tag_key, tag['name'], tools)\n logger.info('LabelTag enabled: %s', tag_key)\n\n\ndef _add_label_tag(tag: Dict, db_task_id: TaskID) -> None:\n \"\"\"Add Label Tag or reuse previously created one.\n\n :param tag: configuration of a Label Tag\n :param db_task_id: TaskID which should be connected with this Label Tag\n \"\"\"\n tools = [LabelTool[tool] for tool in tag['tools']]\n try:\n LabelTagsRepository.add_new_tag(tag['key'], tag['name'], tools, db_task_id)\n except IntegrityError:\n # Such Label Tag could be previously used in another Task\n logger.warning('Reusing previously existing Label Tag (%s)! This may cause data inconsistency! '\n 'Make sure you know what you are doing and clear database entries if necessary!',\n tag['key'])\n LabelTagsRepository.update(tag['key'], tag['name'], tools, db_task_id)\n LabelTagsRepository.enable(tag['key'])\n\n\ndef run() -> None:\n \"\"\"Entry point for this script.\"\"\"\n try:\n with open(CONFIGURATION_FILE_NAME) as config_file:\n configuration = yaml.load(config_file)\n sync_configuration(configuration)\n except yaml.YAMLError:\n logger.exception('Invalid MedTagger configuration file format.')\n\n\nif __name__ == '__main__':\n run()\n", "path": "backend/scripts/sync_configuration.py"}], "after_files": [{"content": "\"\"\"Script for MedTagger's configuration synchronization.\"\"\"\nimport argparse\nimport logging.config\nfrom typing import Dict\n\nimport yaml\nfrom sqlalchemy.exc import IntegrityError\n\nfrom medtagger.database.models import Task\nfrom medtagger.definitions import LabelTool\nfrom medtagger.repositories import (\n datasets as DatasetsRepository,\n tasks as TasksRepository,\n label_tags as LabelTagsRepository,\n)\nfrom medtagger.types import TaskID\n\nlogging.config.fileConfig('logging.conf')\nlogger = logging.getLogger(__name__)\n\nDEFAULT_CONFIGURATION_FILE_NAME = '.medtagger.yml'\n\n\ndef sync_configuration(configuration: Dict) -> None:\n \"\"\"Synchronize configuration file with database entries.\n\n :param configuration: content of YAML configuration file\n \"\"\"\n logger.info('Running Configuration Synchronization...')\n _sync_datasets(configuration)\n _sync_tasks(configuration)\n\n\ndef _sync_datasets(configuration: Dict) -> None:\n \"\"\"Synchronize Datasets from configuration file with database entries.\n\n Example DataSets in the configuration file:\n ```\n datasets:\n - name: Kidneys\n key: KIDNEYS\n tasks:\n - KIDNEYS_SEGMENTATION\n ```\n\n :param configuration: content of YAML configuration file\n \"\"\"\n datasets = configuration.get('datasets', []) or []\n configuration_datasets_keys = {dataset['key'] for dataset in datasets}\n database_datasets_keys = {dataset.key for dataset in DatasetsRepository.get_all_datasets(include_disabled=True)}\n\n datasets_to_add = configuration_datasets_keys - database_datasets_keys\n datasets_to_disable = database_datasets_keys - configuration_datasets_keys\n datasets_to_enable = 
database_datasets_keys & configuration_datasets_keys\n\n for dataset_key in datasets_to_add:\n dataset = next(dataset for dataset in datasets if dataset['key'] == dataset_key)\n DatasetsRepository.add_new_dataset(dataset['key'], dataset['name'])\n logger.info('New DataSet added: %s', dataset['key'])\n\n for dataset_key in datasets_to_enable:\n dataset = next(dataset for dataset in datasets if dataset['key'] == dataset_key)\n DatasetsRepository.enable(dataset['key'])\n DatasetsRepository.update(dataset['key'], dataset['name'])\n logger.info('DataSet enabled: %s', dataset['key'])\n\n for dataset_key in datasets_to_disable:\n DatasetsRepository.disable(dataset_key)\n logger.info('DataSet disabled: %s', dataset_key)\n\n\ndef _sync_tasks(configuration: Dict) -> None:\n \"\"\"Synchronize Tasks from configuration file with database entries.\n\n Example Tasks in the configuration file:\n ```\n tasks:\n - key: KIDNEYS_SEGMENTATION\n name: Kidneys segmentation\n image_path: assets/icon/kidneys_dataset_icon.svg\n tags:\n - key: LEFT_KIDNEY\n name: Left Kidney\n tools:\n - CHAIN\n - key: RIGHT_KIDNEY\n name: Right Kidney\n tools:\n - CHAIN\n ```\n\n :param configuration: content of YAML configuration file\n \"\"\"\n datasets = configuration.get('datasets', []) or []\n tasks = configuration.get('tasks', []) or []\n configuration_tasks_keys = {task['key'] for task in tasks}\n database_tasks_keys = {task.key for task in Task.query.all()}\n\n tasks_to_add = configuration_tasks_keys - database_tasks_keys\n tasks_to_disable = database_tasks_keys - configuration_tasks_keys\n tasks_to_enable = database_tasks_keys & configuration_tasks_keys\n\n # Add all new Tasks that haven't ever been in the DB\n for task_key in tasks_to_add:\n task = next(task for task in tasks if task['key'] == task_key)\n datasets_keys = [dataset['key'] for dataset in datasets if task['key'] in dataset['tasks']]\n TasksRepository.add_task(task['key'], task['name'], task['image_path'], datasets_keys, [])\n _sync_label_tags_in_task(configuration, task_key)\n logger.info('New Task added: %s', task['key'])\n\n # Enable back all Tasks that were previously commented-out or removed from configuration file\n for task_key in tasks_to_enable:\n TasksRepository.enable(task_key)\n _sync_label_tags_in_task(configuration, task_key)\n task = next(task for task in tasks if task['key'] == task_key)\n datasets_keys = [dataset['key'] for dataset in datasets if task['key'] in dataset['tasks']]\n TasksRepository.update(task_key, task['name'], task['image_path'], datasets_keys)\n logger.info('Task enabled: %s', task_key)\n\n # Disable all Tasks that exists in the DB but are missing in configuration file\n for task_key in tasks_to_disable:\n TasksRepository.disable(task_key)\n task = TasksRepository.get_task_by_key(task_key)\n for tag in task.available_tags:\n LabelTagsRepository.disable(tag.key)\n logger.info('LabelTag disabled: %s', tag.key)\n logger.info('Task disabled: %s', task_key)\n\n\ndef _sync_label_tags_in_task(configuration: Dict, task_key: str) -> None:\n \"\"\"Synchronize Label Tags in given Task based on configuration file and database entries.\n\n :param configuration: content of YAML configuration file\n :param task_key: key for the Task that should be synchronized\n \"\"\"\n db_task = TasksRepository.get_task_by_key(task_key)\n configuration_task = next(task for task in configuration['tasks'] if task['key'] == task_key)\n configuration_tags_keys = {tag['key'] for tag in configuration_task['tags']}\n database_tags_keys = {tag.key for tag in 
db_task.available_tags}\n\n tags_to_add = configuration_tags_keys - database_tags_keys\n tags_to_disable = database_tags_keys - configuration_tags_keys\n tags_to_enable = database_tags_keys & configuration_tags_keys\n\n for tag_key in tags_to_add:\n tag = next(tag for tag in configuration_task['tags'] if tag_key == tag['key'])\n _add_label_tag(tag, db_task.id)\n logger.info('New LabelTag added: %s', tag_key)\n\n for tag_key in tags_to_disable:\n LabelTagsRepository.disable(tag_key)\n logger.info('LabelTag disabled: %s', tag_key)\n\n for tag_key in tags_to_enable:\n tag = next(tag for tag in configuration_task['tags'] if tag_key == tag['key'])\n tools = [LabelTool[tool] for tool in tag['tools']]\n LabelTagsRepository.enable(tag_key)\n LabelTagsRepository.update(tag_key, tag['name'], tools)\n logger.info('LabelTag enabled: %s', tag_key)\n\n\ndef _add_label_tag(tag: Dict, db_task_id: TaskID) -> None:\n \"\"\"Add Label Tag or reuse previously created one.\n\n :param tag: configuration of a Label Tag\n :param db_task_id: TaskID which should be connected with this Label Tag\n \"\"\"\n tools = [LabelTool[tool] for tool in tag['tools']]\n try:\n LabelTagsRepository.add_new_tag(tag['key'], tag['name'], tools, db_task_id)\n except IntegrityError:\n # Such Label Tag could be previously used in another Task\n logger.warning('Reusing previously existing Label Tag (%s)! This may cause data inconsistency! '\n 'Make sure you know what you are doing and clear database entries if necessary!',\n tag['key'])\n LabelTagsRepository.update(tag['key'], tag['name'], tools, db_task_id)\n LabelTagsRepository.enable(tag['key'])\n\n\ndef run() -> None:\n \"\"\"Entry point for this script.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--configuration', type=str, default=DEFAULT_CONFIGURATION_FILE_NAME)\n arguments = parser.parse_args()\n\n try:\n with open(arguments.configuration) as config_file:\n configuration = yaml.load(config_file)\n sync_configuration(configuration)\n except yaml.YAMLError:\n logger.exception('Invalid MedTagger configuration file format.')\n\n\nif __name__ == '__main__':\n run()\n", "path": "backend/scripts/sync_configuration.py"}]} | 2,513 | 243 |
gh_patches_debug_24807 | rasdani/github-patches | git_diff | akvo__akvo-rsr-1928 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Project list widget throws an internal server error
## Test plan
GIVEN the 'Grab a widget' page
WHEN a project has no partners
THEN the project list and project map options should not be available
WHEN a project has partners
THEN all widgets should be available and not display an error
## Issue description
The project list widget gives an internal server error, and so does the map widget: http://sentry.support.akvo-ops.org/rsr/live/group/727/. This happens because these projects have no reporting organisation, while the map and project listing widgets expect an organisation as their input.
The reporting organisation here should be changed to the project's primary organisation, and we should create a 404 exception when the project has no partners at all, just in case.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rsr/views/widgets.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Akvo RSR is covered by the GNU Affero General Public License.
3
4 See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6 """
7
8 from __future__ import absolute_import
9
10 import random
11
12 from django.shortcuts import get_object_or_404
13 from django.views.generic import TemplateView
14 from akvo.rsr.models import Organisation, Project
15
16
17 __all__ = [
18 'ProjectMapView',
19 'CobrandedBannerView',
20 'ProjectNarrowView',
21 ]
22
23
24 class BaseWidgetView(TemplateView):
25
26 """Setup a common base widget."""
27
28 def get_context_data(self, **kwargs):
29 context = super(BaseWidgetView, self).get_context_data(**kwargs)
30 context['style'] = 'darkBG'
31 if self.request.GET.get('style') == 'light':
32 context['style'] = 'lightBG'
33 return context
34
35
36 class ProjectBaseWidgetView(BaseWidgetView):
37
38 """Extends the base widget with a project from url."""
39
40 def get_context_data(self, **kwargs):
41 context = super(ProjectBaseWidgetView, self).get_context_data(**kwargs)
42 context['project'] = get_object_or_404(
43 Project, pk=int(self.kwargs['project_id']))
44 return context
45
46
47 class RandomBaseWidgetView(BaseWidgetView):
48
49 """Extends the base widget with random project."""
50
51 def get_context_data(self, **kwargs):
52 context = super(RandomBaseWidgetView, self).get_context_data(**kwargs)
53 org_id = self.request.GET.get('organisation_id', '0')
54 organisation = get_object_or_404(Organisation, pk=org_id)
55 context['project'] = random.choice(organisation.active_projects())
56 return context
57
58
59 class CobrandedBannerView(ProjectBaseWidgetView):
60 template_name = 'widgets/cobranded_banner.html'
61
62
63 class RandomCobrandedBannerView(RandomBaseWidgetView):
64 template_name = 'widgets/cobranded_banner.html'
65
66
67 class ProjectNarrowView(ProjectBaseWidgetView):
68 template_name = 'widgets/project_narrow.html'
69
70
71 class RandomProjectNarrowView(RandomBaseWidgetView):
72 template_name = 'widgets/project_narrow.html'
73
74
75 class ProjectSmallView(ProjectBaseWidgetView):
76 template_name = 'widgets/project_small.html'
77
78
79 class RandomProjectSmallView(RandomBaseWidgetView):
80 template_name = 'widgets/project_small.html'
81
82
83 class ProjectListView(BaseWidgetView):
84 template_name = 'widgets/project_list.html'
85
86 def get_context_data(self, **kwargs):
87 context = super(ProjectListView, self).get_context_data(**kwargs)
88 order_by = self.request.GET.get('order_by', 'title')
89 org_id = self.request.GET.get('organisation_id', '0')
90 organisation = get_object_or_404(Organisation, pk=org_id)
91 projects = Project.objects.select_related(
92 'publishingstatus__status',
93 'primary_location',
94 'primary_location__country'
95 ).prefetch_related(
96 'last_update'
97 ).filter(
98 partnerships__organisation__id=org_id,
99 publishingstatus__status__exact='published'
100 ).order_by('-id').distinct()
101
102 if order_by == 'status':
103 projects = projects.order_by('status', 'title')
104 elif order_by == 'last_update':
105 projects = projects.order_by('-last_update', 'title')
106 elif order_by in ['budget', 'funds_needed']:
107 projects = projects.extra(order_by=['-%s' % order_by, 'title'])
108 else:
109 projects = projects.order_by('title') # default to project title
110
111 context['organisation'] = organisation
112 context['projects_count'] = projects.count() # Limit to 100 projects
113 context['projects'] = projects[:100] # Limit to 100 projects
114 return context
115
116
117 class ProjectMapView(BaseWidgetView):
118 template_name = 'widgets/projects_map.html'
119
120 def get_context_data(self, **kwargs):
121 context = super(ProjectMapView, self).get_context_data(**kwargs)
122 context['height'] = self.request.GET.get('height', '300')
123 context['heightpx'] = str(int(context['height']) - 30) + 'px'
124 context['width'] = self.request.GET.get('width', '600')
125 context['widthpx'] = str(context['width']) + 'px'
126 context['style'] = self.request.GET.get('style', 'dark')
127 context['state'] = self.request.GET.get('state', 'dynamic')
128 org_id = self.request.GET.get('organisation_id', '0')
129 org = get_object_or_404(Organisation, pk=org_id)
130 context['projects'] = org.published_projects()
131 return context
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rsr/views/widgets.py b/akvo/rsr/views/widgets.py
--- a/akvo/rsr/views/widgets.py
+++ b/akvo/rsr/views/widgets.py
@@ -86,7 +86,7 @@
def get_context_data(self, **kwargs):
context = super(ProjectListView, self).get_context_data(**kwargs)
order_by = self.request.GET.get('order_by', 'title')
- org_id = self.request.GET.get('organisation_id', '0')
+ org_id = self.request.GET.get('organisation_id')
organisation = get_object_or_404(Organisation, pk=org_id)
projects = Project.objects.select_related(
'publishingstatus__status',
@@ -125,7 +125,7 @@
context['widthpx'] = str(context['width']) + 'px'
context['style'] = self.request.GET.get('style', 'dark')
context['state'] = self.request.GET.get('state', 'dynamic')
- org_id = self.request.GET.get('organisation_id', '0')
+ org_id = self.request.GET.get('organisation_id')
org = get_object_or_404(Organisation, pk=org_id)
context['projects'] = org.published_projects()
return context
| {"golden_diff": "diff --git a/akvo/rsr/views/widgets.py b/akvo/rsr/views/widgets.py\n--- a/akvo/rsr/views/widgets.py\n+++ b/akvo/rsr/views/widgets.py\n@@ -86,7 +86,7 @@\n def get_context_data(self, **kwargs):\n context = super(ProjectListView, self).get_context_data(**kwargs)\n order_by = self.request.GET.get('order_by', 'title')\n- org_id = self.request.GET.get('organisation_id', '0')\n+ org_id = self.request.GET.get('organisation_id')\n organisation = get_object_or_404(Organisation, pk=org_id)\n projects = Project.objects.select_related(\n 'publishingstatus__status',\n@@ -125,7 +125,7 @@\n context['widthpx'] = str(context['width']) + 'px'\n context['style'] = self.request.GET.get('style', 'dark')\n context['state'] = self.request.GET.get('state', 'dynamic')\n- org_id = self.request.GET.get('organisation_id', '0')\n+ org_id = self.request.GET.get('organisation_id')\n org = get_object_or_404(Organisation, pk=org_id)\n context['projects'] = org.published_projects()\n return context\n", "issue": "Project list widget throws an internal server error\n## Test plan\n\nGIVEN the 'Grab a widget' page\n\nWHEN a project has no partners\nTHEN the project list and project map options should not be available\n\nWHEN a project has partners\nTHEN all widgets should be available and not display an error\n## Issue description\n\nThe project list widget gives an internal server error. And the map widget as well: http://sentry.support.akvo-ops.org/rsr/live/group/727/. This is due to the fact that they don't have a reporting organisation and the map and project listing widgets expect an organisation as an input.\n\nThe reporting organisation here should be changed to the project's primary organisation, and we should create a 404 exception when the project has no partners at all, just in case.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport random\n\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic import TemplateView\nfrom akvo.rsr.models import Organisation, Project\n\n\n__all__ = [\n 'ProjectMapView',\n 'CobrandedBannerView',\n 'ProjectNarrowView',\n]\n\n\nclass BaseWidgetView(TemplateView):\n\n \"\"\"Setup a common base widget.\"\"\"\n\n def get_context_data(self, **kwargs):\n context = super(BaseWidgetView, self).get_context_data(**kwargs)\n context['style'] = 'darkBG'\n if self.request.GET.get('style') == 'light':\n context['style'] = 'lightBG'\n return context\n\n\nclass ProjectBaseWidgetView(BaseWidgetView):\n\n \"\"\"Extends the base widget with a project from url.\"\"\"\n\n def get_context_data(self, **kwargs):\n context = super(ProjectBaseWidgetView, self).get_context_data(**kwargs)\n context['project'] = get_object_or_404(\n Project, pk=int(self.kwargs['project_id']))\n return context\n\n\nclass RandomBaseWidgetView(BaseWidgetView):\n\n \"\"\"Extends the base widget with random project.\"\"\"\n\n def get_context_data(self, **kwargs):\n context = super(RandomBaseWidgetView, self).get_context_data(**kwargs)\n org_id = self.request.GET.get('organisation_id', '0')\n organisation = get_object_or_404(Organisation, pk=org_id)\n context['project'] = random.choice(organisation.active_projects())\n return context\n\n\nclass 
CobrandedBannerView(ProjectBaseWidgetView):\n template_name = 'widgets/cobranded_banner.html'\n\n\nclass RandomCobrandedBannerView(RandomBaseWidgetView):\n template_name = 'widgets/cobranded_banner.html'\n\n\nclass ProjectNarrowView(ProjectBaseWidgetView):\n template_name = 'widgets/project_narrow.html'\n\n\nclass RandomProjectNarrowView(RandomBaseWidgetView):\n template_name = 'widgets/project_narrow.html'\n\n\nclass ProjectSmallView(ProjectBaseWidgetView):\n template_name = 'widgets/project_small.html'\n\n\nclass RandomProjectSmallView(RandomBaseWidgetView):\n template_name = 'widgets/project_small.html'\n\n\nclass ProjectListView(BaseWidgetView):\n template_name = 'widgets/project_list.html'\n\n def get_context_data(self, **kwargs):\n context = super(ProjectListView, self).get_context_data(**kwargs)\n order_by = self.request.GET.get('order_by', 'title')\n org_id = self.request.GET.get('organisation_id', '0')\n organisation = get_object_or_404(Organisation, pk=org_id)\n projects = Project.objects.select_related(\n 'publishingstatus__status',\n 'primary_location',\n 'primary_location__country'\n ).prefetch_related(\n 'last_update'\n ).filter(\n partnerships__organisation__id=org_id,\n publishingstatus__status__exact='published'\n ).order_by('-id').distinct()\n\n if order_by == 'status':\n projects = projects.order_by('status', 'title')\n elif order_by == 'last_update':\n projects = projects.order_by('-last_update', 'title')\n elif order_by in ['budget', 'funds_needed']:\n projects = projects.extra(order_by=['-%s' % order_by, 'title'])\n else:\n projects = projects.order_by('title') # default to project title\n\n context['organisation'] = organisation\n context['projects_count'] = projects.count() # Limit to 100 projects\n context['projects'] = projects[:100] # Limit to 100 projects\n return context\n\n\nclass ProjectMapView(BaseWidgetView):\n template_name = 'widgets/projects_map.html'\n\n def get_context_data(self, **kwargs):\n context = super(ProjectMapView, self).get_context_data(**kwargs)\n context['height'] = self.request.GET.get('height', '300')\n context['heightpx'] = str(int(context['height']) - 30) + 'px'\n context['width'] = self.request.GET.get('width', '600')\n context['widthpx'] = str(context['width']) + 'px'\n context['style'] = self.request.GET.get('style', 'dark')\n context['state'] = self.request.GET.get('state', 'dynamic')\n org_id = self.request.GET.get('organisation_id', '0')\n org = get_object_or_404(Organisation, pk=org_id)\n context['projects'] = org.published_projects()\n return context\n", "path": "akvo/rsr/views/widgets.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport random\n\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic import TemplateView\nfrom akvo.rsr.models import Organisation, Project\n\n\n__all__ = [\n 'ProjectMapView',\n 'CobrandedBannerView',\n 'ProjectNarrowView',\n]\n\n\nclass BaseWidgetView(TemplateView):\n\n \"\"\"Setup a common base widget.\"\"\"\n\n def get_context_data(self, **kwargs):\n context = super(BaseWidgetView, self).get_context_data(**kwargs)\n context['style'] = 'darkBG'\n if self.request.GET.get('style') == 'light':\n context['style'] = 'lightBG'\n return context\n\n\nclass 
ProjectBaseWidgetView(BaseWidgetView):\n\n \"\"\"Extends the base widget with a project from url.\"\"\"\n\n def get_context_data(self, **kwargs):\n context = super(ProjectBaseWidgetView, self).get_context_data(**kwargs)\n context['project'] = get_object_or_404(\n Project, pk=int(self.kwargs['project_id']))\n return context\n\n\nclass RandomBaseWidgetView(BaseWidgetView):\n\n \"\"\"Extends the base widget with random project.\"\"\"\n\n def get_context_data(self, **kwargs):\n context = super(RandomBaseWidgetView, self).get_context_data(**kwargs)\n org_id = self.request.GET.get('organisation_id', '0')\n organisation = get_object_or_404(Organisation, pk=org_id)\n context['project'] = random.choice(organisation.active_projects())\n return context\n\n\nclass CobrandedBannerView(ProjectBaseWidgetView):\n template_name = 'widgets/cobranded_banner.html'\n\n\nclass RandomCobrandedBannerView(RandomBaseWidgetView):\n template_name = 'widgets/cobranded_banner.html'\n\n\nclass ProjectNarrowView(ProjectBaseWidgetView):\n template_name = 'widgets/project_narrow.html'\n\n\nclass RandomProjectNarrowView(RandomBaseWidgetView):\n template_name = 'widgets/project_narrow.html'\n\n\nclass ProjectSmallView(ProjectBaseWidgetView):\n template_name = 'widgets/project_small.html'\n\n\nclass RandomProjectSmallView(RandomBaseWidgetView):\n template_name = 'widgets/project_small.html'\n\n\nclass ProjectListView(BaseWidgetView):\n template_name = 'widgets/project_list.html'\n\n def get_context_data(self, **kwargs):\n context = super(ProjectListView, self).get_context_data(**kwargs)\n order_by = self.request.GET.get('order_by', 'title')\n org_id = self.request.GET.get('organisation_id')\n organisation = get_object_or_404(Organisation, pk=org_id)\n projects = Project.objects.select_related(\n 'publishingstatus__status',\n 'primary_location',\n 'primary_location__country'\n ).prefetch_related(\n 'last_update'\n ).filter(\n partnerships__organisation__id=org_id,\n publishingstatus__status__exact='published'\n ).order_by('-id').distinct()\n\n if order_by == 'status':\n projects = projects.order_by('status', 'title')\n elif order_by == 'last_update':\n projects = projects.order_by('-last_update', 'title')\n elif order_by in ['budget', 'funds_needed']:\n projects = projects.extra(order_by=['-%s' % order_by, 'title'])\n else:\n projects = projects.order_by('title') # default to project title\n\n context['organisation'] = organisation\n context['projects_count'] = projects.count() # Limit to 100 projects\n context['projects'] = projects[:100] # Limit to 100 projects\n return context\n\n\nclass ProjectMapView(BaseWidgetView):\n template_name = 'widgets/projects_map.html'\n\n def get_context_data(self, **kwargs):\n context = super(ProjectMapView, self).get_context_data(**kwargs)\n context['height'] = self.request.GET.get('height', '300')\n context['heightpx'] = str(int(context['height']) - 30) + 'px'\n context['width'] = self.request.GET.get('width', '600')\n context['widthpx'] = str(context['width']) + 'px'\n context['style'] = self.request.GET.get('style', 'dark')\n context['state'] = self.request.GET.get('state', 'dynamic')\n org_id = self.request.GET.get('organisation_id')\n org = get_object_or_404(Organisation, pk=org_id)\n context['projects'] = org.published_projects()\n return context\n", "path": "akvo/rsr/views/widgets.py"}]} | 1,782 | 284 |
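For context on the widgets fix above, here is a minimal sketch of how the query parameter flows into `get_object_or_404` once the `'0'` default is dropped; the helper function name is illustrative, not taken from the record:

```python
# Illustrative sketch only; the helper name is hypothetical.
from django.shortcuts import get_object_or_404
from akvo.rsr.models import Organisation

def organisation_from_request(request):
    # GET.get('organisation_id') returns None when the parameter is absent,
    # whereas GET.get('organisation_id', '0') would return the string '0'.
    org_id = request.GET.get("organisation_id")
    # get_object_or_404 raises Http404 whenever no Organisation matches,
    # so a missing or unknown id renders a 404 page for the widget.
    return get_object_or_404(Organisation, pk=org_id)
```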
gh_patches_debug_18666 | rasdani/github-patches | git_diff | mozilla__pontoon-3061 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fluent pretranslations not serialized in canonical form
Fluent pretranslations are not stored in a canonical form. For example, `key = {example}` should actually be stored as `key = { example }` (mind the whitespace).
In the exported files in the repository, the strings appear as they should, which results in sync trying to import pretranslations, because they aren't the same as in Pontoon DB. That also triggers #2284.
In the UI (editor, string list) the strings are also rendered properly, which only makes the problem more difficult to spot.
We should make sure that pretranslations are always stored in the canonical form.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pontoon/pretranslation/pretranslate.py`
Content:
```
1 import logging
2 import operator
3 import re
4
5 from django.db.models import CharField, Value as V
6 from django.db.models.functions import Concat
7
8 from fluent.syntax import FluentParser, FluentSerializer
9 from functools import reduce
10
11 from pontoon.base.models import User, TranslatedResource
12 from pontoon.machinery.utils import (
13 get_google_translate_data,
14 get_translation_memory_data,
15 )
16 from pontoon.pretranslation import AUTHORS
17 from .transformer import ApplyPretranslation
18
19
20 log = logging.getLogger(__name__)
21
22 parser = FluentParser()
23 serializer = FluentSerializer()
24
25
26 def get_pretranslations(entity, locale, preserve_placeables=False):
27 """
28 Get pretranslations for the entity-locale pair using internal translation memory and
29 Google's machine translation.
30
31 For Fluent strings, uplift SelectExpressions, serialize Placeables as TextElements
32 and then only pretranslate TextElements. Set the most frequent TextElement
33 pretranslation author as the author of the entire pretranslation.
34
35 :arg Entity entity: the Entity object
36 :arg Locale locale: the Locale object
37 :arg boolean preserve_placeables
38
39 :returns: a list of tuples, consisting of:
40 - a pretranslation of the entity
41 - a plural form
42 - a user (representing TM or GT service)
43 """
44 source = entity.string
45 services = {k: User.objects.get(email=email) for k, email in AUTHORS.items()}
46
47 if entity.resource.format == "ftl":
48 entry = parser.parse_entry(source)
49 pretranslate = ApplyPretranslation(
50 locale, entry, get_pretranslated_data, preserve_placeables
51 )
52
53 try:
54 pretranslate.visit(entry)
55 except ValueError as e:
56 log.info(f"Fluent pretranslation error: {e}")
57 return []
58
59 pretranslation = serializer.serialize_entry(entry)
60
61 authors = [services[service] for service in pretranslate.services]
62 author = max(set(authors), key=authors.count) if authors else services["tm"]
63
64 return [(pretranslation, None, author)]
65
66 else:
67 pretranslation, service = get_pretranslated_data(
68 source, locale, preserve_placeables
69 )
70
71 if pretranslation is None:
72 return []
73
74 author = services[service]
75 if entity.string_plural == "":
76 return [(pretranslation, None, author)]
77 else:
78 plural_forms = range(0, locale.nplurals or 1)
79 return [
80 (pretranslation, plural_form, author) for plural_form in plural_forms
81 ]
82
83
84 def get_pretranslated_data(source, locale, preserve_placeables):
85 # Empty strings do not need translation
86 if re.search("^\\s*$", source):
87 return source, "tm"
88
89 # Try to get matches from Translation Memory
90 tm_response = get_translation_memory_data(text=source, locale=locale)
91 tm_perfect = [t for t in tm_response if int(t["quality"]) == 100]
92 if tm_perfect:
93 return tm_perfect[0]["target"], "tm"
94
95 # Fetch from Google Translate
96 elif locale.google_translate_code:
97 gt_response = get_google_translate_data(
98 text=source, locale=locale, preserve_placeables=preserve_placeables
99 )
100 if gt_response["status"]:
101 return gt_response["translation"], "gt"
102
103 return None, None
104
105
106 def update_changed_instances(tr_filter, tr_dict, translations):
107 """
108 Update the latest activity and stats for changed Locales, ProjectLocales
109 & TranslatedResources
110 """
111 tr_filter = tuple(tr_filter)
112 # Combine all generated filters with an OK operator.
113 # `operator.ior` is the '|' Python operator, which turns into a logical OR
114 # when used between django ORM query objects.
115 tr_query = reduce(operator.ior, tr_filter)
116
117 translatedresources = TranslatedResource.objects.filter(tr_query).annotate(
118 locale_resource=Concat(
119 "locale_id", V("-"), "resource_id", output_field=CharField()
120 )
121 )
122
123 translatedresources.update_stats()
124
125 for tr in translatedresources:
126 index = tr_dict[tr.locale_resource]
127 translation = translations[index]
128 translation.update_latest_translation()
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pontoon/pretranslation/pretranslate.py b/pontoon/pretranslation/pretranslate.py
--- a/pontoon/pretranslation/pretranslate.py
+++ b/pontoon/pretranslation/pretranslate.py
@@ -58,6 +58,10 @@
pretranslation = serializer.serialize_entry(entry)
+ # Parse and serialize pretranslation again in order to assure cannonical style
+ parsed_pretranslation = parser.parse_entry(pretranslation)
+ pretranslation = serializer.serialize_entry(parsed_pretranslation)
+
authors = [services[service] for service in pretranslate.services]
author = max(set(authors), key=authors.count) if authors else services["tm"]
@@ -82,7 +86,7 @@
def get_pretranslated_data(source, locale, preserve_placeables):
- # Empty strings do not need translation
+ # Empty strings and strings containing whitespace only do not need translation
if re.search("^\\s*$", source):
return source, "tm"
| {"golden_diff": "diff --git a/pontoon/pretranslation/pretranslate.py b/pontoon/pretranslation/pretranslate.py\n--- a/pontoon/pretranslation/pretranslate.py\n+++ b/pontoon/pretranslation/pretranslate.py\n@@ -58,6 +58,10 @@\n \n pretranslation = serializer.serialize_entry(entry)\n \n+ # Parse and serialize pretranslation again in order to assure cannonical style\n+ parsed_pretranslation = parser.parse_entry(pretranslation)\n+ pretranslation = serializer.serialize_entry(parsed_pretranslation)\n+\n authors = [services[service] for service in pretranslate.services]\n author = max(set(authors), key=authors.count) if authors else services[\"tm\"]\n \n@@ -82,7 +86,7 @@\n \n \n def get_pretranslated_data(source, locale, preserve_placeables):\n- # Empty strings do not need translation\n+ # Empty strings and strings containing whitespace only do not need translation\n if re.search(\"^\\\\s*$\", source):\n return source, \"tm\"\n", "issue": "Fluent pretranslations not serialized in canonical form\nFluent pretranslations are not stored in a canonical form. For example, `key = {example}` should actually be stored as `key = { example }` (mind the whitespace).\r\n\r\nIn the exported files in the repository, the strings appear as they should, which results in sync trying to import pretranslations, because they aren't the same as in Pontoon DB. That also triggers #2284. \r\n\r\nIn the UI (editor, string list) the strings are also rendered properly, which only makes the problem more difficult to spot.\r\n\r\nWe should make sure that pretranslations are always stored them in the canonical form.\n", "before_files": [{"content": "import logging\nimport operator\nimport re\n\nfrom django.db.models import CharField, Value as V\nfrom django.db.models.functions import Concat\n\nfrom fluent.syntax import FluentParser, FluentSerializer\nfrom functools import reduce\n\nfrom pontoon.base.models import User, TranslatedResource\nfrom pontoon.machinery.utils import (\n get_google_translate_data,\n get_translation_memory_data,\n)\nfrom pontoon.pretranslation import AUTHORS\nfrom .transformer import ApplyPretranslation\n\n\nlog = logging.getLogger(__name__)\n\nparser = FluentParser()\nserializer = FluentSerializer()\n\n\ndef get_pretranslations(entity, locale, preserve_placeables=False):\n \"\"\"\n Get pretranslations for the entity-locale pair using internal translation memory and\n Google's machine translation.\n\n For Fluent strings, uplift SelectExpressions, serialize Placeables as TextElements\n and then only pretranslate TextElements. 
Set the most frequent TextElement\n pretranslation author as the author of the entire pretranslation.\n\n :arg Entity entity: the Entity object\n :arg Locale locale: the Locale object\n :arg boolean preserve_placeables\n\n :returns: a list of tuples, consisting of:\n - a pretranslation of the entity\n - a plural form\n - a user (representing TM or GT service)\n \"\"\"\n source = entity.string\n services = {k: User.objects.get(email=email) for k, email in AUTHORS.items()}\n\n if entity.resource.format == \"ftl\":\n entry = parser.parse_entry(source)\n pretranslate = ApplyPretranslation(\n locale, entry, get_pretranslated_data, preserve_placeables\n )\n\n try:\n pretranslate.visit(entry)\n except ValueError as e:\n log.info(f\"Fluent pretranslation error: {e}\")\n return []\n\n pretranslation = serializer.serialize_entry(entry)\n\n authors = [services[service] for service in pretranslate.services]\n author = max(set(authors), key=authors.count) if authors else services[\"tm\"]\n\n return [(pretranslation, None, author)]\n\n else:\n pretranslation, service = get_pretranslated_data(\n source, locale, preserve_placeables\n )\n\n if pretranslation is None:\n return []\n\n author = services[service]\n if entity.string_plural == \"\":\n return [(pretranslation, None, author)]\n else:\n plural_forms = range(0, locale.nplurals or 1)\n return [\n (pretranslation, plural_form, author) for plural_form in plural_forms\n ]\n\n\ndef get_pretranslated_data(source, locale, preserve_placeables):\n # Empty strings do not need translation\n if re.search(\"^\\\\s*$\", source):\n return source, \"tm\"\n\n # Try to get matches from Translation Memory\n tm_response = get_translation_memory_data(text=source, locale=locale)\n tm_perfect = [t for t in tm_response if int(t[\"quality\"]) == 100]\n if tm_perfect:\n return tm_perfect[0][\"target\"], \"tm\"\n\n # Fetch from Google Translate\n elif locale.google_translate_code:\n gt_response = get_google_translate_data(\n text=source, locale=locale, preserve_placeables=preserve_placeables\n )\n if gt_response[\"status\"]:\n return gt_response[\"translation\"], \"gt\"\n\n return None, None\n\n\ndef update_changed_instances(tr_filter, tr_dict, translations):\n \"\"\"\n Update the latest activity and stats for changed Locales, ProjectLocales\n & TranslatedResources\n \"\"\"\n tr_filter = tuple(tr_filter)\n # Combine all generated filters with an OK operator.\n # `operator.ior` is the '|' Python operator, which turns into a logical OR\n # when used between django ORM query objects.\n tr_query = reduce(operator.ior, tr_filter)\n\n translatedresources = TranslatedResource.objects.filter(tr_query).annotate(\n locale_resource=Concat(\n \"locale_id\", V(\"-\"), \"resource_id\", output_field=CharField()\n )\n )\n\n translatedresources.update_stats()\n\n for tr in translatedresources:\n index = tr_dict[tr.locale_resource]\n translation = translations[index]\n translation.update_latest_translation()\n", "path": "pontoon/pretranslation/pretranslate.py"}], "after_files": [{"content": "import logging\nimport operator\nimport re\n\nfrom django.db.models import CharField, Value as V\nfrom django.db.models.functions import Concat\n\nfrom fluent.syntax import FluentParser, FluentSerializer\nfrom functools import reduce\n\nfrom pontoon.base.models import User, TranslatedResource\nfrom pontoon.machinery.utils import (\n get_google_translate_data,\n get_translation_memory_data,\n)\nfrom pontoon.pretranslation import AUTHORS\nfrom .transformer import ApplyPretranslation\n\n\nlog = 
logging.getLogger(__name__)\n\nparser = FluentParser()\nserializer = FluentSerializer()\n\n\ndef get_pretranslations(entity, locale, preserve_placeables=False):\n \"\"\"\n Get pretranslations for the entity-locale pair using internal translation memory and\n Google's machine translation.\n\n For Fluent strings, uplift SelectExpressions, serialize Placeables as TextElements\n and then only pretranslate TextElements. Set the most frequent TextElement\n pretranslation author as the author of the entire pretranslation.\n\n :arg Entity entity: the Entity object\n :arg Locale locale: the Locale object\n :arg boolean preserve_placeables\n\n :returns: a list of tuples, consisting of:\n - a pretranslation of the entity\n - a plural form\n - a user (representing TM or GT service)\n \"\"\"\n source = entity.string\n services = {k: User.objects.get(email=email) for k, email in AUTHORS.items()}\n\n if entity.resource.format == \"ftl\":\n entry = parser.parse_entry(source)\n pretranslate = ApplyPretranslation(\n locale, entry, get_pretranslated_data, preserve_placeables\n )\n\n try:\n pretranslate.visit(entry)\n except ValueError as e:\n log.info(f\"Fluent pretranslation error: {e}\")\n return []\n\n pretranslation = serializer.serialize_entry(entry)\n\n # Parse and serialize pretranslation again in order to assure cannonical style\n parsed_pretranslation = parser.parse_entry(pretranslation)\n pretranslation = serializer.serialize_entry(parsed_pretranslation)\n\n authors = [services[service] for service in pretranslate.services]\n author = max(set(authors), key=authors.count) if authors else services[\"tm\"]\n\n return [(pretranslation, None, author)]\n\n else:\n pretranslation, service = get_pretranslated_data(\n source, locale, preserve_placeables\n )\n\n if pretranslation is None:\n return []\n\n author = services[service]\n if entity.string_plural == \"\":\n return [(pretranslation, None, author)]\n else:\n plural_forms = range(0, locale.nplurals or 1)\n return [\n (pretranslation, plural_form, author) for plural_form in plural_forms\n ]\n\n\ndef get_pretranslated_data(source, locale, preserve_placeables):\n # Empty strings and strings containing whitespace only do not need translation\n if re.search(\"^\\\\s*$\", source):\n return source, \"tm\"\n\n # Try to get matches from Translation Memory\n tm_response = get_translation_memory_data(text=source, locale=locale)\n tm_perfect = [t for t in tm_response if int(t[\"quality\"]) == 100]\n if tm_perfect:\n return tm_perfect[0][\"target\"], \"tm\"\n\n # Fetch from Google Translate\n elif locale.google_translate_code:\n gt_response = get_google_translate_data(\n text=source, locale=locale, preserve_placeables=preserve_placeables\n )\n if gt_response[\"status\"]:\n return gt_response[\"translation\"], \"gt\"\n\n return None, None\n\n\ndef update_changed_instances(tr_filter, tr_dict, translations):\n \"\"\"\n Update the latest activity and stats for changed Locales, ProjectLocales\n & TranslatedResources\n \"\"\"\n tr_filter = tuple(tr_filter)\n # Combine all generated filters with an OK operator.\n # `operator.ior` is the '|' Python operator, which turns into a logical OR\n # when used between django ORM query objects.\n tr_query = reduce(operator.ior, tr_filter)\n\n translatedresources = TranslatedResource.objects.filter(tr_query).annotate(\n locale_resource=Concat(\n \"locale_id\", V(\"-\"), \"resource_id\", output_field=CharField()\n )\n )\n\n translatedresources.update_stats()\n\n for tr in translatedresources:\n index = tr_dict[tr.locale_resource]\n 
translation = translations[index]\n translation.update_latest_translation()\n", "path": "pontoon/pretranslation/pretranslate.py"}]} | 1,575 | 215 |
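As a quick illustration of the canonical-form behaviour the Pontoon fix relies on, the following sketch round-trips a Fluent entry through `fluent.syntax`; the sample message is made up:

```python
# Illustrative sketch only; the sample string is hypothetical.
from fluent.syntax import FluentParser, FluentSerializer

parser = FluentParser()
serializer = FluentSerializer()

raw = "key = {example}\n"
entry = parser.parse_entry(raw)
canonical = serializer.serialize_entry(entry)
# The serializer re-emits the entry in canonical style,
# e.g. "key = { example }\n" (note the whitespace inside the placeable),
# which is why parsing and re-serializing the pretranslation resolves the issue.
print(canonical)
```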
gh_patches_debug_18957 | rasdani/github-patches | git_diff | wagtail__wagtail-8171 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tags field needs universal help text to explain need to quote multi-word tags
Wagtail's tagging system requires multi-word tags to be quoted, to prevent a two-word tag becoming two separate tags. This appears to catch users out repeatedly and has generated several Issues in recent months.
The tag field widget, wherever used (page models, images, documents etc) should have standard help text (appearing beneath it like all help text) explaining the need for quoting. Explaining the need for comma-delimiting might also be worth adding while we're at it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/admin/widgets/tags.py`
Content:
```
1 import json
2
3 from django.conf import settings
4 from django.urls import reverse
5 from taggit.forms import TagWidget
6 from taggit.models import Tag
7
8
9 class AdminTagWidget(TagWidget):
10 template_name = "wagtailadmin/widgets/tag_widget.html"
11
12 def __init__(self, *args, **kwargs):
13 self.tag_model = kwargs.pop("tag_model", Tag)
14 # free_tagging = None means defer to the tag model's setting
15 self.free_tagging = kwargs.pop("free_tagging", None)
16 super().__init__(*args, **kwargs)
17
18 def get_context(self, name, value, attrs):
19 context = super().get_context(name, value, attrs)
20
21 if self.tag_model == Tag:
22 autocomplete_url = reverse("wagtailadmin_tag_autocomplete")
23 else:
24 autocomplete_url = reverse(
25 "wagtailadmin_tag_model_autocomplete",
26 args=(self.tag_model._meta.app_label, self.tag_model._meta.model_name),
27 )
28
29 if self.free_tagging is None:
30 free_tagging = getattr(self.tag_model, "free_tagging", True)
31 else:
32 free_tagging = self.free_tagging
33
34 context["widget"]["autocomplete_url"] = autocomplete_url
35 context["widget"]["options_json"] = json.dumps(
36 {
37 "allowSpaces": getattr(settings, "TAG_SPACES_ALLOWED", True),
38 "tagLimit": getattr(settings, "TAG_LIMIT", None),
39 "autocompleteOnly": not free_tagging,
40 }
41 )
42
43 return context
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/admin/widgets/tags.py b/wagtail/admin/widgets/tags.py
--- a/wagtail/admin/widgets/tags.py
+++ b/wagtail/admin/widgets/tags.py
@@ -2,6 +2,7 @@
from django.conf import settings
from django.urls import reverse
+from django.utils.translation import gettext_lazy as _
from taggit.forms import TagWidget
from taggit.models import Tag
@@ -31,6 +32,15 @@
else:
free_tagging = self.free_tagging
+ tag_spaces_allowed = getattr(settings, "TAG_SPACES_ALLOWED", True)
+ if tag_spaces_allowed:
+ help_text = _(
+ 'Multi-word tags with spaces will automatically be enclosed in double quotes (").'
+ )
+ else:
+ help_text = _("Tags can only consist of a single word, no spaces allowed.")
+
+ context["widget"]["help_text"] = help_text
context["widget"]["autocomplete_url"] = autocomplete_url
context["widget"]["options_json"] = json.dumps(
{
| {"golden_diff": "diff --git a/wagtail/admin/widgets/tags.py b/wagtail/admin/widgets/tags.py\n--- a/wagtail/admin/widgets/tags.py\n+++ b/wagtail/admin/widgets/tags.py\n@@ -2,6 +2,7 @@\n \n from django.conf import settings\n from django.urls import reverse\n+from django.utils.translation import gettext_lazy as _\n from taggit.forms import TagWidget\n from taggit.models import Tag\n \n@@ -31,6 +32,15 @@\n else:\n free_tagging = self.free_tagging\n \n+ tag_spaces_allowed = getattr(settings, \"TAG_SPACES_ALLOWED\", True)\n+ if tag_spaces_allowed:\n+ help_text = _(\n+ 'Multi-word tags with spaces will automatically be enclosed in double quotes (\").'\n+ )\n+ else:\n+ help_text = _(\"Tags can only consist of a single word, no spaces allowed.\")\n+\n+ context[\"widget\"][\"help_text\"] = help_text\n context[\"widget\"][\"autocomplete_url\"] = autocomplete_url\n context[\"widget\"][\"options_json\"] = json.dumps(\n {\n", "issue": "Tags field needs universal help text to explain need to quote multi-word tags\nWagtail's tagging system requires multi-word tags to be quoted, to prevent a two-word tag becoming two separate tags. This appears to catch users out repeatedly and has generated several Issues in recent months.\n\nThe tag field widget, wherever used (page models, images, documents etc) should have standard help text (appearing beneath it like all help text) explaining the need for quoting. Explaining the need for comma-delimiting might also be worth adding while we're at it.\n\n", "before_files": [{"content": "import json\n\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom taggit.forms import TagWidget\nfrom taggit.models import Tag\n\n\nclass AdminTagWidget(TagWidget):\n template_name = \"wagtailadmin/widgets/tag_widget.html\"\n\n def __init__(self, *args, **kwargs):\n self.tag_model = kwargs.pop(\"tag_model\", Tag)\n # free_tagging = None means defer to the tag model's setting\n self.free_tagging = kwargs.pop(\"free_tagging\", None)\n super().__init__(*args, **kwargs)\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n\n if self.tag_model == Tag:\n autocomplete_url = reverse(\"wagtailadmin_tag_autocomplete\")\n else:\n autocomplete_url = reverse(\n \"wagtailadmin_tag_model_autocomplete\",\n args=(self.tag_model._meta.app_label, self.tag_model._meta.model_name),\n )\n\n if self.free_tagging is None:\n free_tagging = getattr(self.tag_model, \"free_tagging\", True)\n else:\n free_tagging = self.free_tagging\n\n context[\"widget\"][\"autocomplete_url\"] = autocomplete_url\n context[\"widget\"][\"options_json\"] = json.dumps(\n {\n \"allowSpaces\": getattr(settings, \"TAG_SPACES_ALLOWED\", True),\n \"tagLimit\": getattr(settings, \"TAG_LIMIT\", None),\n \"autocompleteOnly\": not free_tagging,\n }\n )\n\n return context\n", "path": "wagtail/admin/widgets/tags.py"}], "after_files": [{"content": "import json\n\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom taggit.forms import TagWidget\nfrom taggit.models import Tag\n\n\nclass AdminTagWidget(TagWidget):\n template_name = \"wagtailadmin/widgets/tag_widget.html\"\n\n def __init__(self, *args, **kwargs):\n self.tag_model = kwargs.pop(\"tag_model\", Tag)\n # free_tagging = None means defer to the tag model's setting\n self.free_tagging = kwargs.pop(\"free_tagging\", None)\n super().__init__(*args, **kwargs)\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, 
attrs)\n\n if self.tag_model == Tag:\n autocomplete_url = reverse(\"wagtailadmin_tag_autocomplete\")\n else:\n autocomplete_url = reverse(\n \"wagtailadmin_tag_model_autocomplete\",\n args=(self.tag_model._meta.app_label, self.tag_model._meta.model_name),\n )\n\n if self.free_tagging is None:\n free_tagging = getattr(self.tag_model, \"free_tagging\", True)\n else:\n free_tagging = self.free_tagging\n\n tag_spaces_allowed = getattr(settings, \"TAG_SPACES_ALLOWED\", True)\n if tag_spaces_allowed:\n help_text = _(\n 'Multi-word tags with spaces will automatically be enclosed in double quotes (\").'\n )\n else:\n help_text = _(\"Tags can only consist of a single word, no spaces allowed.\")\n\n context[\"widget\"][\"help_text\"] = help_text\n context[\"widget\"][\"autocomplete_url\"] = autocomplete_url\n context[\"widget\"][\"options_json\"] = json.dumps(\n {\n \"allowSpaces\": getattr(settings, \"TAG_SPACES_ALLOWED\", True),\n \"tagLimit\": getattr(settings, \"TAG_LIMIT\", None),\n \"autocompleteOnly\": not free_tagging,\n }\n )\n\n return context\n", "path": "wagtail/admin/widgets/tags.py"}]} | 781 | 229 |
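A short usage sketch of the patched tag widget follows; it assumes a configured Django/Wagtail environment so that `reverse()` can resolve the tag autocomplete URLs, and the field name is arbitrary:

```python
# Illustrative sketch only; requires a configured Wagtail project.
from wagtail.admin.widgets.tags import AdminTagWidget

widget = AdminTagWidget()
context = widget.get_context("tags", None, {})
# With TAG_SPACES_ALLOWED left at its default (True), the widget context now
# carries the quoting hint added by the patch:
print(context["widget"]["help_text"])
# 'Multi-word tags with spaces will automatically be enclosed in double quotes (").'
```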
gh_patches_debug_27674 | rasdani/github-patches | git_diff | fossasia__open-event-server-1278 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make privacy field in Event API accept only public/private as values
Currently the privacy field in the Event API accepts any string. It should be limited to "public" and "private", with "public" as the default value.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `open_event/api/events.py`
Content:
```
1 from flask.ext.restplus import Resource, Namespace, reqparse
2 from flask import g
3
4 from open_event.models.event import Event as EventModel
5 from open_event.models.social_link import SocialLink as SocialLinkModel
6 from open_event.models.users_events_roles import UsersEventsRoles
7 from open_event.models.role import Role
8 from open_event.models.user import ORGANIZER
9 from open_event.helpers.data import save_to_db, update_version, record_activity
10
11 from .helpers.helpers import get_paginated_list, requires_auth, parse_args
12 from .helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, \
13 PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES, BaseDAO, ServiceDAO
14 from .helpers import custom_fields as fields
15 from helpers.special_fields import EventTypeField, EventTopicField
16
17
18 api = Namespace('events', description='Events')
19
20 EVENT_CREATOR = api.model('EventCreator', {
21 'id': fields.Integer(),
22 'email': fields.Email()
23 })
24
25 EVENT_SOCIAL = api.model('EventSocial', {
26 'id': fields.Integer(),
27 'name': fields.String(),
28 'link': fields.String()
29 })
30
31 EVENT = api.model('Event', {
32 'id': fields.Integer(required=True),
33 'name': fields.String(required=True),
34 'email': fields.Email(),
35 'color': fields.Color(),
36 'logo': fields.ImageUri(),
37 'start_time': fields.DateTime(required=True),
38 'end_time': fields.DateTime(required=True),
39 'timezone': fields.String(),
40 'latitude': fields.Float(),
41 'longitude': fields.Float(),
42 'event_url': fields.Uri(),
43 'background_url': fields.ImageUri(),
44 'description': fields.String(),
45 'location_name': fields.String(),
46 'organizer_name': fields.String(),
47 'organizer_description': fields.String(),
48 'state': fields.String(),
49 'closing_datetime': fields.DateTime(),
50 'type': EventTypeField(),
51 'topic': EventTopicField(),
52 'privacy': fields.String(),
53 'ticket_url': fields.Uri(),
54 'creator': fields.Nested(EVENT_CREATOR, allow_null=True),
55 'schedule_published_on': fields.DateTime(),
56 'code_of_conduct': fields.String(),
57 'social_links': fields.List(fields.Nested(EVENT_SOCIAL), attribute='social_link')
58 })
59
60 EVENT_PAGINATED = api.clone('EventPaginated', PAGINATED_MODEL, {
61 'results': fields.List(fields.Nested(EVENT))
62 })
63
64 EVENT_POST = api.clone('EventPost', EVENT)
65 SOCIAL_LINK_POST = api.clone('SocialLinkPost', EVENT_SOCIAL)
66
67 del EVENT_POST['id']
68 del EVENT_POST['creator']
69 del EVENT_POST['social_links']
70
71 del SOCIAL_LINK_POST['id']
72
73 # ###################
74 # Data Access Objects
75 # ###################
76
77
78 class SocialLinkDAO(ServiceDAO):
79 """
80 Social Link DAO
81 """
82 pass
83
84
85 class EventDAO(BaseDAO):
86 """
87 Event DAO
88 """
89 def fix_payload(self, data):
90 """
91 Fixes the payload data.
92 Here converts string time from datetime obj
93 """
94 data['start_time'] = EVENT_POST['start_time'].from_str(data['start_time'])
95 data['end_time'] = EVENT_POST['end_time'].from_str(data['end_time'])
96 data['closing_datetime'] = EVENT_POST['closing_datetime'].from_str(
97 data['closing_datetime'])
98 data['schedule_published_on'] = EVENT_POST['schedule_published_on'].from_str(
99 data['schedule_published_on'])
100 return data
101
102 def create(self, data, url):
103 data = self.validate(data)
104 payload = self.fix_payload(data)
105 new_event = self.model(**payload)
106 new_event.creator = g.user
107 save_to_db(new_event, "Event saved")
108 # set organizer
109 role = Role.query.filter_by(name=ORGANIZER).first()
110 uer = UsersEventsRoles(g.user, new_event, role)
111 save_to_db(uer, 'UER saved')
112 update_version(
113 event_id=new_event.id,
114 is_created=True,
115 column_to_increment="event_ver"
116 )
117 # Return created resource with a 201 status code and its Location
118 # (url) in the header.
119 resource_location = url + '/' + str(new_event.id)
120 return self.get(new_event.id), 201, {'Location': resource_location}
121
122 def update(self, event_id, data):
123 data = self.validate(data)
124 payload = self.fix_payload(data)
125 return BaseDAO.update(self, event_id, payload, validate=False)
126
127
128 LinkDAO = SocialLinkDAO(SocialLinkModel, SOCIAL_LINK_POST)
129 DAO = EventDAO(EventModel, EVENT_POST)
130
131
132 # DEFINE PARAMS
133
134 EVENT_PARAMS = {
135 'location': {
136 'type': str
137 },
138 'contains': {
139 'description': 'Contains the string in name and description',
140 'type': str
141 },
142 'state': {
143 'type': str
144 },
145 'privacy': {
146 'type': str
147 },
148 'type': {
149 'type': str
150 },
151 'topic': {
152 'type': str
153 },
154 'start_time_gt': {},
155 'start_time_lt': {},
156 'end_time_gt': {},
157 'end_time_lt': {}
158 }
159
160 # DEFINE RESOURCES
161
162
163 class EventResource():
164 """
165 Event Resource Base class
166 """
167 event_parser = reqparse.RequestParser()
168 event_parser.add_argument('location', type=str, dest='__event_location')
169 event_parser.add_argument('contains', type=str, dest='__event_contains')
170 event_parser.add_argument('state', type=str)
171 event_parser.add_argument('privacy', type=str)
172 event_parser.add_argument('type', type=str)
173 event_parser.add_argument('topic', type=str)
174 event_parser.add_argument('start_time_gt', dest='__event_start_time_gt')
175 event_parser.add_argument('start_time_lt', dest='__event_start_time_lt')
176 event_parser.add_argument('end_time_gt', dest='__event_end_time_gt')
177 event_parser.add_argument('end_time_lt', dest='__event_end_time_lt')
178
179
180 @api.route('/<int:event_id>')
181 @api.param('event_id')
182 @api.response(404, 'Event not found')
183 class Event(Resource):
184 @api.doc('get_event')
185 @api.marshal_with(EVENT)
186 def get(self, event_id):
187 """Fetch an event given its id"""
188 return DAO.get(event_id)
189
190 @requires_auth
191 @api.doc('delete_event')
192 @api.marshal_with(EVENT)
193 def delete(self, event_id):
194 """Delete an event given its id"""
195 event = DAO.delete(event_id)
196 record_activity('delete_event', event_id=event_id)
197 return event
198
199 @requires_auth
200 @api.doc('update_event', responses=PUT_RESPONSES)
201 @api.marshal_with(EVENT)
202 @api.expect(EVENT_POST)
203 def put(self, event_id):
204 """Update a event given its id"""
205 event = DAO.update(event_id, self.api.payload)
206 record_activity('update_event', event_id=event_id)
207 return event
208
209
210 @api.route('')
211 class EventList(Resource, EventResource):
212 @api.doc('list_events', params=EVENT_PARAMS)
213 @api.marshal_list_with(EVENT)
214 def get(self):
215 """List all events"""
216 return DAO.list(**parse_args(self.event_parser))
217
218 @requires_auth
219 @api.doc('create_event', responses=POST_RESPONSES)
220 @api.marshal_with(EVENT)
221 @api.expect(EVENT_POST)
222 def post(self):
223 """Create an event"""
224 item = DAO.create(self.api.payload, self.api.url_for(self))
225 record_activity('create_event', event_id=item[0].id)
226 return item
227
228
229 @api.route('/page')
230 class EventListPaginated(Resource, PaginatedResourceBase, EventResource):
231 @api.doc('list_events_paginated', params=PAGE_PARAMS)
232 @api.doc(params=EVENT_PARAMS)
233 @api.marshal_with(EVENT_PAGINATED)
234 def get(self):
235 """List events in a paginated manner"""
236 args = self.parser.parse_args()
237 url = self.api.url_for(self) # WARN: undocumented way
238 return get_paginated_list(
239 EventModel, url, args=args,
240 **parse_args(self.event_parser)
241 )
242
```
Path: `open_event/api/helpers/special_fields.py`
Content:
```
1 from open_event.helpers.data_getter import DataGetter
2
3 import custom_fields as fields
4
5
6 class EventTypeField(fields.ChoiceString):
7 __schema_example__ = DataGetter.get_event_types()[0]
8
9 def __init__(self, **kwargs):
10 super(EventTypeField, self).__init__(
11 choice_list=DataGetter.get_event_types(),
12 **kwargs)
13
14
15 class EventTopicField(fields.ChoiceString):
16 __schema_example__ = DataGetter.get_event_topics()[0]
17
18 def __init__(self, **kwargs):
19 super(EventTopicField, self).__init__(
20 choice_list=DataGetter.get_event_topics(),
21 **kwargs)
22
23
24 class SessionLanguageField(fields.ChoiceString):
25 __schema_example__ = DataGetter.get_language_list()[51]
26
27 def __init__(self, **kwargs):
28 super(SessionLanguageField, self).__init__(
29 choice_list=DataGetter.get_language_list(),
30 **kwargs)
31
32
33 class SessionStateField(fields.ChoiceString):
34 __schema_example__ = 'pending'
35
36 def __init__(self, **kwargs):
37 super(SessionStateField, self).__init__(
38 choice_list=['pending', 'accepted', 'rejected'],
39 **kwargs)
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/open_event/api/events.py b/open_event/api/events.py
--- a/open_event/api/events.py
+++ b/open_event/api/events.py
@@ -12,7 +12,8 @@
from .helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, \
PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES, BaseDAO, ServiceDAO
from .helpers import custom_fields as fields
-from helpers.special_fields import EventTypeField, EventTopicField
+from helpers.special_fields import EventTypeField, EventTopicField, \
+ EventPrivacyField
api = Namespace('events', description='Events')
@@ -49,7 +50,7 @@
'closing_datetime': fields.DateTime(),
'type': EventTypeField(),
'topic': EventTopicField(),
- 'privacy': fields.String(),
+ 'privacy': EventPrivacyField(),
'ticket_url': fields.Uri(),
'creator': fields.Nested(EVENT_CREATOR, allow_null=True),
'schedule_published_on': fields.DateTime(),
diff --git a/open_event/api/helpers/special_fields.py b/open_event/api/helpers/special_fields.py
--- a/open_event/api/helpers/special_fields.py
+++ b/open_event/api/helpers/special_fields.py
@@ -21,6 +21,16 @@
**kwargs)
+class EventPrivacyField(fields.ChoiceString):
+ __schema_example__ = 'public'
+
+ def __init__(self, **kwargs):
+ super(EventPrivacyField, self).__init__(
+ choice_list=['public', 'private'],
+ **kwargs
+ )
+
+
class SessionLanguageField(fields.ChoiceString):
__schema_example__ = DataGetter.get_language_list()[51]
| {"golden_diff": "diff --git a/open_event/api/events.py b/open_event/api/events.py\n--- a/open_event/api/events.py\n+++ b/open_event/api/events.py\n@@ -12,7 +12,8 @@\n from .helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, \\\n PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES, BaseDAO, ServiceDAO\n from .helpers import custom_fields as fields\n-from helpers.special_fields import EventTypeField, EventTopicField\n+from helpers.special_fields import EventTypeField, EventTopicField, \\\n+ EventPrivacyField\n \n \n api = Namespace('events', description='Events')\n@@ -49,7 +50,7 @@\n 'closing_datetime': fields.DateTime(),\n 'type': EventTypeField(),\n 'topic': EventTopicField(),\n- 'privacy': fields.String(),\n+ 'privacy': EventPrivacyField(),\n 'ticket_url': fields.Uri(),\n 'creator': fields.Nested(EVENT_CREATOR, allow_null=True),\n 'schedule_published_on': fields.DateTime(),\ndiff --git a/open_event/api/helpers/special_fields.py b/open_event/api/helpers/special_fields.py\n--- a/open_event/api/helpers/special_fields.py\n+++ b/open_event/api/helpers/special_fields.py\n@@ -21,6 +21,16 @@\n **kwargs)\n \n \n+class EventPrivacyField(fields.ChoiceString):\n+ __schema_example__ = 'public'\n+\n+ def __init__(self, **kwargs):\n+ super(EventPrivacyField, self).__init__(\n+ choice_list=['public', 'private'],\n+ **kwargs\n+ )\n+\n+\n class SessionLanguageField(fields.ChoiceString):\n __schema_example__ = DataGetter.get_language_list()[51]\n", "issue": "Make privacy field in Event API accept only public/private as values\nCurrently privacy field in Event API accepts any string . It should be limited to \"public\" and \"private\" and should have \"public\" as default value. \n\n", "before_files": [{"content": "from flask.ext.restplus import Resource, Namespace, reqparse\nfrom flask import g\n\nfrom open_event.models.event import Event as EventModel\nfrom open_event.models.social_link import SocialLink as SocialLinkModel\nfrom open_event.models.users_events_roles import UsersEventsRoles\nfrom open_event.models.role import Role\nfrom open_event.models.user import ORGANIZER\nfrom open_event.helpers.data import save_to_db, update_version, record_activity\n\nfrom .helpers.helpers import get_paginated_list, requires_auth, parse_args\nfrom .helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, \\\n PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES, BaseDAO, ServiceDAO\nfrom .helpers import custom_fields as fields\nfrom helpers.special_fields import EventTypeField, EventTopicField\n\n\napi = Namespace('events', description='Events')\n\nEVENT_CREATOR = api.model('EventCreator', {\n 'id': fields.Integer(),\n 'email': fields.Email()\n})\n\nEVENT_SOCIAL = api.model('EventSocial', {\n 'id': fields.Integer(),\n 'name': fields.String(),\n 'link': fields.String()\n})\n\nEVENT = api.model('Event', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(required=True),\n 'email': fields.Email(),\n 'color': fields.Color(),\n 'logo': fields.ImageUri(),\n 'start_time': fields.DateTime(required=True),\n 'end_time': fields.DateTime(required=True),\n 'timezone': fields.String(),\n 'latitude': fields.Float(),\n 'longitude': fields.Float(),\n 'event_url': fields.Uri(),\n 'background_url': fields.ImageUri(),\n 'description': fields.String(),\n 'location_name': fields.String(),\n 'organizer_name': fields.String(),\n 'organizer_description': fields.String(),\n 'state': fields.String(),\n 'closing_datetime': fields.DateTime(),\n 'type': EventTypeField(),\n 'topic': EventTopicField(),\n 'privacy': fields.String(),\n 
'ticket_url': fields.Uri(),\n 'creator': fields.Nested(EVENT_CREATOR, allow_null=True),\n 'schedule_published_on': fields.DateTime(),\n 'code_of_conduct': fields.String(),\n 'social_links': fields.List(fields.Nested(EVENT_SOCIAL), attribute='social_link')\n})\n\nEVENT_PAGINATED = api.clone('EventPaginated', PAGINATED_MODEL, {\n 'results': fields.List(fields.Nested(EVENT))\n})\n\nEVENT_POST = api.clone('EventPost', EVENT)\nSOCIAL_LINK_POST = api.clone('SocialLinkPost', EVENT_SOCIAL)\n\ndel EVENT_POST['id']\ndel EVENT_POST['creator']\ndel EVENT_POST['social_links']\n\ndel SOCIAL_LINK_POST['id']\n\n# ###################\n# Data Access Objects\n# ###################\n\n\nclass SocialLinkDAO(ServiceDAO):\n \"\"\"\n Social Link DAO\n \"\"\"\n pass\n\n\nclass EventDAO(BaseDAO):\n \"\"\"\n Event DAO\n \"\"\"\n def fix_payload(self, data):\n \"\"\"\n Fixes the payload data.\n Here converts string time from datetime obj\n \"\"\"\n data['start_time'] = EVENT_POST['start_time'].from_str(data['start_time'])\n data['end_time'] = EVENT_POST['end_time'].from_str(data['end_time'])\n data['closing_datetime'] = EVENT_POST['closing_datetime'].from_str(\n data['closing_datetime'])\n data['schedule_published_on'] = EVENT_POST['schedule_published_on'].from_str(\n data['schedule_published_on'])\n return data\n\n def create(self, data, url):\n data = self.validate(data)\n payload = self.fix_payload(data)\n new_event = self.model(**payload)\n new_event.creator = g.user\n save_to_db(new_event, \"Event saved\")\n # set organizer\n role = Role.query.filter_by(name=ORGANIZER).first()\n uer = UsersEventsRoles(g.user, new_event, role)\n save_to_db(uer, 'UER saved')\n update_version(\n event_id=new_event.id,\n is_created=True,\n column_to_increment=\"event_ver\"\n )\n # Return created resource with a 201 status code and its Location\n # (url) in the header.\n resource_location = url + '/' + str(new_event.id)\n return self.get(new_event.id), 201, {'Location': resource_location}\n\n def update(self, event_id, data):\n data = self.validate(data)\n payload = self.fix_payload(data)\n return BaseDAO.update(self, event_id, payload, validate=False)\n\n\nLinkDAO = SocialLinkDAO(SocialLinkModel, SOCIAL_LINK_POST)\nDAO = EventDAO(EventModel, EVENT_POST)\n\n\n# DEFINE PARAMS\n\nEVENT_PARAMS = {\n 'location': {\n 'type': str\n },\n 'contains': {\n 'description': 'Contains the string in name and description',\n 'type': str\n },\n 'state': {\n 'type': str\n },\n 'privacy': {\n 'type': str\n },\n 'type': {\n 'type': str\n },\n 'topic': {\n 'type': str\n },\n 'start_time_gt': {},\n 'start_time_lt': {},\n 'end_time_gt': {},\n 'end_time_lt': {}\n}\n\n# DEFINE RESOURCES\n\n\nclass EventResource():\n \"\"\"\n Event Resource Base class\n \"\"\"\n event_parser = reqparse.RequestParser()\n event_parser.add_argument('location', type=str, dest='__event_location')\n event_parser.add_argument('contains', type=str, dest='__event_contains')\n event_parser.add_argument('state', type=str)\n event_parser.add_argument('privacy', type=str)\n event_parser.add_argument('type', type=str)\n event_parser.add_argument('topic', type=str)\n event_parser.add_argument('start_time_gt', dest='__event_start_time_gt')\n event_parser.add_argument('start_time_lt', dest='__event_start_time_lt')\n event_parser.add_argument('end_time_gt', dest='__event_end_time_gt')\n event_parser.add_argument('end_time_lt', dest='__event_end_time_lt')\n\n\[email protected]('/<int:event_id>')\[email protected]('event_id')\[email protected](404, 'Event not found')\nclass Event(Resource):\n 
@api.doc('get_event')\n @api.marshal_with(EVENT)\n def get(self, event_id):\n \"\"\"Fetch an event given its id\"\"\"\n return DAO.get(event_id)\n\n @requires_auth\n @api.doc('delete_event')\n @api.marshal_with(EVENT)\n def delete(self, event_id):\n \"\"\"Delete an event given its id\"\"\"\n event = DAO.delete(event_id)\n record_activity('delete_event', event_id=event_id)\n return event\n\n @requires_auth\n @api.doc('update_event', responses=PUT_RESPONSES)\n @api.marshal_with(EVENT)\n @api.expect(EVENT_POST)\n def put(self, event_id):\n \"\"\"Update a event given its id\"\"\"\n event = DAO.update(event_id, self.api.payload)\n record_activity('update_event', event_id=event_id)\n return event\n\n\[email protected]('')\nclass EventList(Resource, EventResource):\n @api.doc('list_events', params=EVENT_PARAMS)\n @api.marshal_list_with(EVENT)\n def get(self):\n \"\"\"List all events\"\"\"\n return DAO.list(**parse_args(self.event_parser))\n\n @requires_auth\n @api.doc('create_event', responses=POST_RESPONSES)\n @api.marshal_with(EVENT)\n @api.expect(EVENT_POST)\n def post(self):\n \"\"\"Create an event\"\"\"\n item = DAO.create(self.api.payload, self.api.url_for(self))\n record_activity('create_event', event_id=item[0].id)\n return item\n\n\[email protected]('/page')\nclass EventListPaginated(Resource, PaginatedResourceBase, EventResource):\n @api.doc('list_events_paginated', params=PAGE_PARAMS)\n @api.doc(params=EVENT_PARAMS)\n @api.marshal_with(EVENT_PAGINATED)\n def get(self):\n \"\"\"List events in a paginated manner\"\"\"\n args = self.parser.parse_args()\n url = self.api.url_for(self) # WARN: undocumented way\n return get_paginated_list(\n EventModel, url, args=args,\n **parse_args(self.event_parser)\n )\n", "path": "open_event/api/events.py"}, {"content": "from open_event.helpers.data_getter import DataGetter\n\nimport custom_fields as fields\n\n\nclass EventTypeField(fields.ChoiceString):\n __schema_example__ = DataGetter.get_event_types()[0]\n\n def __init__(self, **kwargs):\n super(EventTypeField, self).__init__(\n choice_list=DataGetter.get_event_types(),\n **kwargs)\n\n\nclass EventTopicField(fields.ChoiceString):\n __schema_example__ = DataGetter.get_event_topics()[0]\n\n def __init__(self, **kwargs):\n super(EventTopicField, self).__init__(\n choice_list=DataGetter.get_event_topics(),\n **kwargs)\n\n\nclass SessionLanguageField(fields.ChoiceString):\n __schema_example__ = DataGetter.get_language_list()[51]\n\n def __init__(self, **kwargs):\n super(SessionLanguageField, self).__init__(\n choice_list=DataGetter.get_language_list(),\n **kwargs)\n\n\nclass SessionStateField(fields.ChoiceString):\n __schema_example__ = 'pending'\n\n def __init__(self, **kwargs):\n super(SessionStateField, self).__init__(\n choice_list=['pending', 'accepted', 'rejected'],\n **kwargs)\n", "path": "open_event/api/helpers/special_fields.py"}], "after_files": [{"content": "from flask.ext.restplus import Resource, Namespace, reqparse\nfrom flask import g\n\nfrom open_event.models.event import Event as EventModel\nfrom open_event.models.social_link import SocialLink as SocialLinkModel\nfrom open_event.models.users_events_roles import UsersEventsRoles\nfrom open_event.models.role import Role\nfrom open_event.models.user import ORGANIZER\nfrom open_event.helpers.data import save_to_db, update_version, record_activity\n\nfrom .helpers.helpers import get_paginated_list, requires_auth, parse_args\nfrom .helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, \\\n PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES, 
BaseDAO, ServiceDAO\nfrom .helpers import custom_fields as fields\nfrom helpers.special_fields import EventTypeField, EventTopicField, \\\n EventPrivacyField\n\n\napi = Namespace('events', description='Events')\n\nEVENT_CREATOR = api.model('EventCreator', {\n 'id': fields.Integer(),\n 'email': fields.Email()\n})\n\nEVENT_SOCIAL = api.model('EventSocial', {\n 'id': fields.Integer(),\n 'name': fields.String(),\n 'link': fields.String()\n})\n\nEVENT = api.model('Event', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(required=True),\n 'email': fields.Email(),\n 'color': fields.Color(),\n 'logo': fields.ImageUri(),\n 'start_time': fields.DateTime(required=True),\n 'end_time': fields.DateTime(required=True),\n 'timezone': fields.String(),\n 'latitude': fields.Float(),\n 'longitude': fields.Float(),\n 'event_url': fields.Uri(),\n 'background_url': fields.ImageUri(),\n 'description': fields.String(),\n 'location_name': fields.String(),\n 'organizer_name': fields.String(),\n 'organizer_description': fields.String(),\n 'state': fields.String(),\n 'closing_datetime': fields.DateTime(),\n 'type': EventTypeField(),\n 'topic': EventTopicField(),\n 'privacy': EventPrivacyField(),\n 'ticket_url': fields.Uri(),\n 'creator': fields.Nested(EVENT_CREATOR, allow_null=True),\n 'schedule_published_on': fields.DateTime(),\n 'code_of_conduct': fields.String(),\n 'social_links': fields.List(fields.Nested(EVENT_SOCIAL), attribute='social_link')\n})\n\nEVENT_PAGINATED = api.clone('EventPaginated', PAGINATED_MODEL, {\n 'results': fields.List(fields.Nested(EVENT))\n})\n\nEVENT_POST = api.clone('EventPost', EVENT)\nSOCIAL_LINK_POST = api.clone('SocialLinkPost', EVENT_SOCIAL)\n\ndel EVENT_POST['id']\ndel EVENT_POST['creator']\ndel EVENT_POST['social_links']\n\ndel SOCIAL_LINK_POST['id']\n\n# ###################\n# Data Access Objects\n# ###################\n\n\nclass SocialLinkDAO(ServiceDAO):\n \"\"\"\n Social Link DAO\n \"\"\"\n pass\n\n\nclass EventDAO(BaseDAO):\n \"\"\"\n Event DAO\n \"\"\"\n def fix_payload(self, data):\n \"\"\"\n Fixes the payload data.\n Here converts string time from datetime obj\n \"\"\"\n data['start_time'] = EVENT_POST['start_time'].from_str(data['start_time'])\n data['end_time'] = EVENT_POST['end_time'].from_str(data['end_time'])\n data['closing_datetime'] = EVENT_POST['closing_datetime'].from_str(\n data['closing_datetime'])\n data['schedule_published_on'] = EVENT_POST['schedule_published_on'].from_str(\n data['schedule_published_on'])\n return data\n\n def create(self, data, url):\n data = self.validate(data)\n payload = self.fix_payload(data)\n new_event = self.model(**payload)\n new_event.creator = g.user\n save_to_db(new_event, \"Event saved\")\n # set organizer\n role = Role.query.filter_by(name=ORGANIZER).first()\n uer = UsersEventsRoles(g.user, new_event, role)\n save_to_db(uer, 'UER saved')\n update_version(\n event_id=new_event.id,\n is_created=True,\n column_to_increment=\"event_ver\"\n )\n # Return created resource with a 201 status code and its Location\n # (url) in the header.\n resource_location = url + '/' + str(new_event.id)\n return self.get(new_event.id), 201, {'Location': resource_location}\n\n def update(self, event_id, data):\n data = self.validate(data)\n payload = self.fix_payload(data)\n return BaseDAO.update(self, event_id, payload, validate=False)\n\n\nLinkDAO = SocialLinkDAO(SocialLinkModel, SOCIAL_LINK_POST)\nDAO = EventDAO(EventModel, EVENT_POST)\n\n\n# DEFINE PARAMS\n\nEVENT_PARAMS = {\n 'location': {\n 'type': str\n },\n 'contains': {\n 
'description': 'Contains the string in name and description',\n 'type': str\n },\n 'state': {\n 'type': str\n },\n 'privacy': {\n 'type': str\n },\n 'type': {\n 'type': str\n },\n 'topic': {\n 'type': str\n },\n 'start_time_gt': {},\n 'start_time_lt': {},\n 'end_time_gt': {},\n 'end_time_lt': {}\n}\n\n# DEFINE RESOURCES\n\n\nclass EventResource():\n \"\"\"\n Event Resource Base class\n \"\"\"\n event_parser = reqparse.RequestParser()\n event_parser.add_argument('location', type=str, dest='__event_location')\n event_parser.add_argument('contains', type=str, dest='__event_contains')\n event_parser.add_argument('state', type=str)\n event_parser.add_argument('privacy', type=str)\n event_parser.add_argument('type', type=str)\n event_parser.add_argument('topic', type=str)\n event_parser.add_argument('start_time_gt', dest='__event_start_time_gt')\n event_parser.add_argument('start_time_lt', dest='__event_start_time_lt')\n event_parser.add_argument('end_time_gt', dest='__event_end_time_gt')\n event_parser.add_argument('end_time_lt', dest='__event_end_time_lt')\n\n\[email protected]('/<int:event_id>')\[email protected]('event_id')\[email protected](404, 'Event not found')\nclass Event(Resource):\n @api.doc('get_event')\n @api.marshal_with(EVENT)\n def get(self, event_id):\n \"\"\"Fetch an event given its id\"\"\"\n return DAO.get(event_id)\n\n @requires_auth\n @api.doc('delete_event')\n @api.marshal_with(EVENT)\n def delete(self, event_id):\n \"\"\"Delete an event given its id\"\"\"\n event = DAO.delete(event_id)\n record_activity('delete_event', event_id=event_id)\n return event\n\n @requires_auth\n @api.doc('update_event', responses=PUT_RESPONSES)\n @api.marshal_with(EVENT)\n @api.expect(EVENT_POST)\n def put(self, event_id):\n \"\"\"Update a event given its id\"\"\"\n event = DAO.update(event_id, self.api.payload)\n record_activity('update_event', event_id=event_id)\n return event\n\n\[email protected]('')\nclass EventList(Resource, EventResource):\n @api.doc('list_events', params=EVENT_PARAMS)\n @api.marshal_list_with(EVENT)\n def get(self):\n \"\"\"List all events\"\"\"\n return DAO.list(**parse_args(self.event_parser))\n\n @requires_auth\n @api.doc('create_event', responses=POST_RESPONSES)\n @api.marshal_with(EVENT)\n @api.expect(EVENT_POST)\n def post(self):\n \"\"\"Create an event\"\"\"\n item = DAO.create(self.api.payload, self.api.url_for(self))\n record_activity('create_event', event_id=item[0].id)\n return item\n\n\[email protected]('/page')\nclass EventListPaginated(Resource, PaginatedResourceBase, EventResource):\n @api.doc('list_events_paginated', params=PAGE_PARAMS)\n @api.doc(params=EVENT_PARAMS)\n @api.marshal_with(EVENT_PAGINATED)\n def get(self):\n \"\"\"List events in a paginated manner\"\"\"\n args = self.parser.parse_args()\n url = self.api.url_for(self) # WARN: undocumented way\n return get_paginated_list(\n EventModel, url, args=args,\n **parse_args(self.event_parser)\n )\n", "path": "open_event/api/events.py"}, {"content": "from open_event.helpers.data_getter import DataGetter\n\nimport custom_fields as fields\n\n\nclass EventTypeField(fields.ChoiceString):\n __schema_example__ = DataGetter.get_event_types()[0]\n\n def __init__(self, **kwargs):\n super(EventTypeField, self).__init__(\n choice_list=DataGetter.get_event_types(),\n **kwargs)\n\n\nclass EventTopicField(fields.ChoiceString):\n __schema_example__ = DataGetter.get_event_topics()[0]\n\n def __init__(self, **kwargs):\n super(EventTopicField, self).__init__(\n choice_list=DataGetter.get_event_topics(),\n 
**kwargs)\n\n\nclass EventPrivacyField(fields.ChoiceString):\n __schema_example__ = 'public'\n\n def __init__(self, **kwargs):\n super(EventPrivacyField, self).__init__(\n choice_list=['public', 'private'],\n **kwargs\n )\n\n\nclass SessionLanguageField(fields.ChoiceString):\n __schema_example__ = DataGetter.get_language_list()[51]\n\n def __init__(self, **kwargs):\n super(SessionLanguageField, self).__init__(\n choice_list=DataGetter.get_language_list(),\n **kwargs)\n\n\nclass SessionStateField(fields.ChoiceString):\n __schema_example__ = 'pending'\n\n def __init__(self, **kwargs):\n super(SessionStateField, self).__init__(\n choice_list=['pending', 'accepted', 'rejected'],\n **kwargs)\n", "path": "open_event/api/helpers/special_fields.py"}]} | 3,043 | 364 |
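The patch in the record above restricts the Event API's `privacy` field by adding an `EventPrivacyField` built on the project's `ChoiceString` helper. A minimal standalone sketch of the same constraint, using illustrative names rather than the project's actual validator API:

```python
# Sketch only: mirrors what EventPrivacyField enforces, outside flask-restplus.
ALLOWED_PRIVACY = ('public', 'private')

def validate_privacy(value, default='public'):
    """Return a valid privacy value, falling back to the default."""
    if value is None:
        return default
    if value not in ALLOWED_PRIVACY:
        raise ValueError("privacy must be one of %r, got %r" % (ALLOWED_PRIVACY, value))
    return value

print(validate_privacy(None))       # 'public'
print(validate_privacy('private'))  # 'private'
```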
gh_patches_debug_18003 | rasdani/github-patches | git_diff | rasterio__rasterio-223 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
guard_transform prevents translation of unprojected files
Where by translation I mean the pattern
``` python
with rasterio.open(file1.tif) as src:
with rasterio.open(file2.tif, 'w', **src.meta) as dst:
...
```
Traceback snippet:
```
Traceback (most recent call last):
...
File "copier.py", line 15, in to_png
with rio.open(dst_path, "w", **meta) as dst:
File "xxx/anaconda/lib/python2.7/site-packages/rasterio/__init__.py", line 91, in open
transform = guard_transform(transform)
File "xxx/anaconda/lib/python2.7/site-packages/rasterio/transform.py", line 27, in guard_transform
transform.a, transform.e))
ValueError: Transform has invalid coefficients a, e: (0.000000, 0.000000)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/transform.py`
Content:
```
1
2 import warnings
3
4 from affine import Affine
5
6 IDENTITY = Affine.identity()
7
8 def tastes_like_gdal(t):
9 return t[2] == t[4] == 0.0 and t[1] > 0 and t[5] < 0
10
11 def guard_transform(transform):
12 """Return an Affine transformation instance"""
13 if not isinstance(transform, Affine):
14 if tastes_like_gdal(transform):
15 warnings.warn(
16 "GDAL-style transforms are deprecated and will not "
17 "be supported in Rasterio 1.0.",
18 FutureWarning,
19 stacklevel=2)
20 transform = Affine.from_gdal(*transform)
21 else:
22 transform = Affine(*transform)
23 a, e = transform.a, transform.e
24 if a == 0.0 or e == 0.0:
25 raise ValueError(
26 "Transform has invalid coefficients a, e: (%f, %f)" % (
27 transform.a, transform.e))
28 return transform
29
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rasterio/transform.py b/rasterio/transform.py
--- a/rasterio/transform.py
+++ b/rasterio/transform.py
@@ -1,12 +1,14 @@
-
import warnings
from affine import Affine
IDENTITY = Affine.identity()
-def tastes_like_gdal(t):
- return t[2] == t[4] == 0.0 and t[1] > 0 and t[5] < 0
+
+def tastes_like_gdal(seq):
+ """Return True if `seq` matches the GDAL geotransform pattern."""
+ return seq[2] == seq[4] == 0.0 and seq[1] > 0 and seq[5] < 0
+
def guard_transform(transform):
"""Return an Affine transformation instance"""
@@ -20,10 +22,4 @@
transform = Affine.from_gdal(*transform)
else:
transform = Affine(*transform)
- a, e = transform.a, transform.e
- if a == 0.0 or e == 0.0:
- raise ValueError(
- "Transform has invalid coefficients a, e: (%f, %f)" % (
- transform.a, transform.e))
return transform
-
| {"golden_diff": "diff --git a/rasterio/transform.py b/rasterio/transform.py\n--- a/rasterio/transform.py\n+++ b/rasterio/transform.py\n@@ -1,12 +1,14 @@\n-\n import warnings\n \n from affine import Affine\n \n IDENTITY = Affine.identity()\n \n-def tastes_like_gdal(t):\n- return t[2] == t[4] == 0.0 and t[1] > 0 and t[5] < 0\n+\n+def tastes_like_gdal(seq):\n+ \"\"\"Return True if `seq` matches the GDAL geotransform pattern.\"\"\"\n+ return seq[2] == seq[4] == 0.0 and seq[1] > 0 and seq[5] < 0\n+\n \n def guard_transform(transform):\n \"\"\"Return an Affine transformation instance\"\"\"\n@@ -20,10 +22,4 @@\n transform = Affine.from_gdal(*transform)\n else:\n transform = Affine(*transform)\n- a, e = transform.a, transform.e\n- if a == 0.0 or e == 0.0:\n- raise ValueError(\n- \"Transform has invalid coefficients a, e: (%f, %f)\" % (\n- transform.a, transform.e))\n return transform\n-\n", "issue": "guard_transform prevents translation of unprojected files\nWhere by translation I mean the pattern\n\n``` python\nwith rasterio.open(file1.tif) as src:\n with rasterio.open(file2.tif, 'w', **src.meta) as dst:\n ...\n```\n\nTraceback snippet:\n\n```\nTraceback (most recent call last):\n ...\n File \"copier.py\", line 15, in to_png\n with rio.open(dst_path, \"w\", **meta) as dst:\n File \"xxx/anaconda/lib/python2.7/site-packages/rasterio/__init__.py\", line 91, in open\n transform = guard_transform(transform)\n File \"xxx/anaconda/lib/python2.7/site-packages/rasterio/transform.py\", line 27, in guard_transform\n transform.a, transform.e))\nValueError: Transform has invalid coefficients a, e: (0.000000, 0.000000)\n```\n\n", "before_files": [{"content": "\nimport warnings\n\nfrom affine import Affine\n\nIDENTITY = Affine.identity()\n\ndef tastes_like_gdal(t):\n return t[2] == t[4] == 0.0 and t[1] > 0 and t[5] < 0\n\ndef guard_transform(transform):\n \"\"\"Return an Affine transformation instance\"\"\"\n if not isinstance(transform, Affine):\n if tastes_like_gdal(transform):\n warnings.warn(\n \"GDAL-style transforms are deprecated and will not \"\n \"be supported in Rasterio 1.0.\",\n FutureWarning,\n stacklevel=2)\n transform = Affine.from_gdal(*transform)\n else:\n transform = Affine(*transform)\n a, e = transform.a, transform.e\n if a == 0.0 or e == 0.0:\n raise ValueError(\n \"Transform has invalid coefficients a, e: (%f, %f)\" % (\n transform.a, transform.e))\n return transform\n\n", "path": "rasterio/transform.py"}], "after_files": [{"content": "import warnings\n\nfrom affine import Affine\n\nIDENTITY = Affine.identity()\n\n\ndef tastes_like_gdal(seq):\n \"\"\"Return True if `seq` matches the GDAL geotransform pattern.\"\"\"\n return seq[2] == seq[4] == 0.0 and seq[1] > 0 and seq[5] < 0\n\n\ndef guard_transform(transform):\n \"\"\"Return an Affine transformation instance\"\"\"\n if not isinstance(transform, Affine):\n if tastes_like_gdal(transform):\n warnings.warn(\n \"GDAL-style transforms are deprecated and will not \"\n \"be supported in Rasterio 1.0.\",\n FutureWarning,\n stacklevel=2)\n transform = Affine.from_gdal(*transform)\n else:\n transform = Affine(*transform)\n return transform\n", "path": "rasterio/transform.py"}]} | 735 | 286 |
gh_patches_debug_50579 | rasdani/github-patches | git_diff | streamlit__streamlit-4452 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unnecessary space symbol in markdown in lib/streamlit/hello/demos.py
### Summary
There are unnecessary space symbols in markdown in lib/streamlit/hello/demos.py
### Steps to reproduce
Code snippet (https://github.com/streamlit/streamlit/blob/develop/lib/streamlit/hello/demos.py):
```
st.markdown(
"""
Streamlit is an open-source app framework built specifically for
Machine Learning and Data Science projects.
**👈 Select a demo from the dropdown on the left** to see some examples
of what Streamlit can do!
### Want to learn more?
- Check out [streamlit.io](https://streamlit.io)
- Jump into our [documentation](https://docs.streamlit.io)
- Ask a question in our [community
forums](https://discuss.streamlit.io)
### See more complex demos
- Use a neural net to [analyze the Udacity Self-driving Car Image
Dataset] (https://github.com/streamlit/demo-self-driving)
- Explore a [New York City rideshare dataset]
(https://github.com/streamlit/demo-uber-nyc-pickups)
"""
)
```
If applicable, please provide the steps we should take to reproduce the bug:
1. run "streamlit hello"
2. open the front page
3. pay attention to the "See more complex demos" section
**Actual behavior:**
Markdown links are broken. There should not be spaces between `[text]` and `(URL)` parts (see code snippet)

**Expected behavior:**
Links are correctly shown
### Is this a regression?
maybe, but it reproduces in a previous released version
### Debug info
- Streamlit version: 1.6.0rc
- Python version: 3.9.7
- Using Conda and PipEnv
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/streamlit/hello/demos.py`
Content:
```
1 # Copyright 2018-2022 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 from typing import Any
17
18
19 def intro():
20 import streamlit as st
21
22 st.sidebar.success("Select a demo above.")
23
24 st.markdown(
25 """
26 Streamlit is an open-source app framework built specifically for
27 Machine Learning and Data Science projects.
28
29 **👈 Select a demo from the dropdown on the left** to see some examples
30 of what Streamlit can do!
31
32 ### Want to learn more?
33
34 - Check out [streamlit.io](https://streamlit.io)
35 - Jump into our [documentation](https://docs.streamlit.io)
36 - Ask a question in our [community
37 forums](https://discuss.streamlit.io)
38
39 ### See more complex demos
40
41 - Use a neural net to [analyze the Udacity Self-driving Car Image
42 Dataset] (https://github.com/streamlit/demo-self-driving)
43 - Explore a [New York City rideshare dataset]
44 (https://github.com/streamlit/demo-uber-nyc-pickups)
45 """
46 )
47
48
49 # Turn off black formatting for this function to present the user with more
50 # compact code.
51 # fmt: off
52 def mapping_demo():
53 import streamlit as st
54 import pandas as pd
55 import pydeck as pdk
56
57 from urllib.error import URLError
58
59 @st.cache
60 def from_data_file(filename):
61 url = (
62 "http://raw.githubusercontent.com/streamlit/"
63 "example-data/master/hello/v1/%s" % filename)
64 return pd.read_json(url)
65
66 try:
67 ALL_LAYERS = {
68 "Bike Rentals": pdk.Layer(
69 "HexagonLayer",
70 data=from_data_file("bike_rental_stats.json"),
71 get_position=["lon", "lat"],
72 radius=200,
73 elevation_scale=4,
74 elevation_range=[0, 1000],
75 extruded=True,
76 ),
77 "Bart Stop Exits": pdk.Layer(
78 "ScatterplotLayer",
79 data=from_data_file("bart_stop_stats.json"),
80 get_position=["lon", "lat"],
81 get_color=[200, 30, 0, 160],
82 get_radius="[exits]",
83 radius_scale=0.05,
84 ),
85 "Bart Stop Names": pdk.Layer(
86 "TextLayer",
87 data=from_data_file("bart_stop_stats.json"),
88 get_position=["lon", "lat"],
89 get_text="name",
90 get_color=[0, 0, 0, 200],
91 get_size=15,
92 get_alignment_baseline="'bottom'",
93 ),
94 "Outbound Flow": pdk.Layer(
95 "ArcLayer",
96 data=from_data_file("bart_path_stats.json"),
97 get_source_position=["lon", "lat"],
98 get_target_position=["lon2", "lat2"],
99 get_source_color=[200, 30, 0, 160],
100 get_target_color=[200, 30, 0, 160],
101 auto_highlight=True,
102 width_scale=0.0001,
103 get_width="outbound",
104 width_min_pixels=3,
105 width_max_pixels=30,
106 ),
107 }
108 st.sidebar.markdown('### Map Layers')
109 selected_layers = [
110 layer for layer_name, layer in ALL_LAYERS.items()
111 if st.sidebar.checkbox(layer_name, True)]
112 if selected_layers:
113 st.pydeck_chart(pdk.Deck(
114 map_style="mapbox://styles/mapbox/light-v9",
115 initial_view_state={"latitude": 37.76,
116 "longitude": -122.4, "zoom": 11, "pitch": 50},
117 layers=selected_layers,
118 ))
119 else:
120 st.error("Please choose at least one layer above.")
121 except URLError as e:
122 st.error("""
123 **This demo requires internet access.**
124
125 Connection error: %s
126 """ % e.reason)
127 # fmt: on
128
129 # Turn off black formatting for this function to present the user with more
130 # compact code.
131 # fmt: off
132
133
134 def fractal_demo():
135 import streamlit as st
136 import numpy as np
137
138 # Interactive Streamlit elements, like these sliders, return their value.
139 # This gives you an extremely simple interaction model.
140 iterations = st.sidebar.slider("Level of detail", 2, 20, 10, 1)
141 separation = st.sidebar.slider("Separation", 0.7, 2.0, 0.7885)
142
143 # Non-interactive elements return a placeholder to their location
144 # in the app. Here we're storing progress_bar to update it later.
145 progress_bar = st.sidebar.progress(0)
146
147 # These two elements will be filled in later, so we create a placeholder
148 # for them using st.empty()
149 frame_text = st.sidebar.empty()
150 image = st.empty()
151
152 m, n, s = 960, 640, 400
153 x = np.linspace(-m / s, m / s, num=m).reshape((1, m))
154 y = np.linspace(-n / s, n / s, num=n).reshape((n, 1))
155
156 for frame_num, a in enumerate(np.linspace(0.0, 4 * np.pi, 100)):
157 # Here were setting value for these two elements.
158 progress_bar.progress(frame_num)
159 frame_text.text("Frame %i/100" % (frame_num + 1))
160
161 # Performing some fractal wizardry.
162 c = separation * np.exp(1j * a)
163 Z = np.tile(x, (n, 1)) + 1j * np.tile(y, (1, m))
164 C = np.full((n, m), c)
165 M: Any = np.full((n, m), True, dtype=bool)
166 N = np.zeros((n, m))
167
168 for i in range(iterations):
169 Z[M] = Z[M] * Z[M] + C[M]
170 M[np.abs(Z) > 2] = False
171 N[M] = i
172
173 # Update the image placeholder by calling the image() function on it.
174 image.image(1.0 - (N / N.max()), use_column_width=True)
175
176 # We clear elements by calling empty on them.
177 progress_bar.empty()
178 frame_text.empty()
179
180 # Streamlit widgets automatically run the script from top to bottom. Since
181 # this button is not connected to any other logic, it just causes a plain
182 # rerun.
183 st.button("Re-run")
184
185
186 # fmt: on
187
188 # Turn off black formatting for this function to present the user with more
189 # compact code.
190 # fmt: off
191 def plotting_demo():
192 import streamlit as st
193 import time
194 import numpy as np
195
196 progress_bar = st.sidebar.progress(0)
197 status_text = st.sidebar.empty()
198 last_rows = np.random.randn(1, 1)
199 chart = st.line_chart(last_rows)
200
201 for i in range(1, 101):
202 new_rows = last_rows[-1, :] + np.random.randn(5, 1).cumsum(axis=0)
203 status_text.text("%i%% Complete" % i)
204 chart.add_rows(new_rows)
205 progress_bar.progress(i)
206 last_rows = new_rows
207 time.sleep(0.05)
208
209 progress_bar.empty()
210
211 # Streamlit widgets automatically run the script from top to bottom. Since
212 # this button is not connected to any other logic, it just causes a plain
213 # rerun.
214 st.button("Re-run")
215
216
217 # fmt: on
218
219 # Turn off black formatting for this function to present the user with more
220 # compact code.
221 # fmt: off
222 def data_frame_demo():
223 import streamlit as st
224 import pandas as pd
225 import altair as alt
226
227 from urllib.error import URLError
228
229 @st.cache
230 def get_UN_data():
231 AWS_BUCKET_URL = "http://streamlit-demo-data.s3-us-west-2.amazonaws.com"
232 df = pd.read_csv(AWS_BUCKET_URL + "/agri.csv.gz")
233 return df.set_index("Region")
234
235 try:
236 df = get_UN_data()
237 countries = st.multiselect(
238 "Choose countries", list(df.index), ["China", "United States of America"]
239 )
240 if not countries:
241 st.error("Please select at least one country.")
242 else:
243 data = df.loc[countries]
244 data /= 1000000.0
245 st.write("### Gross Agricultural Production ($B)", data.sort_index())
246
247 data = data.T.reset_index()
248 data = pd.melt(data, id_vars=["index"]).rename(
249 columns={"index": "year", "value": "Gross Agricultural Product ($B)"}
250 )
251 chart = (
252 alt.Chart(data)
253 .mark_area(opacity=0.3)
254 .encode(
255 x="year:T",
256 y=alt.Y("Gross Agricultural Product ($B):Q", stack=None),
257 color="Region:N",
258 )
259 )
260 st.altair_chart(chart, use_container_width=True)
261 except URLError as e:
262 st.error(
263 """
264 **This demo requires internet access.**
265
266 Connection error: %s
267 """
268 % e.reason
269 )
270
271
272 # fmt: on
273
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/streamlit/hello/demos.py b/lib/streamlit/hello/demos.py
--- a/lib/streamlit/hello/demos.py
+++ b/lib/streamlit/hello/demos.py
@@ -39,9 +39,8 @@
### See more complex demos
- Use a neural net to [analyze the Udacity Self-driving Car Image
- Dataset] (https://github.com/streamlit/demo-self-driving)
- - Explore a [New York City rideshare dataset]
- (https://github.com/streamlit/demo-uber-nyc-pickups)
+ Dataset](https://github.com/streamlit/demo-self-driving)
+ - Explore a [New York City rideshare dataset](https://github.com/streamlit/demo-uber-nyc-pickups)
"""
)
| {"golden_diff": "diff --git a/lib/streamlit/hello/demos.py b/lib/streamlit/hello/demos.py\n--- a/lib/streamlit/hello/demos.py\n+++ b/lib/streamlit/hello/demos.py\n@@ -39,9 +39,8 @@\n ### See more complex demos\n \n - Use a neural net to [analyze the Udacity Self-driving Car Image\n- Dataset] (https://github.com/streamlit/demo-self-driving)\n- - Explore a [New York City rideshare dataset]\n- (https://github.com/streamlit/demo-uber-nyc-pickups)\n+ Dataset](https://github.com/streamlit/demo-self-driving)\n+ - Explore a [New York City rideshare dataset](https://github.com/streamlit/demo-uber-nyc-pickups)\n \"\"\"\n )\n", "issue": "Unnecessary space symbol in markdown in lib/streamlit/hello/demos.py \n### Summary\r\nThere are unnecessary space symbols in markdown in lib/streamlit/hello/demos.py \r\n\r\n### Steps to reproduce\r\n\r\nCode snippet (https://github.com/streamlit/streamlit/blob/develop/lib/streamlit/hello/demos.py):\r\n\r\n```\r\n st.markdown(\r\n \"\"\"\r\n Streamlit is an open-source app framework built specifically for\r\n Machine Learning and Data Science projects.\r\n **\ud83d\udc48 Select a demo from the dropdown on the left** to see some examples\r\n of what Streamlit can do!\r\n ### Want to learn more?\r\n - Check out [streamlit.io](https://streamlit.io)\r\n - Jump into our [documentation](https://docs.streamlit.io)\r\n - Ask a question in our [community\r\n forums](https://discuss.streamlit.io)\r\n ### See more complex demos\r\n - Use a neural net to [analyze the Udacity Self-driving Car Image\r\n Dataset] (https://github.com/streamlit/demo-self-driving)\r\n - Explore a [New York City rideshare dataset]\r\n (https://github.com/streamlit/demo-uber-nyc-pickups)\r\n \"\"\"\r\n )\r\n```\r\nIf applicable, please provide the steps we should take to reproduce the bug:\r\n\r\n1. run \"streamlit hello\"\r\n2. open the front page\r\n3. pay attention to the \"See more complex demos\" section\r\n\r\n**Actual behavior:**\r\nMarkdown links are broken. 
There should not be spaces between `[text]` and `(URL)` parts (see code snippet)\r\n\r\n\r\n**Expected behavior:**\r\nLinks are correctly shown\r\n\r\n### Is this a regression?\r\nmaybe, but it reproduces in a previous released version\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 1.6.0rc\r\n- Python version: 3.9.7\r\n- Using Conda and PipEnv\r\n\n", "before_files": [{"content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom typing import Any\n\n\ndef intro():\n import streamlit as st\n\n st.sidebar.success(\"Select a demo above.\")\n\n st.markdown(\n \"\"\"\n Streamlit is an open-source app framework built specifically for\n Machine Learning and Data Science projects.\n\n **\ud83d\udc48 Select a demo from the dropdown on the left** to see some examples\n of what Streamlit can do!\n\n ### Want to learn more?\n\n - Check out [streamlit.io](https://streamlit.io)\n - Jump into our [documentation](https://docs.streamlit.io)\n - Ask a question in our [community\n forums](https://discuss.streamlit.io)\n\n ### See more complex demos\n\n - Use a neural net to [analyze the Udacity Self-driving Car Image\n Dataset] (https://github.com/streamlit/demo-self-driving)\n - Explore a [New York City rideshare dataset]\n (https://github.com/streamlit/demo-uber-nyc-pickups)\n \"\"\"\n )\n\n\n# Turn off black formatting for this function to present the user with more\n# compact code.\n# fmt: off\ndef mapping_demo():\n import streamlit as st\n import pandas as pd\n import pydeck as pdk\n\n from urllib.error import URLError\n\n @st.cache\n def from_data_file(filename):\n url = (\n \"http://raw.githubusercontent.com/streamlit/\"\n \"example-data/master/hello/v1/%s\" % filename)\n return pd.read_json(url)\n\n try:\n ALL_LAYERS = {\n \"Bike Rentals\": pdk.Layer(\n \"HexagonLayer\",\n data=from_data_file(\"bike_rental_stats.json\"),\n get_position=[\"lon\", \"lat\"],\n radius=200,\n elevation_scale=4,\n elevation_range=[0, 1000],\n extruded=True,\n ),\n \"Bart Stop Exits\": pdk.Layer(\n \"ScatterplotLayer\",\n data=from_data_file(\"bart_stop_stats.json\"),\n get_position=[\"lon\", \"lat\"],\n get_color=[200, 30, 0, 160],\n get_radius=\"[exits]\",\n radius_scale=0.05,\n ),\n \"Bart Stop Names\": pdk.Layer(\n \"TextLayer\",\n data=from_data_file(\"bart_stop_stats.json\"),\n get_position=[\"lon\", \"lat\"],\n get_text=\"name\",\n get_color=[0, 0, 0, 200],\n get_size=15,\n get_alignment_baseline=\"'bottom'\",\n ),\n \"Outbound Flow\": pdk.Layer(\n \"ArcLayer\",\n data=from_data_file(\"bart_path_stats.json\"),\n get_source_position=[\"lon\", \"lat\"],\n get_target_position=[\"lon2\", \"lat2\"],\n get_source_color=[200, 30, 0, 160],\n get_target_color=[200, 30, 0, 160],\n auto_highlight=True,\n width_scale=0.0001,\n get_width=\"outbound\",\n width_min_pixels=3,\n width_max_pixels=30,\n ),\n }\n st.sidebar.markdown('### Map Layers')\n selected_layers = [\n layer for layer_name, layer in ALL_LAYERS.items()\n if st.sidebar.checkbox(layer_name, True)]\n if 
selected_layers:\n st.pydeck_chart(pdk.Deck(\n map_style=\"mapbox://styles/mapbox/light-v9\",\n initial_view_state={\"latitude\": 37.76,\n \"longitude\": -122.4, \"zoom\": 11, \"pitch\": 50},\n layers=selected_layers,\n ))\n else:\n st.error(\"Please choose at least one layer above.\")\n except URLError as e:\n st.error(\"\"\"\n **This demo requires internet access.**\n\n Connection error: %s\n \"\"\" % e.reason)\n# fmt: on\n\n# Turn off black formatting for this function to present the user with more\n# compact code.\n# fmt: off\n\n\ndef fractal_demo():\n import streamlit as st\n import numpy as np\n\n # Interactive Streamlit elements, like these sliders, return their value.\n # This gives you an extremely simple interaction model.\n iterations = st.sidebar.slider(\"Level of detail\", 2, 20, 10, 1)\n separation = st.sidebar.slider(\"Separation\", 0.7, 2.0, 0.7885)\n\n # Non-interactive elements return a placeholder to their location\n # in the app. Here we're storing progress_bar to update it later.\n progress_bar = st.sidebar.progress(0)\n\n # These two elements will be filled in later, so we create a placeholder\n # for them using st.empty()\n frame_text = st.sidebar.empty()\n image = st.empty()\n\n m, n, s = 960, 640, 400\n x = np.linspace(-m / s, m / s, num=m).reshape((1, m))\n y = np.linspace(-n / s, n / s, num=n).reshape((n, 1))\n\n for frame_num, a in enumerate(np.linspace(0.0, 4 * np.pi, 100)):\n # Here were setting value for these two elements.\n progress_bar.progress(frame_num)\n frame_text.text(\"Frame %i/100\" % (frame_num + 1))\n\n # Performing some fractal wizardry.\n c = separation * np.exp(1j * a)\n Z = np.tile(x, (n, 1)) + 1j * np.tile(y, (1, m))\n C = np.full((n, m), c)\n M: Any = np.full((n, m), True, dtype=bool)\n N = np.zeros((n, m))\n\n for i in range(iterations):\n Z[M] = Z[M] * Z[M] + C[M]\n M[np.abs(Z) > 2] = False\n N[M] = i\n\n # Update the image placeholder by calling the image() function on it.\n image.image(1.0 - (N / N.max()), use_column_width=True)\n\n # We clear elements by calling empty on them.\n progress_bar.empty()\n frame_text.empty()\n\n # Streamlit widgets automatically run the script from top to bottom. Since\n # this button is not connected to any other logic, it just causes a plain\n # rerun.\n st.button(\"Re-run\")\n\n\n# fmt: on\n\n# Turn off black formatting for this function to present the user with more\n# compact code.\n# fmt: off\ndef plotting_demo():\n import streamlit as st\n import time\n import numpy as np\n\n progress_bar = st.sidebar.progress(0)\n status_text = st.sidebar.empty()\n last_rows = np.random.randn(1, 1)\n chart = st.line_chart(last_rows)\n\n for i in range(1, 101):\n new_rows = last_rows[-1, :] + np.random.randn(5, 1).cumsum(axis=0)\n status_text.text(\"%i%% Complete\" % i)\n chart.add_rows(new_rows)\n progress_bar.progress(i)\n last_rows = new_rows\n time.sleep(0.05)\n\n progress_bar.empty()\n\n # Streamlit widgets automatically run the script from top to bottom. 
Since\n # this button is not connected to any other logic, it just causes a plain\n # rerun.\n st.button(\"Re-run\")\n\n\n# fmt: on\n\n# Turn off black formatting for this function to present the user with more\n# compact code.\n# fmt: off\ndef data_frame_demo():\n import streamlit as st\n import pandas as pd\n import altair as alt\n\n from urllib.error import URLError\n\n @st.cache\n def get_UN_data():\n AWS_BUCKET_URL = \"http://streamlit-demo-data.s3-us-west-2.amazonaws.com\"\n df = pd.read_csv(AWS_BUCKET_URL + \"/agri.csv.gz\")\n return df.set_index(\"Region\")\n\n try:\n df = get_UN_data()\n countries = st.multiselect(\n \"Choose countries\", list(df.index), [\"China\", \"United States of America\"]\n )\n if not countries:\n st.error(\"Please select at least one country.\")\n else:\n data = df.loc[countries]\n data /= 1000000.0\n st.write(\"### Gross Agricultural Production ($B)\", data.sort_index())\n\n data = data.T.reset_index()\n data = pd.melt(data, id_vars=[\"index\"]).rename(\n columns={\"index\": \"year\", \"value\": \"Gross Agricultural Product ($B)\"}\n )\n chart = (\n alt.Chart(data)\n .mark_area(opacity=0.3)\n .encode(\n x=\"year:T\",\n y=alt.Y(\"Gross Agricultural Product ($B):Q\", stack=None),\n color=\"Region:N\",\n )\n )\n st.altair_chart(chart, use_container_width=True)\n except URLError as e:\n st.error(\n \"\"\"\n **This demo requires internet access.**\n\n Connection error: %s\n \"\"\"\n % e.reason\n )\n\n\n# fmt: on\n", "path": "lib/streamlit/hello/demos.py"}], "after_files": [{"content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom typing import Any\n\n\ndef intro():\n import streamlit as st\n\n st.sidebar.success(\"Select a demo above.\")\n\n st.markdown(\n \"\"\"\n Streamlit is an open-source app framework built specifically for\n Machine Learning and Data Science projects.\n\n **\ud83d\udc48 Select a demo from the dropdown on the left** to see some examples\n of what Streamlit can do!\n\n ### Want to learn more?\n\n - Check out [streamlit.io](https://streamlit.io)\n - Jump into our [documentation](https://docs.streamlit.io)\n - Ask a question in our [community\n forums](https://discuss.streamlit.io)\n\n ### See more complex demos\n\n - Use a neural net to [analyze the Udacity Self-driving Car Image\n Dataset](https://github.com/streamlit/demo-self-driving)\n - Explore a [New York City rideshare dataset](https://github.com/streamlit/demo-uber-nyc-pickups)\n \"\"\"\n )\n\n\n# Turn off black formatting for this function to present the user with more\n# compact code.\n# fmt: off\ndef mapping_demo():\n import streamlit as st\n import pandas as pd\n import pydeck as pdk\n\n from urllib.error import URLError\n\n @st.cache\n def from_data_file(filename):\n url = (\n \"http://raw.githubusercontent.com/streamlit/\"\n \"example-data/master/hello/v1/%s\" % filename)\n return pd.read_json(url)\n\n try:\n ALL_LAYERS = {\n \"Bike Rentals\": pdk.Layer(\n \"HexagonLayer\",\n data=from_data_file(\"bike_rental_stats.json\"),\n 
get_position=[\"lon\", \"lat\"],\n radius=200,\n elevation_scale=4,\n elevation_range=[0, 1000],\n extruded=True,\n ),\n \"Bart Stop Exits\": pdk.Layer(\n \"ScatterplotLayer\",\n data=from_data_file(\"bart_stop_stats.json\"),\n get_position=[\"lon\", \"lat\"],\n get_color=[200, 30, 0, 160],\n get_radius=\"[exits]\",\n radius_scale=0.05,\n ),\n \"Bart Stop Names\": pdk.Layer(\n \"TextLayer\",\n data=from_data_file(\"bart_stop_stats.json\"),\n get_position=[\"lon\", \"lat\"],\n get_text=\"name\",\n get_color=[0, 0, 0, 200],\n get_size=15,\n get_alignment_baseline=\"'bottom'\",\n ),\n \"Outbound Flow\": pdk.Layer(\n \"ArcLayer\",\n data=from_data_file(\"bart_path_stats.json\"),\n get_source_position=[\"lon\", \"lat\"],\n get_target_position=[\"lon2\", \"lat2\"],\n get_source_color=[200, 30, 0, 160],\n get_target_color=[200, 30, 0, 160],\n auto_highlight=True,\n width_scale=0.0001,\n get_width=\"outbound\",\n width_min_pixels=3,\n width_max_pixels=30,\n ),\n }\n st.sidebar.markdown('### Map Layers')\n selected_layers = [\n layer for layer_name, layer in ALL_LAYERS.items()\n if st.sidebar.checkbox(layer_name, True)]\n if selected_layers:\n st.pydeck_chart(pdk.Deck(\n map_style=\"mapbox://styles/mapbox/light-v9\",\n initial_view_state={\"latitude\": 37.76,\n \"longitude\": -122.4, \"zoom\": 11, \"pitch\": 50},\n layers=selected_layers,\n ))\n else:\n st.error(\"Please choose at least one layer above.\")\n except URLError as e:\n st.error(\"\"\"\n **This demo requires internet access.**\n\n Connection error: %s\n \"\"\" % e.reason)\n# fmt: on\n\n# Turn off black formatting for this function to present the user with more\n# compact code.\n# fmt: off\n\n\ndef fractal_demo():\n import streamlit as st\n import numpy as np\n\n # Interactive Streamlit elements, like these sliders, return their value.\n # This gives you an extremely simple interaction model.\n iterations = st.sidebar.slider(\"Level of detail\", 2, 20, 10, 1)\n separation = st.sidebar.slider(\"Separation\", 0.7, 2.0, 0.7885)\n\n # Non-interactive elements return a placeholder to their location\n # in the app. Here we're storing progress_bar to update it later.\n progress_bar = st.sidebar.progress(0)\n\n # These two elements will be filled in later, so we create a placeholder\n # for them using st.empty()\n frame_text = st.sidebar.empty()\n image = st.empty()\n\n m, n, s = 960, 640, 400\n x = np.linspace(-m / s, m / s, num=m).reshape((1, m))\n y = np.linspace(-n / s, n / s, num=n).reshape((n, 1))\n\n for frame_num, a in enumerate(np.linspace(0.0, 4 * np.pi, 100)):\n # Here were setting value for these two elements.\n progress_bar.progress(frame_num)\n frame_text.text(\"Frame %i/100\" % (frame_num + 1))\n\n # Performing some fractal wizardry.\n c = separation * np.exp(1j * a)\n Z = np.tile(x, (n, 1)) + 1j * np.tile(y, (1, m))\n C = np.full((n, m), c)\n M: Any = np.full((n, m), True, dtype=bool)\n N = np.zeros((n, m))\n\n for i in range(iterations):\n Z[M] = Z[M] * Z[M] + C[M]\n M[np.abs(Z) > 2] = False\n N[M] = i\n\n # Update the image placeholder by calling the image() function on it.\n image.image(1.0 - (N / N.max()), use_column_width=True)\n\n # We clear elements by calling empty on them.\n progress_bar.empty()\n frame_text.empty()\n\n # Streamlit widgets automatically run the script from top to bottom. 
Since\n # this button is not connected to any other logic, it just causes a plain\n # rerun.\n st.button(\"Re-run\")\n\n\n# fmt: on\n\n# Turn off black formatting for this function to present the user with more\n# compact code.\n# fmt: off\ndef plotting_demo():\n import streamlit as st\n import time\n import numpy as np\n\n progress_bar = st.sidebar.progress(0)\n status_text = st.sidebar.empty()\n last_rows = np.random.randn(1, 1)\n chart = st.line_chart(last_rows)\n\n for i in range(1, 101):\n new_rows = last_rows[-1, :] + np.random.randn(5, 1).cumsum(axis=0)\n status_text.text(\"%i%% Complete\" % i)\n chart.add_rows(new_rows)\n progress_bar.progress(i)\n last_rows = new_rows\n time.sleep(0.05)\n\n progress_bar.empty()\n\n # Streamlit widgets automatically run the script from top to bottom. Since\n # this button is not connected to any other logic, it just causes a plain\n # rerun.\n st.button(\"Re-run\")\n\n\n# fmt: on\n\n# Turn off black formatting for this function to present the user with more\n# compact code.\n# fmt: off\ndef data_frame_demo():\n import streamlit as st\n import pandas as pd\n import altair as alt\n\n from urllib.error import URLError\n\n @st.cache\n def get_UN_data():\n AWS_BUCKET_URL = \"http://streamlit-demo-data.s3-us-west-2.amazonaws.com\"\n df = pd.read_csv(AWS_BUCKET_URL + \"/agri.csv.gz\")\n return df.set_index(\"Region\")\n\n try:\n df = get_UN_data()\n countries = st.multiselect(\n \"Choose countries\", list(df.index), [\"China\", \"United States of America\"]\n )\n if not countries:\n st.error(\"Please select at least one country.\")\n else:\n data = df.loc[countries]\n data /= 1000000.0\n st.write(\"### Gross Agricultural Production ($B)\", data.sort_index())\n\n data = data.T.reset_index()\n data = pd.melt(data, id_vars=[\"index\"]).rename(\n columns={\"index\": \"year\", \"value\": \"Gross Agricultural Product ($B)\"}\n )\n chart = (\n alt.Chart(data)\n .mark_area(opacity=0.3)\n .encode(\n x=\"year:T\",\n y=alt.Y(\"Gross Agricultural Product ($B):Q\", stack=None),\n color=\"Region:N\",\n )\n )\n st.altair_chart(chart, use_container_width=True)\n except URLError as e:\n st.error(\n \"\"\"\n **This demo requires internet access.**\n\n Connection error: %s\n \"\"\"\n % e.reason\n )\n\n\n# fmt: on\n", "path": "lib/streamlit/hello/demos.py"}]} | 3,616 | 172 |
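The rendering difference behind the patch above can be reproduced with a small sketch (the URL is a placeholder):

```python
import streamlit as st

# CommonMark requires [label](url) with no space in between; the extra space
# makes Markdown render the text literally instead of as a hyperlink.
st.markdown("[broken link] (https://example.com)")   # shows up as plain text
st.markdown("[working link](https://example.com)")   # shows up as a link
```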
gh_patches_debug_9370 | rasdani/github-patches | git_diff | liqd__a4-product-360 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[f2f module] wording
Here are the wordings:
1
Edit face-to-face participation information
Informationen zur Vor-Ort-Beteiligung bearbeiten
2
Title
Titel
3
Highlighted Info
Hervorgehobene Information
3a (Hilfetext)
Highlight important information like the time or location of your face-to-face event
Zur Hervorhebung von wichtigen Informationen wie Ort oder Zeitraum der Vor-Ort-Beteiligung
4
Description
Beschreibung
5
Face-to-Face Information
Informationen Vor-Ort-Beteiligung
6
Face-to-Face Participation
Vor-Ort-Beteiligung
7
With this module you can provide information about events or phases for face-to-face participation. No online participation is possible in this module.
Mit diesem Modul können Informationen über Veranstaltungen und Phasen zur Vor-Ort-Beteiligung bereitgestellt werden. In diesem Modul ist keine Online-Beteiligung möglich.
8
Phase 1: Provide information about face-to-face participation events
Phase 1: Informationen zur Vor-Ort-Beteiligung bereitstellen


--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `liqd_product/apps/dashboard/blueprints.py`
Content:
```
1 from django.utils.translation import ugettext_lazy as _
2
3 from adhocracy4.dashboard.blueprints import ProjectBlueprint
4 from meinberlin.apps.activities import phases as activities_phases
5 from meinberlin.apps.budgeting import phases as budgeting_phases
6 from meinberlin.apps.documents import phases as documents_phases
7 from meinberlin.apps.ideas import phases as ideas_phases
8 from meinberlin.apps.mapideas import phases as mapideas_phases
9 from meinberlin.apps.polls import phases as poll_phases
10
11 blueprints = [
12 ('brainstorming',
13 ProjectBlueprint(
14 title=_('Brainstorming'),
15 description=_(
16 'Collect first ideas for a specific topic and comment on them.'
17 ),
18 content=[
19 ideas_phases.CollectPhase(),
20 ],
21 image='images/brainstorming.svg',
22 settings_model=None,
23 )),
24 ('map-brainstorming',
25 ProjectBlueprint(
26 title=_('Spatial Brainstorming'),
27 description=_(
28 'Collect location specific ideas for a topic and comment on them.'
29 ),
30 content=[
31 mapideas_phases.CollectPhase(),
32 ],
33 image='images/map-brainstorming.svg',
34 settings_model=('a4maps', 'AreaSettings'),
35 )),
36 ('map-idea-collection',
37 ProjectBlueprint(
38 title=_('Spatial Idea Collection'),
39 description=_(
40 'Collect location specific ideas that can be rated and commented.'
41 ),
42 content=[
43 mapideas_phases.CollectPhase(),
44 mapideas_phases.RatingPhase()
45 ],
46 image='images/map-idea-collection.svg',
47 settings_model=('a4maps', 'AreaSettings'),
48 )),
49 ('agenda-setting',
50 ProjectBlueprint(
51 title=_('Agenda Setting'),
52 description=_(
53 'With Agenda-Setting it’s possible to identify topics and to '
54 'define mission statements. Afterwards anyone can comment and '
55 'rate on different topics.'
56 ),
57 content=[
58 ideas_phases.CollectPhase(),
59 ideas_phases.RatingPhase(),
60 ],
61 image='images/agenda-setting.svg',
62 settings_model=None,
63 )),
64 ('text-review',
65 ProjectBlueprint(
66 title=_('Text Review'),
67 description=_(
68 'In the text-review it’s possible to structure draft texts '
69 'that can be commented.'
70 ),
71 content=[
72 documents_phases.CommentPhase(),
73 ],
74 image='images/text-review.svg',
75 settings_model=None,
76 )),
77 ('poll',
78 ProjectBlueprint(
79 title=_('Poll'),
80 description=_(
81 'Create a poll with multiple questions and possible answers. '
82 'Anyone can cast votes and comment on the poll.'
83 ),
84 content=[
85 poll_phases.VotingPhase(),
86 ],
87 image='images/poll.svg',
88 settings_model=None,
89 )),
90 ('participatory-budgeting',
91 ProjectBlueprint(
92 title=_('Participatory budgeting'),
93 description=_(
94 'With participatory-budgeting it’s possible to make proposals '
95 'with budget specifications and locate them. Anyone can comment '
96 'and rate on different proposals.'),
97 content=[
98 budgeting_phases.RequestPhase()],
99 image='images/participatory-budgeting.svg',
100 settings_model=('a4maps', 'AreaSettings'),
101 )),
102 ('facetoface',
103 ProjectBlueprint(
104 title=_('Face to Face Participation'),
105 description=_(
106 'Share info about a face to face participation event.'
107 ),
108 content=[
109 activities_phases.FaceToFacePhase(),
110 ],
111 image='images/facetoface.svg',
112 settings_model=None,
113 )),
114 ]
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/liqd_product/apps/dashboard/blueprints.py b/liqd_product/apps/dashboard/blueprints.py
--- a/liqd_product/apps/dashboard/blueprints.py
+++ b/liqd_product/apps/dashboard/blueprints.py
@@ -101,9 +101,11 @@
)),
('facetoface',
ProjectBlueprint(
- title=_('Face to Face Participation'),
+ title=_('Face-to-Face Participation'),
description=_(
- 'Share info about a face to face participation event.'
+ 'With this module you can provide information about events or '
+ 'phases for face-to-face participation. No online participation '
+ 'is possible in this module.'
),
content=[
activities_phases.FaceToFacePhase(),
| {"golden_diff": "diff --git a/liqd_product/apps/dashboard/blueprints.py b/liqd_product/apps/dashboard/blueprints.py\n--- a/liqd_product/apps/dashboard/blueprints.py\n+++ b/liqd_product/apps/dashboard/blueprints.py\n@@ -101,9 +101,11 @@\n )),\n ('facetoface',\n ProjectBlueprint(\n- title=_('Face to Face Participation'),\n+ title=_('Face-to-Face Participation'),\n description=_(\n- 'Share info about a face to face participation event.'\n+ 'With this module you can provide information about events or '\n+ 'phases for face-to-face participation. No online participation '\n+ 'is possible in this module.'\n ),\n content=[\n activities_phases.FaceToFacePhase(),\n", "issue": "[f2f module] wording\nHere are the wordings:\r\n\r\n1 \r\nEdit face-to-face participation information\r\nInformationen zur Vor-Ort-Beteiligung bearbeiten\r\n\r\n2\r\nTitle\r\nTitel\r\n\r\n3\r\nHighlighted Info\r\nHervorgehobene Information\r\n\r\n3a (Hilfetext)\r\nHighlight important information like the time or location of your face-to-face event\r\nZur Hervorhebung von wichtigen Informationen wie Ort oder Zeitraum der Vor-Ort-Beteiligung\r\n\r\n4\r\nDescription\r\nBeschreibung\r\n\r\n5\r\nFace-to-Face Information\r\nInformationen Vor-Ort-Beteiligung\r\n\r\n6\r\nFace-to-Face Participation\r\nVor-Ort-Beteiligung\r\n\r\n7\r\nWith this module you can provide information about events or phases for face-to-face participation. No online participation is possible in this module.\r\nMit diesem Modul k\u00f6nnen Informationen \u00fcber Veranstaltungen und Phasen zur Vor-Ort-Beteiligung bereitgestellt werden. In diesem Modul ist keine Online-Beteiligung m\u00f6glich.\r\n\r\n8\r\nPhase 1: Provide information about face-to-face participation events\r\nPhase 1: Informationen zur Vor-Ort-Beteiligung bereitstellen\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.blueprints import ProjectBlueprint\nfrom meinberlin.apps.activities import phases as activities_phases\nfrom meinberlin.apps.budgeting import phases as budgeting_phases\nfrom meinberlin.apps.documents import phases as documents_phases\nfrom meinberlin.apps.ideas import phases as ideas_phases\nfrom meinberlin.apps.mapideas import phases as mapideas_phases\nfrom meinberlin.apps.polls import phases as poll_phases\n\nblueprints = [\n ('brainstorming',\n ProjectBlueprint(\n title=_('Brainstorming'),\n description=_(\n 'Collect first ideas for a specific topic and comment on them.'\n ),\n content=[\n ideas_phases.CollectPhase(),\n ],\n image='images/brainstorming.svg',\n settings_model=None,\n )),\n ('map-brainstorming',\n ProjectBlueprint(\n title=_('Spatial Brainstorming'),\n description=_(\n 'Collect location specific ideas for a topic and comment on them.'\n ),\n content=[\n mapideas_phases.CollectPhase(),\n ],\n image='images/map-brainstorming.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('map-idea-collection',\n ProjectBlueprint(\n title=_('Spatial Idea Collection'),\n description=_(\n 'Collect location specific ideas that can be rated and commented.'\n ),\n content=[\n mapideas_phases.CollectPhase(),\n mapideas_phases.RatingPhase()\n ],\n image='images/map-idea-collection.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('agenda-setting',\n ProjectBlueprint(\n title=_('Agenda Setting'),\n description=_(\n 'With Agenda-Setting it\u2019s possible to identify topics and to '\n 'define mission statements. 
Afterwards anyone can comment and '\n 'rate on different topics.'\n ),\n content=[\n ideas_phases.CollectPhase(),\n ideas_phases.RatingPhase(),\n ],\n image='images/agenda-setting.svg',\n settings_model=None,\n )),\n ('text-review',\n ProjectBlueprint(\n title=_('Text Review'),\n description=_(\n 'In the text-review it\u2019s possible to structure draft texts '\n 'that can be commented.'\n ),\n content=[\n documents_phases.CommentPhase(),\n ],\n image='images/text-review.svg',\n settings_model=None,\n )),\n ('poll',\n ProjectBlueprint(\n title=_('Poll'),\n description=_(\n 'Create a poll with multiple questions and possible answers. '\n 'Anyone can cast votes and comment on the poll.'\n ),\n content=[\n poll_phases.VotingPhase(),\n ],\n image='images/poll.svg',\n settings_model=None,\n )),\n ('participatory-budgeting',\n ProjectBlueprint(\n title=_('Participatory budgeting'),\n description=_(\n 'With participatory-budgeting it\u2019s possible to make proposals '\n 'with budget specifications and locate them. Anyone can comment '\n 'and rate on different proposals.'),\n content=[\n budgeting_phases.RequestPhase()],\n image='images/participatory-budgeting.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('facetoface',\n ProjectBlueprint(\n title=_('Face to Face Participation'),\n description=_(\n 'Share info about a face to face participation event.'\n ),\n content=[\n activities_phases.FaceToFacePhase(),\n ],\n image='images/facetoface.svg',\n settings_model=None,\n )),\n]\n", "path": "liqd_product/apps/dashboard/blueprints.py"}], "after_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.blueprints import ProjectBlueprint\nfrom meinberlin.apps.activities import phases as activities_phases\nfrom meinberlin.apps.budgeting import phases as budgeting_phases\nfrom meinberlin.apps.documents import phases as documents_phases\nfrom meinberlin.apps.ideas import phases as ideas_phases\nfrom meinberlin.apps.mapideas import phases as mapideas_phases\nfrom meinberlin.apps.polls import phases as poll_phases\n\nblueprints = [\n ('brainstorming',\n ProjectBlueprint(\n title=_('Brainstorming'),\n description=_(\n 'Collect first ideas for a specific topic and comment on them.'\n ),\n content=[\n ideas_phases.CollectPhase(),\n ],\n image='images/brainstorming.svg',\n settings_model=None,\n )),\n ('map-brainstorming',\n ProjectBlueprint(\n title=_('Spatial Brainstorming'),\n description=_(\n 'Collect location specific ideas for a topic and comment on them.'\n ),\n content=[\n mapideas_phases.CollectPhase(),\n ],\n image='images/map-brainstorming.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('map-idea-collection',\n ProjectBlueprint(\n title=_('Spatial Idea Collection'),\n description=_(\n 'Collect location specific ideas that can be rated and commented.'\n ),\n content=[\n mapideas_phases.CollectPhase(),\n mapideas_phases.RatingPhase()\n ],\n image='images/map-idea-collection.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('agenda-setting',\n ProjectBlueprint(\n title=_('Agenda Setting'),\n description=_(\n 'With Agenda-Setting it\u2019s possible to identify topics and to '\n 'define mission statements. 
Afterwards anyone can comment and '\n 'rate on different topics.'\n ),\n content=[\n ideas_phases.CollectPhase(),\n ideas_phases.RatingPhase(),\n ],\n image='images/agenda-setting.svg',\n settings_model=None,\n )),\n ('text-review',\n ProjectBlueprint(\n title=_('Text Review'),\n description=_(\n 'In the text-review it\u2019s possible to structure draft texts '\n 'that can be commented.'\n ),\n content=[\n documents_phases.CommentPhase(),\n ],\n image='images/text-review.svg',\n settings_model=None,\n )),\n ('poll',\n ProjectBlueprint(\n title=_('Poll'),\n description=_(\n 'Create a poll with multiple questions and possible answers. '\n 'Anyone can cast votes and comment on the poll.'\n ),\n content=[\n poll_phases.VotingPhase(),\n ],\n image='images/poll.svg',\n settings_model=None,\n )),\n ('participatory-budgeting',\n ProjectBlueprint(\n title=_('Participatory budgeting'),\n description=_(\n 'With participatory-budgeting it\u2019s possible to make proposals '\n 'with budget specifications and locate them. Anyone can comment '\n 'and rate on different proposals.'),\n content=[\n budgeting_phases.RequestPhase()],\n image='images/participatory-budgeting.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('facetoface',\n ProjectBlueprint(\n title=_('Face-to-Face Participation'),\n description=_(\n 'With this module you can provide information about events or '\n 'phases for face-to-face participation. No online participation '\n 'is possible in this module.'\n ),\n content=[\n activities_phases.FaceToFacePhase(),\n ],\n image='images/facetoface.svg',\n settings_model=None,\n )),\n]\n", "path": "liqd_product/apps/dashboard/blueprints.py"}]} | 1,631 | 159 |
gh_patches_debug_41660 | rasdani/github-patches | git_diff | vaexio__vaex-1085 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG-REPORT] Vaex cannot open more than 816 parquet files on s3
**Description**
I try to open many parquet partitions. Vaex opens the first 816, then fails with the following error:
> ERROR:MainThread:vaex:error opening 's3://my/path/part-00817-db9aee77-5b44-4571-857b-ea2879609450.c000.snappy.parquet'
> Traceback (most recent call last):
> File "/home/myuser/p/VS_Code_Projects/SkillExtractor/test_vaex_multiprocessing.py", line 32, in <module>
> df = vaex.concat([df, vaex.open(parquet_file)])
> File "/home/myuser/anaconda3/lib/python3.8/site-packages/vaex/__init__.py", line 211, in open
> ds = vaex.dataset.open(path, fs_options=fs_options, *args, **kwargs)
> File "/home/myuser/anaconda3/lib/python3.8/site-packages/vaex/dataset.py", line 42, in open
> return opener.open(path, *args, **kwargs)
> File "/home/myuser/anaconda3/lib/python3.8/site-packages/vaex/arrow/opener.py", line 29, in open
> return open_parquet(path, *args, **kwargs)
> File "/home/myuser/anaconda3/lib/python3.8/site-packages/vaex/arrow/dataset.py", line 175, in open_parquet
> arrow_ds = pyarrow.dataset.dataset(path, filesystem=file_system)
> File "/home/myuser/anaconda3/lib/python3.8/site-packages/pyarrow/dataset.py", line 671, in dataset
> return _filesystem_dataset(source, **kwargs)
> File "/home/myuser/anaconda3/lib/python3.8/site-packages/pyarrow/dataset.py", line 428, in _filesystem_dataset
> fs, paths_or_selector = _ensure_single_source(source, filesystem)
> File "/home/myuser/anaconda3/lib/python3.8/site-packages/pyarrow/dataset.py", line 395, in _ensure_single_source
> file_info = filesystem.get_file_info([path])[0]
> File "pyarrow/_fs.pyx", line 434, in pyarrow._fs.FileSystem.get_file_info
> File "pyarrow/error.pxi", line 122, in pyarrow.lib.pyarrow_internal_check_status
> File "pyarrow/error.pxi", line 99, in pyarrow.lib.check_status
> OSError: When getting information for key 'demand/positions-20211123/place_country_code=US/part-00817-db9aee77-5b44-4571-857b-ea2879609450.c000.snappy.parquet' in bucket 'tn-datafeed-dev': AWS Error [code 99]: curlCode: 6, Couldn't resolve host name
I tried to open the files in two different ways; in both cases I got the same error.
Way 1:
`df = vaex.open("s3://some/path/*.parquet")`
Way 2:
```
s3 = boto3.resource('s3')
my_bucket = s3.Bucket('my_bucket_name')
parquet_files = []
for my_bucket_object in my_bucket.objects.filter(Prefix='some/prefix'):
if my_bucket_object.key.endswith(".parquet"):
parquet_files.append("s3://" + my_bucket._name + "/" + my_bucket_object.key)
df = None
for parquet_file in parquet_files:
if not df:
df = vaex.open(parquet_file)
else:
try:
df = vaex.concat([df, vaex.open(parquet_file)])
except Exception as e:
print(f"WARNING: The file {parquet_file} could not be read!")
print(e)
```
Everything is fine until 816 files have been opened. I tried to start with parquet #817 instead, and it works until a certain number of files have been opened again.
**Software information**
- Vaex version (`import vaex; vaex.__version__)`: {'vaex-core': '4.0.0a5'}
- Vaex was installed via: pip
- OS: AWS Linux v2
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packages/vaex-core/vaex/file/s3arrow.py`
Content:
```
1 import os
2 import io
3 from .s3 import patch_profile
4
5 import pyarrow as pa
6 import pyarrow.fs
7
8 from . import split_options, FileProxy, split_scheme
9 from .cache import FileSystemHandlerCached
10
11
12 def glob(path, fs_options={}):
13 from .s3 import glob
14 return glob(path, fs_options)
15
16
17 def parse(path, fs_options, for_arrow=False):
18 path, fs_options = split_options(path, fs_options)
19 path = path.replace('arrow+s3://', 's3://')
20 scheme, _ = split_scheme(path)
21 assert scheme == 's3'
22 # anon is for backwards compatibility
23 fs_options['anonymous'] = (fs_options.pop('anon', None) in [True, 'true', 'True', '1']) or (fs_options.pop('anonymous', None) in [True, 'true', 'True', '1'])
24 fs_options = patch_profile(fs_options)
25 use_cache = fs_options.pop('cache', 'true') in [True, 'true', 'True', '1']
26 if 'region' not in fs_options:
27 # we use this to get the default region
28 file_system, path = pa.fs.FileSystem.from_uri(path)
29 # Remove this line for testing purposes to fake not having s3 support
30 # raise pyarrow.lib.ArrowNotImplementedError('FOR TESTING')
31 fs_options['region'] = file_system.region
32 fs = pa.fs.S3FileSystem(**fs_options)
33 if use_cache:
34 fs = FileSystemHandlerCached(fs, scheme='s3', for_arrow=for_arrow)
35 if for_arrow:
36 fs = pyarrow.fs.PyFileSystem(fs)
37 return fs, path
38
```
Path: `packages/vaex-core/vaex/file/s3fs.py`
Content:
```
1 import vaex.utils
2 import pyarrow.fs
3 s3fs = vaex.utils.optional_import('s3fs')
4
5 import vaex.file.cache
6 from . import split_options, split_scheme
7 from .cache import FileSystemHandlerCached
8 from .s3 import patch_profile
9
10
11 def translate_options(fs_options):
12 # translate options of arrow to s3fs
13 fs_options = fs_options.copy()
14 not_supported = {
15 'role_arn', 'session_name', 'external_id', 'load_frequency', 'scheme', 'background_writes', 'profile', 'profile_name'
16 }
17 for key in not_supported:
18 if key in fs_options:
19 warnings.warn(f'The option {key} is not supported using s3fs instead of arrow, so it will be ignored')
20 fs_options.pop(key)
21
22 # top level
23 mapping = {
24 'anonymous': 'anon',
25 }
26 for key in list(fs_options):
27 if key in mapping:
28 fs_options[mapping[key]] = fs_options.pop(key)
29
30 # client kwargs
31 mapping = {
32 'access_key': 'aws_access_key_id',
33 'secret_key': 'aws_secret_access_key',
34 'session_token': 'aws_session_token',
35 'region': 'region_name',
36 'endpoint_override': 'endpoint_url',
37 }
38 fs_options['client_kwargs'] = fs_options.get('client_kwargs', {})
39 for key in list(fs_options):
40 if key in mapping:
41 fs_options['client_kwargs'][mapping[key]] = fs_options.pop(key)
42 return fs_options
43
44
45 def glob(path, fs_options={}):
46 if '?' in path:
47 __, query = path[:path.index('?')], path[path.index('?'):]
48 else:
49 query = ''
50 scheme, _ = split_scheme(path)
51 path = path.replace('s3fs://', 's3://')
52 path, fs_options = split_options(path, fs_options)
53 # anon is for backwards compatibility
54 if 'cache' in fs_options:
55 del fs_options['cache']
56 # standardize value, and make bool
57 anon = (fs_options.pop('anon', None) in [True, 'true', 'True', '1']) or (fs_options.pop('anonymous', None) in [True, 'true', 'True', '1'])
58 fs_options = patch_profile(fs_options)
59 fs_options = translate_options(fs_options)
60 s3 = s3fs.S3FileSystem(anon=anon, **fs_options)
61 return [f'{scheme}://' + k + query for k in s3.glob(path)]
62
63
64 def parse(path, fs_options, for_arrow=False):
65 path = path.replace('fsspec+s3://', 's3://')
66 path, fs_options = split_options(path, fs_options)
67 scheme, path = split_scheme(path)
68 use_cache = fs_options.pop('cache', 'true') in [True, 'true', 'True', '1']
69 # standardize value, and make bool
70 anon = (fs_options.pop('anon', None) in [True, 'true', 'True', '1']) or (fs_options.pop('anonymous', None) in [True, 'true', 'True', '1'])
71 fs_options = patch_profile(fs_options)
72 fs_options = translate_options(fs_options)
73 s3 = s3fs.S3FileSystem(anon=anon, default_fill_cache=False, **fs_options)
74 fs = pyarrow.fs.FSSpecHandler(s3)
75 if use_cache:
76 fs = FileSystemHandlerCached(fs, scheme='s3', for_arrow=for_arrow)
77 if for_arrow:
78 fs = pyarrow.fs.PyFileSystem(fs)
79 return fs, path
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packages/vaex-core/vaex/file/s3arrow.py b/packages/vaex-core/vaex/file/s3arrow.py
--- a/packages/vaex-core/vaex/file/s3arrow.py
+++ b/packages/vaex-core/vaex/file/s3arrow.py
@@ -7,6 +7,11 @@
from . import split_options, FileProxy, split_scheme
from .cache import FileSystemHandlerCached
+from ..cache import fingerprint
+
+
+region_cache = {}
+fs_arrow_cache = {}
def glob(path, fs_options={}):
@@ -15,21 +20,36 @@
def parse(path, fs_options, for_arrow=False):
+ # Remove this line for testing purposes to fake not having s3 support
+ # raise pyarrow.lib.ArrowNotImplementedError('FOR TESTING')
path, fs_options = split_options(path, fs_options)
path = path.replace('arrow+s3://', 's3://')
- scheme, _ = split_scheme(path)
+ fullpath = path
+ scheme, path = split_scheme(path)
assert scheme == 's3'
+
# anon is for backwards compatibility
fs_options['anonymous'] = (fs_options.pop('anon', None) in [True, 'true', 'True', '1']) or (fs_options.pop('anonymous', None) in [True, 'true', 'True', '1'])
fs_options = patch_profile(fs_options)
use_cache = fs_options.pop('cache', 'true') in [True, 'true', 'True', '1']
+ bucket = path.split('/')[0]
if 'region' not in fs_options:
- # we use this to get the default region
- file_system, path = pa.fs.FileSystem.from_uri(path)
- # Remove this line for testing purposes to fake not having s3 support
- # raise pyarrow.lib.ArrowNotImplementedError('FOR TESTING')
- fs_options['region'] = file_system.region
- fs = pa.fs.S3FileSystem(**fs_options)
+ # cache region
+ if bucket not in region_cache:
+ # we use this to get the default region
+ file_system, _ = pa.fs.FileSystem.from_uri(fullpath)
+ region = file_system.region
+ region_cache[bucket] = region
+ else:
+ region = region_cache[bucket]
+ fs_options['region'] = region
+ # bucket and options make up a unique key
+ key = fingerprint(bucket, fs_options)
+ if key not in fs_arrow_cache:
+ fs = pa.fs.S3FileSystem(**fs_options)
+ fs_arrow_cache[key] = fs
+ else:
+ fs = fs_arrow_cache[key]
if use_cache:
fs = FileSystemHandlerCached(fs, scheme='s3', for_arrow=for_arrow)
if for_arrow:
diff --git a/packages/vaex-core/vaex/file/s3fs.py b/packages/vaex-core/vaex/file/s3fs.py
--- a/packages/vaex-core/vaex/file/s3fs.py
+++ b/packages/vaex-core/vaex/file/s3fs.py
@@ -6,6 +6,10 @@
from . import split_options, split_scheme
from .cache import FileSystemHandlerCached
from .s3 import patch_profile
+from ..cache import fingerprint
+
+
+fs_cache = {}
def translate_options(fs_options):
@@ -70,7 +74,14 @@
anon = (fs_options.pop('anon', None) in [True, 'true', 'True', '1']) or (fs_options.pop('anonymous', None) in [True, 'true', 'True', '1'])
fs_options = patch_profile(fs_options)
fs_options = translate_options(fs_options)
- s3 = s3fs.S3FileSystem(anon=anon, default_fill_cache=False, **fs_options)
+
+ bucket = path.split('/')[0]
+ key = fingerprint(bucket, fs_options)
+ if key not in fs_cache:
+ s3 = s3fs.S3FileSystem(anon=anon, default_fill_cache=False, **fs_options)
+ fs_cache[key] = s3
+ else:
+ s3 = fs_cache[key]
fs = pyarrow.fs.FSSpecHandler(s3)
if use_cache:
fs = FileSystemHandlerCached(fs, scheme='s3', for_arrow=for_arrow)
| {"golden_diff": "diff --git a/packages/vaex-core/vaex/file/s3arrow.py b/packages/vaex-core/vaex/file/s3arrow.py\n--- a/packages/vaex-core/vaex/file/s3arrow.py\n+++ b/packages/vaex-core/vaex/file/s3arrow.py\n@@ -7,6 +7,11 @@\n \n from . import split_options, FileProxy, split_scheme\n from .cache import FileSystemHandlerCached\n+from ..cache import fingerprint\n+\n+\n+region_cache = {}\n+fs_arrow_cache = {}\n \n \n def glob(path, fs_options={}):\n@@ -15,21 +20,36 @@\n \n \n def parse(path, fs_options, for_arrow=False):\n+ # Remove this line for testing purposes to fake not having s3 support\n+ # raise pyarrow.lib.ArrowNotImplementedError('FOR TESTING')\n path, fs_options = split_options(path, fs_options)\n path = path.replace('arrow+s3://', 's3://')\n- scheme, _ = split_scheme(path)\n+ fullpath = path\n+ scheme, path = split_scheme(path)\n assert scheme == 's3'\n+\n # anon is for backwards compatibility\n fs_options['anonymous'] = (fs_options.pop('anon', None) in [True, 'true', 'True', '1']) or (fs_options.pop('anonymous', None) in [True, 'true', 'True', '1'])\n fs_options = patch_profile(fs_options)\n use_cache = fs_options.pop('cache', 'true') in [True, 'true', 'True', '1']\n+ bucket = path.split('/')[0]\n if 'region' not in fs_options:\n- # we use this to get the default region\n- file_system, path = pa.fs.FileSystem.from_uri(path)\n- # Remove this line for testing purposes to fake not having s3 support\n- # raise pyarrow.lib.ArrowNotImplementedError('FOR TESTING')\n- fs_options['region'] = file_system.region\n- fs = pa.fs.S3FileSystem(**fs_options)\n+ # cache region\n+ if bucket not in region_cache:\n+ # we use this to get the default region\n+ file_system, _ = pa.fs.FileSystem.from_uri(fullpath)\n+ region = file_system.region\n+ region_cache[bucket] = region\n+ else:\n+ region = region_cache[bucket]\n+ fs_options['region'] = region\n+ # bucket and options make up a unique key\n+ key = fingerprint(bucket, fs_options)\n+ if key not in fs_arrow_cache:\n+ fs = pa.fs.S3FileSystem(**fs_options)\n+ fs_arrow_cache[key] = fs\n+ else:\n+ fs = fs_arrow_cache[key]\n if use_cache:\n fs = FileSystemHandlerCached(fs, scheme='s3', for_arrow=for_arrow)\n if for_arrow:\ndiff --git a/packages/vaex-core/vaex/file/s3fs.py b/packages/vaex-core/vaex/file/s3fs.py\n--- a/packages/vaex-core/vaex/file/s3fs.py\n+++ b/packages/vaex-core/vaex/file/s3fs.py\n@@ -6,6 +6,10 @@\n from . import split_options, split_scheme\n from .cache import FileSystemHandlerCached\n from .s3 import patch_profile\n+from ..cache import fingerprint\n+\n+\n+fs_cache = {}\n \n \n def translate_options(fs_options):\n@@ -70,7 +74,14 @@\n anon = (fs_options.pop('anon', None) in [True, 'true', 'True', '1']) or (fs_options.pop('anonymous', None) in [True, 'true', 'True', '1'])\n fs_options = patch_profile(fs_options)\n fs_options = translate_options(fs_options)\n- s3 = s3fs.S3FileSystem(anon=anon, default_fill_cache=False, **fs_options)\n+\n+ bucket = path.split('/')[0]\n+ key = fingerprint(bucket, fs_options)\n+ if key not in fs_cache:\n+ s3 = s3fs.S3FileSystem(anon=anon, default_fill_cache=False, **fs_options)\n+ fs_cache[key] = s3\n+ else:\n+ s3 = fs_cache[key]\n fs = pyarrow.fs.FSSpecHandler(s3)\n if use_cache:\n fs = FileSystemHandlerCached(fs, scheme='s3', for_arrow=for_arrow)\n", "issue": "[BUG-REPORT] Vaex cannot open more than 816 parquet files on s3\n**Description**\r\n\r\nI try to open many parquet partitions. 
Vaex opens first 816, then fails with the following error:\r\n\r\n> ERROR:MainThread:vaex:error opening 's3://my/path/part-00817-db9aee77-5b44-4571-857b-ea2879609450.c000.snappy.parquet'\r\n> Traceback (most recent call last):\r\n> File \"/home/myuser/p/VS_Code_Projects/SkillExtractor/test_vaex_multiprocessing.py\", line 32, in <module>\r\n> df = vaex.concat([df, vaex.open(parquet_file)])\r\n> File \"/home/myuser/anaconda3/lib/python3.8/site-packages/vaex/__init__.py\", line 211, in open\r\n> ds = vaex.dataset.open(path, fs_options=fs_options, *args, **kwargs)\r\n> File \"/home/myuser/anaconda3/lib/python3.8/site-packages/vaex/dataset.py\", line 42, in open\r\n> return opener.open(path, *args, **kwargs)\r\n> File \"/home/myuser/anaconda3/lib/python3.8/site-packages/vaex/arrow/opener.py\", line 29, in open\r\n> return open_parquet(path, *args, **kwargs)\r\n> File \"/home/myuser/anaconda3/lib/python3.8/site-packages/vaex/arrow/dataset.py\", line 175, in open_parquet\r\n> arrow_ds = pyarrow.dataset.dataset(path, filesystem=file_system)\r\n> File \"/home/myuser/anaconda3/lib/python3.8/site-packages/pyarrow/dataset.py\", line 671, in dataset\r\n> return _filesystem_dataset(source, **kwargs)\r\n> File \"/home/myuser/anaconda3/lib/python3.8/site-packages/pyarrow/dataset.py\", line 428, in _filesystem_dataset\r\n> fs, paths_or_selector = _ensure_single_source(source, filesystem)\r\n> File \"/home/myuser/anaconda3/lib/python3.8/site-packages/pyarrow/dataset.py\", line 395, in _ensure_single_source\r\n> file_info = filesystem.get_file_info([path])[0]\r\n> File \"pyarrow/_fs.pyx\", line 434, in pyarrow._fs.FileSystem.get_file_info\r\n> File \"pyarrow/error.pxi\", line 122, in pyarrow.lib.pyarrow_internal_check_status\r\n> File \"pyarrow/error.pxi\", line 99, in pyarrow.lib.check_status\r\n> OSError: When getting information for key 'demand/positions-20211123/place_country_code=US/part-00817-db9aee77-5b44-4571-857b-ea2879609450.c000.snappy.parquet' in bucket 'tn-datafeed-dev': AWS Error [code 99]: curlCode: 6, Couldn't resolve host name\r\n\r\nTried to open in two different ways, in both cases the same error.\r\n\r\nWay 1:\r\n\r\n`df = vaex.open(\"s3://some/path/*.parquet\")`\r\n\r\nWay 2:\r\n\r\n```\r\ns3 = boto3.resource('s3')\r\nmy_bucket = s3.Bucket('my_bucket_name')\r\n\r\nparquet_files = []\r\nfor my_bucket_object in my_bucket.objects.filter(Prefix='some/prefix'):\r\n if my_bucket_object.key.endswith(\".parquet\"):\r\n parquet_files.append(\"s3://\" + my_bucket._name + \"/\" + my_bucket_object.key)\r\n\r\ndf = None\r\nfor parquet_file in parquet_files:\r\n if not df:\r\n df = vaex.open(parquet_file)\r\n else:\r\n try:\r\n df = vaex.concat([df, vaex.open(parquet_file)])\r\n except Exception as e:\r\n print(f\"WARNING: The file {parquet_file} could not be read!\")\r\n print(e)\r\n```\r\n\r\nEverything is fine until it reaches 816 files open. I tried to start with parquet # 817 and it works until another certain number of files is open.\r\n\r\n**Software information**\r\n - Vaex version (`import vaex; vaex.__version__)`: {'vaex-core': '4.0.0a5'}\r\n - Vaex was installed via: pip\r\n - OS: AWS Linux v2\r\n\r\n\r\n\n", "before_files": [{"content": "import os\nimport io\nfrom .s3 import patch_profile\n\nimport pyarrow as pa\nimport pyarrow.fs\n\nfrom . 
import split_options, FileProxy, split_scheme\nfrom .cache import FileSystemHandlerCached\n\n\ndef glob(path, fs_options={}):\n from .s3 import glob\n return glob(path, fs_options)\n\n\ndef parse(path, fs_options, for_arrow=False):\n path, fs_options = split_options(path, fs_options)\n path = path.replace('arrow+s3://', 's3://')\n scheme, _ = split_scheme(path)\n assert scheme == 's3'\n # anon is for backwards compatibility\n fs_options['anonymous'] = (fs_options.pop('anon', None) in [True, 'true', 'True', '1']) or (fs_options.pop('anonymous', None) in [True, 'true', 'True', '1'])\n fs_options = patch_profile(fs_options)\n use_cache = fs_options.pop('cache', 'true') in [True, 'true', 'True', '1']\n if 'region' not in fs_options:\n # we use this to get the default region\n file_system, path = pa.fs.FileSystem.from_uri(path)\n # Remove this line for testing purposes to fake not having s3 support\n # raise pyarrow.lib.ArrowNotImplementedError('FOR TESTING')\n fs_options['region'] = file_system.region\n fs = pa.fs.S3FileSystem(**fs_options)\n if use_cache:\n fs = FileSystemHandlerCached(fs, scheme='s3', for_arrow=for_arrow)\n if for_arrow:\n fs = pyarrow.fs.PyFileSystem(fs)\n return fs, path\n", "path": "packages/vaex-core/vaex/file/s3arrow.py"}, {"content": "import vaex.utils\nimport pyarrow.fs\ns3fs = vaex.utils.optional_import('s3fs')\n\nimport vaex.file.cache\nfrom . import split_options, split_scheme\nfrom .cache import FileSystemHandlerCached\nfrom .s3 import patch_profile\n\n\ndef translate_options(fs_options):\n # translate options of arrow to s3fs\n fs_options = fs_options.copy()\n not_supported = {\n 'role_arn', 'session_name', 'external_id', 'load_frequency', 'scheme', 'background_writes', 'profile', 'profile_name'\n }\n for key in not_supported:\n if key in fs_options:\n warnings.warn(f'The option {key} is not supported using s3fs instead of arrow, so it will be ignored')\n fs_options.pop(key)\n\n # top level\n mapping = {\n 'anonymous': 'anon',\n }\n for key in list(fs_options):\n if key in mapping:\n fs_options[mapping[key]] = fs_options.pop(key)\n\n # client kwargs\n mapping = {\n 'access_key': 'aws_access_key_id',\n 'secret_key': 'aws_secret_access_key',\n 'session_token': 'aws_session_token',\n 'region': 'region_name',\n 'endpoint_override': 'endpoint_url',\n }\n fs_options['client_kwargs'] = fs_options.get('client_kwargs', {})\n for key in list(fs_options):\n if key in mapping:\n fs_options['client_kwargs'][mapping[key]] = fs_options.pop(key)\n return fs_options\n\n\ndef glob(path, fs_options={}):\n if '?' 
in path:\n __, query = path[:path.index('?')], path[path.index('?'):]\n else:\n query = ''\n scheme, _ = split_scheme(path)\n path = path.replace('s3fs://', 's3://')\n path, fs_options = split_options(path, fs_options)\n # anon is for backwards compatibility\n if 'cache' in fs_options:\n del fs_options['cache']\n # standardize value, and make bool\n anon = (fs_options.pop('anon', None) in [True, 'true', 'True', '1']) or (fs_options.pop('anonymous', None) in [True, 'true', 'True', '1'])\n fs_options = patch_profile(fs_options)\n fs_options = translate_options(fs_options)\n s3 = s3fs.S3FileSystem(anon=anon, **fs_options)\n return [f'{scheme}://' + k + query for k in s3.glob(path)]\n\n\ndef parse(path, fs_options, for_arrow=False):\n path = path.replace('fsspec+s3://', 's3://')\n path, fs_options = split_options(path, fs_options)\n scheme, path = split_scheme(path)\n use_cache = fs_options.pop('cache', 'true') in [True, 'true', 'True', '1']\n # standardize value, and make bool\n anon = (fs_options.pop('anon', None) in [True, 'true', 'True', '1']) or (fs_options.pop('anonymous', None) in [True, 'true', 'True', '1'])\n fs_options = patch_profile(fs_options)\n fs_options = translate_options(fs_options)\n s3 = s3fs.S3FileSystem(anon=anon, default_fill_cache=False, **fs_options)\n fs = pyarrow.fs.FSSpecHandler(s3)\n if use_cache:\n fs = FileSystemHandlerCached(fs, scheme='s3', for_arrow=for_arrow)\n if for_arrow:\n fs = pyarrow.fs.PyFileSystem(fs)\n return fs, path\n", "path": "packages/vaex-core/vaex/file/s3fs.py"}], "after_files": [{"content": "import os\nimport io\nfrom .s3 import patch_profile\n\nimport pyarrow as pa\nimport pyarrow.fs\n\nfrom . import split_options, FileProxy, split_scheme\nfrom .cache import FileSystemHandlerCached\nfrom ..cache import fingerprint\n\n\nregion_cache = {}\nfs_arrow_cache = {}\n\n\ndef glob(path, fs_options={}):\n from .s3 import glob\n return glob(path, fs_options)\n\n\ndef parse(path, fs_options, for_arrow=False):\n # Remove this line for testing purposes to fake not having s3 support\n # raise pyarrow.lib.ArrowNotImplementedError('FOR TESTING')\n path, fs_options = split_options(path, fs_options)\n path = path.replace('arrow+s3://', 's3://')\n fullpath = path\n scheme, path = split_scheme(path)\n assert scheme == 's3'\n\n # anon is for backwards compatibility\n fs_options['anonymous'] = (fs_options.pop('anon', None) in [True, 'true', 'True', '1']) or (fs_options.pop('anonymous', None) in [True, 'true', 'True', '1'])\n fs_options = patch_profile(fs_options)\n use_cache = fs_options.pop('cache', 'true') in [True, 'true', 'True', '1']\n bucket = path.split('/')[0]\n if 'region' not in fs_options:\n # cache region\n if bucket not in region_cache:\n # we use this to get the default region\n file_system, _ = pa.fs.FileSystem.from_uri(fullpath)\n region = file_system.region\n region_cache[bucket] = region\n else:\n region = region_cache[bucket]\n fs_options['region'] = region\n # bucket and options make up a unique key\n key = fingerprint(bucket, fs_options)\n if key not in fs_arrow_cache:\n fs = pa.fs.S3FileSystem(**fs_options)\n fs_arrow_cache[key] = fs\n else:\n fs = fs_arrow_cache[key]\n if use_cache:\n fs = FileSystemHandlerCached(fs, scheme='s3', for_arrow=for_arrow)\n if for_arrow:\n fs = pyarrow.fs.PyFileSystem(fs)\n return fs, path\n", "path": "packages/vaex-core/vaex/file/s3arrow.py"}, {"content": "import vaex.utils\nimport pyarrow.fs\ns3fs = vaex.utils.optional_import('s3fs')\n\nimport vaex.file.cache\nfrom . 
import split_options, split_scheme\nfrom .cache import FileSystemHandlerCached\nfrom .s3 import patch_profile\nfrom ..cache import fingerprint\n\n\nfs_cache = {}\n\n\ndef translate_options(fs_options):\n # translate options of arrow to s3fs\n fs_options = fs_options.copy()\n not_supported = {\n 'role_arn', 'session_name', 'external_id', 'load_frequency', 'scheme', 'background_writes', 'profile', 'profile_name'\n }\n for key in not_supported:\n if key in fs_options:\n warnings.warn(f'The option {key} is not supported using s3fs instead of arrow, so it will be ignored')\n fs_options.pop(key)\n\n # top level\n mapping = {\n 'anonymous': 'anon',\n }\n for key in list(fs_options):\n if key in mapping:\n fs_options[mapping[key]] = fs_options.pop(key)\n\n # client kwargs\n mapping = {\n 'access_key': 'aws_access_key_id',\n 'secret_key': 'aws_secret_access_key',\n 'session_token': 'aws_session_token',\n 'region': 'region_name',\n 'endpoint_override': 'endpoint_url',\n }\n fs_options['client_kwargs'] = fs_options.get('client_kwargs', {})\n for key in list(fs_options):\n if key in mapping:\n fs_options['client_kwargs'][mapping[key]] = fs_options.pop(key)\n return fs_options\n\n\ndef glob(path, fs_options={}):\n if '?' in path:\n __, query = path[:path.index('?')], path[path.index('?'):]\n else:\n query = ''\n scheme, _ = split_scheme(path)\n path = path.replace('s3fs://', 's3://')\n path, fs_options = split_options(path, fs_options)\n # anon is for backwards compatibility\n if 'cache' in fs_options:\n del fs_options['cache']\n # standardize value, and make bool\n anon = (fs_options.pop('anon', None) in [True, 'true', 'True', '1']) or (fs_options.pop('anonymous', None) in [True, 'true', 'True', '1'])\n fs_options = patch_profile(fs_options)\n fs_options = translate_options(fs_options)\n s3 = s3fs.S3FileSystem(anon=anon, **fs_options)\n return [f'{scheme}://' + k + query for k in s3.glob(path)]\n\n\ndef parse(path, fs_options, for_arrow=False):\n path = path.replace('fsspec+s3://', 's3://')\n path, fs_options = split_options(path, fs_options)\n scheme, path = split_scheme(path)\n use_cache = fs_options.pop('cache', 'true') in [True, 'true', 'True', '1']\n # standardize value, and make bool\n anon = (fs_options.pop('anon', None) in [True, 'true', 'True', '1']) or (fs_options.pop('anonymous', None) in [True, 'true', 'True', '1'])\n fs_options = patch_profile(fs_options)\n fs_options = translate_options(fs_options)\n\n bucket = path.split('/')[0]\n key = fingerprint(bucket, fs_options)\n if key not in fs_cache:\n s3 = s3fs.S3FileSystem(anon=anon, default_fill_cache=False, **fs_options)\n fs_cache[key] = s3\n else:\n s3 = fs_cache[key]\n fs = pyarrow.fs.FSSpecHandler(s3)\n if use_cache:\n fs = FileSystemHandlerCached(fs, scheme='s3', for_arrow=for_arrow)\n if for_arrow:\n fs = pyarrow.fs.PyFileSystem(fs)\n return fs, path\n", "path": "packages/vaex-core/vaex/file/s3fs.py"}]} | 2,647 | 966 |
gh_patches_debug_4726 | rasdani/github-patches | git_diff | Mailu__Mailu-2437 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
admin container on master branch fails with mysql
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
## Before you open your issue
- [*] Check if no issue or pull-request for this already exists.
- [*] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [*] You understand `Mailu` is made by volunteers in their **free time** — be concise, civil and accept that delays can occur.
- [*] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [*] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).
```
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}admin:${MAILU_VERSION:-master}
```
## Description
The last 2-3 admin builds of the master branch fail on mysql with this error:
ModuleNotFoundError: No module named 'MySQLdb'
This is not a production environment, just wanted to report.
My mysql server is as follows, but with the admin:1.9 image the setup works properly, so it isn't a mysql issue.
mysql Ver 15.1 Distrib 10.8.4-MariaDB, for debian-linux-gnu (x86_64) using EditLine wrapper
## Replication Steps
Steps for replicating your issue
## Expected behaviour
## Logs
````markdown
```
admin_1 | Traceback (most recent call last):
admin_1 | File "/usr/lib/python3.9/site-packages/sqlalchemy/util/_collections.py", line 1008, in __call__
admin_1 | return self.registry[key]
admin_1 | KeyError: <greenlet.greenlet object at 0x7f231b79da90 (otid=0x7f231b623340) current active started main>
admin_1 | During handling of the above exception, another exception occurred:
admin_1 | Traceback (most recent call last):
admin_1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 2073, in wsgi_app
admin_1 | response = self.full_dispatch_request()
admin_1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 1518, in full_dispatch_request
admin_1 | rv = self.handle_user_exception(e)
admin_1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 1516, in full_dispatch_request
admin_1 | rv = self.dispatch_request()
admin_1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 1502, in dispatch_request
admin_1 | return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)
admin_1 | File "/app/mailu/sso/views/base.py", line 49, in login
admin_1 | return flask.render_template('login.html', form=form, fields=fields)
admin_1 | File "/usr/lib/python3.9/site-packages/flask/templating.py", line 146, in render_template
admin_1 | ctx.app.update_template_context(context)
admin_1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 756, in update_template_context
admin_1 | context.update(func())
admin_1 | File "/app/mailu/__init__.py", line 54, in inject_defaults
admin_1 | signup_domains = models.Domain.query.filter_by(signup_enabled=True).all()
admin_1 | File "/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py", line 552, in __get__
admin_1 | return type.query_class(mapper, session=self.sa.session())
admin_1 | File "/usr/lib/python3.9/site-packages/sqlalchemy/orm/scoping.py", line 47, in __call__
admin_1 | sess = self.registry()
admin_1 | File "/usr/lib/python3.9/site-packages/sqlalchemy/util/_collections.py", line 1010, in __call__
admin_1 | return self.registry.setdefault(key, self.createfunc())
admin_1 | File "/usr/lib/python3.9/site-packages/sqlalchemy/orm/session.py", line 4172, in __call__
admin_1 | return self.class_(**local_kw)
admin_1 | File "/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py", line 176, in __init__
admin_1 | bind = options.pop('bind', None) or db.engine
admin_1 | File "/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py", line 998, in engine
admin_1 | return self.get_engine()
admin_1 | File "/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py", line 1017, in get_engine
admin_1 | return connector.get_engine()
admin_1 | File "/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py", line 594, in get_engine
admin_1 | self._engine = rv = self._sa.create_engine(sa_url, options)
admin_1 | File "/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py", line 1027, in create_engine
admin_1 | return sqlalchemy.create_engine(sa_url, **engine_opts)
admin_1 | File "<string>", line 2, in create_engine
admin_1 | File "/usr/lib/python3.9/site-packages/sqlalchemy/util/deprecations.py", line 298, in warned
admin_1 | return fn(*args, **kwargs)
admin_1 | File "/usr/lib/python3.9/site-packages/sqlalchemy/engine/create.py", line 560, in create_engine
admin_1 | dbapi = dialect_cls.dbapi(**dbapi_args)
admin_1 | File "/usr/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/mysqldb.py", line 167, in dbapi
admin_1 | return __import__("MySQLdb")
admin_1 | ModuleNotFoundError: No module named 'MySQLdb'
```
````
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/admin/mailu/configuration.py`
Content:
```
1 import os
2
3 from datetime import timedelta
4 from socrate import system
5 import ipaddress
6
7 DEFAULT_CONFIG = {
8 # Specific to the admin UI
9 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',
10 'BABEL_DEFAULT_LOCALE': 'en',
11 'BABEL_DEFAULT_TIMEZONE': 'UTC',
12 'BOOTSTRAP_SERVE_LOCAL': True,
13 'RATELIMIT_STORAGE_URL': '',
14 'QUOTA_STORAGE_URL': '',
15 'DEBUG': False,
16 'DOMAIN_REGISTRATION': False,
17 'TEMPLATES_AUTO_RELOAD': True,
18 'MEMORY_SESSIONS': False,
19 # Database settings
20 'DB_FLAVOR': None,
21 'DB_USER': 'mailu',
22 'DB_PW': None,
23 'DB_HOST': 'database',
24 'DB_NAME': 'mailu',
25 'SQLITE_DATABASE_FILE':'data/main.db',
26 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',
27 'SQLALCHEMY_TRACK_MODIFICATIONS': False,
28 # Statistics management
29 'INSTANCE_ID_PATH': '/data/instance',
30 'STATS_ENDPOINT': '19.{}.stats.mailu.io',
31 # Common configuration variables
32 'SECRET_KEY': 'changeMe',
33 'DOMAIN': 'mailu.io',
34 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',
35 'POSTMASTER': 'postmaster',
36 'WILDCARD_SENDERS': '',
37 'TLS_FLAVOR': 'cert',
38 'INBOUND_TLS_ENFORCE': False,
39 'DEFER_ON_TLS_ERROR': True,
40 'AUTH_RATELIMIT_IP': '60/hour',
41 'AUTH_RATELIMIT_IP_V4_MASK': 24,
42 'AUTH_RATELIMIT_IP_V6_MASK': 56,
43 'AUTH_RATELIMIT_USER': '100/day',
44 'AUTH_RATELIMIT_EXEMPTION': '',
45 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,
46 'DISABLE_STATISTICS': False,
47 # Mail settings
48 'DMARC_RUA': None,
49 'DMARC_RUF': None,
50 'WELCOME': False,
51 'WELCOME_SUBJECT': 'Dummy welcome topic',
52 'WELCOME_BODY': 'Dummy welcome body',
53 'DKIM_SELECTOR': 'dkim',
54 'DKIM_PATH': '/dkim/{domain}.{selector}.key',
55 'DEFAULT_QUOTA': 1000000000,
56 'MESSAGE_RATELIMIT': '200/day',
57 'MESSAGE_RATELIMIT_EXEMPTION': '',
58 'RECIPIENT_DELIMITER': '',
59 # Web settings
60 'SITENAME': 'Mailu',
61 'WEBSITE': 'https://mailu.io',
62 'ADMIN' : 'none',
63 'WEB_ADMIN': '/admin',
64 'WEB_WEBMAIL': '/webmail',
65 'WEBMAIL': 'none',
66 'RECAPTCHA_PUBLIC_KEY': '',
67 'RECAPTCHA_PRIVATE_KEY': '',
68 'LOGO_URL': None,
69 'LOGO_BACKGROUND': None,
70 # Advanced settings
71 'LOG_LEVEL': 'WARNING',
72 'SESSION_KEY_BITS': 128,
73 'SESSION_TIMEOUT': 3600,
74 'PERMANENT_SESSION_LIFETIME': 30*24*3600,
75 'SESSION_COOKIE_SECURE': True,
76 'CREDENTIAL_ROUNDS': 12,
77 'TZ': 'Etc/UTC',
78 # Host settings
79 'HOST_IMAP': 'imap',
80 'HOST_LMTP': 'imap:2525',
81 'HOST_POP3': 'imap',
82 'HOST_SMTP': 'smtp',
83 'HOST_AUTHSMTP': 'smtp',
84 'HOST_ADMIN': 'admin',
85 'HOST_WEBMAIL': 'webmail',
86 'HOST_WEBDAV': 'webdav:5232',
87 'HOST_REDIS': 'redis',
88 'HOST_FRONT': 'front',
89 'SUBNET': '192.168.203.0/24',
90 'SUBNET6': None,
91 'POD_ADDRESS_RANGE': None
92 }
93
94 class ConfigManager:
95 """ Naive configuration manager that uses environment only
96 """
97
98 DB_TEMPLATES = {
99 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',
100 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
101 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'
102 }
103
104 def __init__(self):
105 self.config = dict()
106
107 def get_host_address(self, name):
108 # if MYSERVICE_ADDRESS is defined, use this
109 if f'{name}_ADDRESS' in os.environ:
110 return os.environ.get(f'{name}_ADDRESS')
111 # otherwise use the host name and resolve it
112 return system.resolve_address(self.config[f'HOST_{name}'])
113
114 def resolve_hosts(self):
115 for key in ['IMAP', 'POP3', 'AUTHSMTP', 'SMTP', 'REDIS']:
116 self.config[f'{key}_ADDRESS'] = self.get_host_address(key)
117 if self.config['WEBMAIL'] != 'none':
118 self.config['WEBMAIL_ADDRESS'] = self.get_host_address('WEBMAIL')
119
120 def __get_env(self, key, value):
121 key_file = key + "_FILE"
122 if key_file in os.environ:
123 with open(os.environ.get(key_file)) as file:
124 value_from_file = file.read()
125 return value_from_file.strip()
126 else:
127 return os.environ.get(key, value)
128
129 def __coerce_value(self, value):
130 if isinstance(value, str) and value.lower() in ('true','yes'):
131 return True
132 elif isinstance(value, str) and value.lower() in ('false', 'no'):
133 return False
134 return value
135
136 def init_app(self, app):
137 # get current app config
138 self.config.update(app.config)
139 # get environment variables
140 self.config.update({
141 key: self.__coerce_value(self.__get_env(key, value))
142 for key, value in DEFAULT_CONFIG.items()
143 })
144 self.resolve_hosts()
145
146 # automatically set the sqlalchemy string
147 if self.config['DB_FLAVOR']:
148 template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]
149 self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)
150
151 self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/2'
152 self.config['QUOTA_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/1'
153 self.config['SESSION_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/3'
154 self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'
155 self.config['SESSION_COOKIE_HTTPONLY'] = True
156 self.config['SESSION_PERMANENT'] = True
157 self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])
158 self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])
159 self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])
160 self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])
161 hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]
162 self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)
163 self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])
164 self.config['HOSTNAMES'] = ','.join(hostnames)
165 self.config['HOSTNAME'] = hostnames[0]
166
167 # update the app config
168 app.config.update(self.config)
169
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -98,7 +98,7 @@
DB_TEMPLATES = {
'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',
'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
- 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'
+ 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'
}
def __init__(self):
| {"golden_diff": "diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py\n--- a/core/admin/mailu/configuration.py\n+++ b/core/admin/mailu/configuration.py\n@@ -98,7 +98,7 @@\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n- 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'\n+ 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'\n }\n \n def __init__(self):\n", "issue": "admin container on master branch fails with mysql\nThank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.\r\nFor **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).\r\n\r\nTo be able to help you best, we need some more information.\r\n\r\n## Before you open your issue\r\n- [*] Check if no issue or pull-request for this already exists.\r\n- [*] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)\r\n- [*] You understand `Mailu` is made by volunteers in their **free time** \u2014 be conscise, civil and accept that delays can occur.\r\n- [*] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.\r\n\r\n## Environment & Versions\r\n### Environment\r\n - [*] docker-compose\r\n - [ ] kubernetes\r\n - [ ] docker swarm\r\n\r\n### Versions\r\nTo find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).\r\n```\r\nimage: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}admin:${MAILU_VERSION:-master}\r\n```\r\n\r\n## Description\r\nThe last 2-3 admin builds of the master branch fail on mysql with this error:\r\nModuleNotFoundError: No module named 'MySQLdb'\r\nThis is not a production environment, just wanted to report.\r\nMy mysql server is as follows, but with the admin:1.9 image the setup works properly, so it isn't a mysql issue.\r\nmysql Ver 15.1 Distrib 10.8.4-MariaDB, for debian-linux-gnu (x86_64) using EditLine wrapper\r\n\r\n\r\n## Replication Steps\r\nSteps for replicating your issue\r\n\r\n## Expected behaviour\r\n\r\n## Logs\r\n\r\n````markdown\r\n```\r\nadmin_1 | Traceback (most recent call last):\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/sqlalchemy/util/_collections.py\", line 1008, in __call__\r\nadmin_1 | return self.registry[key]\r\nadmin_1 | KeyError: <greenlet.greenlet object at 0x7f231b79da90 (otid=0x7f231b623340) current active started main>\r\nadmin_1 | During handling of the above exception, another exception occurred:\r\nadmin_1 | Traceback (most recent call last):\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/flask/app.py\", line 2073, in wsgi_app\r\nadmin_1 | response = self.full_dispatch_request()\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/flask/app.py\", line 1518, in full_dispatch_request\r\nadmin_1 | rv = self.handle_user_exception(e)\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/flask/app.py\", line 1516, in full_dispatch_request\r\nadmin_1 | rv = self.dispatch_request()\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/flask/app.py\", line 1502, in dispatch_request\r\nadmin_1 | return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\r\nadmin_1 | File \"/app/mailu/sso/views/base.py\", line 49, in login\r\nadmin_1 | return flask.render_template('login.html', 
form=form, fields=fields)\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/flask/templating.py\", line 146, in render_template\r\nadmin_1 | ctx.app.update_template_context(context)\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/flask/app.py\", line 756, in update_template_context\r\nadmin_1 | context.update(func())\r\nadmin_1 | File \"/app/mailu/__init__.py\", line 54, in inject_defaults\r\nadmin_1 | signup_domains = models.Domain.query.filter_by(signup_enabled=True).all()\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py\", line 552, in __get__\r\nadmin_1 | return type.query_class(mapper, session=self.sa.session())\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/sqlalchemy/orm/scoping.py\", line 47, in __call__\r\nadmin_1 | sess = self.registry()\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/sqlalchemy/util/_collections.py\", line 1010, in __call__\r\nadmin_1 | return self.registry.setdefault(key, self.createfunc())\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/sqlalchemy/orm/session.py\", line 4172, in __call__\r\nadmin_1 | return self.class_(**local_kw)\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py\", line 176, in __init__\r\nadmin_1 | bind = options.pop('bind', None) or db.engine\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py\", line 998, in engine\r\nadmin_1 | return self.get_engine()\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py\", line 1017, in get_engine\r\nadmin_1 | return connector.get_engine()\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py\", line 594, in get_engine\r\nadmin_1 | self._engine = rv = self._sa.create_engine(sa_url, options)\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py\", line 1027, in create_engine\r\nadmin_1 | return sqlalchemy.create_engine(sa_url, **engine_opts)\r\nadmin_1 | File \"<string>\", line 2, in create_engine\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/sqlalchemy/util/deprecations.py\", line 298, in warned\r\nadmin_1 | return fn(*args, **kwargs)\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/sqlalchemy/engine/create.py\", line 560, in create_engine\r\nadmin_1 | dbapi = dialect_cls.dbapi(**dbapi_args)\r\nadmin_1 | File \"/usr/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/mysqldb.py\", line 167, in dbapi\r\nadmin_1 | return __import__(\"MySQLdb\")\r\nadmin_1 | ModuleNotFoundError: No module named 'MySQLdb'\r\n\r\n```\r\n````\r\n\n", "before_files": [{"content": "import os\n\nfrom datetime import timedelta\nfrom socrate import system\nimport ipaddress\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'QUOTA_STORAGE_URL': '',\n 'DEBUG': False,\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE':'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '19.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 
'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'WILDCARD_SENDERS': '',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n 'DEFER_ON_TLS_ERROR': True,\n 'AUTH_RATELIMIT_IP': '60/hour',\n 'AUTH_RATELIMIT_IP_V4_MASK': 24,\n 'AUTH_RATELIMIT_IP_V6_MASK': 56,\n 'AUTH_RATELIMIT_USER': '100/day',\n 'AUTH_RATELIMIT_EXEMPTION': '',\n 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n 'MESSAGE_RATELIMIT': '200/day',\n 'MESSAGE_RATELIMIT_EXEMPTION': '',\n 'RECIPIENT_DELIMITER': '',\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'ADMIN' : 'none',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n 'LOGO_URL': None,\n 'LOGO_BACKGROUND': None,\n # Advanced settings\n 'LOG_LEVEL': 'WARNING',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_TIMEOUT': 3600,\n 'PERMANENT_SESSION_LIFETIME': 30*24*3600,\n 'SESSION_COOKIE_SECURE': True,\n 'CREDENTIAL_ROUNDS': 12,\n 'TZ': 'Etc/UTC',\n # Host settings\n 'HOST_IMAP': 'imap',\n 'HOST_LMTP': 'imap:2525',\n 'HOST_POP3': 'imap',\n 'HOST_SMTP': 'smtp',\n 'HOST_AUTHSMTP': 'smtp',\n 'HOST_ADMIN': 'admin',\n 'HOST_WEBMAIL': 'webmail',\n 'HOST_WEBDAV': 'webdav:5232',\n 'HOST_REDIS': 'redis',\n 'HOST_FRONT': 'front',\n 'SUBNET': '192.168.203.0/24',\n 'SUBNET6': None,\n 'POD_ADDRESS_RANGE': None\n}\n\nclass ConfigManager:\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'\n }\n\n def __init__(self):\n self.config = dict()\n\n def get_host_address(self, name):\n # if MYSERVICE_ADDRESS is defined, use this\n if f'{name}_ADDRESS' in os.environ:\n return os.environ.get(f'{name}_ADDRESS')\n # otherwise use the host name and resolve it\n return system.resolve_address(self.config[f'HOST_{name}'])\n\n def resolve_hosts(self):\n for key in ['IMAP', 'POP3', 'AUTHSMTP', 'SMTP', 'REDIS']:\n self.config[f'{key}_ADDRESS'] = self.get_host_address(key)\n if self.config['WEBMAIL'] != 'none':\n self.config['WEBMAIL_ADDRESS'] = self.get_host_address('WEBMAIL')\n\n def __get_env(self, key, value):\n key_file = key + \"_FILE\"\n if key_file in os.environ:\n with open(os.environ.get(key_file)) as file:\n value_from_file = file.read()\n return value_from_file.strip()\n else:\n return os.environ.get(key, value)\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n # get current app config\n self.config.update(app.config)\n # get environment variables\n self.config.update({\n key: self.__coerce_value(self.__get_env(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n self.resolve_hosts()\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n self.config['RATELIMIT_STORAGE_URL'] = 
f'redis://{self.config[\"REDIS_ADDRESS\"]}/2'\n self.config['QUOTA_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/1'\n self.config['SESSION_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/3'\n self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'\n self.config['SESSION_COOKIE_HTTPONLY'] = True\n self.config['SESSION_PERMANENT'] = True\n self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])\n self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])\n self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])\n self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])\n hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]\n self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)\n self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])\n self.config['HOSTNAMES'] = ','.join(hostnames)\n self.config['HOSTNAME'] = hostnames[0]\n\n # update the app config\n app.config.update(self.config)\n\n", "path": "core/admin/mailu/configuration.py"}], "after_files": [{"content": "import os\n\nfrom datetime import timedelta\nfrom socrate import system\nimport ipaddress\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'QUOTA_STORAGE_URL': '',\n 'DEBUG': False,\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE':'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '19.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'WILDCARD_SENDERS': '',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n 'DEFER_ON_TLS_ERROR': True,\n 'AUTH_RATELIMIT_IP': '60/hour',\n 'AUTH_RATELIMIT_IP_V4_MASK': 24,\n 'AUTH_RATELIMIT_IP_V6_MASK': 56,\n 'AUTH_RATELIMIT_USER': '100/day',\n 'AUTH_RATELIMIT_EXEMPTION': '',\n 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n 'MESSAGE_RATELIMIT': '200/day',\n 'MESSAGE_RATELIMIT_EXEMPTION': '',\n 'RECIPIENT_DELIMITER': '',\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'ADMIN' : 'none',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n 'LOGO_URL': None,\n 'LOGO_BACKGROUND': None,\n # Advanced settings\n 'LOG_LEVEL': 'WARNING',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_TIMEOUT': 3600,\n 'PERMANENT_SESSION_LIFETIME': 30*24*3600,\n 'SESSION_COOKIE_SECURE': 
True,\n 'CREDENTIAL_ROUNDS': 12,\n 'TZ': 'Etc/UTC',\n # Host settings\n 'HOST_IMAP': 'imap',\n 'HOST_LMTP': 'imap:2525',\n 'HOST_POP3': 'imap',\n 'HOST_SMTP': 'smtp',\n 'HOST_AUTHSMTP': 'smtp',\n 'HOST_ADMIN': 'admin',\n 'HOST_WEBMAIL': 'webmail',\n 'HOST_WEBDAV': 'webdav:5232',\n 'HOST_REDIS': 'redis',\n 'HOST_FRONT': 'front',\n 'SUBNET': '192.168.203.0/24',\n 'SUBNET6': None,\n 'POD_ADDRESS_RANGE': None\n}\n\nclass ConfigManager:\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'\n }\n\n def __init__(self):\n self.config = dict()\n\n def get_host_address(self, name):\n # if MYSERVICE_ADDRESS is defined, use this\n if f'{name}_ADDRESS' in os.environ:\n return os.environ.get(f'{name}_ADDRESS')\n # otherwise use the host name and resolve it\n return system.resolve_address(self.config[f'HOST_{name}'])\n\n def resolve_hosts(self):\n for key in ['IMAP', 'POP3', 'AUTHSMTP', 'SMTP', 'REDIS']:\n self.config[f'{key}_ADDRESS'] = self.get_host_address(key)\n if self.config['WEBMAIL'] != 'none':\n self.config['WEBMAIL_ADDRESS'] = self.get_host_address('WEBMAIL')\n\n def __get_env(self, key, value):\n key_file = key + \"_FILE\"\n if key_file in os.environ:\n with open(os.environ.get(key_file)) as file:\n value_from_file = file.read()\n return value_from_file.strip()\n else:\n return os.environ.get(key, value)\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n # get current app config\n self.config.update(app.config)\n # get environment variables\n self.config.update({\n key: self.__coerce_value(self.__get_env(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n self.resolve_hosts()\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/2'\n self.config['QUOTA_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/1'\n self.config['SESSION_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/3'\n self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'\n self.config['SESSION_COOKIE_HTTPONLY'] = True\n self.config['SESSION_PERMANENT'] = True\n self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])\n self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])\n self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])\n self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])\n hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]\n self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)\n self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])\n self.config['HOSTNAMES'] = ','.join(hostnames)\n self.config['HOSTNAME'] = hostnames[0]\n\n # update the app config\n app.config.update(self.config)\n\n", 
"path": "core/admin/mailu/configuration.py"}]} | 3,954 | 144 |
gh_patches_debug_28372 | rasdani/github-patches | git_diff | pypa__setuptools-4283 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deduplicate testing dependencies by referencing `[testing-integration]`
<!-- First time contributors: Take a moment to review https://setuptools.pypa.io/en/latest/development/developer-guide.html! -->
<!-- Remove sections if not applicable -->
## Summary of changes
This was my original idea, but in https://github.com/pypa/setuptools/pull/4257#discussion_r1514754970, @abravalheri mentioned that:
> [...] I think we just remove the `testing-integration` and use `testing` everywhere...
>
> Although it might add a bit of overhead for the integration tests, it will simplify and streamline the setup... This way we reduce the divergence with `skeleton`. The additional overhead in the integration tests should not be too much, and the integration tests just run before the releases anyway.
But I'm still opening this PR as an option / for comparison.
Accepting this closes #4282
### Pull Request Checklist
- [x] Changes have tests (these are test changes)
- [x] News fragment added in [`newsfragments/`]. (no user facing changes)
_(See [documentation][PR docs] for details)_
[`newsfragments/`]: https://github.com/pypa/setuptools/tree/master/newsfragments
[PR docs]:
https://setuptools.pypa.io/en/latest/development/developer-guide.html#making-a-pull-request
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conftest.py`
Content:
```
1 import sys
2
3 import pytest
4
5
6 pytest_plugins = 'setuptools.tests.fixtures'
7
8
9 def pytest_addoption(parser):
10 parser.addoption(
11 "--package_name",
12 action="append",
13 default=[],
14 help="list of package_name to pass to test functions",
15 )
16 parser.addoption(
17 "--integration",
18 action="store_true",
19 default=False,
20 help="run integration tests (only)",
21 )
22
23
24 def pytest_configure(config):
25 config.addinivalue_line("markers", "integration: integration tests")
26 config.addinivalue_line("markers", "uses_network: tests may try to download files")
27
28
29 collect_ignore = [
30 'tests/manual_test.py',
31 'setuptools/tests/mod_with_constant.py',
32 'setuptools/_distutils',
33 '_distutils_hack',
34 'setuptools/extern',
35 'pkg_resources/extern',
36 'pkg_resources/tests/data',
37 'setuptools/_vendor',
38 'pkg_resources/_vendor',
39 'setuptools/config/_validate_pyproject',
40 'setuptools/modified.py',
41 ]
42
43
44 if sys.version_info < (3, 9) or sys.platform == 'cygwin':
45 collect_ignore.append('tools/finalize.py')
46
47
48 @pytest.fixture(autouse=True)
49 def _skip_integration(request):
50 running_integration_tests = request.config.getoption("--integration")
51 is_integration_test = request.node.get_closest_marker("integration")
52 if running_integration_tests and not is_integration_test:
53 pytest.skip("running integration tests only")
54 if not running_integration_tests and is_integration_test:
55 pytest.skip("skipping integration tests")
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conftest.py b/conftest.py
--- a/conftest.py
+++ b/conftest.py
@@ -24,6 +24,7 @@
def pytest_configure(config):
config.addinivalue_line("markers", "integration: integration tests")
config.addinivalue_line("markers", "uses_network: tests may try to download files")
+ _IntegrationTestSpeedups.disable_plugins_already_run(config)
collect_ignore = [
@@ -47,9 +48,25 @@
@pytest.fixture(autouse=True)
def _skip_integration(request):
- running_integration_tests = request.config.getoption("--integration")
- is_integration_test = request.node.get_closest_marker("integration")
- if running_integration_tests and not is_integration_test:
- pytest.skip("running integration tests only")
- if not running_integration_tests and is_integration_test:
- pytest.skip("skipping integration tests")
+ _IntegrationTestSpeedups.conditional_skip(request)
+
+
+class _IntegrationTestSpeedups:
+ """Speed-up integration tests by only running what does not run in other tests."""
+
+ RUNS_ON_NORMAL_TESTS = ("checkdocks", "cov", "mypy", "perf", "ruff")
+
+ @classmethod
+ def disable_plugins_already_run(cls, config):
+ if config.getoption("--integration"):
+ for plugin in cls.RUNS_ON_NORMAL_TESTS: # no need to run again
+ config.pluginmanager.set_blocked(plugin)
+
+ @staticmethod
+ def conditional_skip(request):
+ running_integration_tests = request.config.getoption("--integration")
+ is_integration_test = request.node.get_closest_marker("integration")
+ if running_integration_tests and not is_integration_test:
+ pytest.skip("running integration tests only")
+ if not running_integration_tests and is_integration_test:
+ pytest.skip("skipping integration tests")
| {"golden_diff": "diff --git a/conftest.py b/conftest.py\n--- a/conftest.py\n+++ b/conftest.py\n@@ -24,6 +24,7 @@\n def pytest_configure(config):\n config.addinivalue_line(\"markers\", \"integration: integration tests\")\n config.addinivalue_line(\"markers\", \"uses_network: tests may try to download files\")\n+ _IntegrationTestSpeedups.disable_plugins_already_run(config)\n \n \n collect_ignore = [\n@@ -47,9 +48,25 @@\n \n @pytest.fixture(autouse=True)\n def _skip_integration(request):\n- running_integration_tests = request.config.getoption(\"--integration\")\n- is_integration_test = request.node.get_closest_marker(\"integration\")\n- if running_integration_tests and not is_integration_test:\n- pytest.skip(\"running integration tests only\")\n- if not running_integration_tests and is_integration_test:\n- pytest.skip(\"skipping integration tests\")\n+ _IntegrationTestSpeedups.conditional_skip(request)\n+\n+\n+class _IntegrationTestSpeedups:\n+ \"\"\"Speed-up integration tests by only running what does not run in other tests.\"\"\"\n+\n+ RUNS_ON_NORMAL_TESTS = (\"checkdocks\", \"cov\", \"mypy\", \"perf\", \"ruff\")\n+\n+ @classmethod\n+ def disable_plugins_already_run(cls, config):\n+ if config.getoption(\"--integration\"):\n+ for plugin in cls.RUNS_ON_NORMAL_TESTS: # no need to run again\n+ config.pluginmanager.set_blocked(plugin)\n+\n+ @staticmethod\n+ def conditional_skip(request):\n+ running_integration_tests = request.config.getoption(\"--integration\")\n+ is_integration_test = request.node.get_closest_marker(\"integration\")\n+ if running_integration_tests and not is_integration_test:\n+ pytest.skip(\"running integration tests only\")\n+ if not running_integration_tests and is_integration_test:\n+ pytest.skip(\"skipping integration tests\")\n", "issue": "Deduplicate testing dependencies by referencing `[testing-integration]`\n<!-- First time contributors: Take a moment to review https://setuptools.pypa.io/en/latest/development/developer-guide.html! -->\r\n<!-- Remove sections if not applicable -->\r\n\r\n## Summary of changes\r\n\r\nThis was my original idea, but in https://github.com/pypa/setuptools/pull/4257#discussion_r1514754970, @abravalheri mentioned that:\r\n> [...] I think we just remove the `testing-integration` and use `testing` everywhere...\r\n> \r\n> Although it might add a bit of overhead for the integration tests, it will simplify and streamline the setup... This way we reduce the divergence with `skeleton`. The additional overhead in the integration tests should not be too much, and the integration tests just run before the releases anyway.\r\n\r\nBut I'm still opening this PR as an option / for comparison.\r\n\r\nAccepting this closes #4282\r\n\r\n### Pull Request Checklist\r\n- [x] Changes have tests (these are test changes)\r\n- [x] News fragment added in [`newsfragments/`]. 
(no user facing changes)\r\n _(See [documentation][PR docs] for details)_\r\n\r\n\r\n[`newsfragments/`]: https://github.com/pypa/setuptools/tree/master/newsfragments\r\n[PR docs]:\r\nhttps://setuptools.pypa.io/en/latest/development/developer-guide.html#making-a-pull-request\r\n\n", "before_files": [{"content": "import sys\n\nimport pytest\n\n\npytest_plugins = 'setuptools.tests.fixtures'\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--package_name\",\n action=\"append\",\n default=[],\n help=\"list of package_name to pass to test functions\",\n )\n parser.addoption(\n \"--integration\",\n action=\"store_true\",\n default=False,\n help=\"run integration tests (only)\",\n )\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\"markers\", \"integration: integration tests\")\n config.addinivalue_line(\"markers\", \"uses_network: tests may try to download files\")\n\n\ncollect_ignore = [\n 'tests/manual_test.py',\n 'setuptools/tests/mod_with_constant.py',\n 'setuptools/_distutils',\n '_distutils_hack',\n 'setuptools/extern',\n 'pkg_resources/extern',\n 'pkg_resources/tests/data',\n 'setuptools/_vendor',\n 'pkg_resources/_vendor',\n 'setuptools/config/_validate_pyproject',\n 'setuptools/modified.py',\n]\n\n\nif sys.version_info < (3, 9) or sys.platform == 'cygwin':\n collect_ignore.append('tools/finalize.py')\n\n\[email protected](autouse=True)\ndef _skip_integration(request):\n running_integration_tests = request.config.getoption(\"--integration\")\n is_integration_test = request.node.get_closest_marker(\"integration\")\n if running_integration_tests and not is_integration_test:\n pytest.skip(\"running integration tests only\")\n if not running_integration_tests and is_integration_test:\n pytest.skip(\"skipping integration tests\")\n", "path": "conftest.py"}], "after_files": [{"content": "import sys\n\nimport pytest\n\n\npytest_plugins = 'setuptools.tests.fixtures'\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--package_name\",\n action=\"append\",\n default=[],\n help=\"list of package_name to pass to test functions\",\n )\n parser.addoption(\n \"--integration\",\n action=\"store_true\",\n default=False,\n help=\"run integration tests (only)\",\n )\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\"markers\", \"integration: integration tests\")\n config.addinivalue_line(\"markers\", \"uses_network: tests may try to download files\")\n _IntegrationTestSpeedups.disable_plugins_already_run(config)\n\n\ncollect_ignore = [\n 'tests/manual_test.py',\n 'setuptools/tests/mod_with_constant.py',\n 'setuptools/_distutils',\n '_distutils_hack',\n 'setuptools/extern',\n 'pkg_resources/extern',\n 'pkg_resources/tests/data',\n 'setuptools/_vendor',\n 'pkg_resources/_vendor',\n 'setuptools/config/_validate_pyproject',\n 'setuptools/modified.py',\n]\n\n\nif sys.version_info < (3, 9) or sys.platform == 'cygwin':\n collect_ignore.append('tools/finalize.py')\n\n\[email protected](autouse=True)\ndef _skip_integration(request):\n _IntegrationTestSpeedups.conditional_skip(request)\n\n\nclass _IntegrationTestSpeedups:\n \"\"\"Speed-up integration tests by only running what does not run in other tests.\"\"\"\n\n RUNS_ON_NORMAL_TESTS = (\"checkdocks\", \"cov\", \"mypy\", \"perf\", \"ruff\")\n\n @classmethod\n def disable_plugins_already_run(cls, config):\n if config.getoption(\"--integration\"):\n for plugin in cls.RUNS_ON_NORMAL_TESTS: # no need to run again\n config.pluginmanager.set_blocked(plugin)\n\n @staticmethod\n def conditional_skip(request):\n 
running_integration_tests = request.config.getoption(\"--integration\")\n is_integration_test = request.node.get_closest_marker(\"integration\")\n if running_integration_tests and not is_integration_test:\n pytest.skip(\"running integration tests only\")\n if not running_integration_tests and is_integration_test:\n pytest.skip(\"skipping integration tests\")\n", "path": "conftest.py"}]} | 1,007 | 408 |
gh_patches_debug_24747 | rasdani/github-patches | git_diff | common-workflow-language__cwltool-755 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
singularity image pull triggered for every step when using beegfs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cwltool/singularity.py`
Content:
```
1 """Support for executing Docker containers using Singularity."""
2 from __future__ import absolute_import
3 import logging
4 import os
5 import re
6 import shutil
7 import sys
8 from io import open # pylint: disable=redefined-builtin
9 from typing import (Dict, List, Text, Optional, MutableMapping)
10 from .errors import WorkflowException
11 from .job import ContainerCommandLineJob
12 from .pathmapper import PathMapper, ensure_writable
13 from .process import (UnsupportedRequirement)
14 from .utils import docker_windows_path_adjust
15 from schema_salad.sourceline import SourceLine
16 if os.name == 'posix' and sys.version_info[0] < 3:
17 from subprocess32 import (check_call, check_output, # pylint: disable=import-error
18 CalledProcessError, DEVNULL, PIPE, Popen,
19 TimeoutExpired)
20 elif os.name == 'posix':
21 from subprocess import (check_call, check_output, # type: ignore
22 CalledProcessError, DEVNULL, PIPE, Popen,
23 TimeoutExpired)
24 else: # we're not on Unix, so none of this matters
25 pass
26
27 _logger = logging.getLogger("cwltool")
28 _USERNS = None
29
30 def _singularity_supports_userns(): # type: ()->bool
31 global _USERNS # pylint: disable=global-statement
32 if _USERNS is None:
33 try:
34 result = Popen(
35 [u"singularity", u"exec", u"--userns", u"/etc", u"true"],
36 stderr=PIPE, stdout=DEVNULL,
37 universal_newlines=True).communicate(timeout=60)[1]
38 _USERNS = "No valid /bin/sh" in result
39 except TimeoutExpired:
40 _USERNS = False
41 return _USERNS
42
43 def _normalizeImageId(string): # type: (Text)->Text
44 candidate = re.sub(pattern=r'([a-z]*://)', repl=r'', string=string)
45 return re.sub(pattern=r'[:/]', repl=r'-', string=candidate) + ".img"
46
47
48 class SingularityCommandLineJob(ContainerCommandLineJob):
49
50 @staticmethod
51 def get_image(dockerRequirement, # type: Dict[Text, Text]
52 pull_image, # type: bool
53 force_pull=False # type: bool
54 ):
55 # type: (...) -> bool
56 """
57 Acquire the software container image in the specified dockerRequirement
58 using Singularity and returns the success as a bool. Updates the
59 provided dockerRequirement with the specific dockerImageId to the full
60 path of the local image, if found. Likewise the
61 dockerRequirement['dockerPull'] is updated to a docker:// URI if needed.
62 """
63 found = False
64
65 candidates = []
66
67 if "dockerImageId" not in dockerRequirement and "dockerPull" in dockerRequirement:
68 match = re.search(pattern=r'([a-z]*://)', string=dockerRequirement["dockerPull"])
69 candidate = _normalizeImageId(dockerRequirement['dockerPull'])
70 candidates.append(candidate)
71 dockerRequirement['dockerImageId'] = candidate
72 if not match:
73 dockerRequirement["dockerPull"] = "docker://" + dockerRequirement["dockerPull"]
74 elif "dockerImageId" in dockerRequirement:
75 candidates.append(dockerRequirement['dockerImageId'])
76 candidates.append(_normalizeImageId(dockerRequirement['dockerImageId']))
77
78 # check if Singularity image is available in $SINGULARITY_CACHEDIR
79 for target in ("SINGULARITY_CACHEDIR", "SINGULARITY_PULLFOLDER",
80 os.getcwd()):
81 if target in os.environ:
82 for candidate in candidates:
83 path = os.path.join(os.environ[target], candidate)
84 if os.path.isfile(path):
85 _logger.info("Using local copy of Singularity image "
86 "found in {}".format(target))
87 dockerRequirement["dockerImageId"] = path
88 found = True
89
90 if (force_pull or not found) and pull_image:
91 cmd = [] # type: List[Text]
92 if "dockerPull" in dockerRequirement:
93 cmd = ["singularity", "pull", "--force", "--name",
94 str(dockerRequirement["dockerImageId"]),
95 str(dockerRequirement["dockerPull"])]
96 _logger.info(Text(cmd))
97 check_call(cmd, stdout=sys.stderr)
98 found = True
99 elif "dockerFile" in dockerRequirement:
100 raise WorkflowException(SourceLine(
101 dockerRequirement, 'dockerFile').makeError(
102 "dockerFile is not currently supported when using the "
103 "Singularity runtime for Docker containers."))
104 elif "dockerLoad" in dockerRequirement:
105 raise WorkflowException(SourceLine(
106 dockerRequirement, 'dockerLoad').makeError(
107 "dockerLoad is not currently supported when using the "
108 "Singularity runtime for Docker containers."))
109 elif "dockerImport" in dockerRequirement:
110 raise WorkflowException(SourceLine(
111 dockerRequirement, 'dockerImport').makeError(
112 "dockerImport is not currently supported when using the "
113 "Singularity runtime for Docker containers."))
114
115 return found
116
117 def get_from_requirements(self,
118 r, # type: Optional[Dict[Text, Text]]
119 req, # type: bool
120 pull_image, # type: bool
121 force_pull=False, # type: bool
122 tmp_outdir_prefix=None # type: Text
123 ): # type: (...) -> Text
124 """
125 Returns the filename of the Singularity image (e.g.
126 hello-world-latest.img).
127 """
128
129 if r:
130 errmsg = None
131 try:
132 check_output(["singularity", "--version"])
133 except CalledProcessError as err:
134 errmsg = "Cannot execute 'singularity --version' {}".format(err)
135 except OSError as err:
136 errmsg = "'singularity' executable not found: {}".format(err)
137
138 if errmsg:
139 if req:
140 raise WorkflowException(errmsg)
141 else:
142 return None
143
144 if self.get_image(r, pull_image, force_pull):
145 return os.path.abspath(r["dockerImageId"])
146 else:
147 if req:
148 raise WorkflowException(u"Container image {} not "
149 "found".format(r["dockerImageId"]))
150
151 return None
152
153 def add_volumes(self, pathmapper, runtime, stage_output):
154 # type: (PathMapper, List[Text], bool) -> None
155
156 host_outdir = self.outdir
157 container_outdir = self.builder.outdir
158 for _, vol in pathmapper.items():
159 if not vol.staged:
160 continue
161 if stage_output and not vol.target.startswith(container_outdir):
162 containertgt = container_outdir + vol.target[len(host_outdir):]
163 else:
164 containertgt = vol.target
165 if vol.target.startswith(container_outdir + "/"):
166 host_outdir_tgt = os.path.join(
167 host_outdir, vol.target[len(container_outdir) + 1:])
168 else:
169 host_outdir_tgt = None
170 if vol.type in ("File", "Directory"):
171 if not vol.resolved.startswith("_:"):
172 runtime.append(u"--bind")
173 runtime.append("{}:{}:ro".format(
174 docker_windows_path_adjust(vol.resolved),
175 docker_windows_path_adjust(containertgt)))
176 elif vol.type == "WritableFile":
177 if self.inplace_update:
178 runtime.append(u"--bind")
179 runtime.append(u"{}:{}:rw".format(
180 docker_windows_path_adjust(vol.resolved),
181 docker_windows_path_adjust(containertgt)))
182 else:
183 shutil.copy(vol.resolved, host_outdir_tgt)
184 ensure_writable(host_outdir_tgt)
185 elif vol.type == "WritableDirectory":
186 if vol.resolved.startswith("_:"):
187 os.makedirs(host_outdir_tgt, 0o0755)
188 else:
189 if self.inplace_update:
190 runtime.append(u"--bind")
191 runtime.append(u"{}:{}:rw".format(
192 docker_windows_path_adjust(vol.resolved),
193 docker_windows_path_adjust(containertgt)))
194 else:
195 shutil.copytree(vol.resolved, vol.target)
196 elif vol.type == "CreateFile":
197 createtmp = os.path.join(host_outdir, os.path.basename(vol.target))
198 with open(createtmp, "wb") as tmp:
199 tmp.write(vol.resolved.encode("utf-8"))
200 runtime.append(u"--bind")
201 runtime.append(u"{}:{}:ro".format(
202 docker_windows_path_adjust(createtmp),
203 docker_windows_path_adjust(vol.target)))
204
205 def create_runtime(self,
206 env, # type: MutableMapping[Text, Text]
207 rm_container=True, # type: bool
208 record_container_id=False, # type: bool
209 cidfile_dir="", # type: Text
210 cidfile_prefix="", # type: Text
211 **kwargs
212 ):
213 # type: (...) -> List
214 """ Returns the Singularity runtime list of commands and options."""
215
216 runtime = [u"singularity", u"--quiet", u"exec", u"--contain", u"--pid",
217 u"--ipc"]
218 if _singularity_supports_userns():
219 runtime.append(u"--userns")
220 runtime.append(u"--bind")
221 runtime.append(u"{}:{}:rw".format(
222 docker_windows_path_adjust(os.path.realpath(self.outdir)),
223 self.builder.outdir))
224 runtime.append(u"--bind")
225 runtime.append(u"{}:{}:rw".format(
226 docker_windows_path_adjust(os.path.realpath(self.tmpdir)), "/tmp"))
227
228 self.add_volumes(self.pathmapper, runtime, stage_output=False)
229 if self.generatemapper:
230 self.add_volumes(self.generatemapper, runtime, stage_output=True)
231
232 runtime.append(u"--pwd")
233 runtime.append("%s" % (docker_windows_path_adjust(self.builder.outdir)))
234
235 if kwargs.get("custom_net", None) is not None:
236 raise UnsupportedRequirement(
237 "Singularity implementation does not support custom networking")
238 elif kwargs.get("disable_net", None):
239 runtime.append(u"--net")
240
241 env["SINGULARITYENV_TMPDIR"] = "/tmp"
242 env["SINGULARITYENV_HOME"] = self.builder.outdir
243
244 for name, value in self.environment.items():
245 env["SINGULARITYENV_{}".format(name)] = value
246 return runtime
247
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cwltool/singularity.py b/cwltool/singularity.py
--- a/cwltool/singularity.py
+++ b/cwltool/singularity.py
@@ -76,16 +76,18 @@
candidates.append(_normalizeImageId(dockerRequirement['dockerImageId']))
# check if Singularity image is available in $SINGULARITY_CACHEDIR
- for target in ("SINGULARITY_CACHEDIR", "SINGULARITY_PULLFOLDER",
- os.getcwd()):
- if target in os.environ:
- for candidate in candidates:
- path = os.path.join(os.environ[target], candidate)
- if os.path.isfile(path):
- _logger.info("Using local copy of Singularity image "
- "found in {}".format(target))
- dockerRequirement["dockerImageId"] = path
- found = True
+ targets = [os.getcwd()]
+ for env in ("SINGULARITY_CACHEDIR", "SINGULARITY_PULLFOLDER"):
+ if env in os.environ:
+ targets.append(os.environ[env])
+ for target in targets:
+ for candidate in candidates:
+ path = os.path.join(target, candidate)
+ if os.path.isfile(path):
+ _logger.info("Using local copy of Singularity image "
+ "found in {}".format(target))
+ dockerRequirement["dockerImageId"] = path
+ found = True
if (force_pull or not found) and pull_image:
cmd = [] # type: List[Text]
| {"golden_diff": "diff --git a/cwltool/singularity.py b/cwltool/singularity.py\n--- a/cwltool/singularity.py\n+++ b/cwltool/singularity.py\n@@ -76,16 +76,18 @@\n candidates.append(_normalizeImageId(dockerRequirement['dockerImageId']))\n \n # check if Singularity image is available in $SINGULARITY_CACHEDIR\n- for target in (\"SINGULARITY_CACHEDIR\", \"SINGULARITY_PULLFOLDER\",\n- os.getcwd()):\n- if target in os.environ:\n- for candidate in candidates:\n- path = os.path.join(os.environ[target], candidate)\n- if os.path.isfile(path):\n- _logger.info(\"Using local copy of Singularity image \"\n- \"found in {}\".format(target))\n- dockerRequirement[\"dockerImageId\"] = path\n- found = True\n+ targets = [os.getcwd()]\n+ for env in (\"SINGULARITY_CACHEDIR\", \"SINGULARITY_PULLFOLDER\"):\n+ if env in os.environ:\n+ targets.append(os.environ[env])\n+ for target in targets:\n+ for candidate in candidates:\n+ path = os.path.join(target, candidate)\n+ if os.path.isfile(path):\n+ _logger.info(\"Using local copy of Singularity image \"\n+ \"found in {}\".format(target))\n+ dockerRequirement[\"dockerImageId\"] = path\n+ found = True\n \n if (force_pull or not found) and pull_image:\n cmd = [] # type: List[Text]\n", "issue": "singularity image pull triggered for every step when using beegfs\n\n", "before_files": [{"content": "\"\"\"Support for executing Docker containers using Singularity.\"\"\"\nfrom __future__ import absolute_import\nimport logging\nimport os\nimport re\nimport shutil\nimport sys\nfrom io import open # pylint: disable=redefined-builtin\nfrom typing import (Dict, List, Text, Optional, MutableMapping)\nfrom .errors import WorkflowException\nfrom .job import ContainerCommandLineJob\nfrom .pathmapper import PathMapper, ensure_writable\nfrom .process import (UnsupportedRequirement)\nfrom .utils import docker_windows_path_adjust\nfrom schema_salad.sourceline import SourceLine\nif os.name == 'posix' and sys.version_info[0] < 3:\n from subprocess32 import (check_call, check_output, # pylint: disable=import-error\n CalledProcessError, DEVNULL, PIPE, Popen,\n TimeoutExpired)\nelif os.name == 'posix':\n from subprocess import (check_call, check_output, # type: ignore\n CalledProcessError, DEVNULL, PIPE, Popen,\n TimeoutExpired)\nelse: # we're not on Unix, so none of this matters\n pass\n\n_logger = logging.getLogger(\"cwltool\")\n_USERNS = None\n\ndef _singularity_supports_userns(): # type: ()->bool\n global _USERNS # pylint: disable=global-statement\n if _USERNS is None:\n try:\n result = Popen(\n [u\"singularity\", u\"exec\", u\"--userns\", u\"/etc\", u\"true\"],\n stderr=PIPE, stdout=DEVNULL,\n universal_newlines=True).communicate(timeout=60)[1]\n _USERNS = \"No valid /bin/sh\" in result\n except TimeoutExpired:\n _USERNS = False\n return _USERNS\n\ndef _normalizeImageId(string): # type: (Text)->Text\n candidate = re.sub(pattern=r'([a-z]*://)', repl=r'', string=string)\n return re.sub(pattern=r'[:/]', repl=r'-', string=candidate) + \".img\"\n\n\nclass SingularityCommandLineJob(ContainerCommandLineJob):\n\n @staticmethod\n def get_image(dockerRequirement, # type: Dict[Text, Text]\n pull_image, # type: bool\n force_pull=False # type: bool\n ):\n # type: (...) -> bool\n \"\"\"\n Acquire the software container image in the specified dockerRequirement\n using Singularity and returns the success as a bool. Updates the\n provided dockerRequirement with the specific dockerImageId to the full\n path of the local image, if found. 
Likewise the\n dockerRequirement['dockerPull'] is updated to a docker:// URI if needed.\n \"\"\"\n found = False\n\n candidates = []\n\n if \"dockerImageId\" not in dockerRequirement and \"dockerPull\" in dockerRequirement:\n match = re.search(pattern=r'([a-z]*://)', string=dockerRequirement[\"dockerPull\"])\n candidate = _normalizeImageId(dockerRequirement['dockerPull'])\n candidates.append(candidate)\n dockerRequirement['dockerImageId'] = candidate\n if not match:\n dockerRequirement[\"dockerPull\"] = \"docker://\" + dockerRequirement[\"dockerPull\"]\n elif \"dockerImageId\" in dockerRequirement:\n candidates.append(dockerRequirement['dockerImageId'])\n candidates.append(_normalizeImageId(dockerRequirement['dockerImageId']))\n\n # check if Singularity image is available in $SINGULARITY_CACHEDIR\n for target in (\"SINGULARITY_CACHEDIR\", \"SINGULARITY_PULLFOLDER\",\n os.getcwd()):\n if target in os.environ:\n for candidate in candidates:\n path = os.path.join(os.environ[target], candidate)\n if os.path.isfile(path):\n _logger.info(\"Using local copy of Singularity image \"\n \"found in {}\".format(target))\n dockerRequirement[\"dockerImageId\"] = path\n found = True\n\n if (force_pull or not found) and pull_image:\n cmd = [] # type: List[Text]\n if \"dockerPull\" in dockerRequirement:\n cmd = [\"singularity\", \"pull\", \"--force\", \"--name\",\n str(dockerRequirement[\"dockerImageId\"]),\n str(dockerRequirement[\"dockerPull\"])]\n _logger.info(Text(cmd))\n check_call(cmd, stdout=sys.stderr)\n found = True\n elif \"dockerFile\" in dockerRequirement:\n raise WorkflowException(SourceLine(\n dockerRequirement, 'dockerFile').makeError(\n \"dockerFile is not currently supported when using the \"\n \"Singularity runtime for Docker containers.\"))\n elif \"dockerLoad\" in dockerRequirement:\n raise WorkflowException(SourceLine(\n dockerRequirement, 'dockerLoad').makeError(\n \"dockerLoad is not currently supported when using the \"\n \"Singularity runtime for Docker containers.\"))\n elif \"dockerImport\" in dockerRequirement:\n raise WorkflowException(SourceLine(\n dockerRequirement, 'dockerImport').makeError(\n \"dockerImport is not currently supported when using the \"\n \"Singularity runtime for Docker containers.\"))\n\n return found\n\n def get_from_requirements(self,\n r, # type: Optional[Dict[Text, Text]]\n req, # type: bool\n pull_image, # type: bool\n force_pull=False, # type: bool\n tmp_outdir_prefix=None # type: Text\n ): # type: (...) 
-> Text\n \"\"\"\n Returns the filename of the Singularity image (e.g.\n hello-world-latest.img).\n \"\"\"\n\n if r:\n errmsg = None\n try:\n check_output([\"singularity\", \"--version\"])\n except CalledProcessError as err:\n errmsg = \"Cannot execute 'singularity --version' {}\".format(err)\n except OSError as err:\n errmsg = \"'singularity' executable not found: {}\".format(err)\n\n if errmsg:\n if req:\n raise WorkflowException(errmsg)\n else:\n return None\n\n if self.get_image(r, pull_image, force_pull):\n return os.path.abspath(r[\"dockerImageId\"])\n else:\n if req:\n raise WorkflowException(u\"Container image {} not \"\n \"found\".format(r[\"dockerImageId\"]))\n\n return None\n\n def add_volumes(self, pathmapper, runtime, stage_output):\n # type: (PathMapper, List[Text], bool) -> None\n\n host_outdir = self.outdir\n container_outdir = self.builder.outdir\n for _, vol in pathmapper.items():\n if not vol.staged:\n continue\n if stage_output and not vol.target.startswith(container_outdir):\n containertgt = container_outdir + vol.target[len(host_outdir):]\n else:\n containertgt = vol.target\n if vol.target.startswith(container_outdir + \"/\"):\n host_outdir_tgt = os.path.join(\n host_outdir, vol.target[len(container_outdir) + 1:])\n else:\n host_outdir_tgt = None\n if vol.type in (\"File\", \"Directory\"):\n if not vol.resolved.startswith(\"_:\"):\n runtime.append(u\"--bind\")\n runtime.append(\"{}:{}:ro\".format(\n docker_windows_path_adjust(vol.resolved),\n docker_windows_path_adjust(containertgt)))\n elif vol.type == \"WritableFile\":\n if self.inplace_update:\n runtime.append(u\"--bind\")\n runtime.append(u\"{}:{}:rw\".format(\n docker_windows_path_adjust(vol.resolved),\n docker_windows_path_adjust(containertgt)))\n else:\n shutil.copy(vol.resolved, host_outdir_tgt)\n ensure_writable(host_outdir_tgt)\n elif vol.type == \"WritableDirectory\":\n if vol.resolved.startswith(\"_:\"):\n os.makedirs(host_outdir_tgt, 0o0755)\n else:\n if self.inplace_update:\n runtime.append(u\"--bind\")\n runtime.append(u\"{}:{}:rw\".format(\n docker_windows_path_adjust(vol.resolved),\n docker_windows_path_adjust(containertgt)))\n else:\n shutil.copytree(vol.resolved, vol.target)\n elif vol.type == \"CreateFile\":\n createtmp = os.path.join(host_outdir, os.path.basename(vol.target))\n with open(createtmp, \"wb\") as tmp:\n tmp.write(vol.resolved.encode(\"utf-8\"))\n runtime.append(u\"--bind\")\n runtime.append(u\"{}:{}:ro\".format(\n docker_windows_path_adjust(createtmp),\n docker_windows_path_adjust(vol.target)))\n\n def create_runtime(self,\n env, # type: MutableMapping[Text, Text]\n rm_container=True, # type: bool\n record_container_id=False, # type: bool\n cidfile_dir=\"\", # type: Text\n cidfile_prefix=\"\", # type: Text\n **kwargs\n ):\n # type: (...) 
-> List\n \"\"\" Returns the Singularity runtime list of commands and options.\"\"\"\n\n runtime = [u\"singularity\", u\"--quiet\", u\"exec\", u\"--contain\", u\"--pid\",\n u\"--ipc\"]\n if _singularity_supports_userns():\n runtime.append(u\"--userns\")\n runtime.append(u\"--bind\")\n runtime.append(u\"{}:{}:rw\".format(\n docker_windows_path_adjust(os.path.realpath(self.outdir)),\n self.builder.outdir))\n runtime.append(u\"--bind\")\n runtime.append(u\"{}:{}:rw\".format(\n docker_windows_path_adjust(os.path.realpath(self.tmpdir)), \"/tmp\"))\n\n self.add_volumes(self.pathmapper, runtime, stage_output=False)\n if self.generatemapper:\n self.add_volumes(self.generatemapper, runtime, stage_output=True)\n\n runtime.append(u\"--pwd\")\n runtime.append(\"%s\" % (docker_windows_path_adjust(self.builder.outdir)))\n\n if kwargs.get(\"custom_net\", None) is not None:\n raise UnsupportedRequirement(\n \"Singularity implementation does not support custom networking\")\n elif kwargs.get(\"disable_net\", None):\n runtime.append(u\"--net\")\n\n env[\"SINGULARITYENV_TMPDIR\"] = \"/tmp\"\n env[\"SINGULARITYENV_HOME\"] = self.builder.outdir\n\n for name, value in self.environment.items():\n env[\"SINGULARITYENV_{}\".format(name)] = value\n return runtime\n", "path": "cwltool/singularity.py"}], "after_files": [{"content": "\"\"\"Support for executing Docker containers using Singularity.\"\"\"\nfrom __future__ import absolute_import\nimport logging\nimport os\nimport re\nimport shutil\nimport sys\nfrom io import open # pylint: disable=redefined-builtin\nfrom typing import (Dict, List, Text, Optional, MutableMapping)\nfrom .errors import WorkflowException\nfrom .job import ContainerCommandLineJob\nfrom .pathmapper import PathMapper, ensure_writable\nfrom .process import (UnsupportedRequirement)\nfrom .utils import docker_windows_path_adjust\nfrom schema_salad.sourceline import SourceLine\nif os.name == 'posix' and sys.version_info[0] < 3:\n from subprocess32 import (check_call, check_output, # pylint: disable=import-error\n CalledProcessError, DEVNULL, PIPE, Popen,\n TimeoutExpired)\nelif os.name == 'posix':\n from subprocess import (check_call, check_output, # type: ignore\n CalledProcessError, DEVNULL, PIPE, Popen,\n TimeoutExpired)\nelse: # we're not on Unix, so none of this matters\n pass\n\n_logger = logging.getLogger(\"cwltool\")\n_USERNS = None\n\ndef _singularity_supports_userns(): # type: ()->bool\n global _USERNS # pylint: disable=global-statement\n if _USERNS is None:\n try:\n result = Popen(\n [u\"singularity\", u\"exec\", u\"--userns\", u\"/etc\", u\"true\"],\n stderr=PIPE, stdout=DEVNULL,\n universal_newlines=True).communicate(timeout=60)[1]\n _USERNS = \"No valid /bin/sh\" in result\n except TimeoutExpired:\n _USERNS = False\n return _USERNS\n\ndef _normalizeImageId(string): # type: (Text)->Text\n candidate = re.sub(pattern=r'([a-z]*://)', repl=r'', string=string)\n return re.sub(pattern=r'[:/]', repl=r'-', string=candidate) + \".img\"\n\n\nclass SingularityCommandLineJob(ContainerCommandLineJob):\n\n @staticmethod\n def get_image(dockerRequirement, # type: Dict[Text, Text]\n pull_image, # type: bool\n force_pull=False # type: bool\n ):\n # type: (...) -> bool\n \"\"\"\n Acquire the software container image in the specified dockerRequirement\n using Singularity and returns the success as a bool. Updates the\n provided dockerRequirement with the specific dockerImageId to the full\n path of the local image, if found. 
Likewise the\n dockerRequirement['dockerPull'] is updated to a docker:// URI if needed.\n \"\"\"\n found = False\n\n candidates = []\n\n if \"dockerImageId\" not in dockerRequirement and \"dockerPull\" in dockerRequirement:\n match = re.search(pattern=r'([a-z]*://)', string=dockerRequirement[\"dockerPull\"])\n candidate = _normalizeImageId(dockerRequirement['dockerPull'])\n candidates.append(candidate)\n dockerRequirement['dockerImageId'] = candidate\n if not match:\n dockerRequirement[\"dockerPull\"] = \"docker://\" + dockerRequirement[\"dockerPull\"]\n elif \"dockerImageId\" in dockerRequirement:\n candidates.append(dockerRequirement['dockerImageId'])\n candidates.append(_normalizeImageId(dockerRequirement['dockerImageId']))\n\n # check if Singularity image is available in $SINGULARITY_CACHEDIR\n targets = [os.getcwd()]\n for env in (\"SINGULARITY_CACHEDIR\", \"SINGULARITY_PULLFOLDER\"):\n if env in os.environ:\n targets.append(os.environ[env])\n for target in targets:\n for candidate in candidates:\n path = os.path.join(target, candidate)\n if os.path.isfile(path):\n _logger.info(\"Using local copy of Singularity image \"\n \"found in {}\".format(target))\n dockerRequirement[\"dockerImageId\"] = path\n found = True\n\n if (force_pull or not found) and pull_image:\n cmd = [] # type: List[Text]\n if \"dockerPull\" in dockerRequirement:\n cmd = [\"singularity\", \"pull\", \"--force\", \"--name\",\n str(dockerRequirement[\"dockerImageId\"]),\n str(dockerRequirement[\"dockerPull\"])]\n _logger.info(Text(cmd))\n check_call(cmd, stdout=sys.stderr)\n found = True\n elif \"dockerFile\" in dockerRequirement:\n raise WorkflowException(SourceLine(\n dockerRequirement, 'dockerFile').makeError(\n \"dockerFile is not currently supported when using the \"\n \"Singularity runtime for Docker containers.\"))\n elif \"dockerLoad\" in dockerRequirement:\n raise WorkflowException(SourceLine(\n dockerRequirement, 'dockerLoad').makeError(\n \"dockerLoad is not currently supported when using the \"\n \"Singularity runtime for Docker containers.\"))\n elif \"dockerImport\" in dockerRequirement:\n raise WorkflowException(SourceLine(\n dockerRequirement, 'dockerImport').makeError(\n \"dockerImport is not currently supported when using the \"\n \"Singularity runtime for Docker containers.\"))\n\n return found\n\n def get_from_requirements(self,\n r, # type: Optional[Dict[Text, Text]]\n req, # type: bool\n pull_image, # type: bool\n force_pull=False, # type: bool\n tmp_outdir_prefix=None # type: Text\n ): # type: (...) 
-> Text\n \"\"\"\n Returns the filename of the Singularity image (e.g.\n hello-world-latest.img).\n \"\"\"\n\n if r:\n errmsg = None\n try:\n check_output([\"singularity\", \"--version\"])\n except CalledProcessError as err:\n errmsg = \"Cannot execute 'singularity --version' {}\".format(err)\n except OSError as err:\n errmsg = \"'singularity' executable not found: {}\".format(err)\n\n if errmsg:\n if req:\n raise WorkflowException(errmsg)\n else:\n return None\n\n if self.get_image(r, pull_image, force_pull):\n return os.path.abspath(r[\"dockerImageId\"])\n else:\n if req:\n raise WorkflowException(u\"Container image {} not \"\n \"found\".format(r[\"dockerImageId\"]))\n\n return None\n\n def add_volumes(self, pathmapper, runtime, stage_output):\n # type: (PathMapper, List[Text], bool) -> None\n\n host_outdir = self.outdir\n container_outdir = self.builder.outdir\n for _, vol in pathmapper.items():\n if not vol.staged:\n continue\n if stage_output and not vol.target.startswith(container_outdir):\n containertgt = container_outdir + vol.target[len(host_outdir):]\n else:\n containertgt = vol.target\n if vol.target.startswith(container_outdir + \"/\"):\n host_outdir_tgt = os.path.join(\n host_outdir, vol.target[len(container_outdir) + 1:])\n else:\n host_outdir_tgt = None\n if vol.type in (\"File\", \"Directory\"):\n if not vol.resolved.startswith(\"_:\"):\n runtime.append(u\"--bind\")\n runtime.append(\"{}:{}:ro\".format(\n docker_windows_path_adjust(vol.resolved),\n docker_windows_path_adjust(containertgt)))\n elif vol.type == \"WritableFile\":\n if self.inplace_update:\n runtime.append(u\"--bind\")\n runtime.append(u\"{}:{}:rw\".format(\n docker_windows_path_adjust(vol.resolved),\n docker_windows_path_adjust(containertgt)))\n else:\n shutil.copy(vol.resolved, host_outdir_tgt)\n ensure_writable(host_outdir_tgt)\n elif vol.type == \"WritableDirectory\":\n if vol.resolved.startswith(\"_:\"):\n os.makedirs(host_outdir_tgt, 0o0755)\n else:\n if self.inplace_update:\n runtime.append(u\"--bind\")\n runtime.append(u\"{}:{}:rw\".format(\n docker_windows_path_adjust(vol.resolved),\n docker_windows_path_adjust(containertgt)))\n else:\n shutil.copytree(vol.resolved, vol.target)\n elif vol.type == \"CreateFile\":\n createtmp = os.path.join(host_outdir, os.path.basename(vol.target))\n with open(createtmp, \"wb\") as tmp:\n tmp.write(vol.resolved.encode(\"utf-8\"))\n runtime.append(u\"--bind\")\n runtime.append(u\"{}:{}:ro\".format(\n docker_windows_path_adjust(createtmp),\n docker_windows_path_adjust(vol.target)))\n\n def create_runtime(self,\n env, # type: MutableMapping[Text, Text]\n rm_container=True, # type: bool\n record_container_id=False, # type: bool\n cidfile_dir=\"\", # type: Text\n cidfile_prefix=\"\", # type: Text\n **kwargs\n ):\n # type: (...) 
-> List\n \"\"\" Returns the Singularity runtime list of commands and options.\"\"\"\n\n runtime = [u\"singularity\", u\"--quiet\", u\"exec\", u\"--contain\", u\"--pid\",\n u\"--ipc\"]\n if _singularity_supports_userns():\n runtime.append(u\"--userns\")\n runtime.append(u\"--bind\")\n runtime.append(u\"{}:{}:rw\".format(\n docker_windows_path_adjust(os.path.realpath(self.outdir)),\n self.builder.outdir))\n runtime.append(u\"--bind\")\n runtime.append(u\"{}:{}:rw\".format(\n docker_windows_path_adjust(os.path.realpath(self.tmpdir)), \"/tmp\"))\n\n self.add_volumes(self.pathmapper, runtime, stage_output=False)\n if self.generatemapper:\n self.add_volumes(self.generatemapper, runtime, stage_output=True)\n\n runtime.append(u\"--pwd\")\n runtime.append(\"%s\" % (docker_windows_path_adjust(self.builder.outdir)))\n\n if kwargs.get(\"custom_net\", None) is not None:\n raise UnsupportedRequirement(\n \"Singularity implementation does not support custom networking\")\n elif kwargs.get(\"disable_net\", None):\n runtime.append(u\"--net\")\n\n env[\"SINGULARITYENV_TMPDIR\"] = \"/tmp\"\n env[\"SINGULARITYENV_HOME\"] = self.builder.outdir\n\n for name, value in self.environment.items():\n env[\"SINGULARITYENV_{}\".format(name)] = value\n return runtime\n", "path": "cwltool/singularity.py"}]} | 3,116 | 341 |
gh_patches_debug_20481 | rasdani/github-patches | git_diff | pulp__pulpcore-2518 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Uploading large files fails on the "Too many open files" error
Author: @lubosmj (lmjachky)
Redmine Issue: 9634, https://pulp.plan.io/issues/9634
---
Steps to reproduce the behaviour:
```
(pulp) [vagrant@pulp3-source-fedora34 pulp_ostree]$ truncate -s 3G gentoo_root.img
(pulp) [vagrant@pulp3-source-fedora34 pulp_ostree]$ pulp artifact upload --file gentoo_root.img
Uploading file gentoo_root.img
................[truncated the number of dots]...................Upload complete. Creating artifact.
Started background task /pulp/api/v3/tasks/2d3cf569-2d5c-449d-9ac6-7a53ab5f3a30/
........Error: Task /pulp/api/v3/tasks/2d3cf569-2d5c-449d-9ac6-7a53ab5f3a30/ failed: '[Errno 24] Too many open files: '/var/lib/pulp/media/upload/ec2d5c92-57e9-4191-ae95-ff4b9ec94353''
(pulp) [vagrant@pulp3-source-fedora34 pulp_ostree]$ ls -la gentoo_root.img
-rw-r--r--. 1 vagrant vagrant 3221225472 Dec 13 11:32 gentoo_root.img
```
Traceback:
```
Task 2d3cf569-2d5c-449d-9ac6-7a53ab5f3a30 failed ([Errno 24] Too many open files: '/var/lib/pulp/media/upload/ec2d5c92-57e9-4191-ae95-ff4b9ec94353')
pulp [3a3a9ea662994f609eea7d43ac8f30aa]: pulpcore.tasking.pulpcore_worker:INFO: File "/home/vagrant/devel/pulpcore/pulpcore/tasking/pulpcore_worker.py", line 362, in _perform_task
result = func(*args, **kwargs)
File "/home/vagrant/devel/pulpcore/pulpcore/app/tasks/upload.py", line 31, in commit
temp_file.write(chunk.file.read())
File "/usr/local/lib/pulp/lib64/python3.9/site-packages/django/core/files/utils.py", line 42, in <lambda>
read = property(lambda self: self.file.read)
File "/usr/local/lib/pulp/lib64/python3.9/site-packages/django/db/models/fields/files.py", line 45, in _get_file
self._file = self.storage.open(self.name, 'rb')
File "/usr/local/lib/pulp/lib64/python3.9/site-packages/django/core/files/storage.py", line 38, in open
return self._open(name, mode)
File "/usr/local/lib/pulp/lib64/python3.9/site-packages/django/core/files/storage.py", line 238, in _open
return File(open(self.path(name), mode))
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/importexport.py`
Content:
```
1 import os
2 import io
3 import json
4 import tarfile
5 import tempfile
6
7 from django.conf import settings
8
9 from pulpcore.app.apps import get_plugin_config
10 from pulpcore.app.models.progress import ProgressReport
11 from pulpcore.app.models.repository import Repository
12 from pulpcore.app.modelresource import (
13 ArtifactResource,
14 ContentArtifactResource,
15 RepositoryResource,
16 )
17 from pulpcore.constants import TASK_STATES
18
19
20 def _write_export(the_tarfile, resource, dest_dir=None):
21 """
22 Write the JSON export for the specified resource to the specified tarfile.
23
24 The resulting file will be found at <dest_dir>/<resource.__class__.__name__>.json. If dest_dir
25 is None, the file will be added at the 'top level' of the_tarfile.
26
27 Export-files are UTF-8 encoded.
28
29 Args:
30 the_tarfile (tarfile.Tarfile): tarfile we are writing into
31 resource (import_export.resources.ModelResource): ModelResource to be exported
32 dest_dir str(directory-path): directory 'inside' the tarfile to write to
33 """
34 filename = "{}.{}.json".format(resource.__module__, type(resource).__name__)
35 dataset = resource.export(resource.queryset)
36 if dest_dir:
37 dest_filename = os.path.join(dest_dir, filename)
38 else:
39 dest_filename = filename
40
41 data = dataset.json.encode("utf8")
42 info = tarfile.TarInfo(name=dest_filename)
43 info.size = len(data)
44 the_tarfile.addfile(info, io.BytesIO(data))
45
46
47 def export_versions(export, version_info):
48 """
49 Write a JSON list of plugins and their versions as 'versions.json' to export.tarfile
50
51 Output format is [{"component": "<pluginname>", "version": "<pluginversion>"},...]
52
53 Args:
54 export (django.db.models.PulpExport): export instance that's doing the export
55 version_info (set): set of (distribution-label,version) tuples for repos in this export
56 """
57 # build the version-list from the distributions for each component
58 versions = [{"component": label, "version": version} for (label, version) in version_info]
59
60 version_json = json.dumps(versions).encode("utf8")
61 info = tarfile.TarInfo(name="versions.json")
62 info.size = len(version_json)
63 export.tarfile.addfile(info, io.BytesIO(version_json))
64
65
66 def export_artifacts(export, artifacts):
67 """
68 Export a set of Artifacts, ArtifactResources, and RepositoryResources
69
70 Args:
71 export (django.db.models.PulpExport): export instance that's doing the export
72 artifacts (django.db.models.Artifacts): list of artifacts in all repos being exported
73
74 Raises:
75 ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
76 """
77 data = dict(message="Exporting Artifacts", code="export.artifacts", total=len(artifacts))
78 with ProgressReport(**data) as pb:
79 for artifact in pb.iter(artifacts):
80 dest = artifact.file.name
81 if settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem":
82 with tempfile.TemporaryDirectory() as temp_dir:
83 with tempfile.NamedTemporaryFile(dir=temp_dir) as temp_file:
84 temp_file.write(artifact.file.read())
85 temp_file.flush()
86 export.tarfile.add(temp_file.name, dest)
87 else:
88 export.tarfile.add(artifact.file.path, dest)
89
90 resource = ArtifactResource()
91 resource.queryset = artifacts
92 _write_export(export.tarfile, resource)
93
94 resource = RepositoryResource()
95 resource.queryset = Repository.objects.filter(pk__in=export.exporter.repositories.all())
96 _write_export(export.tarfile, resource)
97
98
99 def export_content(export, repository_version):
100 """
101 Export db-content, and the db-content of the owning repositories
102
103 Args:
104 export (django.db.models.PulpExport): export instance that's doing the export
105 repository_version (django.db.models.RepositoryVersion): RepositoryVersion being exported
106 """
107
108 def _combine_content_mappings(map1, map2):
109 """Combine two content mapping dicts into one by combining ids for for each key."""
110 result = {}
111 for key in map1.keys() | map2.keys():
112 result[key] = list(set(map1.get(key, []) + map2.get(key, [])))
113 return result
114
115 dest_dir = os.path.join(
116 "repository-{}_{}".format(
117 str(repository_version.repository.name), repository_version.number
118 )
119 )
120
121 # content mapping is used by repo versions with subrepos (eg distribution tree repos)
122 content_mapping = {}
123
124 # find and export any ModelResource found in pulp_<repo-type>.app.modelresource
125 plugin_name = repository_version.repository.pulp_type.split(".")[0]
126 cfg = get_plugin_config(plugin_name)
127 if cfg.exportable_classes:
128 for cls in cfg.exportable_classes:
129 resource = cls(repository_version)
130 _write_export(export.tarfile, resource, dest_dir)
131
132 if hasattr(resource, "content_mapping") and resource.content_mapping:
133 content_mapping = _combine_content_mappings(
134 content_mapping, resource.content_mapping
135 )
136
137 # Export the connection between content and artifacts
138 resource = ContentArtifactResource(repository_version, content_mapping)
139 _write_export(export.tarfile, resource, dest_dir)
140
141 msg = (
142 f"Exporting content for {plugin_name} "
143 f"repository-version {repository_version.repository.name}/{repository_version.number}"
144 )
145 content_count = repository_version.content.count()
146 data = dict(
147 message=msg,
148 code="export.repo.version.content",
149 total=content_count,
150 done=content_count,
151 state=TASK_STATES.COMPLETED,
152 )
153 pb = ProgressReport(**data)
154 pb.save()
155
156 if content_mapping:
157 # write the content mapping to tarfile
158 cm_json = json.dumps(content_mapping).encode("utf8")
159 info = tarfile.TarInfo(name=f"{dest_dir}/content_mapping.json")
160 info.size = len(cm_json)
161 export.tarfile.addfile(info, io.BytesIO(cm_json))
162
```
Path: `pulpcore/app/tasks/upload.py`
Content:
```
1 from gettext import gettext as _
2 from logging import getLogger
3 from tempfile import NamedTemporaryFile
4
5 from django.core.files import File
6
7 from pulpcore.app import files, models
8 from pulpcore.app.models import CreatedResource
9 from pulpcore.app.serializers import ArtifactSerializer
10
11 log = getLogger(__name__)
12
13
14 def commit(upload_id, sha256):
15 """
16 Commit the upload and turn it into an artifact.
17
18 Args:
19 upload_id (int): The upload primary key
20 sha256 (str): The checksum for the uploaded file
21 """
22 try:
23 upload = models.Upload.objects.get(pk=upload_id)
24 except models.Upload.DoesNotExist:
25 log.info(_("The upload was not found. Nothing to do."))
26 return
27
28 chunks = models.UploadChunk.objects.filter(upload=upload).order_by("offset")
29 with NamedTemporaryFile("ab") as temp_file:
30 for chunk in chunks:
31 temp_file.write(chunk.file.read())
32 temp_file.flush()
33
34 file = files.PulpTemporaryUploadedFile.from_file(File(open(temp_file.name, "rb")))
35
36 data = {"file": file, "sha256": sha256}
37 serializer = ArtifactSerializer(data=data)
38 serializer.is_valid(raise_exception=True)
39 artifact = serializer.save()
40
41 resource = CreatedResource(content_object=artifact)
42 resource.save()
43
44 # delete the upload since it can't be reused to create another artifact
45 upload.delete()
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -83,6 +83,7 @@
with tempfile.NamedTemporaryFile(dir=temp_dir) as temp_file:
temp_file.write(artifact.file.read())
temp_file.flush()
+ artifact.file.close()
export.tarfile.add(temp_file.name, dest)
else:
export.tarfile.add(artifact.file.path, dest)
diff --git a/pulpcore/app/tasks/upload.py b/pulpcore/app/tasks/upload.py
--- a/pulpcore/app/tasks/upload.py
+++ b/pulpcore/app/tasks/upload.py
@@ -29,6 +29,7 @@
with NamedTemporaryFile("ab") as temp_file:
for chunk in chunks:
temp_file.write(chunk.file.read())
+ chunk.file.close()
temp_file.flush()
file = files.PulpTemporaryUploadedFile.from_file(File(open(temp_file.name, "rb")))
| {"golden_diff": "diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py\n--- a/pulpcore/app/importexport.py\n+++ b/pulpcore/app/importexport.py\n@@ -83,6 +83,7 @@\n with tempfile.NamedTemporaryFile(dir=temp_dir) as temp_file:\n temp_file.write(artifact.file.read())\n temp_file.flush()\n+ artifact.file.close()\n export.tarfile.add(temp_file.name, dest)\n else:\n export.tarfile.add(artifact.file.path, dest)\ndiff --git a/pulpcore/app/tasks/upload.py b/pulpcore/app/tasks/upload.py\n--- a/pulpcore/app/tasks/upload.py\n+++ b/pulpcore/app/tasks/upload.py\n@@ -29,6 +29,7 @@\n with NamedTemporaryFile(\"ab\") as temp_file:\n for chunk in chunks:\n temp_file.write(chunk.file.read())\n+ chunk.file.close()\n temp_file.flush()\n \n file = files.PulpTemporaryUploadedFile.from_file(File(open(temp_file.name, \"rb\")))\n", "issue": "Uploading large files fails on the \"Too many open files\" error\nAuthor: @lubosmj (lmjachky)\n\n\nRedmine Issue: 9634, https://pulp.plan.io/issues/9634\n\n---\n\nSteps to reproduce the behaviour:\r\n```\r\n(pulp) [vagrant@pulp3-source-fedora34 pulp_ostree]$ truncate -s 3G gentoo_root.img\r\n\r\n(pulp) [vagrant@pulp3-source-fedora34 pulp_ostree]$ pulp artifact upload --file gentoo_root.img\r\nUploading file gentoo_root.img\r\n................[truncated the number of dots]...................Upload complete. Creating artifact.\r\nStarted background task /pulp/api/v3/tasks/2d3cf569-2d5c-449d-9ac6-7a53ab5f3a30/\r\n........Error: Task /pulp/api/v3/tasks/2d3cf569-2d5c-449d-9ac6-7a53ab5f3a30/ failed: '[Errno 24] Too many open files: '/var/lib/pulp/media/upload/ec2d5c92-57e9-4191-ae95-ff4b9ec94353''\r\n\r\n(pulp) [vagrant@pulp3-source-fedora34 pulp_ostree]$ ls -la gentoo_root.img\r\n-rw-r--r--. 1 vagrant vagrant 3221225472 Dec 13 11:32 gentoo_root.img\r\n```\r\n\r\nTraceback:\r\n```\r\nTask 2d3cf569-2d5c-449d-9ac6-7a53ab5f3a30 failed ([Errno 24] Too many open files: '/var/lib/pulp/media/upload/ec2d5c92-57e9-4191-ae95-ff4b9ec94353')\r\npulp [3a3a9ea662994f609eea7d43ac8f30aa]: pulpcore.tasking.pulpcore_worker:INFO: File \"/home/vagrant/devel/pulpcore/pulpcore/tasking/pulpcore_worker.py\", line 362, in _perform_task\r\n result = func(*args, **kwargs)\r\n File \"/home/vagrant/devel/pulpcore/pulpcore/app/tasks/upload.py\", line 31, in commit\r\n temp_file.write(chunk.file.read())\r\n File \"/usr/local/lib/pulp/lib64/python3.9/site-packages/django/core/files/utils.py\", line 42, in <lambda>\r\n read = property(lambda self: self.file.read)\r\n File \"/usr/local/lib/pulp/lib64/python3.9/site-packages/django/db/models/fields/files.py\", line 45, in _get_file\r\n self._file = self.storage.open(self.name, 'rb')\r\n File \"/usr/local/lib/pulp/lib64/python3.9/site-packages/django/core/files/storage.py\", line 38, in open\r\n return self._open(name, mode)\r\n File \"/usr/local/lib/pulp/lib64/python3.9/site-packages/django/core/files/storage.py\", line 238, in _open\r\n return File(open(self.path(name), mode))\r\n```\n\n\n\n", "before_files": [{"content": "import os\nimport io\nimport json\nimport tarfile\nimport tempfile\n\nfrom django.conf import settings\n\nfrom pulpcore.app.apps import get_plugin_config\nfrom pulpcore.app.models.progress import ProgressReport\nfrom pulpcore.app.models.repository import Repository\nfrom pulpcore.app.modelresource import (\n ArtifactResource,\n ContentArtifactResource,\n RepositoryResource,\n)\nfrom pulpcore.constants import TASK_STATES\n\n\ndef _write_export(the_tarfile, resource, dest_dir=None):\n \"\"\"\n Write the JSON export for the 
specified resource to the specified tarfile.\n\n The resulting file will be found at <dest_dir>/<resource.__class__.__name__>.json. If dest_dir\n is None, the file will be added at the 'top level' of the_tarfile.\n\n Export-files are UTF-8 encoded.\n\n Args:\n the_tarfile (tarfile.Tarfile): tarfile we are writing into\n resource (import_export.resources.ModelResource): ModelResource to be exported\n dest_dir str(directory-path): directory 'inside' the tarfile to write to\n \"\"\"\n filename = \"{}.{}.json\".format(resource.__module__, type(resource).__name__)\n dataset = resource.export(resource.queryset)\n if dest_dir:\n dest_filename = os.path.join(dest_dir, filename)\n else:\n dest_filename = filename\n\n data = dataset.json.encode(\"utf8\")\n info = tarfile.TarInfo(name=dest_filename)\n info.size = len(data)\n the_tarfile.addfile(info, io.BytesIO(data))\n\n\ndef export_versions(export, version_info):\n \"\"\"\n Write a JSON list of plugins and their versions as 'versions.json' to export.tarfile\n\n Output format is [{\"component\": \"<pluginname>\", \"version\": \"<pluginversion>\"},...]\n\n Args:\n export (django.db.models.PulpExport): export instance that's doing the export\n version_info (set): set of (distribution-label,version) tuples for repos in this export\n \"\"\"\n # build the version-list from the distributions for each component\n versions = [{\"component\": label, \"version\": version} for (label, version) in version_info]\n\n version_json = json.dumps(versions).encode(\"utf8\")\n info = tarfile.TarInfo(name=\"versions.json\")\n info.size = len(version_json)\n export.tarfile.addfile(info, io.BytesIO(version_json))\n\n\ndef export_artifacts(export, artifacts):\n \"\"\"\n Export a set of Artifacts, ArtifactResources, and RepositoryResources\n\n Args:\n export (django.db.models.PulpExport): export instance that's doing the export\n artifacts (django.db.models.Artifacts): list of artifacts in all repos being exported\n\n Raises:\n ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting\n \"\"\"\n data = dict(message=\"Exporting Artifacts\", code=\"export.artifacts\", total=len(artifacts))\n with ProgressReport(**data) as pb:\n for artifact in pb.iter(artifacts):\n dest = artifact.file.name\n if settings.DEFAULT_FILE_STORAGE != \"pulpcore.app.models.storage.FileSystem\":\n with tempfile.TemporaryDirectory() as temp_dir:\n with tempfile.NamedTemporaryFile(dir=temp_dir) as temp_file:\n temp_file.write(artifact.file.read())\n temp_file.flush()\n export.tarfile.add(temp_file.name, dest)\n else:\n export.tarfile.add(artifact.file.path, dest)\n\n resource = ArtifactResource()\n resource.queryset = artifacts\n _write_export(export.tarfile, resource)\n\n resource = RepositoryResource()\n resource.queryset = Repository.objects.filter(pk__in=export.exporter.repositories.all())\n _write_export(export.tarfile, resource)\n\n\ndef export_content(export, repository_version):\n \"\"\"\n Export db-content, and the db-content of the owning repositories\n\n Args:\n export (django.db.models.PulpExport): export instance that's doing the export\n repository_version (django.db.models.RepositoryVersion): RepositoryVersion being exported\n \"\"\"\n\n def _combine_content_mappings(map1, map2):\n \"\"\"Combine two content mapping dicts into one by combining ids for for each key.\"\"\"\n result = {}\n for key in map1.keys() | map2.keys():\n result[key] = list(set(map1.get(key, []) + map2.get(key, [])))\n return result\n\n dest_dir = os.path.join(\n \"repository-{}_{}\".format(\n 
str(repository_version.repository.name), repository_version.number\n )\n )\n\n # content mapping is used by repo versions with subrepos (eg distribution tree repos)\n content_mapping = {}\n\n # find and export any ModelResource found in pulp_<repo-type>.app.modelresource\n plugin_name = repository_version.repository.pulp_type.split(\".\")[0]\n cfg = get_plugin_config(plugin_name)\n if cfg.exportable_classes:\n for cls in cfg.exportable_classes:\n resource = cls(repository_version)\n _write_export(export.tarfile, resource, dest_dir)\n\n if hasattr(resource, \"content_mapping\") and resource.content_mapping:\n content_mapping = _combine_content_mappings(\n content_mapping, resource.content_mapping\n )\n\n # Export the connection between content and artifacts\n resource = ContentArtifactResource(repository_version, content_mapping)\n _write_export(export.tarfile, resource, dest_dir)\n\n msg = (\n f\"Exporting content for {plugin_name} \"\n f\"repository-version {repository_version.repository.name}/{repository_version.number}\"\n )\n content_count = repository_version.content.count()\n data = dict(\n message=msg,\n code=\"export.repo.version.content\",\n total=content_count,\n done=content_count,\n state=TASK_STATES.COMPLETED,\n )\n pb = ProgressReport(**data)\n pb.save()\n\n if content_mapping:\n # write the content mapping to tarfile\n cm_json = json.dumps(content_mapping).encode(\"utf8\")\n info = tarfile.TarInfo(name=f\"{dest_dir}/content_mapping.json\")\n info.size = len(cm_json)\n export.tarfile.addfile(info, io.BytesIO(cm_json))\n", "path": "pulpcore/app/importexport.py"}, {"content": "from gettext import gettext as _\nfrom logging import getLogger\nfrom tempfile import NamedTemporaryFile\n\nfrom django.core.files import File\n\nfrom pulpcore.app import files, models\nfrom pulpcore.app.models import CreatedResource\nfrom pulpcore.app.serializers import ArtifactSerializer\n\nlog = getLogger(__name__)\n\n\ndef commit(upload_id, sha256):\n \"\"\"\n Commit the upload and turn it into an artifact.\n\n Args:\n upload_id (int): The upload primary key\n sha256 (str): The checksum for the uploaded file\n \"\"\"\n try:\n upload = models.Upload.objects.get(pk=upload_id)\n except models.Upload.DoesNotExist:\n log.info(_(\"The upload was not found. 
Nothing to do.\"))\n return\n\n chunks = models.UploadChunk.objects.filter(upload=upload).order_by(\"offset\")\n with NamedTemporaryFile(\"ab\") as temp_file:\n for chunk in chunks:\n temp_file.write(chunk.file.read())\n temp_file.flush()\n\n file = files.PulpTemporaryUploadedFile.from_file(File(open(temp_file.name, \"rb\")))\n\n data = {\"file\": file, \"sha256\": sha256}\n serializer = ArtifactSerializer(data=data)\n serializer.is_valid(raise_exception=True)\n artifact = serializer.save()\n\n resource = CreatedResource(content_object=artifact)\n resource.save()\n\n # delete the upload since it can't be reused to create another artifact\n upload.delete()\n", "path": "pulpcore/app/tasks/upload.py"}], "after_files": [{"content": "import os\nimport io\nimport json\nimport tarfile\nimport tempfile\n\nfrom django.conf import settings\n\nfrom pulpcore.app.apps import get_plugin_config\nfrom pulpcore.app.models.progress import ProgressReport\nfrom pulpcore.app.models.repository import Repository\nfrom pulpcore.app.modelresource import (\n ArtifactResource,\n ContentArtifactResource,\n RepositoryResource,\n)\nfrom pulpcore.constants import TASK_STATES\n\n\ndef _write_export(the_tarfile, resource, dest_dir=None):\n \"\"\"\n Write the JSON export for the specified resource to the specified tarfile.\n\n The resulting file will be found at <dest_dir>/<resource.__class__.__name__>.json. If dest_dir\n is None, the file will be added at the 'top level' of the_tarfile.\n\n Export-files are UTF-8 encoded.\n\n Args:\n the_tarfile (tarfile.Tarfile): tarfile we are writing into\n resource (import_export.resources.ModelResource): ModelResource to be exported\n dest_dir str(directory-path): directory 'inside' the tarfile to write to\n \"\"\"\n filename = \"{}.{}.json\".format(resource.__module__, type(resource).__name__)\n dataset = resource.export(resource.queryset)\n if dest_dir:\n dest_filename = os.path.join(dest_dir, filename)\n else:\n dest_filename = filename\n\n data = dataset.json.encode(\"utf8\")\n info = tarfile.TarInfo(name=dest_filename)\n info.size = len(data)\n the_tarfile.addfile(info, io.BytesIO(data))\n\n\ndef export_versions(export, version_info):\n \"\"\"\n Write a JSON list of plugins and their versions as 'versions.json' to export.tarfile\n\n Output format is [{\"component\": \"<pluginname>\", \"version\": \"<pluginversion>\"},...]\n\n Args:\n export (django.db.models.PulpExport): export instance that's doing the export\n version_info (set): set of (distribution-label,version) tuples for repos in this export\n \"\"\"\n # build the version-list from the distributions for each component\n versions = [{\"component\": label, \"version\": version} for (label, version) in version_info]\n\n version_json = json.dumps(versions).encode(\"utf8\")\n info = tarfile.TarInfo(name=\"versions.json\")\n info.size = len(version_json)\n export.tarfile.addfile(info, io.BytesIO(version_json))\n\n\ndef export_artifacts(export, artifacts):\n \"\"\"\n Export a set of Artifacts, ArtifactResources, and RepositoryResources\n\n Args:\n export (django.db.models.PulpExport): export instance that's doing the export\n artifacts (django.db.models.Artifacts): list of artifacts in all repos being exported\n\n Raises:\n ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting\n \"\"\"\n data = dict(message=\"Exporting Artifacts\", code=\"export.artifacts\", total=len(artifacts))\n with ProgressReport(**data) as pb:\n for artifact in pb.iter(artifacts):\n dest = artifact.file.name\n if 
settings.DEFAULT_FILE_STORAGE != \"pulpcore.app.models.storage.FileSystem\":\n with tempfile.TemporaryDirectory() as temp_dir:\n with tempfile.NamedTemporaryFile(dir=temp_dir) as temp_file:\n temp_file.write(artifact.file.read())\n temp_file.flush()\n artifact.file.close()\n export.tarfile.add(temp_file.name, dest)\n else:\n export.tarfile.add(artifact.file.path, dest)\n\n resource = ArtifactResource()\n resource.queryset = artifacts\n _write_export(export.tarfile, resource)\n\n resource = RepositoryResource()\n resource.queryset = Repository.objects.filter(pk__in=export.exporter.repositories.all())\n _write_export(export.tarfile, resource)\n\n\ndef export_content(export, repository_version):\n \"\"\"\n Export db-content, and the db-content of the owning repositories\n\n Args:\n export (django.db.models.PulpExport): export instance that's doing the export\n repository_version (django.db.models.RepositoryVersion): RepositoryVersion being exported\n \"\"\"\n\n def _combine_content_mappings(map1, map2):\n \"\"\"Combine two content mapping dicts into one by combining ids for for each key.\"\"\"\n result = {}\n for key in map1.keys() | map2.keys():\n result[key] = list(set(map1.get(key, []) + map2.get(key, [])))\n return result\n\n dest_dir = os.path.join(\n \"repository-{}_{}\".format(\n str(repository_version.repository.name), repository_version.number\n )\n )\n\n # content mapping is used by repo versions with subrepos (eg distribution tree repos)\n content_mapping = {}\n\n # find and export any ModelResource found in pulp_<repo-type>.app.modelresource\n plugin_name = repository_version.repository.pulp_type.split(\".\")[0]\n cfg = get_plugin_config(plugin_name)\n if cfg.exportable_classes:\n for cls in cfg.exportable_classes:\n resource = cls(repository_version)\n _write_export(export.tarfile, resource, dest_dir)\n\n if hasattr(resource, \"content_mapping\") and resource.content_mapping:\n content_mapping = _combine_content_mappings(\n content_mapping, resource.content_mapping\n )\n\n # Export the connection between content and artifacts\n resource = ContentArtifactResource(repository_version, content_mapping)\n _write_export(export.tarfile, resource, dest_dir)\n\n msg = (\n f\"Exporting content for {plugin_name} \"\n f\"repository-version {repository_version.repository.name}/{repository_version.number}\"\n )\n content_count = repository_version.content.count()\n data = dict(\n message=msg,\n code=\"export.repo.version.content\",\n total=content_count,\n done=content_count,\n state=TASK_STATES.COMPLETED,\n )\n pb = ProgressReport(**data)\n pb.save()\n\n if content_mapping:\n # write the content mapping to tarfile\n cm_json = json.dumps(content_mapping).encode(\"utf8\")\n info = tarfile.TarInfo(name=f\"{dest_dir}/content_mapping.json\")\n info.size = len(cm_json)\n export.tarfile.addfile(info, io.BytesIO(cm_json))\n", "path": "pulpcore/app/importexport.py"}, {"content": "from gettext import gettext as _\nfrom logging import getLogger\nfrom tempfile import NamedTemporaryFile\n\nfrom django.core.files import File\n\nfrom pulpcore.app import files, models\nfrom pulpcore.app.models import CreatedResource\nfrom pulpcore.app.serializers import ArtifactSerializer\n\nlog = getLogger(__name__)\n\n\ndef commit(upload_id, sha256):\n \"\"\"\n Commit the upload and turn it into an artifact.\n\n Args:\n upload_id (int): The upload primary key\n sha256 (str): The checksum for the uploaded file\n \"\"\"\n try:\n upload = models.Upload.objects.get(pk=upload_id)\n except models.Upload.DoesNotExist:\n 
log.info(_(\"The upload was not found. Nothing to do.\"))\n return\n\n chunks = models.UploadChunk.objects.filter(upload=upload).order_by(\"offset\")\n with NamedTemporaryFile(\"ab\") as temp_file:\n for chunk in chunks:\n temp_file.write(chunk.file.read())\n chunk.file.close()\n temp_file.flush()\n\n file = files.PulpTemporaryUploadedFile.from_file(File(open(temp_file.name, \"rb\")))\n\n data = {\"file\": file, \"sha256\": sha256}\n serializer = ArtifactSerializer(data=data)\n serializer.is_valid(raise_exception=True)\n artifact = serializer.save()\n\n resource = CreatedResource(content_object=artifact)\n resource.save()\n\n # delete the upload since it can't be reused to create another artifact\n upload.delete()\n", "path": "pulpcore/app/tasks/upload.py"}]} | 3,107 | 222 |
gh_patches_debug_6277 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-869 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Commands and \n after aren't processed by CommandHandler
<!--
Thanks for reporting issues of python-telegram-bot!
Use this template to notify us if you found a bug, or if you want to request a new feature.
If you're looking for help with programming your bot using our library, feel free to ask your
questions in our telegram group at: https://t.me/pythontelegrambotgroup
To make it easier for us to help you please enter detailed information below.
Please note, we only support the latest version of python-telegram-bot and
master branch. Please make sure to upgrade & recreate the issue on the latest
version prior to opening an issue.
-->
### Steps to reproduce
1. Make a bot with a CommandHandler
2. Send a command with \n after it without space! Like this:
```
/feedback
some text
```
### Expected behaviour
Command should not be ignored...
### Actual behaviour
...but it is ignored instead
### Configuration
**Operating System:**
Gentoo
**Version of Python, python-telegram-bot & dependencies:**
``$ python -m telegram``
```
Traceback (most recent call last):
File "/usr/lib64/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib64/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/kisik21/Projects/Nadeko/.virtualenv/lib64/python3.6/site-packages/telegram/__main__.py", line 3, in <module>
import urllib3
ModuleNotFoundError: No module named 'urllib3'
```
Okay, I installed urllib3...
```
python-telegram-bot 6.1.0
urllib3 1.22
certifi 2017.04.17
future 0.16.0
Python 3.6.1 (default, Sep 25 2017, 22:51:25) [GCC 5.4.0]
```
### Logs
Don't think this applies here.
The issue is probably the CommandHandler - I read the code, and it treats \n as part of the command string. We need to strip these while detecting command names, I think...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram/ext/commandhandler.py`
Content:
```
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2017
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains the CommandHandler class."""
20 import warnings
21
22 from future.utils import string_types
23
24 from .handler import Handler
25 from telegram import Update
26
27
28 class CommandHandler(Handler):
29 """Handler class to handle Telegram commands.
30
31 Commands are Telegram messages that start with ``/``, optionally followed by an ``@`` and the
32 bot's name and/or some additional text.
33
34 Attributes:
35 command (:obj:`str` | List[:obj:`str`]): The command or list of commands this handler
36 should listen for.
37 callback (:obj:`callable`): The callback function for this handler.
38 filters (:class:`telegram.ext.BaseFilter`): Optional. Only allow updates with these
39 Filters.
40 allow_edited (:obj:`bool`): Optional. Determines Whether the handler should also accept
41 edited messages.
42 pass_args (:obj:`bool`): Optional. Determines whether the handler should be passed
43 ``args``.
44 pass_update_queue (:obj:`bool`): Optional. Determines whether ``update_queue`` will be
45 passed to the callback function.
46 pass_job_queue (:obj:`bool`): Optional. Determines whether ``job_queue`` will be passed to
47 the callback function.
48 pass_user_data (:obj:`bool`): Optional. Determines whether ``user_data`` will be passed to
49 the callback function.
50 pass_chat_data (:obj:`bool`): Optional. Determines whether ``chat_data`` will be passed to
51 the callback function.
52
53 Note:
54 :attr:`pass_user_data` and :attr:`pass_chat_data` determine whether a ``dict`` you
55 can use to keep any data in will be sent to the :attr:`callback` function.. Related to
56 either the user or the chat that the update was sent in. For each update from the same user
57 or in the same chat, it will be the same ``dict``.
58
59 Args:
60 command (:obj:`str` | List[:obj:`str`]): The command or list of commands this handler
61 should listen for.
62 callback (:obj:`callable`): A function that takes ``bot, update`` as positional arguments.
63 It will be called when the :attr:`check_update` has determined that an update should be
64 processed by this handler.
65 filters (:class:`telegram.ext.BaseFilter`, optional): A filter inheriting from
66 :class:`telegram.ext.filters.BaseFilter`. Standard filters can be found in
67 :class:`telegram.ext.filters.Filters`. Filters can be combined using bitwise
68 operators (& for and, | for or, ~ for not).
69 allow_edited (:obj:`bool`, optional): Determines whether the handler should also accept
70 edited messages. Default is ``False``.
71 pass_args (:obj:`bool`, optional): Determines whether the handler should be passed the
72 arguments passed to the command as a keyword argument called ``args``. It will contain
73 a list of strings, which is the text following the command split on single or
74 consecutive whitespace characters. Default is ``False``
75 pass_update_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called
76 ``update_queue`` will be passed to the callback function. It will be the ``Queue``
77 instance used by the :class:`telegram.ext.Updater` and :class:`telegram.ext.Dispatcher`
78 that contains new updates which can be used to insert updates. Default is ``False``.
79 pass_job_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called
80 ``job_queue`` will be passed to the callback function. It will be a
81 :class:`telegram.ext.JobQueue` instance created by the :class:`telegram.ext.Updater`
82 which can be used to schedule new jobs. Default is ``False``.
83 pass_user_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called
84 ``user_data`` will be passed to the callback function. Default is ``False``.
85 pass_chat_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called
86 ``chat_data`` will be passed to the callback function. Default is ``False``.
87
88 """
89
90 def __init__(self,
91 command,
92 callback,
93 filters=None,
94 allow_edited=False,
95 pass_args=False,
96 pass_update_queue=False,
97 pass_job_queue=False,
98 pass_user_data=False,
99 pass_chat_data=False):
100 super(CommandHandler, self).__init__(
101 callback,
102 pass_update_queue=pass_update_queue,
103 pass_job_queue=pass_job_queue,
104 pass_user_data=pass_user_data,
105 pass_chat_data=pass_chat_data)
106
107 if isinstance(command, string_types):
108 self.command = [command.lower()]
109 else:
110 self.command = [x.lower() for x in command]
111 self.filters = filters
112 self.allow_edited = allow_edited
113 self.pass_args = pass_args
114
115 # We put this up here instead of with the rest of checking code
116 # in check_update since we don't wanna spam a ton
117 if isinstance(self.filters, list):
118 warnings.warn('Using a list of filters in MessageHandler is getting '
119 'deprecated, please use bitwise operators (& and |) '
120 'instead. More info: https://git.io/vPTbc.')
121
122 def check_update(self, update):
123 """Determines whether an update should be passed to this handlers :attr:`callback`.
124
125 Args:
126 update (:class:`telegram.Update`): Incoming telegram update.
127
128 Returns:
129 :obj:`bool`
130
131 """
132 if (isinstance(update, Update)
133 and (update.message or update.edited_message and self.allow_edited)):
134 message = update.message or update.edited_message
135
136 if message.text:
137 command = message.text[1:].split(' ')[0].split('@')
138 command.append(
139 message.bot.username) # in case the command was send without a username
140
141 if self.filters is None:
142 res = True
143 elif isinstance(self.filters, list):
144 res = any(func(message) for func in self.filters)
145 else:
146 res = self.filters(message)
147
148 return res and (message.text.startswith('/') and command[0].lower() in self.command
149 and command[1].lower() == message.bot.username.lower())
150 else:
151 return False
152
153 else:
154 return False
155
156 def handle_update(self, update, dispatcher):
157 """Send the update to the :attr:`callback`.
158
159 Args:
160 update (:class:`telegram.Update`): Incoming telegram update.
161 dispatcher (:class:`telegram.ext.Dispatcher`): Dispatcher that originated the Update.
162
163 """
164 optional_args = self.collect_optional_args(dispatcher, update)
165
166 message = update.message or update.edited_message
167
168 if self.pass_args:
169 optional_args['args'] = message.text.split()[1:]
170
171 return self.callback(dispatcher.bot, update, **optional_args)
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/telegram/ext/commandhandler.py b/telegram/ext/commandhandler.py
--- a/telegram/ext/commandhandler.py
+++ b/telegram/ext/commandhandler.py
@@ -134,7 +134,7 @@
message = update.message or update.edited_message
if message.text:
- command = message.text[1:].split(' ')[0].split('@')
+ command = message.text[1:].split(None, 1)[0].split('@')
command.append(
message.bot.username) # in case the command was send without a username
| {"golden_diff": "diff --git a/telegram/ext/commandhandler.py b/telegram/ext/commandhandler.py\n--- a/telegram/ext/commandhandler.py\n+++ b/telegram/ext/commandhandler.py\n@@ -134,7 +134,7 @@\n message = update.message or update.edited_message\n \n if message.text:\n- command = message.text[1:].split(' ')[0].split('@')\n+ command = message.text[1:].split(None, 1)[0].split('@')\n command.append(\n message.bot.username) # in case the command was send without a username\n", "issue": "Commands and \\n after aren't processed by CommandHandler\n<!--\r\nThanks for reporting issues of python-telegram-bot!\r\n\r\nUse this template to notify us if you found a bug, or if you want to request a new feature.\r\nIf you're looking for help with programming your bot using our library, feel free to ask your\r\nquestions in out telegram group at: https://t.me/pythontelegrambotgroup\r\n\r\nTo make it easier for us to help you please enter detailed information below.\r\n\r\nPlease note, we only support the latest version of python-telegram-bot and\r\nmaster branch. Please make sure to upgrade & recreate the issue on the latest\r\nversion prior to opening an issue.\r\n-->\r\n### Steps to reproduce\r\n1. Make a bot with a CommandHandler\r\n\r\n2. Send a command with \\n after it without space! Like this:\r\n```\r\n/feedback\r\nsome text\r\n```\r\n\r\n### Expected behaviour\r\nCommand should not be ignored...\r\n\r\n### Actual behaviour\r\n...but it is ignored instead\r\n\r\n### Configuration\r\n**Operating System:**\r\nGentoo\r\n\r\n**Version of Python, python-telegram-bot & dependencies:**\r\n\r\n``$ python -m telegram``\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib64/python3.6/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/lib64/python3.6/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/kisik21/Projects/Nadeko/.virtualenv/lib64/python3.6/site-packages/telegram/__main__.py\", line 3, in <module>\r\n import urllib3\r\nModuleNotFoundError: No module named 'urllib3'\r\n```\r\nOkay, I installed urllib3...\r\n```\r\npython-telegram-bot 6.1.0\r\nurllib3 1.22\r\ncertifi 2017.04.17\r\nfuture 0.16.0\r\nPython 3.6.1 (default, Sep 25 2017, 22:51:25) [GCC 5.4.0]\r\n```\r\n### Logs\r\nDon't think this applies here.\r\n\r\nThe issue is probably the CommandHandler - I read the code, it thinks of \\n as a part of command string. We need to strip these while detecting command names, I think...\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2017\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. 
If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the CommandHandler class.\"\"\"\nimport warnings\n\nfrom future.utils import string_types\n\nfrom .handler import Handler\nfrom telegram import Update\n\n\nclass CommandHandler(Handler):\n \"\"\"Handler class to handle Telegram commands.\n\n Commands are Telegram messages that start with ``/``, optionally followed by an ``@`` and the\n bot's name and/or some additional text.\n\n Attributes:\n command (:obj:`str` | List[:obj:`str`]): The command or list of commands this handler\n should listen for.\n callback (:obj:`callable`): The callback function for this handler.\n filters (:class:`telegram.ext.BaseFilter`): Optional. Only allow updates with these\n Filters.\n allow_edited (:obj:`bool`): Optional. Determines Whether the handler should also accept\n edited messages.\n pass_args (:obj:`bool`): Optional. Determines whether the handler should be passed\n ``args``.\n pass_update_queue (:obj:`bool`): Optional. Determines whether ``update_queue`` will be\n passed to the callback function.\n pass_job_queue (:obj:`bool`): Optional. Determines whether ``job_queue`` will be passed to\n the callback function.\n pass_user_data (:obj:`bool`): Optional. Determines whether ``user_data`` will be passed to\n the callback function.\n pass_chat_data (:obj:`bool`): Optional. Determines whether ``chat_data`` will be passed to\n the callback function.\n\n Note:\n :attr:`pass_user_data` and :attr:`pass_chat_data` determine whether a ``dict`` you\n can use to keep any data in will be sent to the :attr:`callback` function.. Related to\n either the user or the chat that the update was sent in. For each update from the same user\n or in the same chat, it will be the same ``dict``.\n\n Args:\n command (:obj:`str` | List[:obj:`str`]): The command or list of commands this handler\n should listen for.\n callback (:obj:`callable`): A function that takes ``bot, update`` as positional arguments.\n It will be called when the :attr:`check_update` has determined that an update should be\n processed by this handler.\n filters (:class:`telegram.ext.BaseFilter`, optional): A filter inheriting from\n :class:`telegram.ext.filters.BaseFilter`. Standard filters can be found in\n :class:`telegram.ext.filters.Filters`. Filters can be combined using bitwise\n operators (& for and, | for or, ~ for not).\n allow_edited (:obj:`bool`, optional): Determines whether the handler should also accept\n edited messages. Default is ``False``.\n pass_args (:obj:`bool`, optional): Determines whether the handler should be passed the\n arguments passed to the command as a keyword argument called ``args``. It will contain\n a list of strings, which is the text following the command split on single or\n consecutive whitespace characters. Default is ``False``\n pass_update_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``update_queue`` will be passed to the callback function. It will be the ``Queue``\n instance used by the :class:`telegram.ext.Updater` and :class:`telegram.ext.Dispatcher`\n that contains new updates which can be used to insert updates. Default is ``False``.\n pass_job_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``job_queue`` will be passed to the callback function. It will be a\n :class:`telegram.ext.JobQueue` instance created by the :class:`telegram.ext.Updater`\n which can be used to schedule new jobs. 
Default is ``False``.\n pass_user_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``user_data`` will be passed to the callback function. Default is ``False``.\n pass_chat_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``chat_data`` will be passed to the callback function. Default is ``False``.\n\n \"\"\"\n\n def __init__(self,\n command,\n callback,\n filters=None,\n allow_edited=False,\n pass_args=False,\n pass_update_queue=False,\n pass_job_queue=False,\n pass_user_data=False,\n pass_chat_data=False):\n super(CommandHandler, self).__init__(\n callback,\n pass_update_queue=pass_update_queue,\n pass_job_queue=pass_job_queue,\n pass_user_data=pass_user_data,\n pass_chat_data=pass_chat_data)\n\n if isinstance(command, string_types):\n self.command = [command.lower()]\n else:\n self.command = [x.lower() for x in command]\n self.filters = filters\n self.allow_edited = allow_edited\n self.pass_args = pass_args\n\n # We put this up here instead of with the rest of checking code\n # in check_update since we don't wanna spam a ton\n if isinstance(self.filters, list):\n warnings.warn('Using a list of filters in MessageHandler is getting '\n 'deprecated, please use bitwise operators (& and |) '\n 'instead. More info: https://git.io/vPTbc.')\n\n def check_update(self, update):\n \"\"\"Determines whether an update should be passed to this handlers :attr:`callback`.\n\n Args:\n update (:class:`telegram.Update`): Incoming telegram update.\n\n Returns:\n :obj:`bool`\n\n \"\"\"\n if (isinstance(update, Update)\n and (update.message or update.edited_message and self.allow_edited)):\n message = update.message or update.edited_message\n\n if message.text:\n command = message.text[1:].split(' ')[0].split('@')\n command.append(\n message.bot.username) # in case the command was send without a username\n\n if self.filters is None:\n res = True\n elif isinstance(self.filters, list):\n res = any(func(message) for func in self.filters)\n else:\n res = self.filters(message)\n\n return res and (message.text.startswith('/') and command[0].lower() in self.command\n and command[1].lower() == message.bot.username.lower())\n else:\n return False\n\n else:\n return False\n\n def handle_update(self, update, dispatcher):\n \"\"\"Send the update to the :attr:`callback`.\n\n Args:\n update (:class:`telegram.Update`): Incoming telegram update.\n dispatcher (:class:`telegram.ext.Dispatcher`): Dispatcher that originated the Update.\n\n \"\"\"\n optional_args = self.collect_optional_args(dispatcher, update)\n\n message = update.message or update.edited_message\n\n if self.pass_args:\n optional_args['args'] = message.text.split()[1:]\n\n return self.callback(dispatcher.bot, update, **optional_args)\n", "path": "telegram/ext/commandhandler.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2017\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the CommandHandler class.\"\"\"\nimport warnings\n\nfrom future.utils import string_types\n\nfrom .handler import Handler\nfrom telegram import Update\n\n\nclass CommandHandler(Handler):\n \"\"\"Handler class to handle Telegram commands.\n\n Commands are Telegram messages that start with ``/``, optionally followed by an ``@`` and the\n bot's name and/or some additional text.\n\n Attributes:\n command (:obj:`str` | List[:obj:`str`]): The command or list of commands this handler\n should listen for.\n callback (:obj:`callable`): The callback function for this handler.\n filters (:class:`telegram.ext.BaseFilter`): Optional. Only allow updates with these\n Filters.\n allow_edited (:obj:`bool`): Optional. Determines Whether the handler should also accept\n edited messages.\n pass_args (:obj:`bool`): Optional. Determines whether the handler should be passed\n ``args``.\n pass_update_queue (:obj:`bool`): Optional. Determines whether ``update_queue`` will be\n passed to the callback function.\n pass_job_queue (:obj:`bool`): Optional. Determines whether ``job_queue`` will be passed to\n the callback function.\n pass_user_data (:obj:`bool`): Optional. Determines whether ``user_data`` will be passed to\n the callback function.\n pass_chat_data (:obj:`bool`): Optional. Determines whether ``chat_data`` will be passed to\n the callback function.\n\n Note:\n :attr:`pass_user_data` and :attr:`pass_chat_data` determine whether a ``dict`` you\n can use to keep any data in will be sent to the :attr:`callback` function.. Related to\n either the user or the chat that the update was sent in. For each update from the same user\n or in the same chat, it will be the same ``dict``.\n\n Args:\n command (:obj:`str` | List[:obj:`str`]): The command or list of commands this handler\n should listen for.\n callback (:obj:`callable`): A function that takes ``bot, update`` as positional arguments.\n It will be called when the :attr:`check_update` has determined that an update should be\n processed by this handler.\n filters (:class:`telegram.ext.BaseFilter`, optional): A filter inheriting from\n :class:`telegram.ext.filters.BaseFilter`. Standard filters can be found in\n :class:`telegram.ext.filters.Filters`. Filters can be combined using bitwise\n operators (& for and, | for or, ~ for not).\n allow_edited (:obj:`bool`, optional): Determines whether the handler should also accept\n edited messages. Default is ``False``.\n pass_args (:obj:`bool`, optional): Determines whether the handler should be passed the\n arguments passed to the command as a keyword argument called ``args``. It will contain\n a list of strings, which is the text following the command split on single or\n consecutive whitespace characters. Default is ``False``\n pass_update_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``update_queue`` will be passed to the callback function. It will be the ``Queue``\n instance used by the :class:`telegram.ext.Updater` and :class:`telegram.ext.Dispatcher`\n that contains new updates which can be used to insert updates. Default is ``False``.\n pass_job_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``job_queue`` will be passed to the callback function. 
It will be a\n :class:`telegram.ext.JobQueue` instance created by the :class:`telegram.ext.Updater`\n which can be used to schedule new jobs. Default is ``False``.\n pass_user_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``user_data`` will be passed to the callback function. Default is ``False``.\n pass_chat_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``chat_data`` will be passed to the callback function. Default is ``False``.\n\n \"\"\"\n\n def __init__(self,\n command,\n callback,\n filters=None,\n allow_edited=False,\n pass_args=False,\n pass_update_queue=False,\n pass_job_queue=False,\n pass_user_data=False,\n pass_chat_data=False):\n super(CommandHandler, self).__init__(\n callback,\n pass_update_queue=pass_update_queue,\n pass_job_queue=pass_job_queue,\n pass_user_data=pass_user_data,\n pass_chat_data=pass_chat_data)\n\n if isinstance(command, string_types):\n self.command = [command.lower()]\n else:\n self.command = [x.lower() for x in command]\n self.filters = filters\n self.allow_edited = allow_edited\n self.pass_args = pass_args\n\n # We put this up here instead of with the rest of checking code\n # in check_update since we don't wanna spam a ton\n if isinstance(self.filters, list):\n warnings.warn('Using a list of filters in MessageHandler is getting '\n 'deprecated, please use bitwise operators (& and |) '\n 'instead. More info: https://git.io/vPTbc.')\n\n def check_update(self, update):\n \"\"\"Determines whether an update should be passed to this handlers :attr:`callback`.\n\n Args:\n update (:class:`telegram.Update`): Incoming telegram update.\n\n Returns:\n :obj:`bool`\n\n \"\"\"\n if (isinstance(update, Update)\n and (update.message or update.edited_message and self.allow_edited)):\n message = update.message or update.edited_message\n\n if message.text:\n command = message.text[1:].split(None, 1)[0].split('@')\n command.append(\n message.bot.username) # in case the command was send without a username\n\n if self.filters is None:\n res = True\n elif isinstance(self.filters, list):\n res = any(func(message) for func in self.filters)\n else:\n res = self.filters(message)\n\n return res and (message.text.startswith('/') and command[0].lower() in self.command\n and command[1].lower() == message.bot.username.lower())\n else:\n return False\n\n else:\n return False\n\n def handle_update(self, update, dispatcher):\n \"\"\"Send the update to the :attr:`callback`.\n\n Args:\n update (:class:`telegram.Update`): Incoming telegram update.\n dispatcher (:class:`telegram.ext.Dispatcher`): Dispatcher that originated the Update.\n\n \"\"\"\n optional_args = self.collect_optional_args(dispatcher, update)\n\n message = update.message or update.edited_message\n\n if self.pass_args:\n optional_args['args'] = message.text.split()[1:]\n\n return self.callback(dispatcher.bot, update, **optional_args)\n", "path": "telegram/ext/commandhandler.py"}]} | 2,854 | 124 |
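The one-line fix in the python-telegram-bot patch in the row above replaces `split(' ')` with `split(None, 1)`, so the command token is terminated by any whitespace character (including the newline from the bug report) rather than only by a literal space. A minimal plain-Python sketch of the difference, using a made-up message text:

```python
# Not part of the patch; it only illustrates the parsing difference.
text = "/feedback\nsome text"

old = text[1:].split(' ')[0]       # 'feedback\nsome'  (a newline is not a space, so it stays attached)
new = text[1:].split(None, 1)[0]   # 'feedback'        (split(None, ...) splits on any whitespace run)

assert old != "feedback"
assert new == "feedback"
```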
gh_patches_debug_63356 | rasdani/github-patches | git_diff | getredash__redash-716 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deleting and re-adding widgets to a dashboard breaks it
There's a specific set of steps that has messed up some of our users' dashboards:
1. Create a new dashboard
2. Add multiple widgets to it.
3. Remove all those widgets from the dashboard
4. Re-add some widgets
5. Click the "Edit Dashboard (Name/Layout)" button
6. Click Save without changing anything.
7. Refresh the page
This makes none of the widgets appear and causes the "Layout" array in the admin panel to contain one or more "null" values (depending on how many widgets you added/deleted):

The only way to recover from this state is to manually delete the "null" values through the admin interface.
This is on re:dash version 0.8.2
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/handlers/widgets.py`
Content:
```
1 import json
2
3 from flask import request
4
5 from redash import models
6 from redash.wsgi import api
7 from redash.permissions import require_permission
8 from redash.handlers.base import BaseResource
9
10
11 class WidgetListAPI(BaseResource):
12 @require_permission('edit_dashboard')
13 def post(self):
14 widget_properties = request.get_json(force=True)
15 widget_properties['options'] = json.dumps(widget_properties['options'])
16 widget_properties.pop('id', None)
17 widget_properties['dashboard'] = widget_properties.pop('dashboard_id')
18 widget_properties['visualization'] = widget_properties.pop('visualization_id')
19 widget = models.Widget(**widget_properties)
20 widget.save()
21
22 layout = json.loads(widget.dashboard.layout)
23 new_row = True
24
25 if len(layout) == 0 or widget.width == 2:
26 layout.append([widget.id])
27 elif len(layout[-1]) == 1:
28 neighbour_widget = models.Widget.get(models.Widget.id == layout[-1][0])
29 if neighbour_widget.width == 1:
30 layout[-1].append(widget.id)
31 new_row = False
32 else:
33 layout.append([widget.id])
34 else:
35 layout.append([widget.id])
36
37 widget.dashboard.layout = json.dumps(layout)
38 widget.dashboard.save()
39
40 return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row}
41
42
43 class WidgetAPI(BaseResource):
44 @require_permission('edit_dashboard')
45 def delete(self, widget_id):
46 widget = models.Widget.get(models.Widget.id == widget_id)
47 widget.delete_instance()
48
49 api.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets')
50 api.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py
--- a/redash/handlers/widgets.py
+++ b/redash/handlers/widgets.py
@@ -46,5 +46,7 @@
widget = models.Widget.get(models.Widget.id == widget_id)
widget.delete_instance()
+ return {'layout': widget.dashboard.layout }
+
api.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets')
api.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')
| {"golden_diff": "diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py\n--- a/redash/handlers/widgets.py\n+++ b/redash/handlers/widgets.py\n@@ -46,5 +46,7 @@\n widget = models.Widget.get(models.Widget.id == widget_id)\n widget.delete_instance()\n \n+ return {'layout': widget.dashboard.layout }\n+\n api.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets')\n api.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')\n", "issue": "Deleting and re-adding widgets to a dashboard breaks it\nThere's a specific set of steps that has messed up some of our users' dashboards:\n1. Create a new dashboard\n2. Add multiple widgets to it.\n3. Remove all those widgets from the dashboard\n4. Re-add some widgets\n5. Click the \"Edit Dashboard (Name/Layout)\" button\n6. Click Save without changing anything.\n7. Refresh the page\n\nThis makes none of the widgets appear and causes the \"Layout\" array in the admin panel to contain one or more \"null\" values (depending on how many widgets you added/deleted):\n\n\nThe only way to recover from this state is to manually delete the \"null\" values through the admin interface.\n\nThis is on re:dash version 0.8.2\n\n", "before_files": [{"content": "import json\n\nfrom flask import request\n\nfrom redash import models\nfrom redash.wsgi import api\nfrom redash.permissions import require_permission\nfrom redash.handlers.base import BaseResource\n\n\nclass WidgetListAPI(BaseResource):\n @require_permission('edit_dashboard')\n def post(self):\n widget_properties = request.get_json(force=True)\n widget_properties['options'] = json.dumps(widget_properties['options'])\n widget_properties.pop('id', None)\n widget_properties['dashboard'] = widget_properties.pop('dashboard_id')\n widget_properties['visualization'] = widget_properties.pop('visualization_id')\n widget = models.Widget(**widget_properties)\n widget.save()\n\n layout = json.loads(widget.dashboard.layout)\n new_row = True\n\n if len(layout) == 0 or widget.width == 2:\n layout.append([widget.id])\n elif len(layout[-1]) == 1:\n neighbour_widget = models.Widget.get(models.Widget.id == layout[-1][0])\n if neighbour_widget.width == 1:\n layout[-1].append(widget.id)\n new_row = False\n else:\n layout.append([widget.id])\n else:\n layout.append([widget.id])\n\n widget.dashboard.layout = json.dumps(layout)\n widget.dashboard.save()\n\n return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row}\n\n\nclass WidgetAPI(BaseResource):\n @require_permission('edit_dashboard')\n def delete(self, widget_id):\n widget = models.Widget.get(models.Widget.id == widget_id)\n widget.delete_instance()\n\napi.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets')\napi.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')\n", "path": "redash/handlers/widgets.py"}], "after_files": [{"content": "import json\n\nfrom flask import request\n\nfrom redash import models\nfrom redash.wsgi import api\nfrom redash.permissions import require_permission\nfrom redash.handlers.base import BaseResource\n\n\nclass WidgetListAPI(BaseResource):\n @require_permission('edit_dashboard')\n def post(self):\n widget_properties = request.get_json(force=True)\n widget_properties['options'] = json.dumps(widget_properties['options'])\n widget_properties.pop('id', None)\n widget_properties['dashboard'] = widget_properties.pop('dashboard_id')\n widget_properties['visualization'] = widget_properties.pop('visualization_id')\n widget = models.Widget(**widget_properties)\n 
widget.save()\n\n layout = json.loads(widget.dashboard.layout)\n new_row = True\n\n if len(layout) == 0 or widget.width == 2:\n layout.append([widget.id])\n elif len(layout[-1]) == 1:\n neighbour_widget = models.Widget.get(models.Widget.id == layout[-1][0])\n if neighbour_widget.width == 1:\n layout[-1].append(widget.id)\n new_row = False\n else:\n layout.append([widget.id])\n else:\n layout.append([widget.id])\n\n widget.dashboard.layout = json.dumps(layout)\n widget.dashboard.save()\n\n return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row}\n\n\nclass WidgetAPI(BaseResource):\n @require_permission('edit_dashboard')\n def delete(self, widget_id):\n widget = models.Widget.get(models.Widget.id == widget_id)\n widget.delete_instance()\n\n return {'layout': widget.dashboard.layout }\n\napi.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets')\napi.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')\n", "path": "redash/handlers/widgets.py"}]} | 942 | 118 |
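The redash patch in the row above only touches the widget DELETE endpoint: after `delete_instance()` it now returns the dashboard's current `layout`, presumably so the client can resynchronise its copy of the layout instead of later saving back references to widgets that no longer exist (which is what surfaces as `null` entries in the Layout array shown in the issue). A hypothetical snippet, not part of the patch, illustrating that corrupted shape and how the `null` entries could be stripped out:

```python
import json

# Shape reported in the issue: a JSON array of rows of widget ids, with nulls
# left behind after widgets were deleted and re-added (ids here are made up).
layout = json.loads('[[101, null], [null], [102]]')

cleaned = [[wid for wid in row if wid is not None] for row in layout]
cleaned = [row for row in cleaned if row]   # drop rows that became empty
# cleaned == [[101], [102]]
```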
gh_patches_debug_1763 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-802 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve push notification form
### Motivation
<!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. -->
### Proposed Solution
<!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. -->
- [x] Make input field to text area
- [x] Show how many characters are remaining
- [x] Rename "Push-Notifications" to "News" / "Nachrichten"
- [x] Change not sent symbol "x" to text "Message not sent yet"
### Additional Context

Files that need some editing for that issue are:
- src/cms/templates/push_notifications/push_notification_form.html (for everything related to the HTML-Input Field)
- src/cms/locale/de/LC_MESSAGES/django.po (for everything that needs a new translation. Search for "Push Notification" and change the German translation to "Nachricht/Nachrichten")
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cms/models/push_notifications/push_notification_translation.py`
Content:
```
1 from django.db import models
2 from django.utils import timezone
3 from django.utils.translation import ugettext_lazy as _
4
5 from ..languages.language import Language
6 from .push_notification import PushNotification
7
8
9 class PushNotificationTranslation(models.Model):
10 """
11 Data model representing a push notification translation
12 """
13
14 title = models.CharField(
15 max_length=250,
16 blank=True,
17 verbose_name=_("title"),
18 )
19 text = models.CharField(
20 max_length=250,
21 blank=True,
22 verbose_name=_("content"),
23 )
24 language = models.ForeignKey(
25 Language,
26 on_delete=models.CASCADE,
27 related_name="push_notification_translations",
28 verbose_name=_("language"),
29 )
30 push_notification = models.ForeignKey(
31 PushNotification,
32 on_delete=models.CASCADE,
33 related_name="translations",
34 verbose_name=_("push notification"),
35 )
36 created_date = models.DateTimeField(
37 default=timezone.now,
38 verbose_name=_("creation date"),
39 )
40 last_updated = models.DateTimeField(
41 auto_now=True,
42 verbose_name=_("modification date"),
43 )
44
45 def __str__(self):
46 """
47 This overwrites the default Django :meth:`~django.db.models.Model.__str__` method which would return ``PushNotificationTranslation object (id)``.
48 It is used in the Django admin backend and as label for ModelChoiceFields.
49
50 :return: A readable string representation of the event
51 :rtype: str
52 """
53 return self.title
54
55 def __repr__(self):
56 """
57 This overwrites the default Django ``__repr__()`` method which would return ``<PushNotificationTranslation: PushNotificationTranslation object (id)>``.
58 It is used for logging.
59
60 :return: The canonical string representation of the event
61 :rtype: str
62 """
63 return f"<PushNotificationTranslation (id: {self.id}, push_notification_id: {self.push_notification.id}, title: {self.title})>"
64
65 class Meta:
66 #: The verbose name of the model
67 verbose_name = _("push notification translation")
68 #: The plural verbose name of the model
69 verbose_name_plural = _("push notification translations")
70 #: The default permissions for this model
71 default_permissions = ()
72 #: Sets of field names that, taken together, must be unique
73 unique_together = ["push_notification", "language"]
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cms/models/push_notifications/push_notification_translation.py b/src/cms/models/push_notifications/push_notification_translation.py
--- a/src/cms/models/push_notifications/push_notification_translation.py
+++ b/src/cms/models/push_notifications/push_notification_translation.py
@@ -16,7 +16,7 @@
blank=True,
verbose_name=_("title"),
)
- text = models.CharField(
+ text = models.TextField(
max_length=250,
blank=True,
verbose_name=_("content"),
| {"golden_diff": "diff --git a/src/cms/models/push_notifications/push_notification_translation.py b/src/cms/models/push_notifications/push_notification_translation.py\n--- a/src/cms/models/push_notifications/push_notification_translation.py\n+++ b/src/cms/models/push_notifications/push_notification_translation.py\n@@ -16,7 +16,7 @@\n blank=True,\n verbose_name=_(\"title\"),\n )\n- text = models.CharField(\n+ text = models.TextField(\n max_length=250,\n blank=True,\n verbose_name=_(\"content\"),\n", "issue": "Improve push notification form\n### Motivation\r\n<!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. -->\r\n\r\n\r\n### Proposed Solution\r\n<!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. -->\r\n- [x] Make input field to text area\r\n- [x] Show how many characters are remaining\r\n- [x] Rename \"Push-Notifications\" to \"News\" / \"Nachrichten\"\r\n- [x] Change not sent symbol \"x\" to text \"Message not sent yet\"\r\n\r\n\r\n### Additional Context\r\n\r\n\r\nFiles that need some editing for that issue are:\r\n\r\n- src/cms/templates/push_notifications/push_notification_form.html (for everything related to the HTML-Input Field)\r\n- src/cms/locale/de/LC_MESSAGES/django.po (for everything that needs some new translation. Search for \"Push Notification\" and change the german translation to \"Nachricht/Nachrichten\"\r\n\r\n\r\n\n", "before_files": [{"content": "from django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..languages.language import Language\nfrom .push_notification import PushNotification\n\n\nclass PushNotificationTranslation(models.Model):\n \"\"\"\n Data model representing a push notification translation\n \"\"\"\n\n title = models.CharField(\n max_length=250,\n blank=True,\n verbose_name=_(\"title\"),\n )\n text = models.CharField(\n max_length=250,\n blank=True,\n verbose_name=_(\"content\"),\n )\n language = models.ForeignKey(\n Language,\n on_delete=models.CASCADE,\n related_name=\"push_notification_translations\",\n verbose_name=_(\"language\"),\n )\n push_notification = models.ForeignKey(\n PushNotification,\n on_delete=models.CASCADE,\n related_name=\"translations\",\n verbose_name=_(\"push notification\"),\n )\n created_date = models.DateTimeField(\n default=timezone.now,\n verbose_name=_(\"creation date\"),\n )\n last_updated = models.DateTimeField(\n auto_now=True,\n verbose_name=_(\"modification date\"),\n )\n\n def __str__(self):\n \"\"\"\n This overwrites the default Django :meth:`~django.db.models.Model.__str__` method which would return ``PushNotificationTranslation object (id)``.\n It is used in the Django admin backend and as label for ModelChoiceFields.\n\n :return: A readable string representation of the event\n :rtype: str\n \"\"\"\n return self.title\n\n def __repr__(self):\n \"\"\"\n This overwrites the default Django ``__repr__()`` method which would return ``<PushNotificationTranslation: PushNotificationTranslation object (id)>``.\n It is used for logging.\n\n :return: The canonical string representation of the event\n :rtype: str\n \"\"\"\n return f\"<PushNotificationTranslation (id: {self.id}, push_notification_id: {self.push_notification.id}, title: {self.title})>\"\n\n class Meta:\n #: The verbose name of the model\n verbose_name = _(\"push notification translation\")\n #: The plural verbose name of the model\n verbose_name_plural = 
_(\"push notification translations\")\n #: The default permissions for this model\n default_permissions = ()\n #: Sets of field names that, taken together, must be unique\n unique_together = [\"push_notification\", \"language\"]\n", "path": "src/cms/models/push_notifications/push_notification_translation.py"}], "after_files": [{"content": "from django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..languages.language import Language\nfrom .push_notification import PushNotification\n\n\nclass PushNotificationTranslation(models.Model):\n \"\"\"\n Data model representing a push notification translation\n \"\"\"\n\n title = models.CharField(\n max_length=250,\n blank=True,\n verbose_name=_(\"title\"),\n )\n text = models.TextField(\n max_length=250,\n blank=True,\n verbose_name=_(\"content\"),\n )\n language = models.ForeignKey(\n Language,\n on_delete=models.CASCADE,\n related_name=\"push_notification_translations\",\n verbose_name=_(\"language\"),\n )\n push_notification = models.ForeignKey(\n PushNotification,\n on_delete=models.CASCADE,\n related_name=\"translations\",\n verbose_name=_(\"push notification\"),\n )\n created_date = models.DateTimeField(\n default=timezone.now,\n verbose_name=_(\"creation date\"),\n )\n last_updated = models.DateTimeField(\n auto_now=True,\n verbose_name=_(\"modification date\"),\n )\n\n def __str__(self):\n \"\"\"\n This overwrites the default Django :meth:`~django.db.models.Model.__str__` method which would return ``PushNotificationTranslation object (id)``.\n It is used in the Django admin backend and as label for ModelChoiceFields.\n\n :return: A readable string representation of the event\n :rtype: str\n \"\"\"\n return self.title\n\n def __repr__(self):\n \"\"\"\n This overwrites the default Django ``__repr__()`` method which would return ``<PushNotificationTranslation: PushNotificationTranslation object (id)>``.\n It is used for logging.\n\n :return: The canonical string representation of the event\n :rtype: str\n \"\"\"\n return f\"<PushNotificationTranslation (id: {self.id}, push_notification_id: {self.push_notification.id}, title: {self.title})>\"\n\n class Meta:\n #: The verbose name of the model\n verbose_name = _(\"push notification translation\")\n #: The plural verbose name of the model\n verbose_name_plural = _(\"push notification translations\")\n #: The default permissions for this model\n default_permissions = ()\n #: Sets of field names that, taken together, must be unique\n unique_together = [\"push_notification\", \"language\"]\n", "path": "src/cms/models/push_notifications/push_notification_translation.py"}]} | 1,182 | 112 |
gh_patches_debug_24893 | rasdani/github-patches | git_diff | Zeroto521__my-data-toolkit-519 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DEP: Drop `inplace` option of `filter_in`
<!--
Thanks for contributing a pull request!
Please follow these standard acronyms to start the commit message:
- ENH: enhancement
- BUG: bug fix
- DOC: documentation
- TYP: type annotations
- TST: addition or modification of tests
- MAINT: maintenance commit (refactoring, typos, etc.)
- BLD: change related to building
- REL: related to releasing
- API: an (incompatible) API change
- DEP: deprecate something, or remove a deprecated object
- DEV: development tool or utility
- REV: revert an earlier commit
- PERF: performance improvement
- BOT: always commit via a bot
- CI: related to CI or CD
- CLN: Code cleanup
-->
- [ ] closes #xxxx
- [x] whatsnew entry
Refer to other `filter` methods. They don't have an `inplace` option.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dtoolkit/accessor/dataframe/filter_in.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING
4
5 import pandas as pd
6 from pandas.util._validators import validate_bool_kwarg
7
8 from dtoolkit.accessor._util import get_mask
9 from dtoolkit.accessor._util import isin
10 from dtoolkit.accessor.register import register_dataframe_method
11
12
13 if TYPE_CHECKING:
14 from typing import Iterable
15
16 from dtoolkit._typing import IntOrStr
17
18
19 @register_dataframe_method
20 def filter_in(
21 df: pd.DataFrame,
22 condition: Iterable | pd.Series | pd.DataFrame | dict[str, list[str]],
23 axis: IntOrStr = 0,
24 how: str = "all",
25 inplace: bool = False,
26 ) -> pd.DataFrame | None:
27 """
28 Filter :obj:`~pandas.DataFrame` contents.
29
30 Similar to :meth:`~pandas.DataFrame.isin`, but the return is value not
31 bool.
32
33 Parameters
34 ----------
35 condition : iterable, Series, DataFrame or dict
36 The result will only be true at a location if all the labels match.
37
38 * If ``condition`` is a :obj:`dict`, the keys must be the row/column
39 names, which must match. And ``how`` only works on these gave keys.
40
41 - ``axis`` is 0 or 'index', keys would be recognize as column
42 names.
43 - ``axis`` is 1 or 'columns', keys would be recognize as index
44 names.
45
46 * If ``condition`` is a :obj:`~pandas.Series`, that's the index.
47
48 * If ``condition`` is a :obj:`~pandas.DataFrame`, then both the index
49 and column labels must match.
50
51 axis : {0 or 'index', 1 or 'columns'}, default 0
52 Determine if rows or columns which contain value are filtered.
53
54 * 0, or 'index' : Filter rows which contain value.
55 * 1, or 'columns' : Filter columns which contain value.
56
57 how : {'any', 'all'}, default 'all'
58 Determine if row or column is filtered from :obj:`~pandas.DataFrame`,
59 when we have at least one value or all value.
60
61 * 'any' : If any values are present, filter that row or column.
62 * 'all' : If all values are present, filter that row or column.
63
64 inplace : bool, default is False
65 If True, do operation inplace and return None.
66
67 Returns
68 -------
69 DataFrame
70
71 See Also
72 --------
73 pandas.DataFrame.isin
74 Whether each element in the DataFrame is contained in values.
75 pandas.DataFrame.filter
76 Subset the dataframe rows or columns according to the specified index
77 labels.
78
79 Examples
80 --------
81 >>> import dtoolkit.accessor
82 >>> import pandas as pd
83 >>> df = pd.DataFrame({'num_legs': [2, 4, 2], 'num_wings': [2, 0, 0]},
84 ... index=['falcon', 'dog', 'cat'])
85 >>> df
86 num_legs num_wings
87 falcon 2 2
88 dog 4 0
89 cat 2 0
90
91 When ``condition`` is a list check whether every value in the DataFrame is
92 present in the list (which animals have 0 or 2 legs or wings).
93
94 Filter rows.
95
96 >>> df.filter_in([0, 2])
97 num_legs num_wings
98 falcon 2 2
99 cat 2 0
100
101 Filter columns.
102
103 >>> df.filter_in([0, 2], axis=1)
104 num_wings
105 falcon 2
106 dog 0
107 cat 0
108
109 When ``condition`` is a :obj:`dict`, we can pass values to check for each
110 row/column (depend on ``axis``) separately.
111
112 Filter rows, to check under the column (key) whether contains the value.
113
114 >>> df.filter_in({'num_legs': [2], 'num_wings': [2]})
115 num_legs num_wings
116 falcon 2 2
117
118 Filter columns, to check under the index (key) whether contains the value.
119
120 >>> df.filter_in({'cat': [2]}, axis=1)
121 num_legs
122 falcon 2
123 dog 4
124 cat 2
125
126 When ``values`` is a Series or DataFrame the index and column must match.
127 Note that 'spider' doesn't match based on the number of legs in ``other``.
128
129 >>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},
130 ... index=['spider', 'falcon'])
131 >>> other
132 num_legs num_wings
133 spider 8 0
134 falcon 2 2
135 >>> df.filter_in(other)
136 num_legs num_wings
137 falcon 2 2
138 """
139
140 inplace = validate_bool_kwarg(inplace, "inplace")
141 axis = df._get_axis_number(axis)
142
143 another_axis = 1 - axis
144
145 mask = isin(df, condition, axis)
146 if isinstance(condition, dict):
147 # 'how' only works on condition's keys
148 names = condition.keys()
149 mask = mask[names] if axis == 0 else mask.loc[names]
150 mask = get_mask(how, mask, another_axis)
151
152 result = df.loc(axis=axis)[mask]
153 if not inplace:
154 return result
155
156 df._update_inplace(result)
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dtoolkit/accessor/dataframe/filter_in.py b/dtoolkit/accessor/dataframe/filter_in.py
--- a/dtoolkit/accessor/dataframe/filter_in.py
+++ b/dtoolkit/accessor/dataframe/filter_in.py
@@ -8,6 +8,7 @@
from dtoolkit.accessor._util import get_mask
from dtoolkit.accessor._util import isin
from dtoolkit.accessor.register import register_dataframe_method
+from dtoolkit.util._decorator import warning
if TYPE_CHECKING:
@@ -17,6 +18,12 @@
@register_dataframe_method
+@warning(
+ "The 'inplace' option of 'dtoolkit.accessor.dataframe.filter_in' is deprecated "
+ "and will be removed in 0.0.16. (Warning added DToolKit 0.0.15)",
+ DeprecationWarning,
+ stacklevel=3,
+)
def filter_in(
df: pd.DataFrame,
condition: Iterable | pd.Series | pd.DataFrame | dict[str, list[str]],
@@ -30,6 +37,10 @@
Similar to :meth:`~pandas.DataFrame.isin`, but the return is value not
bool.
+ .. warning::
+ The ``inplace`` option of ``dtoolkit.accessor.dataframe.filter_in`` is
+ deprecated and will be removed in 0.0.16. (Warning added DToolKit 0.0.15)
+
Parameters
----------
condition : iterable, Series, DataFrame or dict
| {"golden_diff": "diff --git a/dtoolkit/accessor/dataframe/filter_in.py b/dtoolkit/accessor/dataframe/filter_in.py\n--- a/dtoolkit/accessor/dataframe/filter_in.py\n+++ b/dtoolkit/accessor/dataframe/filter_in.py\n@@ -8,6 +8,7 @@\n from dtoolkit.accessor._util import get_mask\n from dtoolkit.accessor._util import isin\n from dtoolkit.accessor.register import register_dataframe_method\n+from dtoolkit.util._decorator import warning\n \n \n if TYPE_CHECKING:\n@@ -17,6 +18,12 @@\n \n \n @register_dataframe_method\n+@warning(\n+ \"The 'inplace' option of 'dtoolkit.accessor.dataframe.filter_in' is deprecated \"\n+ \"and will be removed in 0.0.16. (Warning added DToolKit 0.0.15)\",\n+ DeprecationWarning,\n+ stacklevel=3,\n+)\n def filter_in(\n df: pd.DataFrame,\n condition: Iterable | pd.Series | pd.DataFrame | dict[str, list[str]],\n@@ -30,6 +37,10 @@\n Similar to :meth:`~pandas.DataFrame.isin`, but the return is value not\n bool.\n \n+ .. warning::\n+ The ``inplace`` option of ``dtoolkit.accessor.dataframe.filter_in`` is\n+ deprecated and will be removed in 0.0.16. (Warning added DToolKit 0.0.15)\n+\n Parameters\n ----------\n condition : iterable, Series, DataFrame or dict\n", "issue": "DEP: Drop `inplace` option of `filter_in`\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [ ] closes #xxxx\r\n- [x] whatsnew entry\r\n\r\nRefer other `filter` methods. There don't have `inplace` option.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport pandas as pd\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom dtoolkit.accessor._util import get_mask\nfrom dtoolkit.accessor._util import isin\nfrom dtoolkit.accessor.register import register_dataframe_method\n\n\nif TYPE_CHECKING:\n from typing import Iterable\n\n from dtoolkit._typing import IntOrStr\n\n\n@register_dataframe_method\ndef filter_in(\n df: pd.DataFrame,\n condition: Iterable | pd.Series | pd.DataFrame | dict[str, list[str]],\n axis: IntOrStr = 0,\n how: str = \"all\",\n inplace: bool = False,\n) -> pd.DataFrame | None:\n \"\"\"\n Filter :obj:`~pandas.DataFrame` contents.\n\n Similar to :meth:`~pandas.DataFrame.isin`, but the return is value not\n bool.\n\n Parameters\n ----------\n condition : iterable, Series, DataFrame or dict\n The result will only be true at a location if all the labels match.\n\n * If ``condition`` is a :obj:`dict`, the keys must be the row/column\n names, which must match. 
And ``how`` only works on these gave keys.\n\n - ``axis`` is 0 or 'index', keys would be recognize as column\n names.\n - ``axis`` is 1 or 'columns', keys would be recognize as index\n names.\n\n * If ``condition`` is a :obj:`~pandas.Series`, that's the index.\n\n * If ``condition`` is a :obj:`~pandas.DataFrame`, then both the index\n and column labels must match.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Determine if rows or columns which contain value are filtered.\n\n * 0, or 'index' : Filter rows which contain value.\n * 1, or 'columns' : Filter columns which contain value.\n\n how : {'any', 'all'}, default 'all'\n Determine if row or column is filtered from :obj:`~pandas.DataFrame`,\n when we have at least one value or all value.\n\n * 'any' : If any values are present, filter that row or column.\n * 'all' : If all values are present, filter that row or column.\n\n inplace : bool, default is False\n If True, do operation inplace and return None.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n pandas.DataFrame.isin\n Whether each element in the DataFrame is contained in values.\n pandas.DataFrame.filter\n Subset the dataframe rows or columns according to the specified index\n labels.\n\n Examples\n --------\n >>> import dtoolkit.accessor\n >>> import pandas as pd\n >>> df = pd.DataFrame({'num_legs': [2, 4, 2], 'num_wings': [2, 0, 0]},\n ... index=['falcon', 'dog', 'cat'])\n >>> df\n num_legs num_wings\n falcon 2 2\n dog 4 0\n cat 2 0\n\n When ``condition`` is a list check whether every value in the DataFrame is\n present in the list (which animals have 0 or 2 legs or wings).\n\n Filter rows.\n\n >>> df.filter_in([0, 2])\n num_legs num_wings\n falcon 2 2\n cat 2 0\n\n Filter columns.\n\n >>> df.filter_in([0, 2], axis=1)\n num_wings\n falcon 2\n dog 0\n cat 0\n\n When ``condition`` is a :obj:`dict`, we can pass values to check for each\n row/column (depend on ``axis``) separately.\n\n Filter rows, to check under the column (key) whether contains the value.\n\n >>> df.filter_in({'num_legs': [2], 'num_wings': [2]})\n num_legs num_wings\n falcon 2 2\n\n Filter columns, to check under the index (key) whether contains the value.\n\n >>> df.filter_in({'cat': [2]}, axis=1)\n num_legs\n falcon 2\n dog 4\n cat 2\n\n When ``values`` is a Series or DataFrame the index and column must match.\n Note that 'spider' doesn't match based on the number of legs in ``other``.\n\n >>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},\n ... 
index=['spider', 'falcon'])\n >>> other\n num_legs num_wings\n spider 8 0\n falcon 2 2\n >>> df.filter_in(other)\n num_legs num_wings\n falcon 2 2\n \"\"\"\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n axis = df._get_axis_number(axis)\n\n another_axis = 1 - axis\n\n mask = isin(df, condition, axis)\n if isinstance(condition, dict):\n # 'how' only works on condition's keys\n names = condition.keys()\n mask = mask[names] if axis == 0 else mask.loc[names]\n mask = get_mask(how, mask, another_axis)\n\n result = df.loc(axis=axis)[mask]\n if not inplace:\n return result\n\n df._update_inplace(result)\n", "path": "dtoolkit/accessor/dataframe/filter_in.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport pandas as pd\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom dtoolkit.accessor._util import get_mask\nfrom dtoolkit.accessor._util import isin\nfrom dtoolkit.accessor.register import register_dataframe_method\nfrom dtoolkit.util._decorator import warning\n\n\nif TYPE_CHECKING:\n from typing import Iterable\n\n from dtoolkit._typing import IntOrStr\n\n\n@register_dataframe_method\n@warning(\n \"The 'inplace' option of 'dtoolkit.accessor.dataframe.filter_in' is deprecated \"\n \"and will be removed in 0.0.16. (Warning added DToolKit 0.0.15)\",\n DeprecationWarning,\n stacklevel=3,\n)\ndef filter_in(\n df: pd.DataFrame,\n condition: Iterable | pd.Series | pd.DataFrame | dict[str, list[str]],\n axis: IntOrStr = 0,\n how: str = \"all\",\n inplace: bool = False,\n) -> pd.DataFrame | None:\n \"\"\"\n Filter :obj:`~pandas.DataFrame` contents.\n\n Similar to :meth:`~pandas.DataFrame.isin`, but the return is value not\n bool.\n\n .. warning::\n The ``inplace`` option of ``dtoolkit.accessor.dataframe.filter_in`` is\n deprecated and will be removed in 0.0.16. (Warning added DToolKit 0.0.15)\n\n Parameters\n ----------\n condition : iterable, Series, DataFrame or dict\n The result will only be true at a location if all the labels match.\n\n * If ``condition`` is a :obj:`dict`, the keys must be the row/column\n names, which must match. And ``how`` only works on these gave keys.\n\n - ``axis`` is 0 or 'index', keys would be recognize as column\n names.\n - ``axis`` is 1 or 'columns', keys would be recognize as index\n names.\n\n * If ``condition`` is a :obj:`~pandas.Series`, that's the index.\n\n * If ``condition`` is a :obj:`~pandas.DataFrame`, then both the index\n and column labels must match.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Determine if rows or columns which contain value are filtered.\n\n * 0, or 'index' : Filter rows which contain value.\n * 1, or 'columns' : Filter columns which contain value.\n\n how : {'any', 'all'}, default 'all'\n Determine if row or column is filtered from :obj:`~pandas.DataFrame`,\n when we have at least one value or all value.\n\n * 'any' : If any values are present, filter that row or column.\n * 'all' : If all values are present, filter that row or column.\n\n inplace : bool, default is False\n If True, do operation inplace and return None.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n pandas.DataFrame.isin\n Whether each element in the DataFrame is contained in values.\n pandas.DataFrame.filter\n Subset the dataframe rows or columns according to the specified index\n labels.\n\n Examples\n --------\n >>> import dtoolkit.accessor\n >>> import pandas as pd\n >>> df = pd.DataFrame({'num_legs': [2, 4, 2], 'num_wings': [2, 0, 0]},\n ... 
index=['falcon', 'dog', 'cat'])\n >>> df\n num_legs num_wings\n falcon 2 2\n dog 4 0\n cat 2 0\n\n When ``condition`` is a list check whether every value in the DataFrame is\n present in the list (which animals have 0 or 2 legs or wings).\n\n Filter rows.\n\n >>> df.filter_in([0, 2])\n num_legs num_wings\n falcon 2 2\n cat 2 0\n\n Filter columns.\n\n >>> df.filter_in([0, 2], axis=1)\n num_wings\n falcon 2\n dog 0\n cat 0\n\n When ``condition`` is a :obj:`dict`, we can pass values to check for each\n row/column (depend on ``axis``) separately.\n\n Filter rows, to check under the column (key) whether contains the value.\n\n >>> df.filter_in({'num_legs': [2], 'num_wings': [2]})\n num_legs num_wings\n falcon 2 2\n\n Filter columns, to check under the index (key) whether contains the value.\n\n >>> df.filter_in({'cat': [2]}, axis=1)\n num_legs\n falcon 2\n dog 4\n cat 2\n\n When ``values`` is a Series or DataFrame the index and column must match.\n Note that 'spider' doesn't match based on the number of legs in ``other``.\n\n >>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},\n ... index=['spider', 'falcon'])\n >>> other\n num_legs num_wings\n spider 8 0\n falcon 2 2\n >>> df.filter_in(other)\n num_legs num_wings\n falcon 2 2\n \"\"\"\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n axis = df._get_axis_number(axis)\n\n another_axis = 1 - axis\n\n mask = isin(df, condition, axis)\n if isinstance(condition, dict):\n # 'how' only works on condition's keys\n names = condition.keys()\n mask = mask[names] if axis == 0 else mask.loc[names]\n mask = get_mask(how, mask, another_axis)\n\n result = df.loc(axis=axis)[mask]\n if not inplace:\n return result\n\n df._update_inplace(result)\n", "path": "dtoolkit/accessor/dataframe/filter_in.py"}]} | 2,081 | 346 |
gh_patches_debug_3531 | rasdani/github-patches | git_diff | web2py__web2py-928 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in models/db.py for mail server
Hi,
I found a small typo, which prevents sending mail, in models/db.py line 65:
mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.sender')
should be (smtp.server instead of smtp.sender):
mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.server')
thanks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `applications/welcome/models/db.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 #########################################################################
4 ## This scaffolding model makes your app work on Google App Engine too
5 ## File is released under public domain and you can use without limitations
6 #########################################################################
7
8 ## if SSL/HTTPS is properly configured and you want all HTTP requests to
9 ## be redirected to HTTPS, uncomment the line below:
10 # request.requires_https()
11
12 ## app configuration made easy. Look inside private/appconfig.ini
13 from gluon.contrib.appconfig import AppConfig
14 ## once in production, remove reload=True to gain full speed
15 myconf = AppConfig(reload=True)
16
17
18 if not request.env.web2py_runtime_gae:
19 ## if NOT running on Google App Engine use SQLite or other DB
20 db = DAL(myconf.take('db.uri'), pool_size=myconf.take('db.pool_size', cast=int), check_reserved=['all'])
21 else:
22 ## connect to Google BigTable (optional 'google:datastore://namespace')
23 db = DAL('google:datastore+ndb')
24 ## store sessions and tickets there
25 session.connect(request, response, db=db)
26 ## or store session in Memcache, Redis, etc.
27 ## from gluon.contrib.memdb import MEMDB
28 ## from google.appengine.api.memcache import Client
29 ## session.connect(request, response, db = MEMDB(Client()))
30
31 ## by default give a view/generic.extension to all actions from localhost
32 ## none otherwise. a pattern can be 'controller/function.extension'
33 response.generic_patterns = ['*'] if request.is_local else []
34 ## choose a style for forms
35 response.formstyle = myconf.take('forms.formstyle') # or 'bootstrap3_stacked' or 'bootstrap2' or other
36 response.form_label_separator = myconf.take('forms.separator')
37
38
39 ## (optional) optimize handling of static files
40 # response.optimize_css = 'concat,minify,inline'
41 # response.optimize_js = 'concat,minify,inline'
42 ## (optional) static assets folder versioning
43 # response.static_version = '0.0.0'
44 #########################################################################
45 ## Here is sample code if you need for
46 ## - email capabilities
47 ## - authentication (registration, login, logout, ... )
48 ## - authorization (role based authorization)
49 ## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)
50 ## - old style crud actions
51 ## (more options discussed in gluon/tools.py)
52 #########################################################################
53
54 from gluon.tools import Auth, Service, PluginManager
55
56 auth = Auth(db)
57 service = Service()
58 plugins = PluginManager()
59
60 ## create all tables needed by auth if not custom tables
61 auth.define_tables(username=False, signature=False)
62
63 ## configure email
64 mail = auth.settings.mailer
65 mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.sender')
66 mail.settings.sender = myconf.take('smtp.sender')
67 mail.settings.login = myconf.take('smtp.login')
68
69 ## configure auth policy
70 auth.settings.registration_requires_verification = False
71 auth.settings.registration_requires_approval = False
72 auth.settings.reset_password_requires_verification = True
73
74 #########################################################################
75 ## Define your tables below (or better in another model file) for example
76 ##
77 ## >>> db.define_table('mytable',Field('myfield','string'))
78 ##
79 ## Fields can be 'string','text','password','integer','double','boolean'
80 ## 'date','time','datetime','blob','upload', 'reference TABLENAME'
81 ## There is an implicit 'id integer autoincrement' field
82 ## Consult manual for more options, validators, etc.
83 ##
84 ## More API examples for controllers:
85 ##
86 ## >>> db.mytable.insert(myfield='value')
87 ## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)
88 ## >>> for row in rows: print row.id, row.myfield
89 #########################################################################
90
91 ## after defining tables, uncomment below to enable auditing
92 # auth.enable_record_versioning(db)
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/applications/welcome/models/db.py b/applications/welcome/models/db.py
--- a/applications/welcome/models/db.py
+++ b/applications/welcome/models/db.py
@@ -62,7 +62,7 @@
## configure email
mail = auth.settings.mailer
-mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.sender')
+mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.server')
mail.settings.sender = myconf.take('smtp.sender')
mail.settings.login = myconf.take('smtp.login')
| {"golden_diff": "diff --git a/applications/welcome/models/db.py b/applications/welcome/models/db.py\n--- a/applications/welcome/models/db.py\n+++ b/applications/welcome/models/db.py\n@@ -62,7 +62,7 @@\n \n ## configure email\n mail = auth.settings.mailer\n-mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.sender')\n+mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.server')\n mail.settings.sender = myconf.take('smtp.sender')\n mail.settings.login = myconf.take('smtp.login')\n", "issue": "Typo in models/db.py for mail server\nHi, \n\nI found a small typo, which prohibits sending mails in models/db.py line 65:\nmail.settings.server = 'logging' if request.is_local else myconf.take('smtp.sender')\n\nshould be (smtp.server instead of smtp.sender):\nmail.settings.server = 'logging' if request.is_local else myconf.take('smtp.server') \n\nthanks\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n#########################################################################\n## This scaffolding model makes your app work on Google App Engine too\n## File is released under public domain and you can use without limitations\n#########################################################################\n\n## if SSL/HTTPS is properly configured and you want all HTTP requests to\n## be redirected to HTTPS, uncomment the line below:\n# request.requires_https()\n\n## app configuration made easy. Look inside private/appconfig.ini\nfrom gluon.contrib.appconfig import AppConfig\n## once in production, remove reload=True to gain full speed\nmyconf = AppConfig(reload=True)\n\n\nif not request.env.web2py_runtime_gae:\n ## if NOT running on Google App Engine use SQLite or other DB\n db = DAL(myconf.take('db.uri'), pool_size=myconf.take('db.pool_size', cast=int), check_reserved=['all'])\nelse:\n ## connect to Google BigTable (optional 'google:datastore://namespace')\n db = DAL('google:datastore+ndb')\n ## store sessions and tickets there\n session.connect(request, response, db=db)\n ## or store session in Memcache, Redis, etc.\n ## from gluon.contrib.memdb import MEMDB\n ## from google.appengine.api.memcache import Client\n ## session.connect(request, response, db = MEMDB(Client()))\n\n## by default give a view/generic.extension to all actions from localhost\n## none otherwise. a pattern can be 'controller/function.extension'\nresponse.generic_patterns = ['*'] if request.is_local else []\n## choose a style for forms\nresponse.formstyle = myconf.take('forms.formstyle') # or 'bootstrap3_stacked' or 'bootstrap2' or other\nresponse.form_label_separator = myconf.take('forms.separator')\n\n\n## (optional) optimize handling of static files\n# response.optimize_css = 'concat,minify,inline'\n# response.optimize_js = 'concat,minify,inline'\n## (optional) static assets folder versioning\n# response.static_version = '0.0.0'\n#########################################################################\n## Here is sample code if you need for\n## - email capabilities\n## - authentication (registration, login, logout, ... 
)\n## - authorization (role based authorization)\n## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)\n## - old style crud actions\n## (more options discussed in gluon/tools.py)\n#########################################################################\n\nfrom gluon.tools import Auth, Service, PluginManager\n\nauth = Auth(db)\nservice = Service()\nplugins = PluginManager()\n\n## create all tables needed by auth if not custom tables\nauth.define_tables(username=False, signature=False)\n\n## configure email\nmail = auth.settings.mailer\nmail.settings.server = 'logging' if request.is_local else myconf.take('smtp.sender')\nmail.settings.sender = myconf.take('smtp.sender')\nmail.settings.login = myconf.take('smtp.login')\n\n## configure auth policy\nauth.settings.registration_requires_verification = False\nauth.settings.registration_requires_approval = False\nauth.settings.reset_password_requires_verification = True\n\n#########################################################################\n## Define your tables below (or better in another model file) for example\n##\n## >>> db.define_table('mytable',Field('myfield','string'))\n##\n## Fields can be 'string','text','password','integer','double','boolean'\n## 'date','time','datetime','blob','upload', 'reference TABLENAME'\n## There is an implicit 'id integer autoincrement' field\n## Consult manual for more options, validators, etc.\n##\n## More API examples for controllers:\n##\n## >>> db.mytable.insert(myfield='value')\n## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)\n## >>> for row in rows: print row.id, row.myfield\n#########################################################################\n\n## after defining tables, uncomment below to enable auditing\n# auth.enable_record_versioning(db)\n", "path": "applications/welcome/models/db.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n#########################################################################\n## This scaffolding model makes your app work on Google App Engine too\n## File is released under public domain and you can use without limitations\n#########################################################################\n\n## if SSL/HTTPS is properly configured and you want all HTTP requests to\n## be redirected to HTTPS, uncomment the line below:\n# request.requires_https()\n\n## app configuration made easy. Look inside private/appconfig.ini\nfrom gluon.contrib.appconfig import AppConfig\n## once in production, remove reload=True to gain full speed\nmyconf = AppConfig(reload=True)\n\n\nif not request.env.web2py_runtime_gae:\n ## if NOT running on Google App Engine use SQLite or other DB\n db = DAL(myconf.take('db.uri'), pool_size=myconf.take('db.pool_size', cast=int), check_reserved=['all'])\nelse:\n ## connect to Google BigTable (optional 'google:datastore://namespace')\n db = DAL('google:datastore+ndb')\n ## store sessions and tickets there\n session.connect(request, response, db=db)\n ## or store session in Memcache, Redis, etc.\n ## from gluon.contrib.memdb import MEMDB\n ## from google.appengine.api.memcache import Client\n ## session.connect(request, response, db = MEMDB(Client()))\n\n## by default give a view/generic.extension to all actions from localhost\n## none otherwise. 
a pattern can be 'controller/function.extension'\nresponse.generic_patterns = ['*'] if request.is_local else []\n## choose a style for forms\nresponse.formstyle = myconf.take('forms.formstyle') # or 'bootstrap3_stacked' or 'bootstrap2' or other\nresponse.form_label_separator = myconf.take('forms.separator')\n\n\n## (optional) optimize handling of static files\n# response.optimize_css = 'concat,minify,inline'\n# response.optimize_js = 'concat,minify,inline'\n## (optional) static assets folder versioning\n# response.static_version = '0.0.0'\n#########################################################################\n## Here is sample code if you need for\n## - email capabilities\n## - authentication (registration, login, logout, ... )\n## - authorization (role based authorization)\n## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)\n## - old style crud actions\n## (more options discussed in gluon/tools.py)\n#########################################################################\n\nfrom gluon.tools import Auth, Service, PluginManager\n\nauth = Auth(db)\nservice = Service()\nplugins = PluginManager()\n\n## create all tables needed by auth if not custom tables\nauth.define_tables(username=False, signature=False)\n\n## configure email\nmail = auth.settings.mailer\nmail.settings.server = 'logging' if request.is_local else myconf.take('smtp.server')\nmail.settings.sender = myconf.take('smtp.sender')\nmail.settings.login = myconf.take('smtp.login')\n\n## configure auth policy\nauth.settings.registration_requires_verification = False\nauth.settings.registration_requires_approval = False\nauth.settings.reset_password_requires_verification = True\n\n#########################################################################\n## Define your tables below (or better in another model file) for example\n##\n## >>> db.define_table('mytable',Field('myfield','string'))\n##\n## Fields can be 'string','text','password','integer','double','boolean'\n## 'date','time','datetime','blob','upload', 'reference TABLENAME'\n## There is an implicit 'id integer autoincrement' field\n## Consult manual for more options, validators, etc.\n##\n## More API examples for controllers:\n##\n## >>> db.mytable.insert(myfield='value')\n## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)\n## >>> for row in rows: print row.id, row.myfield\n#########################################################################\n\n## after defining tables, uncomment below to enable auditing\n# auth.enable_record_versioning(db)\n", "path": "applications/welcome/models/db.py"}]} | 1,325 | 121 |
gh_patches_debug_13859 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5413 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_AWS_299 Fails for Values That Can't be Set
**Describe the issue**
Failing on - Check: CKV_AWS_299: "Ensure DMS S3 defines in-transit encryption"
When the endpoint of a DMS Migration Task has an Endpoint that is S3, there is no option in AWS to specify ssl_mode. AWS does not have an ssl_mode for DMS Endpoints of type S3. Setting this value on extra connections does nothing. Also, setting this in Terraform does nothing. I may be crazy, but I think DMS transfers to S3 use SSL by default.
**Examples**
```
from __future__ import annotations
from typing import Any
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
from checkov.common.models.enums import CheckCategories
class DMSS3DefinesIntransitEncryption(BaseResourceValueCheck):
def __init__(self) -> None:
name = "Ensure DMS S3 defines in-transit encryption"
id = "CKV_AWS_299"
supported_resources = ("aws_dms_s3_endpoint",)
categories = (CheckCategories.ENCRYPTION,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self) -> str:
return 'ssl_mode'
def get_expected_values(self) -> list[Any]:
return ["require", "verify-ca", "verify-full"]
check = DMSS3DefinesIntransitEncryption()
```
**Version (please complete the following information):**
- Checkov Version 2.3.343
**Additional context**
This property does nothing in Terraform when the aws_dme_enpoint is s3, and I filed an issue over there; however, it was closed with no fix.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import Any
4
5 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
6 from checkov.common.models.enums import CheckCategories
7
8
9 class DMSS3DefinesIntransitEncryption(BaseResourceValueCheck):
10 def __init__(self) -> None:
11 name = "Ensure DMS S3 defines in-transit encryption"
12 id = "CKV_AWS_299"
13 supported_resources = ("aws_dms_s3_endpoint",)
14 categories = (CheckCategories.ENCRYPTION,)
15 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
16
17 def get_inspected_key(self) -> str:
18 return 'ssl_mode'
19
20 def get_expected_values(self) -> list[Any]:
21 return ["require", "verify-ca", "verify-full"]
22
23
24 check = DMSS3DefinesIntransitEncryption()
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py b/checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py
deleted file mode 100644
--- a/checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from __future__ import annotations
-
-from typing import Any
-
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
-from checkov.common.models.enums import CheckCategories
-
-
-class DMSS3DefinesIntransitEncryption(BaseResourceValueCheck):
- def __init__(self) -> None:
- name = "Ensure DMS S3 defines in-transit encryption"
- id = "CKV_AWS_299"
- supported_resources = ("aws_dms_s3_endpoint",)
- categories = (CheckCategories.ENCRYPTION,)
- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
-
- def get_inspected_key(self) -> str:
- return 'ssl_mode'
-
- def get_expected_values(self) -> list[Any]:
- return ["require", "verify-ca", "verify-full"]
-
-
-check = DMSS3DefinesIntransitEncryption()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py b/checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py\ndeleted file mode 100644\n--- a/checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py\n+++ /dev/null\n@@ -1,24 +0,0 @@\n-from __future__ import annotations\n-\n-from typing import Any\n-\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-from checkov.common.models.enums import CheckCategories\n-\n-\n-class DMSS3DefinesIntransitEncryption(BaseResourceValueCheck):\n- def __init__(self) -> None:\n- name = \"Ensure DMS S3 defines in-transit encryption\"\n- id = \"CKV_AWS_299\"\n- supported_resources = (\"aws_dms_s3_endpoint\",)\n- categories = (CheckCategories.ENCRYPTION,)\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n-\n- def get_inspected_key(self) -> str:\n- return 'ssl_mode'\n-\n- def get_expected_values(self) -> list[Any]:\n- return [\"require\", \"verify-ca\", \"verify-full\"]\n-\n-\n-check = DMSS3DefinesIntransitEncryption()\n", "issue": "CKV_AWS_299 Fails for Values That Can't be Set\n**Describe the issue**\r\nFailing on - Check: CKV_AWS_299: \"Ensure DMS S3 defines in-transit encryption\"\r\n\r\nWhen the endpoint of a DMS Migration Task has a Endpoint that is S3, there is no option in AWS to specify ssl_mode. AWS does not have an ssl_mode for DMS Endpoints of type S3. Setting this value on extra connections does nothing. Also, setting this in Terraform does nothing. I may be crazy, but I think DMS transfers to S3 uses SSL by default.\r\n\r\n**Examples**\r\n```\r\n\r\nfrom __future__ import annotations\r\n\r\nfrom typing import Any\r\n\r\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\r\nfrom checkov.common.models.enums import CheckCategories\r\n\r\n\r\nclass DMSS3DefinesIntransitEncryption(BaseResourceValueCheck):\r\n def __init__(self) -> None:\r\n name = \"Ensure DMS S3 defines in-transit encryption\"\r\n id = \"CKV_AWS_299\"\r\n supported_resources = (\"aws_dms_s3_endpoint\",)\r\n categories = (CheckCategories.ENCRYPTION,)\r\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\r\n\r\n def get_inspected_key(self) -> str:\r\n return 'ssl_mode'\r\n\r\n def get_expected_values(self) -> list[Any]:\r\n return [\"require\", \"verify-ca\", \"verify-full\"]\r\n\r\n\r\ncheck = DMSS3DefinesIntransitEncryption()\r\n\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.3.343\r\n\r\n**Additional context**\r\n\r\nThis is property does nothing in terraform when the aws_dme_enpoint is s3 and I file and issue over there, however, it was closed with no fix.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\n\n\nclass DMSS3DefinesIntransitEncryption(BaseResourceValueCheck):\n def __init__(self) -> None:\n name = \"Ensure DMS S3 defines in-transit encryption\"\n id = \"CKV_AWS_299\"\n supported_resources = (\"aws_dms_s3_endpoint\",)\n categories = (CheckCategories.ENCRYPTION,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self) -> str:\n return 'ssl_mode'\n\n def get_expected_values(self) -> 
list[Any]:\n return [\"require\", \"verify-ca\", \"verify-full\"]\n\n\ncheck = DMSS3DefinesIntransitEncryption()\n", "path": "checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py"}], "after_files": [{"content": null, "path": "checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py"}]} | 902 | 296 |
gh_patches_debug_25329 | rasdani/github-patches | git_diff | pypa__pip-3540 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Show INSTALLER from PEP376 in pip show
Now that #1646 is implemented, it would be interesting to show this information in `pip show`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pip/commands/show.py`
Content:
```
1 from __future__ import absolute_import
2
3 from email.parser import FeedParser
4 import logging
5 import os
6
7 from pip.basecommand import Command
8 from pip.status_codes import SUCCESS, ERROR
9 from pip._vendor import pkg_resources
10
11
12 logger = logging.getLogger(__name__)
13
14
15 class ShowCommand(Command):
16 """Show information about one or more installed packages."""
17 name = 'show'
18 usage = """
19 %prog [options] <package> ..."""
20 summary = 'Show information about installed packages.'
21
22 def __init__(self, *args, **kw):
23 super(ShowCommand, self).__init__(*args, **kw)
24 self.cmd_opts.add_option(
25 '-f', '--files',
26 dest='files',
27 action='store_true',
28 default=False,
29 help='Show the full list of installed files for each package.')
30
31 self.parser.insert_option_group(0, self.cmd_opts)
32
33 def run(self, options, args):
34 if not args:
35 logger.warning('ERROR: Please provide a package name or names.')
36 return ERROR
37 query = args
38
39 results = search_packages_info(query)
40 if not print_results(results, options.files):
41 return ERROR
42 return SUCCESS
43
44
45 def search_packages_info(query):
46 """
47 Gather details from installed distributions. Print distribution name,
48 version, location, and installed files. Installed files requires a
49 pip generated 'installed-files.txt' in the distributions '.egg-info'
50 directory.
51 """
52 installed = dict(
53 [(p.project_name.lower(), p) for p in pkg_resources.working_set])
54 query_names = [name.lower() for name in query]
55 for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
56 package = {
57 'name': dist.project_name,
58 'version': dist.version,
59 'location': dist.location,
60 'requires': [dep.project_name for dep in dist.requires()],
61 }
62 file_list = None
63 metadata = None
64 if isinstance(dist, pkg_resources.DistInfoDistribution):
65 # RECORDs should be part of .dist-info metadatas
66 if dist.has_metadata('RECORD'):
67 lines = dist.get_metadata_lines('RECORD')
68 paths = [l.split(',')[0] for l in lines]
69 paths = [os.path.join(dist.location, p) for p in paths]
70 file_list = [os.path.relpath(p, dist.location) for p in paths]
71
72 if dist.has_metadata('METADATA'):
73 metadata = dist.get_metadata('METADATA')
74 else:
75 # Otherwise use pip's log for .egg-info's
76 if dist.has_metadata('installed-files.txt'):
77 paths = dist.get_metadata_lines('installed-files.txt')
78 paths = [os.path.join(dist.egg_info, p) for p in paths]
79 file_list = [os.path.relpath(p, dist.location) for p in paths]
80
81 if dist.has_metadata('PKG-INFO'):
82 metadata = dist.get_metadata('PKG-INFO')
83
84 if dist.has_metadata('entry_points.txt'):
85 entry_points = dist.get_metadata_lines('entry_points.txt')
86 package['entry_points'] = entry_points
87
88 # @todo: Should pkg_resources.Distribution have a
89 # `get_pkg_info` method?
90 feed_parser = FeedParser()
91 feed_parser.feed(metadata)
92 pkg_info_dict = feed_parser.close()
93 for key in ('metadata-version', 'summary',
94 'home-page', 'author', 'author-email', 'license'):
95 package[key] = pkg_info_dict.get(key)
96
97 # It looks like FeedParser can not deal with repeated headers
98 classifiers = []
99 for line in metadata.splitlines():
100 if not line:
101 break
102 # Classifier: License :: OSI Approved :: MIT License
103 if line.startswith('Classifier: '):
104 classifiers.append(line[len('Classifier: '):])
105 package['classifiers'] = classifiers
106
107 if file_list:
108 package['files'] = sorted(file_list)
109 yield package
110
111
112 def print_results(distributions, list_all_files):
113 """
114 Print the informations from installed distributions found.
115 """
116 results_printed = False
117 for dist in distributions:
118 results_printed = True
119 logger.info("---")
120 logger.info("Metadata-Version: %s", dist.get('metadata-version'))
121 logger.info("Name: %s", dist['name'])
122 logger.info("Version: %s", dist['version'])
123 logger.info("Summary: %s", dist.get('summary'))
124 logger.info("Home-page: %s", dist.get('home-page'))
125 logger.info("Author: %s", dist.get('author'))
126 logger.info("Author-email: %s", dist.get('author-email'))
127 logger.info("License: %s", dist.get('license'))
128 logger.info("Location: %s", dist['location'])
129 logger.info("Requires: %s", ', '.join(dist['requires']))
130 logger.info("Classifiers:")
131 for classifier in dist['classifiers']:
132 logger.info(" %s", classifier)
133 if list_all_files:
134 logger.info("Files:")
135 if 'files' in dist:
136 for line in dist['files']:
137 logger.info(" %s", line.strip())
138 else:
139 logger.info("Cannot locate installed-files.txt")
140 if 'entry_points' in dist:
141 logger.info("Entry-points:")
142 for line in dist['entry_points']:
143 logger.info(" %s", line.strip())
144 return results_printed
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pip/commands/show.py b/pip/commands/show.py
--- a/pip/commands/show.py
+++ b/pip/commands/show.py
@@ -85,6 +85,14 @@
entry_points = dist.get_metadata_lines('entry_points.txt')
package['entry_points'] = entry_points
+ installer = None
+ if dist.has_metadata('INSTALLER'):
+ for line in dist.get_metadata_lines('INSTALLER'):
+ if line.strip():
+ installer = line.strip()
+ break
+ package['installer'] = installer
+
# @todo: Should pkg_resources.Distribution have a
# `get_pkg_info` method?
feed_parser = FeedParser()
@@ -124,6 +132,8 @@
logger.info("Home-page: %s", dist.get('home-page'))
logger.info("Author: %s", dist.get('author'))
logger.info("Author-email: %s", dist.get('author-email'))
+ if dist['installer'] is not None:
+ logger.info("Installer: %s", dist['installer'])
logger.info("License: %s", dist.get('license'))
logger.info("Location: %s", dist['location'])
logger.info("Requires: %s", ', '.join(dist['requires']))
| {"golden_diff": "diff --git a/pip/commands/show.py b/pip/commands/show.py\n--- a/pip/commands/show.py\n+++ b/pip/commands/show.py\n@@ -85,6 +85,14 @@\n entry_points = dist.get_metadata_lines('entry_points.txt')\n package['entry_points'] = entry_points\n \n+ installer = None\n+ if dist.has_metadata('INSTALLER'):\n+ for line in dist.get_metadata_lines('INSTALLER'):\n+ if line.strip():\n+ installer = line.strip()\n+ break\n+ package['installer'] = installer\n+\n # @todo: Should pkg_resources.Distribution have a\n # `get_pkg_info` method?\n feed_parser = FeedParser()\n@@ -124,6 +132,8 @@\n logger.info(\"Home-page: %s\", dist.get('home-page'))\n logger.info(\"Author: %s\", dist.get('author'))\n logger.info(\"Author-email: %s\", dist.get('author-email'))\n+ if dist['installer'] is not None:\n+ logger.info(\"Installer: %s\", dist['installer'])\n logger.info(\"License: %s\", dist.get('license'))\n logger.info(\"Location: %s\", dist['location'])\n logger.info(\"Requires: %s\", ', '.join(dist['requires']))\n", "issue": "Show INSTALLER from PEP376 in pip show\nNow that #1646 is implemented, it would interesting to show this information in `pip show`.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom email.parser import FeedParser\nimport logging\nimport os\n\nfrom pip.basecommand import Command\nfrom pip.status_codes import SUCCESS, ERROR\nfrom pip._vendor import pkg_resources\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ShowCommand(Command):\n \"\"\"Show information about one or more installed packages.\"\"\"\n name = 'show'\n usage = \"\"\"\n %prog [options] <package> ...\"\"\"\n summary = 'Show information about installed packages.'\n\n def __init__(self, *args, **kw):\n super(ShowCommand, self).__init__(*args, **kw)\n self.cmd_opts.add_option(\n '-f', '--files',\n dest='files',\n action='store_true',\n default=False,\n help='Show the full list of installed files for each package.')\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options, args):\n if not args:\n logger.warning('ERROR: Please provide a package name or names.')\n return ERROR\n query = args\n\n results = search_packages_info(query)\n if not print_results(results, options.files):\n return ERROR\n return SUCCESS\n\n\ndef search_packages_info(query):\n \"\"\"\n Gather details from installed distributions. Print distribution name,\n version, location, and installed files. 
Installed files requires a\n pip generated 'installed-files.txt' in the distributions '.egg-info'\n directory.\n \"\"\"\n installed = dict(\n [(p.project_name.lower(), p) for p in pkg_resources.working_set])\n query_names = [name.lower() for name in query]\n for dist in [installed[pkg] for pkg in query_names if pkg in installed]:\n package = {\n 'name': dist.project_name,\n 'version': dist.version,\n 'location': dist.location,\n 'requires': [dep.project_name for dep in dist.requires()],\n }\n file_list = None\n metadata = None\n if isinstance(dist, pkg_resources.DistInfoDistribution):\n # RECORDs should be part of .dist-info metadatas\n if dist.has_metadata('RECORD'):\n lines = dist.get_metadata_lines('RECORD')\n paths = [l.split(',')[0] for l in lines]\n paths = [os.path.join(dist.location, p) for p in paths]\n file_list = [os.path.relpath(p, dist.location) for p in paths]\n\n if dist.has_metadata('METADATA'):\n metadata = dist.get_metadata('METADATA')\n else:\n # Otherwise use pip's log for .egg-info's\n if dist.has_metadata('installed-files.txt'):\n paths = dist.get_metadata_lines('installed-files.txt')\n paths = [os.path.join(dist.egg_info, p) for p in paths]\n file_list = [os.path.relpath(p, dist.location) for p in paths]\n\n if dist.has_metadata('PKG-INFO'):\n metadata = dist.get_metadata('PKG-INFO')\n\n if dist.has_metadata('entry_points.txt'):\n entry_points = dist.get_metadata_lines('entry_points.txt')\n package['entry_points'] = entry_points\n\n # @todo: Should pkg_resources.Distribution have a\n # `get_pkg_info` method?\n feed_parser = FeedParser()\n feed_parser.feed(metadata)\n pkg_info_dict = feed_parser.close()\n for key in ('metadata-version', 'summary',\n 'home-page', 'author', 'author-email', 'license'):\n package[key] = pkg_info_dict.get(key)\n\n # It looks like FeedParser can not deal with repeated headers\n classifiers = []\n for line in metadata.splitlines():\n if not line:\n break\n # Classifier: License :: OSI Approved :: MIT License\n if line.startswith('Classifier: '):\n classifiers.append(line[len('Classifier: '):])\n package['classifiers'] = classifiers\n\n if file_list:\n package['files'] = sorted(file_list)\n yield package\n\n\ndef print_results(distributions, list_all_files):\n \"\"\"\n Print the informations from installed distributions found.\n \"\"\"\n results_printed = False\n for dist in distributions:\n results_printed = True\n logger.info(\"---\")\n logger.info(\"Metadata-Version: %s\", dist.get('metadata-version'))\n logger.info(\"Name: %s\", dist['name'])\n logger.info(\"Version: %s\", dist['version'])\n logger.info(\"Summary: %s\", dist.get('summary'))\n logger.info(\"Home-page: %s\", dist.get('home-page'))\n logger.info(\"Author: %s\", dist.get('author'))\n logger.info(\"Author-email: %s\", dist.get('author-email'))\n logger.info(\"License: %s\", dist.get('license'))\n logger.info(\"Location: %s\", dist['location'])\n logger.info(\"Requires: %s\", ', '.join(dist['requires']))\n logger.info(\"Classifiers:\")\n for classifier in dist['classifiers']:\n logger.info(\" %s\", classifier)\n if list_all_files:\n logger.info(\"Files:\")\n if 'files' in dist:\n for line in dist['files']:\n logger.info(\" %s\", line.strip())\n else:\n logger.info(\"Cannot locate installed-files.txt\")\n if 'entry_points' in dist:\n logger.info(\"Entry-points:\")\n for line in dist['entry_points']:\n logger.info(\" %s\", line.strip())\n return results_printed\n", "path": "pip/commands/show.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom 
email.parser import FeedParser\nimport logging\nimport os\n\nfrom pip.basecommand import Command\nfrom pip.status_codes import SUCCESS, ERROR\nfrom pip._vendor import pkg_resources\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ShowCommand(Command):\n \"\"\"Show information about one or more installed packages.\"\"\"\n name = 'show'\n usage = \"\"\"\n %prog [options] <package> ...\"\"\"\n summary = 'Show information about installed packages.'\n\n def __init__(self, *args, **kw):\n super(ShowCommand, self).__init__(*args, **kw)\n self.cmd_opts.add_option(\n '-f', '--files',\n dest='files',\n action='store_true',\n default=False,\n help='Show the full list of installed files for each package.')\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options, args):\n if not args:\n logger.warning('ERROR: Please provide a package name or names.')\n return ERROR\n query = args\n\n results = search_packages_info(query)\n if not print_results(results, options.files):\n return ERROR\n return SUCCESS\n\n\ndef search_packages_info(query):\n \"\"\"\n Gather details from installed distributions. Print distribution name,\n version, location, and installed files. Installed files requires a\n pip generated 'installed-files.txt' in the distributions '.egg-info'\n directory.\n \"\"\"\n installed = dict(\n [(p.project_name.lower(), p) for p in pkg_resources.working_set])\n query_names = [name.lower() for name in query]\n for dist in [installed[pkg] for pkg in query_names if pkg in installed]:\n package = {\n 'name': dist.project_name,\n 'version': dist.version,\n 'location': dist.location,\n 'requires': [dep.project_name for dep in dist.requires()],\n }\n file_list = None\n metadata = None\n if isinstance(dist, pkg_resources.DistInfoDistribution):\n # RECORDs should be part of .dist-info metadatas\n if dist.has_metadata('RECORD'):\n lines = dist.get_metadata_lines('RECORD')\n paths = [l.split(',')[0] for l in lines]\n paths = [os.path.join(dist.location, p) for p in paths]\n file_list = [os.path.relpath(p, dist.location) for p in paths]\n\n if dist.has_metadata('METADATA'):\n metadata = dist.get_metadata('METADATA')\n else:\n # Otherwise use pip's log for .egg-info's\n if dist.has_metadata('installed-files.txt'):\n paths = dist.get_metadata_lines('installed-files.txt')\n paths = [os.path.join(dist.egg_info, p) for p in paths]\n file_list = [os.path.relpath(p, dist.location) for p in paths]\n\n if dist.has_metadata('PKG-INFO'):\n metadata = dist.get_metadata('PKG-INFO')\n\n if dist.has_metadata('entry_points.txt'):\n entry_points = dist.get_metadata_lines('entry_points.txt')\n package['entry_points'] = entry_points\n\n installer = None\n if dist.has_metadata('INSTALLER'):\n for line in dist.get_metadata_lines('INSTALLER'):\n if line.strip():\n installer = line.strip()\n break\n package['installer'] = installer\n\n # @todo: Should pkg_resources.Distribution have a\n # `get_pkg_info` method?\n feed_parser = FeedParser()\n feed_parser.feed(metadata)\n pkg_info_dict = feed_parser.close()\n for key in ('metadata-version', 'summary',\n 'home-page', 'author', 'author-email', 'license'):\n package[key] = pkg_info_dict.get(key)\n\n # It looks like FeedParser can not deal with repeated headers\n classifiers = []\n for line in metadata.splitlines():\n if not line:\n break\n # Classifier: License :: OSI Approved :: MIT License\n if line.startswith('Classifier: '):\n classifiers.append(line[len('Classifier: '):])\n package['classifiers'] = classifiers\n\n if file_list:\n package['files'] = 
sorted(file_list)\n yield package\n\n\ndef print_results(distributions, list_all_files):\n \"\"\"\n Print the informations from installed distributions found.\n \"\"\"\n results_printed = False\n for dist in distributions:\n results_printed = True\n logger.info(\"---\")\n logger.info(\"Metadata-Version: %s\", dist.get('metadata-version'))\n logger.info(\"Name: %s\", dist['name'])\n logger.info(\"Version: %s\", dist['version'])\n logger.info(\"Summary: %s\", dist.get('summary'))\n logger.info(\"Home-page: %s\", dist.get('home-page'))\n logger.info(\"Author: %s\", dist.get('author'))\n logger.info(\"Author-email: %s\", dist.get('author-email'))\n if dist['installer'] is not None:\n logger.info(\"Installer: %s\", dist['installer'])\n logger.info(\"License: %s\", dist.get('license'))\n logger.info(\"Location: %s\", dist['location'])\n logger.info(\"Requires: %s\", ', '.join(dist['requires']))\n logger.info(\"Classifiers:\")\n for classifier in dist['classifiers']:\n logger.info(\" %s\", classifier)\n if list_all_files:\n logger.info(\"Files:\")\n if 'files' in dist:\n for line in dist['files']:\n logger.info(\" %s\", line.strip())\n else:\n logger.info(\"Cannot locate installed-files.txt\")\n if 'entry_points' in dist:\n logger.info(\"Entry-points:\")\n for line in dist['entry_points']:\n logger.info(\" %s\", line.strip())\n return results_printed\n", "path": "pip/commands/show.py"}]} | 1,783 | 285 |
gh_patches_debug_10757 | rasdani/github-patches | git_diff | saleor__saleor-13533 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: argument of type 'NoneType' is not iterable
### What are you trying to achieve?
Create a category with `null` passed as the `seo` value in the input.
### Steps to reproduce the problem
1. Run the following mutation:
```graphql
mutation CreateCategory($input: CategoryInput!, $parentId: ID) {
categoryCreate(input: $input, parent: $parentId) {
errors {
code
}
category {
id
}
}
}
```
Variables:
```json
{
"input": {
"backgroundImage": null,
"backgroundImageAlt": "Root",
"description": "{\"blocks\":[{\"type\":\"paragraph\", \"data\":\"\"}]}",
"metadata": null,
"name": "Root",
"privateMetadata": null,
"seo": null,
"slug": "root"
},
"parentId": null
}
```
2. It crashes with the following error:
```json
{
"errors": [
{
"message": "argument of type 'NoneType' is not iterable",
"locations": [
{
"line": 2,
"column": 3
}
],
"path": [
"categoryCreate"
],
}
],
"data": {
"categoryCreate": null
},
}
```
The API crashes in the `clean_seo_fields` function when `null` is passed as the `seo` value.
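
A minimal sketch of the failure path, based on the `clean_seo_fields` implementation shown in the files below (the dict literal is a simplified stand-in for the real cleaned input):

```python
def clean_seo_fields(data):
    # data.pop("seo", {}) returns None, not {}, when the key exists with a null value
    seo_fields = data.pop("seo", {})
    if "title" in seo_fields:  # TypeError: argument of type 'NoneType' is not iterable
        data["seo_title"] = seo_fields["title"]
    if "description" in seo_fields:
        data["seo_description"] = seo_fields["description"]


clean_seo_fields({"name": "Root", "seo": None})  # reproduces the crash
```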
### What did you expect to happen?
In this case, the API shouldn't crash; it should ignore the SEO value and create the category without it.
### Logs
_No response_
### Environment
Saleor version: 3.14
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/graphql/core/validators/__init__.py`
Content:
```
1 from typing import TYPE_CHECKING, Optional
2 from uuid import UUID
3
4 import graphene
5 from django.core.exceptions import ValidationError
6 from django_prices.utils.formatting import get_currency_fraction
7 from graphene.utils.str_converters import to_camel_case
8 from graphql.error import GraphQLError
9
10 from ....core.utils import generate_unique_slug
11 from ....product.models import ProductVariantChannelListing
12
13 if TYPE_CHECKING:
14 from decimal import Decimal
15
16 from django.db.models import Model
17
18
19 def validate_one_of_args_is_in_mutation(*args, **kwargs):
20 try:
21 validate_one_of_args_is_in_query(*args, **kwargs)
22 except GraphQLError as e:
23 raise ValidationError(str(e), code="graphql_error")
24
25
26 def validate_one_of_args_is_in_query(*args, **kwargs):
27 # split args into a list with 2-element tuples:
28 # [(arg1_name, arg1_value), (arg2_name, arg2_value), ...]
29 splitted_args = [args[i : i + 2] for i in range(0, len(args), 2)] # noqa: E203
30 # filter trueish values from each tuple
31 filter_args = list(filter(lambda item: bool(item[1]) is True, splitted_args))
32 use_camel_case = kwargs.get("use_camel_case")
33
34 if len(filter_args) > 1:
35 if use_camel_case:
36 first_arg = to_camel_case(filter_args[0][0])
37 rest_args = ", ".join(
38 [f"'{to_camel_case(item[0])}'" for item in filter_args[1:]]
39 )
40 else:
41 first_arg = filter_args[0][0]
42 rest_args = ", ".join([f"'{item[0]}'" for item in filter_args[1:]])
43 raise GraphQLError(
44 f"Argument '{first_arg}' cannot be combined with {rest_args}"
45 )
46
47 if not filter_args:
48 if use_camel_case:
49 required_args = ", ".join(
50 [f"'{to_camel_case(item[0])}'" for item in splitted_args]
51 )
52 else:
53 required_args = ", ".join([f"'{item[0]}'" for item in splitted_args])
54 raise GraphQLError(f"At least one of arguments is required: {required_args}.")
55
56
57 def validate_price_precision(
58 value: Optional["Decimal"],
59 currency: str,
60 currency_fractions=None,
61 ):
62 """Validate if price amount does not have too many decimal places.
63
64 Price amount can't have more decimal places than currency allow to.
65 Works only with decimal created from a string.
66 """
67
68 # check no needed when there is no value
69 if not value:
70 return
71
72 if currency_fractions:
73 try:
74 currency_fraction = currency_fractions[currency][0]
75 except KeyError:
76 currency_fraction = currency_fractions["DEFAULT"][0]
77 else:
78 currency_fraction = get_currency_fraction(currency)
79
80 value = value.normalize()
81 if value.as_tuple().exponent < -currency_fraction:
82 raise ValidationError(
83 f"Value cannot have more than {currency_fraction} decimal places."
84 )
85
86
87 def validate_decimal_max_value(value: "Decimal", max_value=10**9):
88 """Validate if price amount is not higher than the limit for precision field.
89
90 Decimal fields in database have value limits.
91 By default its 10^9 for fields with precision 12.
92 """
93 if value >= max_value:
94 raise ValidationError(f"Value must be lower than {max_value}.")
95
96
97 def get_not_available_variants_in_channel(
98 variants_id: set, channel_id: int
99 ) -> tuple[set[int], set[str]]:
100 available_variants = ProductVariantChannelListing.objects.filter(
101 variant__id__in=variants_id,
102 channel_id=channel_id,
103 price_amount__isnull=False,
104 ).values_list("variant_id", flat=True)
105 not_available_variants = variants_id - set(available_variants)
106 not_available_graphql_ids = {
107 graphene.Node.to_global_id("ProductVariant", pk)
108 for pk in not_available_variants
109 }
110 return not_available_variants, not_available_graphql_ids
111
112
113 def validate_variants_available_in_channel(
114 variants_id: set,
115 channel_id: int,
116 error_code: str,
117 ):
118 """Validate available variants in specific channel."""
119 (
120 not_available_variants,
121 not_available_graphql_ids,
122 ) = get_not_available_variants_in_channel(variants_id, channel_id)
123 if not_available_variants:
124 raise ValidationError(
125 {
126 "lines": ValidationError(
127 "Cannot add lines with unavailable variants.",
128 code=error_code,
129 params={"variants": not_available_graphql_ids},
130 )
131 }
132 )
133
134
135 def validate_end_is_after_start(start_date, end_date):
136 """Validate if the end date provided is after start date."""
137
138 # check is not needed if no end date
139 if end_date is None:
140 return
141
142 if start_date > end_date:
143 raise ValidationError("End date cannot be before the start date.")
144
145
146 def validate_slug_and_generate_if_needed(
147 instance: "Model",
148 slugable_field: str,
149 cleaned_input: dict,
150 slug_field_name: str = "slug",
151 ) -> dict:
152 """Validate slug from input and generate in create mutation if is not given."""
153
154 # update mutation - just check if slug value is not empty
155 # _state.adding is True only when it's new not saved instance.
156 if not instance._state.adding:
157 validate_slug_value(cleaned_input)
158 return cleaned_input
159
160 # create mutation - generate slug if slug value is empty
161 slug = cleaned_input.get(slug_field_name)
162 if not slug and slugable_field in cleaned_input:
163 slug = generate_unique_slug(instance, cleaned_input[slugable_field])
164 cleaned_input[slug_field_name] = slug
165 return cleaned_input
166
167
168 def validate_slug_value(cleaned_input, slug_field_name: str = "slug"):
169 if slug_field_name in cleaned_input:
170 slug = cleaned_input[slug_field_name]
171 if not slug:
172 raise ValidationError(
173 f"{slug_field_name.capitalize()} value cannot be blank."
174 )
175
176
177 def clean_seo_fields(data):
178 """Extract and assign seo fields to given dictionary."""
179 seo_fields = data.pop("seo", {})
180
181 if "title" in seo_fields:
182 data["seo_title"] = seo_fields["title"]
183
184 if "description" in seo_fields:
185 data["seo_description"] = seo_fields["description"]
186
187
188 def validate_required_string_field(cleaned_input, field_name: str):
189 """Strip and validate field value."""
190 field_value = cleaned_input.get(field_name)
191 field_value = field_value.strip() if field_value else ""
192 if field_value:
193 cleaned_input[field_name] = field_value
194 else:
195 raise ValidationError(f"{field_name.capitalize()} is required.")
196 return cleaned_input
197
198
199 def validate_if_int_or_uuid(id):
200 result = True
201 try:
202 int(id)
203 except ValueError:
204 try:
205 UUID(id)
206 except (AttributeError, ValueError):
207 result = False
208 return result
209
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/graphql/core/validators/__init__.py b/saleor/graphql/core/validators/__init__.py
--- a/saleor/graphql/core/validators/__init__.py
+++ b/saleor/graphql/core/validators/__init__.py
@@ -178,11 +178,12 @@
"""Extract and assign seo fields to given dictionary."""
seo_fields = data.pop("seo", {})
- if "title" in seo_fields:
- data["seo_title"] = seo_fields["title"]
+ if seo_fields:
+ if "title" in seo_fields:
+ data["seo_title"] = seo_fields["title"]
- if "description" in seo_fields:
- data["seo_description"] = seo_fields["description"]
+ if "description" in seo_fields:
+ data["seo_description"] = seo_fields["description"]
def validate_required_string_field(cleaned_input, field_name: str):
| {"golden_diff": "diff --git a/saleor/graphql/core/validators/__init__.py b/saleor/graphql/core/validators/__init__.py\n--- a/saleor/graphql/core/validators/__init__.py\n+++ b/saleor/graphql/core/validators/__init__.py\n@@ -178,11 +178,12 @@\n \"\"\"Extract and assign seo fields to given dictionary.\"\"\"\n seo_fields = data.pop(\"seo\", {})\n \n- if \"title\" in seo_fields:\n- data[\"seo_title\"] = seo_fields[\"title\"]\n+ if seo_fields:\n+ if \"title\" in seo_fields:\n+ data[\"seo_title\"] = seo_fields[\"title\"]\n \n- if \"description\" in seo_fields:\n- data[\"seo_description\"] = seo_fields[\"description\"]\n+ if \"description\" in seo_fields:\n+ data[\"seo_description\"] = seo_fields[\"description\"]\n \n \n def validate_required_string_field(cleaned_input, field_name: str):\n", "issue": "Bug: argument of type 'NoneType' is not iterable\n### What are you trying to achieve?\n\nCreate a category with `null` passed as `seo` value in the input.\n\n### Steps to reproduce the problem\n\n1. Run the following mutation:\r\n```graphql\r\nmutation CreateCategory($input: CategoryInput!, $parentId: ID) {\r\n categoryCreate(input: $input, parent: $parentId) {\r\n errors {\r\n code\r\n }\r\n category {\r\n id\r\n }\r\n }\r\n}\r\n```\r\n\r\nVariables:\r\n\r\n```json\r\n{\r\n \"input\": {\r\n \"backgroundImage\": null,\r\n \"backgroundImageAlt\": \"Root\",\r\n \"description\": \"{\\\"blocks\\\":[{\\\"type\\\":\\\"paragraph\\\", \\\"data\\\":\\\"\\\"}]}\",\r\n \"metadata\": null,\r\n \"name\": \"Root\",\r\n \"privateMetadata\": null,\r\n \"seo\": null,\r\n \"slug\": \"root\"\r\n },\r\n \"parentId\": null\r\n}\r\n```\r\n\r\n2. It crashes with the following error:\r\n\r\n```json\r\n{\r\n \"errors\": [\r\n {\r\n \"message\": \"argument of type 'NoneType' is not iterable\",\r\n \"locations\": [\r\n {\r\n \"line\": 2,\r\n \"column\": 3\r\n }\r\n ],\r\n \"path\": [\r\n \"categoryCreate\"\r\n ],\r\n }\r\n ],\r\n \"data\": {\r\n \"categoryCreate\": null\r\n },\r\n}\r\n```\r\n\r\nAPI crashes in the `clean_seo_fields` function when `null` is passed as `seo` value. 
\r\n\r\n\n\n### What did you expect to happen?\n\nIn this case, the API shouldn't crash; it should ignore the SEO value and create the category without it.\n\n### Logs\n\n_No response_\n\n### Environment\n\nSaleor version: 3.14\r\n\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Optional\nfrom uuid import UUID\n\nimport graphene\nfrom django.core.exceptions import ValidationError\nfrom django_prices.utils.formatting import get_currency_fraction\nfrom graphene.utils.str_converters import to_camel_case\nfrom graphql.error import GraphQLError\n\nfrom ....core.utils import generate_unique_slug\nfrom ....product.models import ProductVariantChannelListing\n\nif TYPE_CHECKING:\n from decimal import Decimal\n\n from django.db.models import Model\n\n\ndef validate_one_of_args_is_in_mutation(*args, **kwargs):\n try:\n validate_one_of_args_is_in_query(*args, **kwargs)\n except GraphQLError as e:\n raise ValidationError(str(e), code=\"graphql_error\")\n\n\ndef validate_one_of_args_is_in_query(*args, **kwargs):\n # split args into a list with 2-element tuples:\n # [(arg1_name, arg1_value), (arg2_name, arg2_value), ...]\n splitted_args = [args[i : i + 2] for i in range(0, len(args), 2)] # noqa: E203\n # filter trueish values from each tuple\n filter_args = list(filter(lambda item: bool(item[1]) is True, splitted_args))\n use_camel_case = kwargs.get(\"use_camel_case\")\n\n if len(filter_args) > 1:\n if use_camel_case:\n first_arg = to_camel_case(filter_args[0][0])\n rest_args = \", \".join(\n [f\"'{to_camel_case(item[0])}'\" for item in filter_args[1:]]\n )\n else:\n first_arg = filter_args[0][0]\n rest_args = \", \".join([f\"'{item[0]}'\" for item in filter_args[1:]])\n raise GraphQLError(\n f\"Argument '{first_arg}' cannot be combined with {rest_args}\"\n )\n\n if not filter_args:\n if use_camel_case:\n required_args = \", \".join(\n [f\"'{to_camel_case(item[0])}'\" for item in splitted_args]\n )\n else:\n required_args = \", \".join([f\"'{item[0]}'\" for item in splitted_args])\n raise GraphQLError(f\"At least one of arguments is required: {required_args}.\")\n\n\ndef validate_price_precision(\n value: Optional[\"Decimal\"],\n currency: str,\n currency_fractions=None,\n):\n \"\"\"Validate if price amount does not have too many decimal places.\n\n Price amount can't have more decimal places than currency allow to.\n Works only with decimal created from a string.\n \"\"\"\n\n # check no needed when there is no value\n if not value:\n return\n\n if currency_fractions:\n try:\n currency_fraction = currency_fractions[currency][0]\n except KeyError:\n currency_fraction = currency_fractions[\"DEFAULT\"][0]\n else:\n currency_fraction = get_currency_fraction(currency)\n\n value = value.normalize()\n if value.as_tuple().exponent < -currency_fraction:\n raise ValidationError(\n f\"Value cannot have more than {currency_fraction} decimal places.\"\n )\n\n\ndef validate_decimal_max_value(value: \"Decimal\", max_value=10**9):\n \"\"\"Validate if price amount is not higher than the limit for precision field.\n\n Decimal fields in database have value limits.\n By default its 10^9 for fields with precision 12.\n \"\"\"\n if value >= max_value:\n raise ValidationError(f\"Value must be lower than {max_value}.\")\n\n\ndef get_not_available_variants_in_channel(\n variants_id: set, channel_id: int\n) -> tuple[set[int], set[str]]:\n available_variants = ProductVariantChannelListing.objects.filter(\n variant__id__in=variants_id,\n channel_id=channel_id,\n price_amount__isnull=False,\n 
).values_list(\"variant_id\", flat=True)\n not_available_variants = variants_id - set(available_variants)\n not_available_graphql_ids = {\n graphene.Node.to_global_id(\"ProductVariant\", pk)\n for pk in not_available_variants\n }\n return not_available_variants, not_available_graphql_ids\n\n\ndef validate_variants_available_in_channel(\n variants_id: set,\n channel_id: int,\n error_code: str,\n):\n \"\"\"Validate available variants in specific channel.\"\"\"\n (\n not_available_variants,\n not_available_graphql_ids,\n ) = get_not_available_variants_in_channel(variants_id, channel_id)\n if not_available_variants:\n raise ValidationError(\n {\n \"lines\": ValidationError(\n \"Cannot add lines with unavailable variants.\",\n code=error_code,\n params={\"variants\": not_available_graphql_ids},\n )\n }\n )\n\n\ndef validate_end_is_after_start(start_date, end_date):\n \"\"\"Validate if the end date provided is after start date.\"\"\"\n\n # check is not needed if no end date\n if end_date is None:\n return\n\n if start_date > end_date:\n raise ValidationError(\"End date cannot be before the start date.\")\n\n\ndef validate_slug_and_generate_if_needed(\n instance: \"Model\",\n slugable_field: str,\n cleaned_input: dict,\n slug_field_name: str = \"slug\",\n) -> dict:\n \"\"\"Validate slug from input and generate in create mutation if is not given.\"\"\"\n\n # update mutation - just check if slug value is not empty\n # _state.adding is True only when it's new not saved instance.\n if not instance._state.adding:\n validate_slug_value(cleaned_input)\n return cleaned_input\n\n # create mutation - generate slug if slug value is empty\n slug = cleaned_input.get(slug_field_name)\n if not slug and slugable_field in cleaned_input:\n slug = generate_unique_slug(instance, cleaned_input[slugable_field])\n cleaned_input[slug_field_name] = slug\n return cleaned_input\n\n\ndef validate_slug_value(cleaned_input, slug_field_name: str = \"slug\"):\n if slug_field_name in cleaned_input:\n slug = cleaned_input[slug_field_name]\n if not slug:\n raise ValidationError(\n f\"{slug_field_name.capitalize()} value cannot be blank.\"\n )\n\n\ndef clean_seo_fields(data):\n \"\"\"Extract and assign seo fields to given dictionary.\"\"\"\n seo_fields = data.pop(\"seo\", {})\n\n if \"title\" in seo_fields:\n data[\"seo_title\"] = seo_fields[\"title\"]\n\n if \"description\" in seo_fields:\n data[\"seo_description\"] = seo_fields[\"description\"]\n\n\ndef validate_required_string_field(cleaned_input, field_name: str):\n \"\"\"Strip and validate field value.\"\"\"\n field_value = cleaned_input.get(field_name)\n field_value = field_value.strip() if field_value else \"\"\n if field_value:\n cleaned_input[field_name] = field_value\n else:\n raise ValidationError(f\"{field_name.capitalize()} is required.\")\n return cleaned_input\n\n\ndef validate_if_int_or_uuid(id):\n result = True\n try:\n int(id)\n except ValueError:\n try:\n UUID(id)\n except (AttributeError, ValueError):\n result = False\n return result\n", "path": "saleor/graphql/core/validators/__init__.py"}], "after_files": [{"content": "from typing import TYPE_CHECKING, Optional\nfrom uuid import UUID\n\nimport graphene\nfrom django.core.exceptions import ValidationError\nfrom django_prices.utils.formatting import get_currency_fraction\nfrom graphene.utils.str_converters import to_camel_case\nfrom graphql.error import GraphQLError\n\nfrom ....core.utils import generate_unique_slug\nfrom ....product.models import ProductVariantChannelListing\n\nif TYPE_CHECKING:\n from decimal 
import Decimal\n\n from django.db.models import Model\n\n\ndef validate_one_of_args_is_in_mutation(*args, **kwargs):\n try:\n validate_one_of_args_is_in_query(*args, **kwargs)\n except GraphQLError as e:\n raise ValidationError(str(e), code=\"graphql_error\")\n\n\ndef validate_one_of_args_is_in_query(*args, **kwargs):\n # split args into a list with 2-element tuples:\n # [(arg1_name, arg1_value), (arg2_name, arg2_value), ...]\n splitted_args = [args[i : i + 2] for i in range(0, len(args), 2)] # noqa: E203\n # filter trueish values from each tuple\n filter_args = list(filter(lambda item: bool(item[1]) is True, splitted_args))\n use_camel_case = kwargs.get(\"use_camel_case\")\n\n if len(filter_args) > 1:\n if use_camel_case:\n first_arg = to_camel_case(filter_args[0][0])\n rest_args = \", \".join(\n [f\"'{to_camel_case(item[0])}'\" for item in filter_args[1:]]\n )\n else:\n first_arg = filter_args[0][0]\n rest_args = \", \".join([f\"'{item[0]}'\" for item in filter_args[1:]])\n raise GraphQLError(\n f\"Argument '{first_arg}' cannot be combined with {rest_args}\"\n )\n\n if not filter_args:\n if use_camel_case:\n required_args = \", \".join(\n [f\"'{to_camel_case(item[0])}'\" for item in splitted_args]\n )\n else:\n required_args = \", \".join([f\"'{item[0]}'\" for item in splitted_args])\n raise GraphQLError(f\"At least one of arguments is required: {required_args}.\")\n\n\ndef validate_price_precision(\n value: Optional[\"Decimal\"],\n currency: str,\n currency_fractions=None,\n):\n \"\"\"Validate if price amount does not have too many decimal places.\n\n Price amount can't have more decimal places than currency allow to.\n Works only with decimal created from a string.\n \"\"\"\n\n # check no needed when there is no value\n if not value:\n return\n\n if currency_fractions:\n try:\n currency_fraction = currency_fractions[currency][0]\n except KeyError:\n currency_fraction = currency_fractions[\"DEFAULT\"][0]\n else:\n currency_fraction = get_currency_fraction(currency)\n\n value = value.normalize()\n if value.as_tuple().exponent < -currency_fraction:\n raise ValidationError(\n f\"Value cannot have more than {currency_fraction} decimal places.\"\n )\n\n\ndef validate_decimal_max_value(value: \"Decimal\", max_value=10**9):\n \"\"\"Validate if price amount is not higher than the limit for precision field.\n\n Decimal fields in database have value limits.\n By default its 10^9 for fields with precision 12.\n \"\"\"\n if value >= max_value:\n raise ValidationError(f\"Value must be lower than {max_value}.\")\n\n\ndef get_not_available_variants_in_channel(\n variants_id: set, channel_id: int\n) -> tuple[set[int], set[str]]:\n available_variants = ProductVariantChannelListing.objects.filter(\n variant__id__in=variants_id,\n channel_id=channel_id,\n price_amount__isnull=False,\n ).values_list(\"variant_id\", flat=True)\n not_available_variants = variants_id - set(available_variants)\n not_available_graphql_ids = {\n graphene.Node.to_global_id(\"ProductVariant\", pk)\n for pk in not_available_variants\n }\n return not_available_variants, not_available_graphql_ids\n\n\ndef validate_variants_available_in_channel(\n variants_id: set,\n channel_id: int,\n error_code: str,\n):\n \"\"\"Validate available variants in specific channel.\"\"\"\n (\n not_available_variants,\n not_available_graphql_ids,\n ) = get_not_available_variants_in_channel(variants_id, channel_id)\n if not_available_variants:\n raise ValidationError(\n {\n \"lines\": ValidationError(\n \"Cannot add lines with unavailable variants.\",\n 
code=error_code,\n params={\"variants\": not_available_graphql_ids},\n )\n }\n )\n\n\ndef validate_end_is_after_start(start_date, end_date):\n \"\"\"Validate if the end date provided is after start date.\"\"\"\n\n # check is not needed if no end date\n if end_date is None:\n return\n\n if start_date > end_date:\n raise ValidationError(\"End date cannot be before the start date.\")\n\n\ndef validate_slug_and_generate_if_needed(\n instance: \"Model\",\n slugable_field: str,\n cleaned_input: dict,\n slug_field_name: str = \"slug\",\n) -> dict:\n \"\"\"Validate slug from input and generate in create mutation if is not given.\"\"\"\n\n # update mutation - just check if slug value is not empty\n # _state.adding is True only when it's new not saved instance.\n if not instance._state.adding:\n validate_slug_value(cleaned_input)\n return cleaned_input\n\n # create mutation - generate slug if slug value is empty\n slug = cleaned_input.get(slug_field_name)\n if not slug and slugable_field in cleaned_input:\n slug = generate_unique_slug(instance, cleaned_input[slugable_field])\n cleaned_input[slug_field_name] = slug\n return cleaned_input\n\n\ndef validate_slug_value(cleaned_input, slug_field_name: str = \"slug\"):\n if slug_field_name in cleaned_input:\n slug = cleaned_input[slug_field_name]\n if not slug:\n raise ValidationError(\n f\"{slug_field_name.capitalize()} value cannot be blank.\"\n )\n\n\ndef clean_seo_fields(data):\n \"\"\"Extract and assign seo fields to given dictionary.\"\"\"\n seo_fields = data.pop(\"seo\", {})\n\n if seo_fields:\n if \"title\" in seo_fields:\n data[\"seo_title\"] = seo_fields[\"title\"]\n\n if \"description\" in seo_fields:\n data[\"seo_description\"] = seo_fields[\"description\"]\n\n\ndef validate_required_string_field(cleaned_input, field_name: str):\n \"\"\"Strip and validate field value.\"\"\"\n field_value = cleaned_input.get(field_name)\n field_value = field_value.strip() if field_value else \"\"\n if field_value:\n cleaned_input[field_name] = field_value\n else:\n raise ValidationError(f\"{field_name.capitalize()} is required.\")\n return cleaned_input\n\n\ndef validate_if_int_or_uuid(id):\n result = True\n try:\n int(id)\n except ValueError:\n try:\n UUID(id)\n except (AttributeError, ValueError):\n result = False\n return result\n", "path": "saleor/graphql/core/validators/__init__.py"}]} | 2,699 | 208 |
gh_patches_debug_23164 | rasdani/github-patches | git_diff | nonebot__nonebot2-1716 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: plugin loading log messages are ambiguous
1. A "success" message is logged first, and the error is reported afterwards
2. When the plugin module name contains `.`, the actual plugin name and the module name differ
https://github.com/nonebot/nonebot2/blob/1dc02bfe8e9e481b7b22f71fa878463e798f4114/nonebot/plugin/manager.py#L151-L158
3. The name used here is the already-processed plugin name, so the name shown in the log can differ from the name the user actually provided, which is confusing
https://github.com/nonebot/nonebot2/blob/1dc02bfe8e9e481b7b22f71fa878463e798f4114/nonebot/plugin/manager.py#L165-L170
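
A rough sketch of the confusing sequence (hypothetical module names; this assumes `_module_name_to_plugin_name` keeps only the last dotted segment — that helper is not shown in the files below — and that the module was already imported once, so the `__plugin__` check fails even though the import itself succeeds):

```python
from nonebot.plugin.manager import PluginManager

import some_pkg.plugins.foo  # hypothetical plugin module, imported before loading

PluginManager(plugins=["some_pkg.plugins.foo"]).load_all_plugins()
# Log output, roughly:
#   Succeeded to import "foo"    <- plugin name, not the module name the user provided
#   Failed to import "foo"       <- ...immediately followed by the error
#   RuntimeError: Module some_pkg.plugins.foo is not loaded as a plugin! ...
```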
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nonebot/plugin/manager.py`
Content:
```
1 """本模块实现插件加载流程。
2
3 参考: [import hooks](https://docs.python.org/3/reference/import.html#import-hooks), [PEP302](https://www.python.org/dev/peps/pep-0302/)
4
5 FrontMatter:
6 sidebar_position: 5
7 description: nonebot.plugin.manager 模块
8 """
9 import sys
10 import pkgutil
11 import importlib
12 from pathlib import Path
13 from itertools import chain
14 from types import ModuleType
15 from importlib.abc import MetaPathFinder
16 from importlib.machinery import PathFinder, SourceFileLoader
17 from typing import Set, Dict, List, Iterable, Optional, Sequence
18
19 from nonebot.log import logger
20 from nonebot.utils import escape_tag, path_to_module_name
21
22 from .plugin import Plugin, PluginMetadata
23 from . import (
24 _managers,
25 _new_plugin,
26 _revert_plugin,
27 _current_plugin_chain,
28 _module_name_to_plugin_name,
29 )
30
31
32 class PluginManager:
33 """插件管理器。
34
35 参数:
36 plugins: 独立插件模块名集合。
37 search_path: 插件搜索路径(文件夹)。
38 """
39
40 def __init__(
41 self,
42 plugins: Optional[Iterable[str]] = None,
43 search_path: Optional[Iterable[str]] = None,
44 ):
45 # simple plugin not in search path
46 self.plugins: Set[str] = set(plugins or [])
47 self.search_path: Set[str] = set(search_path or [])
48
49 # cache plugins
50 self._third_party_plugin_names: Dict[str, str] = {}
51 self._searched_plugin_names: Dict[str, Path] = {}
52 self.prepare_plugins()
53
54 def __repr__(self) -> str:
55 return f"PluginManager(plugins={self.plugins}, search_path={self.search_path})"
56
57 @property
58 def third_party_plugins(self) -> Set[str]:
59 """返回所有独立插件名称。"""
60 return set(self._third_party_plugin_names.keys())
61
62 @property
63 def searched_plugins(self) -> Set[str]:
64 """返回已搜索到的插件名称。"""
65 return set(self._searched_plugin_names.keys())
66
67 @property
68 def available_plugins(self) -> Set[str]:
69 """返回当前插件管理器中可用的插件名称。"""
70 return self.third_party_plugins | self.searched_plugins
71
72 def _previous_plugins(self) -> Set[str]:
73 _pre_managers: List[PluginManager]
74 if self in _managers:
75 _pre_managers = _managers[: _managers.index(self)]
76 else:
77 _pre_managers = _managers[:]
78
79 return {
80 *chain.from_iterable(manager.available_plugins for manager in _pre_managers)
81 }
82
83 def prepare_plugins(self) -> Set[str]:
84 """搜索插件并缓存插件名称。"""
85 # get all previous ready to load plugins
86 previous_plugins = self._previous_plugins()
87 searched_plugins: Dict[str, Path] = {}
88 third_party_plugins: Dict[str, str] = {}
89
90 # check third party plugins
91 for plugin in self.plugins:
92 name = _module_name_to_plugin_name(plugin)
93 if name in third_party_plugins or name in previous_plugins:
94 raise RuntimeError(
95 f"Plugin already exists: {name}! Check your plugin name"
96 )
97 third_party_plugins[name] = plugin
98
99 self._third_party_plugin_names = third_party_plugins
100
101 # check plugins in search path
102 for module_info in pkgutil.iter_modules(self.search_path):
103 # ignore if startswith "_"
104 if module_info.name.startswith("_"):
105 continue
106
107 if (
108 module_info.name in searched_plugins
109 or module_info.name in previous_plugins
110 or module_info.name in third_party_plugins
111 ):
112 raise RuntimeError(
113 f"Plugin already exists: {module_info.name}! Check your plugin name"
114 )
115
116 if not (
117 module_spec := module_info.module_finder.find_spec(
118 module_info.name, None
119 )
120 ):
121 continue
122 if not (module_path := module_spec.origin):
123 continue
124 searched_plugins[module_info.name] = Path(module_path).resolve()
125
126 self._searched_plugin_names = searched_plugins
127
128 return self.available_plugins
129
130 def load_plugin(self, name: str) -> Optional[Plugin]:
131 """加载指定插件。
132
133 对于独立插件,可以使用完整插件模块名或者插件名称。
134
135 参数:
136 name: 插件名称。
137 """
138
139 try:
140 if name in self.plugins:
141 module = importlib.import_module(name)
142 elif name in self._third_party_plugin_names:
143 module = importlib.import_module(self._third_party_plugin_names[name])
144 elif name in self._searched_plugin_names:
145 module = importlib.import_module(
146 path_to_module_name(self._searched_plugin_names[name])
147 )
148 else:
149 raise RuntimeError(f"Plugin not found: {name}! Check your plugin name")
150
151 logger.opt(colors=True).success(
152 f'Succeeded to import "<y>{escape_tag(name)}</y>"'
153 )
154 if (plugin := getattr(module, "__plugin__", None)) is None:
155 raise RuntimeError(
156 f"Module {module.__name__} is not loaded as a plugin! "
157 "Make sure not to import it before loading."
158 )
159 return plugin
160 except Exception as e:
161 logger.opt(colors=True, exception=e).error(
162 f'<r><bg #f8bbd0>Failed to import "{escape_tag(name)}"</bg #f8bbd0></r>'
163 )
164
165 def load_all_plugins(self) -> Set[Plugin]:
166 """加载所有可用插件。"""
167
168 return set(
169 filter(None, (self.load_plugin(name) for name in self.available_plugins))
170 )
171
172
173 class PluginFinder(MetaPathFinder):
174 def find_spec(
175 self,
176 fullname: str,
177 path: Optional[Sequence[str]],
178 target: Optional[ModuleType] = None,
179 ):
180 if _managers:
181 module_spec = PathFinder.find_spec(fullname, path, target)
182 if not module_spec:
183 return
184 module_origin = module_spec.origin
185 if not module_origin:
186 return
187 module_path = Path(module_origin).resolve()
188
189 for manager in reversed(_managers):
190 # use path instead of name in case of submodule name conflict
191 if (
192 fullname in manager.plugins
193 or module_path in manager._searched_plugin_names.values()
194 ):
195 module_spec.loader = PluginLoader(manager, fullname, module_origin)
196 return module_spec
197 return
198
199
200 class PluginLoader(SourceFileLoader):
201 def __init__(self, manager: PluginManager, fullname: str, path) -> None:
202 self.manager = manager
203 self.loaded = False
204 super().__init__(fullname, path)
205
206 def create_module(self, spec) -> Optional[ModuleType]:
207 if self.name in sys.modules:
208 self.loaded = True
209 return sys.modules[self.name]
210 # return None to use default module creation
211 return super().create_module(spec)
212
213 def exec_module(self, module: ModuleType) -> None:
214 if self.loaded:
215 return
216
217 # create plugin before executing
218 plugin = _new_plugin(self.name, module, self.manager)
219 setattr(module, "__plugin__", plugin)
220
221 # detect parent plugin before entering current plugin context
222 parent_plugins = _current_plugin_chain.get()
223 for pre_plugin in reversed(parent_plugins):
224 if _managers.index(pre_plugin.manager) < _managers.index(self.manager):
225 plugin.parent_plugin = pre_plugin
226 pre_plugin.sub_plugins.add(plugin)
227 break
228
229 # enter plugin context
230 _plugin_token = _current_plugin_chain.set(parent_plugins + (plugin,))
231
232 try:
233 super().exec_module(module)
234 except Exception:
235 _revert_plugin(plugin)
236 raise
237 finally:
238 # leave plugin context
239 _current_plugin_chain.reset(_plugin_token)
240
241 # get plugin metadata
242 metadata: Optional[PluginMetadata] = getattr(module, "__plugin_meta__", None)
243 plugin.metadata = metadata
244
245 return
246
247
248 sys.meta_path.insert(0, PluginFinder())
249
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nonebot/plugin/manager.py b/nonebot/plugin/manager.py
--- a/nonebot/plugin/manager.py
+++ b/nonebot/plugin/manager.py
@@ -148,14 +148,21 @@
else:
raise RuntimeError(f"Plugin not found: {name}! Check your plugin name")
- logger.opt(colors=True).success(
- f'Succeeded to import "<y>{escape_tag(name)}</y>"'
- )
- if (plugin := getattr(module, "__plugin__", None)) is None:
+ if (
+ plugin := getattr(module, "__plugin__", None)
+ ) is None or not isinstance(plugin, Plugin):
raise RuntimeError(
f"Module {module.__name__} is not loaded as a plugin! "
"Make sure not to import it before loading."
)
+ logger.opt(colors=True).success(
+ f'Succeeded to load plugin "<y>{escape_tag(plugin.name)}</y>"'
+ + (
+ f' from "<m>{escape_tag(plugin.module_name)}</m>"'
+ if plugin.module_name != plugin.name
+ else ""
+ )
+ )
return plugin
except Exception as e:
logger.opt(colors=True, exception=e).error(
| {"golden_diff": "diff --git a/nonebot/plugin/manager.py b/nonebot/plugin/manager.py\n--- a/nonebot/plugin/manager.py\n+++ b/nonebot/plugin/manager.py\n@@ -148,14 +148,21 @@\n else:\n raise RuntimeError(f\"Plugin not found: {name}! Check your plugin name\")\n \n- logger.opt(colors=True).success(\n- f'Succeeded to import \"<y>{escape_tag(name)}</y>\"'\n- )\n- if (plugin := getattr(module, \"__plugin__\", None)) is None:\n+ if (\n+ plugin := getattr(module, \"__plugin__\", None)\n+ ) is None or not isinstance(plugin, Plugin):\n raise RuntimeError(\n f\"Module {module.__name__} is not loaded as a plugin! \"\n \"Make sure not to import it before loading.\"\n )\n+ logger.opt(colors=True).success(\n+ f'Succeeded to load plugin \"<y>{escape_tag(plugin.name)}</y>\"'\n+ + (\n+ f' from \"<m>{escape_tag(plugin.module_name)}</m>\"'\n+ if plugin.module_name != plugin.name\n+ else \"\"\n+ )\n+ )\n return plugin\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n", "issue": "Bug: \u63d2\u4ef6\u52a0\u8f7d\u65e5\u5fd7\u5b58\u5728\u6b67\u4e49\n1. \u5148 success \u518d\u62a5\u9519\r\n2. \u5f53\u63d2\u4ef6\u6a21\u5757\u540d\u5185\u542b `.` \u65f6\uff0c\u5b9e\u9645\u63d2\u4ef6\u540d\u548c\u6a21\u5757\u540d\u4e0d\u4e00\u81f4\r\n\r\nhttps://github.com/nonebot/nonebot2/blob/1dc02bfe8e9e481b7b22f71fa878463e798f4114/nonebot/plugin/manager.py#L151-L158\r\n\r\n3. \u8fd9\u91cc\u4f7f\u7528\u4e86\u5df2\u7ecf\u5904\u7406\u8fc7\u7684\u540d\u5b57\u5373\u63d2\u4ef6\u540d\uff0c\u65e5\u5fd7\u663e\u793a\u540d\u79f0\u4f1a\u4e0e\u5b9e\u9645\u7528\u6237\u63d0\u4f9b\u540d\u79f0\u4e0d\u4e00\u81f4\uff0c\u4ea7\u751f\u8ff7\u60d1\r\n\r\nhttps://github.com/nonebot/nonebot2/blob/1dc02bfe8e9e481b7b22f71fa878463e798f4114/nonebot/plugin/manager.py#L165-L170\n", "before_files": [{"content": "\"\"\"\u672c\u6a21\u5757\u5b9e\u73b0\u63d2\u4ef6\u52a0\u8f7d\u6d41\u7a0b\u3002\n\n\u53c2\u8003: [import hooks](https://docs.python.org/3/reference/import.html#import-hooks), [PEP302](https://www.python.org/dev/peps/pep-0302/)\n\nFrontMatter:\n sidebar_position: 5\n description: nonebot.plugin.manager \u6a21\u5757\n\"\"\"\nimport sys\nimport pkgutil\nimport importlib\nfrom pathlib import Path\nfrom itertools import chain\nfrom types import ModuleType\nfrom importlib.abc import MetaPathFinder\nfrom importlib.machinery import PathFinder, SourceFileLoader\nfrom typing import Set, Dict, List, Iterable, Optional, Sequence\n\nfrom nonebot.log import logger\nfrom nonebot.utils import escape_tag, path_to_module_name\n\nfrom .plugin import Plugin, PluginMetadata\nfrom . 
import (\n _managers,\n _new_plugin,\n _revert_plugin,\n _current_plugin_chain,\n _module_name_to_plugin_name,\n)\n\n\nclass PluginManager:\n \"\"\"\u63d2\u4ef6\u7ba1\u7406\u5668\u3002\n\n \u53c2\u6570:\n plugins: \u72ec\u7acb\u63d2\u4ef6\u6a21\u5757\u540d\u96c6\u5408\u3002\n search_path: \u63d2\u4ef6\u641c\u7d22\u8def\u5f84\uff08\u6587\u4ef6\u5939\uff09\u3002\n \"\"\"\n\n def __init__(\n self,\n plugins: Optional[Iterable[str]] = None,\n search_path: Optional[Iterable[str]] = None,\n ):\n # simple plugin not in search path\n self.plugins: Set[str] = set(plugins or [])\n self.search_path: Set[str] = set(search_path or [])\n\n # cache plugins\n self._third_party_plugin_names: Dict[str, str] = {}\n self._searched_plugin_names: Dict[str, Path] = {}\n self.prepare_plugins()\n\n def __repr__(self) -> str:\n return f\"PluginManager(plugins={self.plugins}, search_path={self.search_path})\"\n\n @property\n def third_party_plugins(self) -> Set[str]:\n \"\"\"\u8fd4\u56de\u6240\u6709\u72ec\u7acb\u63d2\u4ef6\u540d\u79f0\u3002\"\"\"\n return set(self._third_party_plugin_names.keys())\n\n @property\n def searched_plugins(self) -> Set[str]:\n \"\"\"\u8fd4\u56de\u5df2\u641c\u7d22\u5230\u7684\u63d2\u4ef6\u540d\u79f0\u3002\"\"\"\n return set(self._searched_plugin_names.keys())\n\n @property\n def available_plugins(self) -> Set[str]:\n \"\"\"\u8fd4\u56de\u5f53\u524d\u63d2\u4ef6\u7ba1\u7406\u5668\u4e2d\u53ef\u7528\u7684\u63d2\u4ef6\u540d\u79f0\u3002\"\"\"\n return self.third_party_plugins | self.searched_plugins\n\n def _previous_plugins(self) -> Set[str]:\n _pre_managers: List[PluginManager]\n if self in _managers:\n _pre_managers = _managers[: _managers.index(self)]\n else:\n _pre_managers = _managers[:]\n\n return {\n *chain.from_iterable(manager.available_plugins for manager in _pre_managers)\n }\n\n def prepare_plugins(self) -> Set[str]:\n \"\"\"\u641c\u7d22\u63d2\u4ef6\u5e76\u7f13\u5b58\u63d2\u4ef6\u540d\u79f0\u3002\"\"\"\n # get all previous ready to load plugins\n previous_plugins = self._previous_plugins()\n searched_plugins: Dict[str, Path] = {}\n third_party_plugins: Dict[str, str] = {}\n\n # check third party plugins\n for plugin in self.plugins:\n name = _module_name_to_plugin_name(plugin)\n if name in third_party_plugins or name in previous_plugins:\n raise RuntimeError(\n f\"Plugin already exists: {name}! Check your plugin name\"\n )\n third_party_plugins[name] = plugin\n\n self._third_party_plugin_names = third_party_plugins\n\n # check plugins in search path\n for module_info in pkgutil.iter_modules(self.search_path):\n # ignore if startswith \"_\"\n if module_info.name.startswith(\"_\"):\n continue\n\n if (\n module_info.name in searched_plugins\n or module_info.name in previous_plugins\n or module_info.name in third_party_plugins\n ):\n raise RuntimeError(\n f\"Plugin already exists: {module_info.name}! 
Check your plugin name\"\n )\n\n if not (\n module_spec := module_info.module_finder.find_spec(\n module_info.name, None\n )\n ):\n continue\n if not (module_path := module_spec.origin):\n continue\n searched_plugins[module_info.name] = Path(module_path).resolve()\n\n self._searched_plugin_names = searched_plugins\n\n return self.available_plugins\n\n def load_plugin(self, name: str) -> Optional[Plugin]:\n \"\"\"\u52a0\u8f7d\u6307\u5b9a\u63d2\u4ef6\u3002\n\n \u5bf9\u4e8e\u72ec\u7acb\u63d2\u4ef6\uff0c\u53ef\u4ee5\u4f7f\u7528\u5b8c\u6574\u63d2\u4ef6\u6a21\u5757\u540d\u6216\u8005\u63d2\u4ef6\u540d\u79f0\u3002\n\n \u53c2\u6570:\n name: \u63d2\u4ef6\u540d\u79f0\u3002\n \"\"\"\n\n try:\n if name in self.plugins:\n module = importlib.import_module(name)\n elif name in self._third_party_plugin_names:\n module = importlib.import_module(self._third_party_plugin_names[name])\n elif name in self._searched_plugin_names:\n module = importlib.import_module(\n path_to_module_name(self._searched_plugin_names[name])\n )\n else:\n raise RuntimeError(f\"Plugin not found: {name}! Check your plugin name\")\n\n logger.opt(colors=True).success(\n f'Succeeded to import \"<y>{escape_tag(name)}</y>\"'\n )\n if (plugin := getattr(module, \"__plugin__\", None)) is None:\n raise RuntimeError(\n f\"Module {module.__name__} is not loaded as a plugin! \"\n \"Make sure not to import it before loading.\"\n )\n return plugin\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f'<r><bg #f8bbd0>Failed to import \"{escape_tag(name)}\"</bg #f8bbd0></r>'\n )\n\n def load_all_plugins(self) -> Set[Plugin]:\n \"\"\"\u52a0\u8f7d\u6240\u6709\u53ef\u7528\u63d2\u4ef6\u3002\"\"\"\n\n return set(\n filter(None, (self.load_plugin(name) for name in self.available_plugins))\n )\n\n\nclass PluginFinder(MetaPathFinder):\n def find_spec(\n self,\n fullname: str,\n path: Optional[Sequence[str]],\n target: Optional[ModuleType] = None,\n ):\n if _managers:\n module_spec = PathFinder.find_spec(fullname, path, target)\n if not module_spec:\n return\n module_origin = module_spec.origin\n if not module_origin:\n return\n module_path = Path(module_origin).resolve()\n\n for manager in reversed(_managers):\n # use path instead of name in case of submodule name conflict\n if (\n fullname in manager.plugins\n or module_path in manager._searched_plugin_names.values()\n ):\n module_spec.loader = PluginLoader(manager, fullname, module_origin)\n return module_spec\n return\n\n\nclass PluginLoader(SourceFileLoader):\n def __init__(self, manager: PluginManager, fullname: str, path) -> None:\n self.manager = manager\n self.loaded = False\n super().__init__(fullname, path)\n\n def create_module(self, spec) -> Optional[ModuleType]:\n if self.name in sys.modules:\n self.loaded = True\n return sys.modules[self.name]\n # return None to use default module creation\n return super().create_module(spec)\n\n def exec_module(self, module: ModuleType) -> None:\n if self.loaded:\n return\n\n # create plugin before executing\n plugin = _new_plugin(self.name, module, self.manager)\n setattr(module, \"__plugin__\", plugin)\n\n # detect parent plugin before entering current plugin context\n parent_plugins = _current_plugin_chain.get()\n for pre_plugin in reversed(parent_plugins):\n if _managers.index(pre_plugin.manager) < _managers.index(self.manager):\n plugin.parent_plugin = pre_plugin\n pre_plugin.sub_plugins.add(plugin)\n break\n\n # enter plugin context\n _plugin_token = _current_plugin_chain.set(parent_plugins + (plugin,))\n\n try:\n 
super().exec_module(module)\n except Exception:\n _revert_plugin(plugin)\n raise\n finally:\n # leave plugin context\n _current_plugin_chain.reset(_plugin_token)\n\n # get plugin metadata\n metadata: Optional[PluginMetadata] = getattr(module, \"__plugin_meta__\", None)\n plugin.metadata = metadata\n\n return\n\n\nsys.meta_path.insert(0, PluginFinder())\n", "path": "nonebot/plugin/manager.py"}], "after_files": [{"content": "\"\"\"\u672c\u6a21\u5757\u5b9e\u73b0\u63d2\u4ef6\u52a0\u8f7d\u6d41\u7a0b\u3002\n\n\u53c2\u8003: [import hooks](https://docs.python.org/3/reference/import.html#import-hooks), [PEP302](https://www.python.org/dev/peps/pep-0302/)\n\nFrontMatter:\n sidebar_position: 5\n description: nonebot.plugin.manager \u6a21\u5757\n\"\"\"\nimport sys\nimport pkgutil\nimport importlib\nfrom pathlib import Path\nfrom itertools import chain\nfrom types import ModuleType\nfrom importlib.abc import MetaPathFinder\nfrom importlib.machinery import PathFinder, SourceFileLoader\nfrom typing import Set, Dict, List, Iterable, Optional, Sequence\n\nfrom nonebot.log import logger\nfrom nonebot.utils import escape_tag, path_to_module_name\n\nfrom .plugin import Plugin, PluginMetadata\nfrom . import (\n _managers,\n _new_plugin,\n _revert_plugin,\n _current_plugin_chain,\n _module_name_to_plugin_name,\n)\n\n\nclass PluginManager:\n \"\"\"\u63d2\u4ef6\u7ba1\u7406\u5668\u3002\n\n \u53c2\u6570:\n plugins: \u72ec\u7acb\u63d2\u4ef6\u6a21\u5757\u540d\u96c6\u5408\u3002\n search_path: \u63d2\u4ef6\u641c\u7d22\u8def\u5f84\uff08\u6587\u4ef6\u5939\uff09\u3002\n \"\"\"\n\n def __init__(\n self,\n plugins: Optional[Iterable[str]] = None,\n search_path: Optional[Iterable[str]] = None,\n ):\n # simple plugin not in search path\n self.plugins: Set[str] = set(plugins or [])\n self.search_path: Set[str] = set(search_path or [])\n\n # cache plugins\n self._third_party_plugin_names: Dict[str, str] = {}\n self._searched_plugin_names: Dict[str, Path] = {}\n self.prepare_plugins()\n\n def __repr__(self) -> str:\n return f\"PluginManager(plugins={self.plugins}, search_path={self.search_path})\"\n\n @property\n def third_party_plugins(self) -> Set[str]:\n \"\"\"\u8fd4\u56de\u6240\u6709\u72ec\u7acb\u63d2\u4ef6\u540d\u79f0\u3002\"\"\"\n return set(self._third_party_plugin_names.keys())\n\n @property\n def searched_plugins(self) -> Set[str]:\n \"\"\"\u8fd4\u56de\u5df2\u641c\u7d22\u5230\u7684\u63d2\u4ef6\u540d\u79f0\u3002\"\"\"\n return set(self._searched_plugin_names.keys())\n\n @property\n def available_plugins(self) -> Set[str]:\n \"\"\"\u8fd4\u56de\u5f53\u524d\u63d2\u4ef6\u7ba1\u7406\u5668\u4e2d\u53ef\u7528\u7684\u63d2\u4ef6\u540d\u79f0\u3002\"\"\"\n return self.third_party_plugins | self.searched_plugins\n\n def _previous_plugins(self) -> Set[str]:\n _pre_managers: List[PluginManager]\n if self in _managers:\n _pre_managers = _managers[: _managers.index(self)]\n else:\n _pre_managers = _managers[:]\n\n return {\n *chain.from_iterable(manager.available_plugins for manager in _pre_managers)\n }\n\n def prepare_plugins(self) -> Set[str]:\n \"\"\"\u641c\u7d22\u63d2\u4ef6\u5e76\u7f13\u5b58\u63d2\u4ef6\u540d\u79f0\u3002\"\"\"\n # get all previous ready to load plugins\n previous_plugins = self._previous_plugins()\n searched_plugins: Dict[str, Path] = {}\n third_party_plugins: Dict[str, str] = {}\n\n # check third party plugins\n for plugin in self.plugins:\n name = _module_name_to_plugin_name(plugin)\n if name in third_party_plugins or name in previous_plugins:\n raise RuntimeError(\n f\"Plugin already exists: {name}! 
Check your plugin name\"\n )\n third_party_plugins[name] = plugin\n\n self._third_party_plugin_names = third_party_plugins\n\n # check plugins in search path\n for module_info in pkgutil.iter_modules(self.search_path):\n # ignore if startswith \"_\"\n if module_info.name.startswith(\"_\"):\n continue\n\n if (\n module_info.name in searched_plugins\n or module_info.name in previous_plugins\n or module_info.name in third_party_plugins\n ):\n raise RuntimeError(\n f\"Plugin already exists: {module_info.name}! Check your plugin name\"\n )\n\n if not (\n module_spec := module_info.module_finder.find_spec(\n module_info.name, None\n )\n ):\n continue\n if not (module_path := module_spec.origin):\n continue\n searched_plugins[module_info.name] = Path(module_path).resolve()\n\n self._searched_plugin_names = searched_plugins\n\n return self.available_plugins\n\n def load_plugin(self, name: str) -> Optional[Plugin]:\n \"\"\"\u52a0\u8f7d\u6307\u5b9a\u63d2\u4ef6\u3002\n\n \u5bf9\u4e8e\u72ec\u7acb\u63d2\u4ef6\uff0c\u53ef\u4ee5\u4f7f\u7528\u5b8c\u6574\u63d2\u4ef6\u6a21\u5757\u540d\u6216\u8005\u63d2\u4ef6\u540d\u79f0\u3002\n\n \u53c2\u6570:\n name: \u63d2\u4ef6\u540d\u79f0\u3002\n \"\"\"\n\n try:\n if name in self.plugins:\n module = importlib.import_module(name)\n elif name in self._third_party_plugin_names:\n module = importlib.import_module(self._third_party_plugin_names[name])\n elif name in self._searched_plugin_names:\n module = importlib.import_module(\n path_to_module_name(self._searched_plugin_names[name])\n )\n else:\n raise RuntimeError(f\"Plugin not found: {name}! Check your plugin name\")\n\n if (\n plugin := getattr(module, \"__plugin__\", None)\n ) is None or not isinstance(plugin, Plugin):\n raise RuntimeError(\n f\"Module {module.__name__} is not loaded as a plugin! 
\"\n \"Make sure not to import it before loading.\"\n )\n logger.opt(colors=True).success(\n f'Succeeded to load plugin \"<y>{escape_tag(plugin.name)}</y>\"'\n + (\n f' from \"<m>{escape_tag(plugin.module_name)}</m>\"'\n if plugin.module_name != plugin.name\n else \"\"\n )\n )\n return plugin\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f'<r><bg #f8bbd0>Failed to import \"{escape_tag(name)}\"</bg #f8bbd0></r>'\n )\n\n def load_all_plugins(self) -> Set[Plugin]:\n \"\"\"\u52a0\u8f7d\u6240\u6709\u53ef\u7528\u63d2\u4ef6\u3002\"\"\"\n\n return set(\n filter(None, (self.load_plugin(name) for name in self.available_plugins))\n )\n\n\nclass PluginFinder(MetaPathFinder):\n def find_spec(\n self,\n fullname: str,\n path: Optional[Sequence[str]],\n target: Optional[ModuleType] = None,\n ):\n if _managers:\n module_spec = PathFinder.find_spec(fullname, path, target)\n if not module_spec:\n return\n module_origin = module_spec.origin\n if not module_origin:\n return\n module_path = Path(module_origin).resolve()\n\n for manager in reversed(_managers):\n # use path instead of name in case of submodule name conflict\n if (\n fullname in manager.plugins\n or module_path in manager._searched_plugin_names.values()\n ):\n module_spec.loader = PluginLoader(manager, fullname, module_origin)\n return module_spec\n return\n\n\nclass PluginLoader(SourceFileLoader):\n def __init__(self, manager: PluginManager, fullname: str, path) -> None:\n self.manager = manager\n self.loaded = False\n super().__init__(fullname, path)\n\n def create_module(self, spec) -> Optional[ModuleType]:\n if self.name in sys.modules:\n self.loaded = True\n return sys.modules[self.name]\n # return None to use default module creation\n return super().create_module(spec)\n\n def exec_module(self, module: ModuleType) -> None:\n if self.loaded:\n return\n\n # create plugin before executing\n plugin = _new_plugin(self.name, module, self.manager)\n setattr(module, \"__plugin__\", plugin)\n\n # detect parent plugin before entering current plugin context\n parent_plugins = _current_plugin_chain.get()\n for pre_plugin in reversed(parent_plugins):\n if _managers.index(pre_plugin.manager) < _managers.index(self.manager):\n plugin.parent_plugin = pre_plugin\n pre_plugin.sub_plugins.add(plugin)\n break\n\n # enter plugin context\n _plugin_token = _current_plugin_chain.set(parent_plugins + (plugin,))\n\n try:\n super().exec_module(module)\n except Exception:\n _revert_plugin(plugin)\n raise\n finally:\n # leave plugin context\n _current_plugin_chain.reset(_plugin_token)\n\n # get plugin metadata\n metadata: Optional[PluginMetadata] = getattr(module, \"__plugin_meta__\", None)\n plugin.metadata = metadata\n\n return\n\n\nsys.meta_path.insert(0, PluginFinder())\n", "path": "nonebot/plugin/manager.py"}]} | 2,887 | 279 |
gh_patches_debug_27488 | rasdani/github-patches | git_diff | coala__coala-2732 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
WIP: JuliaRequirement: Add installation methods
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `coalib/bears/requirements/JuliaRequirement.py`
Content:
```
1 from coalib.bears.requirements.PackageRequirement import PackageRequirement
2
3
4 class JuliaRequirement(PackageRequirement):
5 """
6 This class is a subclass of ``PackageRequirement``, and helps specifying
7 requirements from ``julia``, without using the manager name.
8 """
9
10 def __init__(self, package, version=""):
11 """
12 Constructs a new ``JuliaRequirement``, using the ``PackageRequirement``
13 constructor.
14
15 >>> pr = JuliaRequirement('Lint', '19.2')
16 >>> pr.manager
17 'julia'
18 >>> pr.package
19 'Lint'
20 >>> pr.version
21 '19.2'
22
23 :param package: A string with the name of the package to be installed.
24 :param version: A version string. Leave empty to specify latest version.
25 """
26 PackageRequirement.__init__(self, 'julia', package, version)
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/coalib/bears/requirements/JuliaRequirement.py b/coalib/bears/requirements/JuliaRequirement.py
--- a/coalib/bears/requirements/JuliaRequirement.py
+++ b/coalib/bears/requirements/JuliaRequirement.py
@@ -1,4 +1,9 @@
+import shlex
+
from coalib.bears.requirements.PackageRequirement import PackageRequirement
+from coalib.misc.Shell import call_without_output
+
+from coala_utils.string_processing import escape
class JuliaRequirement(PackageRequirement):
@@ -24,3 +29,31 @@
:param version: A version string. Leave empty to specify latest version.
"""
PackageRequirement.__init__(self, 'julia', package, version)
+
+ def install_command(self):
+ """
+ Creates the installation command for the instance of the class.
+
+ >>> JuliaRequirement('Lint').install_command()
+ 'julia -e \\'Pkg.add("Lint")\\''
+
+ :return: A string with the installation command.
+ """
+ code = 'Pkg.add("{}")'.format(escape(self.package, '\\"'))
+ args = ('julia', '-e', shlex.quote(code))
+ return ' '.join(args)
+
+ def is_installed(self):
+ """
+ Checks if the dependency is installed.
+
+ :return: ``True`` if dependency is installed, ``False`` otherwise.
+ """
+ # We need to check explicitly if `nothing` is returned, as this happens
+ # when the package is *registered*, but *not installed*. If it's not
+ # even registered, julia will throw an exception which lets julia exit
+ # with an error code different from 0.
+ code = 'Pkg.installed("{}")==nothing?exit(1):exit(0)'.format(
+ escape(self.package, '\\"'))
+ args = ('julia', '-e', code)
+ return not call_without_output(args)
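The two methods added above are self-describing, but a small usage sketch may help; it assumes the `JuliaRequirement` import path from the file listing and a working `julia` binary on PATH, neither of which is guaranteed here.

```
# Sketch of how the patched JuliaRequirement is meant to be used.
from coalib.bears.requirements.JuliaRequirement import JuliaRequirement

req = JuliaRequirement('Lint')

# Per the doctest in the patch, this builds (but does not run) the command:
#   julia -e 'Pkg.add("Lint")'
print(req.install_command())

# is_installed() shells out to julia and maps its exit code to a bool;
# a package that is registered but not installed returns `nothing`, hence exit(1).
if not req.is_installed():
    print("Lint is missing; run:", req.install_command())
```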
| {"golden_diff": "diff --git a/coalib/bears/requirements/JuliaRequirement.py b/coalib/bears/requirements/JuliaRequirement.py\n--- a/coalib/bears/requirements/JuliaRequirement.py\n+++ b/coalib/bears/requirements/JuliaRequirement.py\n@@ -1,4 +1,9 @@\n+import shlex\n+\n from coalib.bears.requirements.PackageRequirement import PackageRequirement\n+from coalib.misc.Shell import call_without_output\n+\n+from coala_utils.string_processing import escape\n \n \n class JuliaRequirement(PackageRequirement):\n@@ -24,3 +29,31 @@\n :param version: A version string. Leave empty to specify latest version.\n \"\"\"\n PackageRequirement.__init__(self, 'julia', package, version)\n+\n+ def install_command(self):\n+ \"\"\"\n+ Creates the installation command for the instance of the class.\n+\n+ >>> JuliaRequirement('Lint').install_command()\n+ 'julia -e \\\\'Pkg.add(\"Lint\")\\\\''\n+\n+ :return: A string with the installation command.\n+ \"\"\"\n+ code = 'Pkg.add(\"{}\")'.format(escape(self.package, '\\\\\"'))\n+ args = ('julia', '-e', shlex.quote(code))\n+ return ' '.join(args)\n+\n+ def is_installed(self):\n+ \"\"\"\n+ Checks if the dependency is installed.\n+\n+ :return: ``True`` if dependency is installed, ``False`` otherwise.\n+ \"\"\"\n+ # We need to check explicitly if `nothing` is returned, as this happens\n+ # when the package is *registered*, but *not installed*. If it's not\n+ # even registered, julia will throw an exception which lets julia exit\n+ # with an error code different from 0.\n+ code = 'Pkg.installed(\"{}\")==nothing?exit(1):exit(0)'.format(\n+ escape(self.package, '\\\\\"'))\n+ args = ('julia', '-e', code)\n+ return not call_without_output(args)\n", "issue": "WIP: JuliaRequirement: Add installation methods\n\n", "before_files": [{"content": "from coalib.bears.requirements.PackageRequirement import PackageRequirement\n\n\nclass JuliaRequirement(PackageRequirement):\n \"\"\"\n This class is a subclass of ``PackageRequirement``, and helps specifying\n requirements from ``julia``, without using the manager name.\n \"\"\"\n\n def __init__(self, package, version=\"\"):\n \"\"\"\n Constructs a new ``JuliaRequirement``, using the ``PackageRequirement``\n constructor.\n\n >>> pr = JuliaRequirement('Lint', '19.2')\n >>> pr.manager\n 'julia'\n >>> pr.package\n 'Lint'\n >>> pr.version\n '19.2'\n\n :param package: A string with the name of the package to be installed.\n :param version: A version string. Leave empty to specify latest version.\n \"\"\"\n PackageRequirement.__init__(self, 'julia', package, version)\n", "path": "coalib/bears/requirements/JuliaRequirement.py"}], "after_files": [{"content": "import shlex\n\nfrom coalib.bears.requirements.PackageRequirement import PackageRequirement\nfrom coalib.misc.Shell import call_without_output\n\nfrom coala_utils.string_processing import escape\n\n\nclass JuliaRequirement(PackageRequirement):\n \"\"\"\n This class is a subclass of ``PackageRequirement``, and helps specifying\n requirements from ``julia``, without using the manager name.\n \"\"\"\n\n def __init__(self, package, version=\"\"):\n \"\"\"\n Constructs a new ``JuliaRequirement``, using the ``PackageRequirement``\n constructor.\n\n >>> pr = JuliaRequirement('Lint', '19.2')\n >>> pr.manager\n 'julia'\n >>> pr.package\n 'Lint'\n >>> pr.version\n '19.2'\n\n :param package: A string with the name of the package to be installed.\n :param version: A version string. 
Leave empty to specify latest version.\n \"\"\"\n PackageRequirement.__init__(self, 'julia', package, version)\n\n def install_command(self):\n \"\"\"\n Creates the installation command for the instance of the class.\n\n >>> JuliaRequirement('Lint').install_command()\n 'julia -e \\\\'Pkg.add(\"Lint\")\\\\''\n\n :return: A string with the installation command.\n \"\"\"\n code = 'Pkg.add(\"{}\")'.format(escape(self.package, '\\\\\"'))\n args = ('julia', '-e', shlex.quote(code))\n return ' '.join(args)\n\n def is_installed(self):\n \"\"\"\n Checks if the dependency is installed.\n\n :return: ``True`` if dependency is installed, ``False`` otherwise.\n \"\"\"\n # We need to check explicitly if `nothing` is returned, as this happens\n # when the package is *registered*, but *not installed*. If it's not\n # even registered, julia will throw an exception which lets julia exit\n # with an error code different from 0.\n code = 'Pkg.installed(\"{}\")==nothing?exit(1):exit(0)'.format(\n escape(self.package, '\\\\\"'))\n args = ('julia', '-e', code)\n return not call_without_output(args)\n", "path": "coalib/bears/requirements/JuliaRequirement.py"}]} | 506 | 446 |
gh_patches_debug_4320 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-2284 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ND failing since at least 2018-05-05
ND has been failing since 2018-05-05
Based on automated runs it appears that ND has not run successfully in 2 days (2018-05-05).
```
/opt/openstates/venv-pupa/lib/python3.5/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use "pip install psycopg2-binary" instead. For details see: <http://initd.org/psycopg/docs/install.html#binary-install-from-pypi>.
""")
02:02:07 INFO scrapelib: GET - http://www.legis.nd.gov/assembly/
02:02:07 CRITICAL pupa: Session(s) 1862 1st Territorial Assembly, 1868-69 Territorial Assembly, 1885 Territorial Assembly, 1877 Territorial Assembly, 1872-73 Territorial Assembly, 1887 Territorial Assembly, 1881 Territorial Assembly, 1889 Territorial Assembly, 1862 2nd Territorial Assembly, 1874-75 Territorial Assembly, 1870-71 Territorial Assembly, 1864-65 Territorial Assembly, 1866-67 Territorial Assembly, 1883 Territorial Assembly, 1867-68 Territorial Assembly, 1879 Territorial Assembly, 1865-66 Territorial Assembly, 1863-64 Territorial Assembly were reported by NorthDakota.get_session_list() but were not found in NorthDakota.legislative_sessions or NorthDakota.ignored_scraped_sessions.
loaded Open States pupa settings...
nd (scrape, import)
people: {}
votes: {}
bills: {}
committees: {}
```
Visit http://bobsled.openstates.org for more info.
--- END ISSUE ---
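The CRITICAL message is pupa's session bookkeeping check: every string returned by `get_session_list()` has to match either a `_scraped_name` in `legislative_sessions` or an entry in `ignored_scraped_sessions`. A rough, illustrative-only sketch of that check (not the actual pupa source; `juris` stands in for the NorthDakota jurisdiction object) makes the failure mode clear:

```
# Illustrative approximation of the validation behind the CRITICAL message.
scraped = set(juris.get_session_list())
known = {s["_scraped_name"] for s in juris.legislative_sessions}
known |= set(juris.ignored_scraped_sessions)

unaccounted = scraped - known
if unaccounted:
    print("Session(s) %s were reported by get_session_list() but were not "
          "found in legislative_sessions or ignored_scraped_sessions."
          % ", ".join(sorted(unaccounted)))
```

The "Territorial Assembly" strings in the log are therefore newly scraped entries that the jurisdiction neither models nor ignores.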
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/nd/__init__.py`
Content:
```
1 from pupa.scrape import Jurisdiction, Organization
2 from .committees import NDCommitteeScraper
3 from .votes import NDVoteScraper
4 from .people import NDPersonScraper
5 from .bills import NDBillScraper
6
7
8 class NorthDakota(Jurisdiction):
9 division_id = "ocd-division/country:us/state:nd"
10 classification = "government"
11 name = "North Dakota"
12 url = "http://www.legis.nd.gov/"
13 scrapers = {
14 'people': NDPersonScraper,
15 'votes': NDVoteScraper,
16 'committees': NDCommitteeScraper,
17 'bills': NDBillScraper,
18 }
19 legislative_sessions = [
20 {
21 "_scraped_name": "62nd Legislative Assembly (2011-12)",
22 "identifier": "62",
23 "name": "62nd Legislative Assembly (2011-2012)",
24 "start_date": "2011-01-04"
25 },
26 {
27 "_scraped_name": "63rd Legislative Assembly (2013-14)",
28 "identifier": "63",
29 "name": "63rd Legislative Assembly (2013-2014)",
30 "start_date": "2013-01-08"
31 },
32 {
33 "_scraped_name": "64th Legislative Assembly (2015-16)",
34 "identifier": "64",
35 "name": "64th Legislative Assembly (2015-2016)",
36 "start_date": "2015-01-08"
37 },
38 {
39 "_scraped_name": "65th Legislative Assembly (2017-18)",
40 "identifier": "65",
41 "name": "65th Legislative Assembly (2017-2018)",
42 "start_date": "2017-01-03",
43 "end_date": "2017-04-27",
44 }
45 ]
46 ignored_scraped_sessions = [
47 "66th Legislative Assembly (2019-20)",
48 "61st Legislative Assembly (2009-10)",
49 "60th Legislative Assembly (2007-08)",
50 "59th Legislative Assembly (2005-06)",
51 "58th Legislative Assembly (2003-04)",
52 "57th Legislative Assembly (2001-02)",
53 "56th Legislative Assembly (1999-2000)",
54 "55th Legislative Assembly (1997-98)",
55 "54th Legislative Assembly (1995-96)",
56 "53rd Legislative Assembly (1993-94)",
57 "52nd Legislative Assembly (1991-92)",
58 "51st Legislative Assembly (1989-90)",
59 "50th Legislative Assembly (1987-88)",
60 "49th Legislative Assembly (1985-86)",
61 "48th Legislative Assembly (1983-84)",
62 "47th Legislative Assembly (1981-82)",
63 "46th Legislative Assembly (1979-80)",
64 "45th Legislative Assembly (1977-78)",
65 "44th Legislative Assembly (1975-76)",
66 "43rd Legislative Assembly (1973-74)",
67 "42nd Legislative Assembly (1971-72)",
68 "41st Legislative Assembly (1969-70)",
69 "40th Legislative Assembly (1967-68)",
70 "39th Legislative Assembly (1965-66)",
71 "38th Legislative Assembly (1963-64)",
72 "37th Legislative Assembly (1961-62)",
73 "36th Legislative Assembly (1959-60)",
74 "35th Legislative Assembly (1957-58)",
75 '1st Legislative Assembly (1889-90)',
76 '2nd Legislative Assembly (1891-92)',
77 '3rd Legislative Assembly (1893-94)',
78 '4th Legislative Assembly (1895-96)',
79 '5th Legislative Assembly (1897-98)',
80 '6th Legislative Assembly (1899-1900)',
81 '7th Legislative Assembly (1901-02)',
82 '8th Legislative Assembly (1903-04)',
83 '9th Legislative Assembly (1905-06)',
84 '10th Legislative Assembly (1907-08)',
85 '11th Legislative Assembly (1909-10)',
86 '12th Legislative Assembly (1911-12)',
87 '13th Legislative Assembly (1913-14)',
88 '14th Legislative Assembly (1915-16)',
89 '15th Legislative Assembly (1917-18)',
90 '16th Legislative Assembly (1919-20)',
91 '17th Legislative Assembly (1921-22)',
92 '18th Legislative Assembly (1923-24)',
93 '19th Legislative Assembly (1925-26)',
94 '20th Legislative Assembly (1927-28)',
95 '21st Legislative Assembly (1929-30)',
96 '22nd Legislative Assembly (1931-32)',
97 '23rd Legislative Assembly (1933-34)',
98 '24th Legislative Assembly (1935-36)',
99 '25th Legislative Assembly (1937-38)',
100 '26th Legislative Assembly (1939-40)',
101 '27th Legislative Assembly (1941-42)',
102 '28th Legislative Assembly (1943-44)',
103 '29th Legislative Assembly (1945-46)',
104 '30th Legislative Assembly (1947-48)',
105 '31st Legislative Assembly (1949-50)',
106 '32nd Legislative Assembly (1951-52)',
107 '33rd Legislative Assembly (1953-54)',
108 '34th Legislative Assembly (1955-56)',
109
110 ]
111
112 def get_organizations(self):
113 legislature_name = "North Dakota Legislative Assembly"
114 lower_chamber_name = "House"
115 lower_seats = 47
116 lower_title = "Senator"
117 upper_chamber_name = "Senate"
118 upper_seats = 47
119 upper_title = "Senator"
120
121 legislature = Organization(name=legislature_name,
122 classification="legislature")
123 upper = Organization(upper_chamber_name, classification='upper',
124 parent_id=legislature._id)
125 lower = Organization(lower_chamber_name, classification='lower',
126 parent_id=legislature._id)
127
128 for n in range(1, upper_seats + 1):
129 upper.add_post(
130 label=str(n), role=upper_title,
131 division_id='{}/sldu:{}'.format(self.division_id, n))
132 for n in range(1, lower_seats + 1):
133 lower.add_post(
134 label=str(n), role=lower_title,
135 division_id='{}/sldl:{}'.format(self.division_id, n))
136
137 yield legislature
138 yield upper
139 yield lower
140
141 def get_session_list(self):
142 import scrapelib
143 import lxml.html
144
145 url = 'http://www.legis.nd.gov/assembly/'
146 html = scrapelib.Scraper().get(url).text
147 doc = lxml.html.fromstring(html)
148 doc.make_links_absolute(url)
149 return doc.xpath("//div[@class='view-content']//a/text()")
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openstates/nd/__init__.py b/openstates/nd/__init__.py
--- a/openstates/nd/__init__.py
+++ b/openstates/nd/__init__.py
@@ -146,4 +146,6 @@
html = scrapelib.Scraper().get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
- return doc.xpath("//div[@class='view-content']//a/text()")
+ sessions = doc.xpath("//div[@class='view-content']//a/text()")
+ sessions = [session for session in sessions if 'Territorial Assembly' not in session]
+ return sessions
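Assuming the patch above, the new list comprehension simply drops those entries before the bookkeeping check runs, for example:

```
# Effect of the added filter on sample scraped strings.
sessions = ["65th Legislative Assembly (2017-18)", "1862 1st Territorial Assembly"]
sessions = [session for session in sessions if 'Territorial Assembly' not in session]
# sessions == ["65th Legislative Assembly (2017-18)"]
```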
| {"golden_diff": "diff --git a/openstates/nd/__init__.py b/openstates/nd/__init__.py\n--- a/openstates/nd/__init__.py\n+++ b/openstates/nd/__init__.py\n@@ -146,4 +146,6 @@\n html = scrapelib.Scraper().get(url).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n- return doc.xpath(\"//div[@class='view-content']//a/text()\")\n+ sessions = doc.xpath(\"//div[@class='view-content']//a/text()\")\n+ sessions = [session for session in sessions if 'Territorial Assembly' not in session]\n+ return sessions\n", "issue": "ND failing since at least 2018-05-05\nND has been failing since 2018-05-05\n\nBased on automated runs it appears that ND has not run successfully in 2 days (2018-05-05).\n\n\n```\n /opt/openstates/venv-pupa/lib/python3.5/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use \"pip install psycopg2-binary\" instead. For details see: <http://initd.org/psycopg/docs/install.html#binary-install-from-pypi>.\n \"\"\")\n02:02:07 INFO scrapelib: GET - http://www.legis.nd.gov/assembly/\n02:02:07 CRITICAL pupa: Session(s) 1862 1st Territorial Assembly, 1868-69 Territorial Assembly, 1885 Territorial Assembly, 1877 Territorial Assembly, 1872-73 Territorial Assembly, 1887 Territorial Assembly, 1881 Territorial Assembly, 1889 Territorial Assembly, 1862 2nd Territorial Assembly, 1874-75 Territorial Assembly, 1870-71 Territorial Assembly, 1864-65 Territorial Assembly, 1866-67 Territorial Assembly, 1883 Territorial Assembly, 1867-68 Territorial Assembly, 1879 Territorial Assembly, 1865-66 Territorial Assembly, 1863-64 Territorial Assembly were reported by NorthDakota.get_session_list() but were not found in NorthDakota.legislative_sessions or NorthDakota.ignored_scraped_sessions.\nloaded Open States pupa settings...\nnd (scrape, import)\n people: {}\n votes: {}\n bills: {}\n committees: {}\n```\n\nVisit http://bobsled.openstates.org for more info.\n\n", "before_files": [{"content": "from pupa.scrape import Jurisdiction, Organization\nfrom .committees import NDCommitteeScraper\nfrom .votes import NDVoteScraper\nfrom .people import NDPersonScraper\nfrom .bills import NDBillScraper\n\n\nclass NorthDakota(Jurisdiction):\n division_id = \"ocd-division/country:us/state:nd\"\n classification = \"government\"\n name = \"North Dakota\"\n url = \"http://www.legis.nd.gov/\"\n scrapers = {\n 'people': NDPersonScraper,\n 'votes': NDVoteScraper,\n 'committees': NDCommitteeScraper,\n 'bills': NDBillScraper,\n }\n legislative_sessions = [\n {\n \"_scraped_name\": \"62nd Legislative Assembly (2011-12)\",\n \"identifier\": \"62\",\n \"name\": \"62nd Legislative Assembly (2011-2012)\",\n \"start_date\": \"2011-01-04\"\n },\n {\n \"_scraped_name\": \"63rd Legislative Assembly (2013-14)\",\n \"identifier\": \"63\",\n \"name\": \"63rd Legislative Assembly (2013-2014)\",\n \"start_date\": \"2013-01-08\"\n },\n {\n \"_scraped_name\": \"64th Legislative Assembly (2015-16)\",\n \"identifier\": \"64\",\n \"name\": \"64th Legislative Assembly (2015-2016)\",\n \"start_date\": \"2015-01-08\"\n },\n {\n \"_scraped_name\": \"65th Legislative Assembly (2017-18)\",\n \"identifier\": \"65\",\n \"name\": \"65th Legislative Assembly (2017-2018)\",\n \"start_date\": \"2017-01-03\",\n \"end_date\": \"2017-04-27\",\n }\n ]\n ignored_scraped_sessions = [\n \"66th Legislative Assembly (2019-20)\",\n \"61st Legislative Assembly (2009-10)\",\n \"60th Legislative Assembly (2007-08)\",\n \"59th Legislative Assembly 
(2005-06)\",\n \"58th Legislative Assembly (2003-04)\",\n \"57th Legislative Assembly (2001-02)\",\n \"56th Legislative Assembly (1999-2000)\",\n \"55th Legislative Assembly (1997-98)\",\n \"54th Legislative Assembly (1995-96)\",\n \"53rd Legislative Assembly (1993-94)\",\n \"52nd Legislative Assembly (1991-92)\",\n \"51st Legislative Assembly (1989-90)\",\n \"50th Legislative Assembly (1987-88)\",\n \"49th Legislative Assembly (1985-86)\",\n \"48th Legislative Assembly (1983-84)\",\n \"47th Legislative Assembly (1981-82)\",\n \"46th Legislative Assembly (1979-80)\",\n \"45th Legislative Assembly (1977-78)\",\n \"44th Legislative Assembly (1975-76)\",\n \"43rd Legislative Assembly (1973-74)\",\n \"42nd Legislative Assembly (1971-72)\",\n \"41st Legislative Assembly (1969-70)\",\n \"40th Legislative Assembly (1967-68)\",\n \"39th Legislative Assembly (1965-66)\",\n \"38th Legislative Assembly (1963-64)\",\n \"37th Legislative Assembly (1961-62)\",\n \"36th Legislative Assembly (1959-60)\",\n \"35th Legislative Assembly (1957-58)\",\n '1st Legislative Assembly (1889-90)',\n '2nd Legislative Assembly (1891-92)',\n '3rd Legislative Assembly (1893-94)',\n '4th Legislative Assembly (1895-96)',\n '5th Legislative Assembly (1897-98)',\n '6th Legislative Assembly (1899-1900)',\n '7th Legislative Assembly (1901-02)',\n '8th Legislative Assembly (1903-04)',\n '9th Legislative Assembly (1905-06)',\n '10th Legislative Assembly (1907-08)',\n '11th Legislative Assembly (1909-10)',\n '12th Legislative Assembly (1911-12)',\n '13th Legislative Assembly (1913-14)',\n '14th Legislative Assembly (1915-16)',\n '15th Legislative Assembly (1917-18)',\n '16th Legislative Assembly (1919-20)',\n '17th Legislative Assembly (1921-22)',\n '18th Legislative Assembly (1923-24)',\n '19th Legislative Assembly (1925-26)',\n '20th Legislative Assembly (1927-28)',\n '21st Legislative Assembly (1929-30)',\n '22nd Legislative Assembly (1931-32)',\n '23rd Legislative Assembly (1933-34)',\n '24th Legislative Assembly (1935-36)',\n '25th Legislative Assembly (1937-38)',\n '26th Legislative Assembly (1939-40)',\n '27th Legislative Assembly (1941-42)',\n '28th Legislative Assembly (1943-44)',\n '29th Legislative Assembly (1945-46)',\n '30th Legislative Assembly (1947-48)',\n '31st Legislative Assembly (1949-50)',\n '32nd Legislative Assembly (1951-52)',\n '33rd Legislative Assembly (1953-54)',\n '34th Legislative Assembly (1955-56)',\n\n ]\n\n def get_organizations(self):\n legislature_name = \"North Dakota Legislative Assembly\"\n lower_chamber_name = \"House\"\n lower_seats = 47\n lower_title = \"Senator\"\n upper_chamber_name = \"Senate\"\n upper_seats = 47\n upper_title = \"Senator\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization(upper_chamber_name, classification='upper',\n parent_id=legislature._id)\n lower = Organization(lower_chamber_name, classification='lower',\n parent_id=legislature._id)\n\n for n in range(1, upper_seats + 1):\n upper.add_post(\n label=str(n), role=upper_title,\n division_id='{}/sldu:{}'.format(self.division_id, n))\n for n in range(1, lower_seats + 1):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n\n yield legislature\n yield upper\n yield lower\n\n def get_session_list(self):\n import scrapelib\n import lxml.html\n\n url = 'http://www.legis.nd.gov/assembly/'\n html = scrapelib.Scraper().get(url).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n return 
doc.xpath(\"//div[@class='view-content']//a/text()\")\n", "path": "openstates/nd/__init__.py"}], "after_files": [{"content": "from pupa.scrape import Jurisdiction, Organization\nfrom .committees import NDCommitteeScraper\nfrom .votes import NDVoteScraper\nfrom .people import NDPersonScraper\nfrom .bills import NDBillScraper\n\n\nclass NorthDakota(Jurisdiction):\n division_id = \"ocd-division/country:us/state:nd\"\n classification = \"government\"\n name = \"North Dakota\"\n url = \"http://www.legis.nd.gov/\"\n scrapers = {\n 'people': NDPersonScraper,\n 'votes': NDVoteScraper,\n 'committees': NDCommitteeScraper,\n 'bills': NDBillScraper,\n }\n legislative_sessions = [\n {\n \"_scraped_name\": \"62nd Legislative Assembly (2011-12)\",\n \"identifier\": \"62\",\n \"name\": \"62nd Legislative Assembly (2011-2012)\",\n \"start_date\": \"2011-01-04\"\n },\n {\n \"_scraped_name\": \"63rd Legislative Assembly (2013-14)\",\n \"identifier\": \"63\",\n \"name\": \"63rd Legislative Assembly (2013-2014)\",\n \"start_date\": \"2013-01-08\"\n },\n {\n \"_scraped_name\": \"64th Legislative Assembly (2015-16)\",\n \"identifier\": \"64\",\n \"name\": \"64th Legislative Assembly (2015-2016)\",\n \"start_date\": \"2015-01-08\"\n },\n {\n \"_scraped_name\": \"65th Legislative Assembly (2017-18)\",\n \"identifier\": \"65\",\n \"name\": \"65th Legislative Assembly (2017-2018)\",\n \"start_date\": \"2017-01-03\",\n \"end_date\": \"2017-04-27\",\n }\n ]\n ignored_scraped_sessions = [\n \"66th Legislative Assembly (2019-20)\",\n \"61st Legislative Assembly (2009-10)\",\n \"60th Legislative Assembly (2007-08)\",\n \"59th Legislative Assembly (2005-06)\",\n \"58th Legislative Assembly (2003-04)\",\n \"57th Legislative Assembly (2001-02)\",\n \"56th Legislative Assembly (1999-2000)\",\n \"55th Legislative Assembly (1997-98)\",\n \"54th Legislative Assembly (1995-96)\",\n \"53rd Legislative Assembly (1993-94)\",\n \"52nd Legislative Assembly (1991-92)\",\n \"51st Legislative Assembly (1989-90)\",\n \"50th Legislative Assembly (1987-88)\",\n \"49th Legislative Assembly (1985-86)\",\n \"48th Legislative Assembly (1983-84)\",\n \"47th Legislative Assembly (1981-82)\",\n \"46th Legislative Assembly (1979-80)\",\n \"45th Legislative Assembly (1977-78)\",\n \"44th Legislative Assembly (1975-76)\",\n \"43rd Legislative Assembly (1973-74)\",\n \"42nd Legislative Assembly (1971-72)\",\n \"41st Legislative Assembly (1969-70)\",\n \"40th Legislative Assembly (1967-68)\",\n \"39th Legislative Assembly (1965-66)\",\n \"38th Legislative Assembly (1963-64)\",\n \"37th Legislative Assembly (1961-62)\",\n \"36th Legislative Assembly (1959-60)\",\n \"35th Legislative Assembly (1957-58)\",\n '1st Legislative Assembly (1889-90)',\n '2nd Legislative Assembly (1891-92)',\n '3rd Legislative Assembly (1893-94)',\n '4th Legislative Assembly (1895-96)',\n '5th Legislative Assembly (1897-98)',\n '6th Legislative Assembly (1899-1900)',\n '7th Legislative Assembly (1901-02)',\n '8th Legislative Assembly (1903-04)',\n '9th Legislative Assembly (1905-06)',\n '10th Legislative Assembly (1907-08)',\n '11th Legislative Assembly (1909-10)',\n '12th Legislative Assembly (1911-12)',\n '13th Legislative Assembly (1913-14)',\n '14th Legislative Assembly (1915-16)',\n '15th Legislative Assembly (1917-18)',\n '16th Legislative Assembly (1919-20)',\n '17th Legislative Assembly (1921-22)',\n '18th Legislative Assembly (1923-24)',\n '19th Legislative Assembly (1925-26)',\n '20th Legislative Assembly (1927-28)',\n '21st Legislative Assembly (1929-30)',\n '22nd 
Legislative Assembly (1931-32)',\n '23rd Legislative Assembly (1933-34)',\n '24th Legislative Assembly (1935-36)',\n '25th Legislative Assembly (1937-38)',\n '26th Legislative Assembly (1939-40)',\n '27th Legislative Assembly (1941-42)',\n '28th Legislative Assembly (1943-44)',\n '29th Legislative Assembly (1945-46)',\n '30th Legislative Assembly (1947-48)',\n '31st Legislative Assembly (1949-50)',\n '32nd Legislative Assembly (1951-52)',\n '33rd Legislative Assembly (1953-54)',\n '34th Legislative Assembly (1955-56)',\n\n ]\n\n def get_organizations(self):\n legislature_name = \"North Dakota Legislative Assembly\"\n lower_chamber_name = \"House\"\n lower_seats = 47\n lower_title = \"Senator\"\n upper_chamber_name = \"Senate\"\n upper_seats = 47\n upper_title = \"Senator\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization(upper_chamber_name, classification='upper',\n parent_id=legislature._id)\n lower = Organization(lower_chamber_name, classification='lower',\n parent_id=legislature._id)\n\n for n in range(1, upper_seats + 1):\n upper.add_post(\n label=str(n), role=upper_title,\n division_id='{}/sldu:{}'.format(self.division_id, n))\n for n in range(1, lower_seats + 1):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n\n yield legislature\n yield upper\n yield lower\n\n def get_session_list(self):\n import scrapelib\n import lxml.html\n\n url = 'http://www.legis.nd.gov/assembly/'\n html = scrapelib.Scraper().get(url).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n sessions = doc.xpath(\"//div[@class='view-content']//a/text()\")\n sessions = [session for session in sessions if 'Territorial Assembly' not in session]\n return sessions\n", "path": "openstates/nd/__init__.py"}]} | 2,880 | 148 |
gh_patches_debug_371 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSpeech-1311 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The speech_recognition example fails to run
Here is the source code:
import paddle
from paddlespeech.cli import ASRExecutor
asr_executor = ASRExecutor()
text = asr_executor(
model='conformer_wenetspeech',
lang='zh',
sample_rate=16000,
config=None, # Set `config` and `ckpt_path` to None to use pretrained model.
ckpt_path=None,
audio_file='input.wav',
force_yes=False,
device=paddle.get_device())
print('ASR Result: \n{}'.format(text))
After running it, the following error is reported:
Traceback (most recent call last):
File "D:\BaiduNetdiskDownload\PaddleSpeech-r0.1.0\demos\speech_recognition\speech_recognition.py", line 5, in <module>
text = asr_executor(
File "D:\BaiduNetdiskDownload\PaddleSpeech-r0.1.0\paddlespeech\cli\asr\infer.py", line 449, in __call__
self._init_from_path(model, lang, sample_rate, config, ckpt_path)
File "D:\BaiduNetdiskDownload\PaddleSpeech-r0.1.0\paddlespeech\cli\asr\infer.py", line 166, in _init_from_path
self.config.merge_from_file(self.cfg_path)
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 212, in merge_from_file
cfg = self.load_cfg(f)
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 349, in load_cfg
return cls._load_cfg_from_file(cfg_file_obj_or_str)
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 358, in _load_cfg_from_file
return cls._load_cfg_from_yaml_str(file_obj.read())
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 371, in _load_cfg_from_yaml_str
return cls(cfg_as_dict)
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 86, in __init__
init_dict = self._create_config_tree_from_dict(init_dict, key_list)
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 126, in _create_config_tree_from_dict
dic[k] = cls(v, key_list=key_list + [k])
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 86, in __init__
init_dict = self._create_config_tree_from_dict(init_dict, key_list)
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 126, in _create_config_tree_from_dict
dic[k] = cls(v, key_list=key_list + [k])
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 86, in __init__
init_dict = self._create_config_tree_from_dict(init_dict, key_list)
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 129, in _create_config_tree_from_dict
_assert_with_logging(
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 521, in _assert_with_logging
assert cond, msg
AssertionError: Key model.model_conf.ctc_grad_norm_type with value <class 'NoneType'> is not a valid type; valid types: {<class 'list'>, <class 'tuple'>, <class 'str'>, <class 'int'>, <class 'float'>, <class 'bool'>}
I installed paddlepaddle on Windows following the documentation and ran this test directly after downloading the PaddleSpeech-r0.1.0 source code. How can I resolve this problem? Thanks.
--- END ISSUE ---
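The traceback reduces to yacs rejecting a `None` (YAML `null`) value while building the config tree for the pretrained model. A minimal, hypothetical reproduction outside PaddleSpeech (only the failing key is taken from the report) would be:

```
# Hypothetical reproduction of the AssertionError in the traceback above.
from yacs.config import CfgNode

cfg = CfgNode({"model": {"model_conf": {"ctc_grad_norm_type": None}}})
# On yacs builds whose valid-type set excludes NoneType this raises:
#   AssertionError: Key model.model_conf.ctc_grad_norm_type with value
#   <class 'NoneType'> is not a valid type; valid types: {...}
```

Whether the assertion fires evidently depends on the installed yacs version rather than on anything in the user's own script.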
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import contextlib
15 import inspect
16 import io
17 import os
18 import subprocess as sp
19 import sys
20 from pathlib import Path
21
22 from setuptools import Command
23 from setuptools import find_packages
24 from setuptools import setup
25 from setuptools.command.develop import develop
26 from setuptools.command.install import install
27
28 HERE = Path(os.path.abspath(os.path.dirname(__file__)))
29
30 requirements = {
31 "install": [
32 "editdistance",
33 "g2p_en",
34 "g2pM",
35 "h5py",
36 "inflect",
37 "jieba",
38 "jsonlines",
39 "kaldiio",
40 "librosa",
41 "loguru",
42 "matplotlib",
43 "nara_wpe",
44 "nltk",
45 "pandas",
46 "paddleaudio",
47 "paddlenlp",
48 "paddlespeech_feat",
49 "praatio==5.0.0",
50 "pypinyin",
51 "python-dateutil",
52 "pyworld",
53 "resampy==0.2.2",
54 "sacrebleu",
55 "scipy",
56 "sentencepiece~=0.1.96",
57 "soundfile~=0.10",
58 "textgrid",
59 "timer",
60 "tqdm",
61 "typeguard",
62 "visualdl",
63 "webrtcvad",
64 "yacs",
65 ],
66 "develop": [
67 "ConfigArgParse",
68 "coverage",
69 "gpustat",
70 "paddlespeech_ctcdecoders",
71 "phkit",
72 "Pillow",
73 "pybind11",
74 "pypi-kenlm",
75 "snakeviz",
76 "sox",
77 "soxbindings",
78 "unidecode",
79 "yq",
80 "pre-commit",
81 ]
82 }
83
84
85 @contextlib.contextmanager
86 def pushd(new_dir):
87 old_dir = os.getcwd()
88 os.chdir(new_dir)
89 print(new_dir)
90 yield
91 os.chdir(old_dir)
92 print(old_dir)
93
94
95 def read(*names, **kwargs):
96 with io.open(
97 os.path.join(os.path.dirname(__file__), *names),
98 encoding=kwargs.get("encoding", "utf8")) as fp:
99 return fp.read()
100
101
102 def check_call(cmd: str, shell=False, executable=None):
103 try:
104 sp.check_call(
105 cmd.split(),
106 shell=shell,
107 executable="/bin/bash" if shell else executable)
108 except sp.CalledProcessError as e:
109 print(
110 f"{__file__}:{inspect.currentframe().f_lineno}: CMD: {cmd}, Error:",
111 e.output,
112 file=sys.stderr)
113 raise e
114
115
116 def _remove(files: str):
117 for f in files:
118 f.unlink()
119
120
121 def _post_install(install_lib_dir):
122 # tools/make
123 tool_dir = HERE / "tools"
124 _remove(tool_dir.glob("*.done"))
125 with pushd(tool_dir):
126 check_call("make")
127 print("tools install.")
128
129 # ctcdecoder
130 ctcdecoder_dir = HERE / 'paddlespeech/s2t/decoders/ctcdecoder/swig'
131 with pushd(ctcdecoder_dir):
132 check_call("bash -e setup.sh")
133 print("ctcdecoder install.")
134
135
136 class DevelopCommand(develop):
137 def run(self):
138 develop.run(self)
139 # must after develop.run, or pkg install by shell will not see
140 self.execute(_post_install, (self.install_lib, ), msg="Post Install...")
141
142
143 class InstallCommand(install):
144 def run(self):
145 install.run(self)
146
147
148 # cmd: python setup.py upload
149 class UploadCommand(Command):
150 description = "Build and publish the package."
151 user_options = []
152
153 def initialize_options(self):
154 pass
155
156 def finalize_options(self):
157 pass
158
159 def run(self):
160 try:
161 print("Removing previous dist/ ...")
162 shutil.rmtree(str(HERE / "dist"))
163 except OSError:
164 pass
165 print("Building source distribution...")
166 sp.check_call([sys.executable, "setup.py", "sdist"])
167 print("Uploading package to PyPi...")
168 sp.check_call(["twine", "upload", "dist/*"])
169 sys.exit()
170
171
172 setup_info = dict(
173 # Metadata
174 name='paddlespeech',
175 version='0.1.0',
176 author='PaddlePaddle Speech and Language Team',
177 author_email='[email protected]',
178 url='https://github.com/PaddlePaddle/PaddleSpeech',
179 license='Apache 2.0',
180 description='Speech tools and models based on Paddlepaddle',
181 long_description=read("README.md"),
182 long_description_content_type="text/markdown",
183 keywords=[
184 "speech",
185 "asr",
186 "tts",
187 "speaker verfication",
188 "speech classfication",
189 "text frontend",
190 "MFA",
191 "paddlepaddle",
192 "beam search",
193 "ctcdecoder",
194 "deepspeech2",
195 "transformer",
196 "conformer",
197 "fastspeech",
198 "vocoder",
199 "pwgan",
200 "gan",
201 ],
202 python_requires='>=3.7',
203 install_requires=requirements["install"],
204 extras_require={
205 'develop':
206 requirements["develop"],
207 'doc': [
208 "sphinx", "sphinx-rtd-theme", "numpydoc", "myst_parser",
209 "recommonmark>=0.5.0", "sphinx-markdown-tables", "sphinx-autobuild"
210 ],
211 },
212 cmdclass={
213 'develop': DevelopCommand,
214 'install': InstallCommand,
215 'upload': UploadCommand,
216 },
217
218 # Package info
219 packages=find_packages(include=('paddlespeech*')),
220 zip_safe=True,
221 classifiers=[
222 'Development Status :: 5 - Production/Stable',
223 'Intended Audience :: Developers',
224 'Intended Audience :: Science/Research',
225 'Topic :: Scientific/Engineering :: Artificial Intelligence',
226 'License :: OSI Approved :: Apache Software License',
227 'Programming Language :: Python',
228 'Programming Language :: Python :: 3',
229 'Programming Language :: Python :: 3.7',
230 'Programming Language :: Python :: 3.8',
231 'Programming Language :: Python :: 3.9',
232 ],
233 entry_points={
234 'console_scripts': ['paddlespeech=paddlespeech.cli.entry:_execute']
235 })
236
237 setup(**setup_info)
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,7 @@
"typeguard",
"visualdl",
"webrtcvad",
- "yacs",
+ "yacs~=0.1.8",
],
"develop": [
"ConfigArgParse",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,7 +61,7 @@\n \"typeguard\",\n \"visualdl\",\n \"webrtcvad\",\n- \"yacs\",\n+ \"yacs~=0.1.8\",\n ],\n \"develop\": [\n \"ConfigArgParse\",\n", "issue": "speech_recognition\u793a\u4f8b\u8fd0\u884c\u51fa\u9519\n\u8fd9\u662f\u6e90\u7801\uff1a\r\nimport paddle\r\nfrom paddlespeech.cli import ASRExecutor\r\n\r\nasr_executor = ASRExecutor()\r\ntext = asr_executor(\r\n model='conformer_wenetspeech',\r\n lang='zh',\r\n sample_rate=16000,\r\n config=None, # Set `config` and `ckpt_path` to None to use pretrained model.\r\n ckpt_path=None,\r\n audio_file='input.wav',\r\n force_yes=False,\r\n device=paddle.get_device())\r\nprint('ASR Result: \\n{}'.format(text))\r\n\r\n\u8fd0\u884c\u540e\u63d0\u793a\uff1a\r\nTraceback (most recent call last):\r\n File \"D:\\BaiduNetdiskDownload\\PaddleSpeech-r0.1.0\\demos\\speech_recognition\\speech_recognition.py\", line 5, in <module>\r\n text = asr_executor(\r\n File \"D:\\BaiduNetdiskDownload\\PaddleSpeech-r0.1.0\\paddlespeech\\cli\\asr\\infer.py\", line 449, in __call__\r\n self._init_from_path(model, lang, sample_rate, config, ckpt_path)\r\n File \"D:\\BaiduNetdiskDownload\\PaddleSpeech-r0.1.0\\paddlespeech\\cli\\asr\\infer.py\", line 166, in _init_from_path\r\n self.config.merge_from_file(self.cfg_path)\r\n File \"D:\\SoftWare\\Anaconda\\lib\\site-packages\\yacs\\config.py\", line 212, in merge_from_file\r\n cfg = self.load_cfg(f)\r\n File \"D:\\SoftWare\\Anaconda\\lib\\site-packages\\yacs\\config.py\", line 349, in load_cfg\r\n return cls._load_cfg_from_file(cfg_file_obj_or_str)\r\n File \"D:\\SoftWare\\Anaconda\\lib\\site-packages\\yacs\\config.py\", line 358, in _load_cfg_from_file\r\n return cls._load_cfg_from_yaml_str(file_obj.read())\r\n File \"D:\\SoftWare\\Anaconda\\lib\\site-packages\\yacs\\config.py\", line 371, in _load_cfg_from_yaml_str\r\n return cls(cfg_as_dict)\r\n File \"D:\\SoftWare\\Anaconda\\lib\\site-packages\\yacs\\config.py\", line 86, in __init__\r\n init_dict = self._create_config_tree_from_dict(init_dict, key_list)\r\n File \"D:\\SoftWare\\Anaconda\\lib\\site-packages\\yacs\\config.py\", line 126, in _create_config_tree_from_dict\r\n dic[k] = cls(v, key_list=key_list + [k])\r\n File \"D:\\SoftWare\\Anaconda\\lib\\site-packages\\yacs\\config.py\", line 86, in __init__\r\n init_dict = self._create_config_tree_from_dict(init_dict, key_list)\r\n File \"D:\\SoftWare\\Anaconda\\lib\\site-packages\\yacs\\config.py\", line 126, in _create_config_tree_from_dict\r\n dic[k] = cls(v, key_list=key_list + [k])\r\n File \"D:\\SoftWare\\Anaconda\\lib\\site-packages\\yacs\\config.py\", line 86, in __init__\r\n init_dict = self._create_config_tree_from_dict(init_dict, key_list)\r\n File \"D:\\SoftWare\\Anaconda\\lib\\site-packages\\yacs\\config.py\", line 129, in _create_config_tree_from_dict\r\n _assert_with_logging(\r\n File \"D:\\SoftWare\\Anaconda\\lib\\site-packages\\yacs\\config.py\", line 521, in _assert_with_logging\r\n assert cond, msg\r\nAssertionError: Key model.model_conf.ctc_grad_norm_type with value <class 'NoneType'> is not a valid type; valid types: {<class 'list'>, <class 'tuple'>, <class 'str'>, <class 'int'>, <class 'float'>, <class 
'bool'>}\r\n\r\n\u76ee\u524d\u662f\u6309\u7167\u6587\u6863\u5728windows\u4e0b\u8fdb\u884c\u7684paddlepaddle\u5b89\u88c5\uff0c\u5e76\u4e0b\u8f7d\u4e86PaddleSpeech-r0.1.0\u6e90\u7801\u540e\u76f4\u63a5\u8fdb\u884c\u7684\u6d4b\u8bd5\uff0c\u8bf7\u95ee\u6211\u8be5\u5982\u4f55\u89e3\u51b3\u8fd9\u4e2a\u95ee\u9898\uff0c\u8c22\u8c22\n", "before_files": [{"content": "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport contextlib\nimport inspect\nimport io\nimport os\nimport subprocess as sp\nimport sys\nfrom pathlib import Path\n\nfrom setuptools import Command\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.develop import develop\nfrom setuptools.command.install import install\n\nHERE = Path(os.path.abspath(os.path.dirname(__file__)))\n\nrequirements = {\n \"install\": [\n \"editdistance\",\n \"g2p_en\",\n \"g2pM\",\n \"h5py\",\n \"inflect\",\n \"jieba\",\n \"jsonlines\",\n \"kaldiio\",\n \"librosa\",\n \"loguru\",\n \"matplotlib\",\n \"nara_wpe\",\n \"nltk\",\n \"pandas\",\n \"paddleaudio\",\n \"paddlenlp\",\n \"paddlespeech_feat\",\n \"praatio==5.0.0\",\n \"pypinyin\",\n \"python-dateutil\",\n \"pyworld\",\n \"resampy==0.2.2\",\n \"sacrebleu\",\n \"scipy\",\n \"sentencepiece~=0.1.96\",\n \"soundfile~=0.10\",\n \"textgrid\",\n \"timer\",\n \"tqdm\",\n \"typeguard\",\n \"visualdl\",\n \"webrtcvad\",\n \"yacs\",\n ],\n \"develop\": [\n \"ConfigArgParse\",\n \"coverage\",\n \"gpustat\",\n \"paddlespeech_ctcdecoders\",\n \"phkit\",\n \"Pillow\",\n \"pybind11\",\n \"pypi-kenlm\",\n \"snakeviz\",\n \"sox\",\n \"soxbindings\",\n \"unidecode\",\n \"yq\",\n \"pre-commit\",\n ]\n}\n\n\[email protected]\ndef pushd(new_dir):\n old_dir = os.getcwd()\n os.chdir(new_dir)\n print(new_dir)\n yield\n os.chdir(old_dir)\n print(old_dir)\n\n\ndef read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\")) as fp:\n return fp.read()\n\n\ndef check_call(cmd: str, shell=False, executable=None):\n try:\n sp.check_call(\n cmd.split(),\n shell=shell,\n executable=\"/bin/bash\" if shell else executable)\n except sp.CalledProcessError as e:\n print(\n f\"{__file__}:{inspect.currentframe().f_lineno}: CMD: {cmd}, Error:\",\n e.output,\n file=sys.stderr)\n raise e\n\n\ndef _remove(files: str):\n for f in files:\n f.unlink()\n\n\ndef _post_install(install_lib_dir):\n # tools/make\n tool_dir = HERE / \"tools\"\n _remove(tool_dir.glob(\"*.done\"))\n with pushd(tool_dir):\n check_call(\"make\")\n print(\"tools install.\")\n\n # ctcdecoder\n ctcdecoder_dir = HERE / 'paddlespeech/s2t/decoders/ctcdecoder/swig'\n with pushd(ctcdecoder_dir):\n check_call(\"bash -e setup.sh\")\n print(\"ctcdecoder install.\")\n\n\nclass DevelopCommand(develop):\n def run(self):\n develop.run(self)\n # must after develop.run, or pkg install by shell will not see\n self.execute(_post_install, (self.install_lib, ), msg=\"Post Install...\")\n\n\nclass 
InstallCommand(install):\n def run(self):\n install.run(self)\n\n\n # cmd: python setup.py upload\nclass UploadCommand(Command):\n description = \"Build and publish the package.\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n print(\"Removing previous dist/ ...\")\n shutil.rmtree(str(HERE / \"dist\"))\n except OSError:\n pass\n print(\"Building source distribution...\")\n sp.check_call([sys.executable, \"setup.py\", \"sdist\"])\n print(\"Uploading package to PyPi...\")\n sp.check_call([\"twine\", \"upload\", \"dist/*\"])\n sys.exit()\n\n\nsetup_info = dict(\n # Metadata\n name='paddlespeech',\n version='0.1.0',\n author='PaddlePaddle Speech and Language Team',\n author_email='[email protected]',\n url='https://github.com/PaddlePaddle/PaddleSpeech',\n license='Apache 2.0',\n description='Speech tools and models based on Paddlepaddle',\n long_description=read(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n keywords=[\n \"speech\",\n \"asr\",\n \"tts\",\n \"speaker verfication\",\n \"speech classfication\",\n \"text frontend\",\n \"MFA\",\n \"paddlepaddle\",\n \"beam search\",\n \"ctcdecoder\",\n \"deepspeech2\",\n \"transformer\",\n \"conformer\",\n \"fastspeech\",\n \"vocoder\",\n \"pwgan\",\n \"gan\",\n ],\n python_requires='>=3.7',\n install_requires=requirements[\"install\"],\n extras_require={\n 'develop':\n requirements[\"develop\"],\n 'doc': [\n \"sphinx\", \"sphinx-rtd-theme\", \"numpydoc\", \"myst_parser\",\n \"recommonmark>=0.5.0\", \"sphinx-markdown-tables\", \"sphinx-autobuild\"\n ],\n },\n cmdclass={\n 'develop': DevelopCommand,\n 'install': InstallCommand,\n 'upload': UploadCommand,\n },\n\n # Package info\n packages=find_packages(include=('paddlespeech*')),\n zip_safe=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n entry_points={\n 'console_scripts': ['paddlespeech=paddlespeech.cli.entry:_execute']\n })\n\nsetup(**setup_info)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport contextlib\nimport inspect\nimport io\nimport os\nimport subprocess as sp\nimport sys\nfrom pathlib import Path\n\nfrom setuptools import Command\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.develop import develop\nfrom setuptools.command.install import install\n\nHERE = Path(os.path.abspath(os.path.dirname(__file__)))\n\nrequirements = {\n \"install\": [\n \"editdistance\",\n \"g2p_en\",\n \"g2pM\",\n \"h5py\",\n \"inflect\",\n \"jieba\",\n \"jsonlines\",\n \"kaldiio\",\n \"librosa\",\n \"loguru\",\n \"matplotlib\",\n \"nara_wpe\",\n \"nltk\",\n \"pandas\",\n \"paddleaudio\",\n \"paddlenlp\",\n \"paddlespeech_feat\",\n \"praatio==5.0.0\",\n \"pypinyin\",\n \"python-dateutil\",\n \"pyworld\",\n \"resampy==0.2.2\",\n \"sacrebleu\",\n \"scipy\",\n \"sentencepiece~=0.1.96\",\n \"soundfile~=0.10\",\n \"textgrid\",\n \"timer\",\n \"tqdm\",\n \"typeguard\",\n \"visualdl\",\n \"webrtcvad\",\n \"yacs~=0.1.8\",\n ],\n \"develop\": [\n \"ConfigArgParse\",\n \"coverage\",\n \"gpustat\",\n \"paddlespeech_ctcdecoders\",\n \"phkit\",\n \"Pillow\",\n \"pybind11\",\n \"pypi-kenlm\",\n \"snakeviz\",\n \"sox\",\n \"soxbindings\",\n \"unidecode\",\n \"yq\",\n \"pre-commit\",\n ]\n}\n\n\[email protected]\ndef pushd(new_dir):\n old_dir = os.getcwd()\n os.chdir(new_dir)\n print(new_dir)\n yield\n os.chdir(old_dir)\n print(old_dir)\n\n\ndef read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\")) as fp:\n return fp.read()\n\n\ndef check_call(cmd: str, shell=False, executable=None):\n try:\n sp.check_call(\n cmd.split(),\n shell=shell,\n executable=\"/bin/bash\" if shell else executable)\n except sp.CalledProcessError as e:\n print(\n f\"{__file__}:{inspect.currentframe().f_lineno}: CMD: {cmd}, Error:\",\n e.output,\n file=sys.stderr)\n raise e\n\n\ndef _remove(files: str):\n for f in files:\n f.unlink()\n\n\ndef _post_install(install_lib_dir):\n # tools/make\n tool_dir = HERE / \"tools\"\n _remove(tool_dir.glob(\"*.done\"))\n with pushd(tool_dir):\n check_call(\"make\")\n print(\"tools install.\")\n\n # ctcdecoder\n ctcdecoder_dir = HERE / 'paddlespeech/s2t/decoders/ctcdecoder/swig'\n with pushd(ctcdecoder_dir):\n check_call(\"bash -e setup.sh\")\n print(\"ctcdecoder install.\")\n\n\nclass DevelopCommand(develop):\n def run(self):\n develop.run(self)\n # must after develop.run, or pkg install by shell will not see\n self.execute(_post_install, (self.install_lib, ), msg=\"Post Install...\")\n\n\nclass InstallCommand(install):\n def run(self):\n install.run(self)\n\n\n # cmd: python setup.py upload\nclass UploadCommand(Command):\n description = \"Build and publish the package.\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n print(\"Removing previous dist/ ...\")\n shutil.rmtree(str(HERE / \"dist\"))\n except OSError:\n pass\n 
print(\"Building source distribution...\")\n sp.check_call([sys.executable, \"setup.py\", \"sdist\"])\n print(\"Uploading package to PyPi...\")\n sp.check_call([\"twine\", \"upload\", \"dist/*\"])\n sys.exit()\n\n\nsetup_info = dict(\n # Metadata\n name='paddlespeech',\n version='0.1.0',\n author='PaddlePaddle Speech and Language Team',\n author_email='[email protected]',\n url='https://github.com/PaddlePaddle/PaddleSpeech',\n license='Apache 2.0',\n description='Speech tools and models based on Paddlepaddle',\n long_description=read(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n keywords=[\n \"speech\",\n \"asr\",\n \"tts\",\n \"speaker verfication\",\n \"speech classfication\",\n \"text frontend\",\n \"MFA\",\n \"paddlepaddle\",\n \"beam search\",\n \"ctcdecoder\",\n \"deepspeech2\",\n \"transformer\",\n \"conformer\",\n \"fastspeech\",\n \"vocoder\",\n \"pwgan\",\n \"gan\",\n ],\n python_requires='>=3.7',\n install_requires=requirements[\"install\"],\n extras_require={\n 'develop':\n requirements[\"develop\"],\n 'doc': [\n \"sphinx\", \"sphinx-rtd-theme\", \"numpydoc\", \"myst_parser\",\n \"recommonmark>=0.5.0\", \"sphinx-markdown-tables\", \"sphinx-autobuild\"\n ],\n },\n cmdclass={\n 'develop': DevelopCommand,\n 'install': InstallCommand,\n 'upload': UploadCommand,\n },\n\n # Package info\n packages=find_packages(include=('paddlespeech*')),\n zip_safe=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n entry_points={\n 'console_scripts': ['paddlespeech=paddlespeech.cli.entry:_execute']\n })\n\nsetup(**setup_info)\n", "path": "setup.py"}]} | 3,316 | 80 |
gh_patches_debug_8769 | rasdani/github-patches | git_diff | translate__translate-3429 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Multistring doesn't work as a python2 string
Previously you were able to do ~~`str(multistring("foo"))`~~ but it seems this no longer works.
EDIT: apologies that was a bad example it should have read something like `str(multistring("é"))`
I'm wondering if we should add py2 compat similar to the one in django (https://docs.djangoproject.com/en/1.9/_modules/django/utils/six/#python_2_unicode_compatible)
@claudep any thoughts on this?
--- END ISSUE ---
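The failure is easy to reproduce on Python 2, where `str()` on a unicode subclass encodes with the default ASCII codec; as the report's edit notes, only the non-ASCII example matters:

```
# Reproduction of the reported behaviour (Python 2 semantics in comments).
from translate.misc.multistring import multistring

s = multistring(u"é")
str(s)   # Python 2, with the class as shown below: raises UnicodeEncodeError ('ascii' codec)
         # Python 3: returns 'é' and is unaffected
```

For reference, the Django decorator linked above works by aliasing the class's `__str__` to `__unicode__` and then redefining `__str__` to return `self.__unicode__().encode('utf-8')` when running on Python 2.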
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `translate/misc/multistring.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2006 Zuza Software Foundation
4 #
5 # This file is part of translate.
6 #
7 # translate is free software; you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation; either version 2 of the License, or
10 # (at your option) any later version.
11 #
12 # translate is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with this program; if not, see <http://www.gnu.org/licenses/>.
19
20 """Supports a hybrid Unicode string that can also have a list of alternate
21 strings in the strings attribute
22 """
23
24 import six
25
26
27 class multistring(six.text_type):
28
29 def __new__(newtype, string=u"", *args, **kwargs):
30 if isinstance(string, list):
31 if not string:
32 raise ValueError("multistring must contain at least one string")
33 mainstring = string[0]
34 newstring = multistring.__new__(newtype, string[0])
35 newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]]
36 else:
37 newstring = six.text_type.__new__(newtype, string)
38 newstring.strings = [newstring]
39 return newstring
40
41 def __init__(self, *args, **kwargs):
42 super(multistring, self).__init__()
43 if not hasattr(self, "strings"):
44 self.strings = []
45
46 def __cmp__(self, otherstring):
47 def cmp_compat(s1, s2):
48 # Python 3 compatible cmp() equivalent
49 return (s1 > s2) - (s1 < s2)
50 if isinstance(otherstring, multistring):
51 parentcompare = cmp_compat(six.text_type(self), otherstring)
52 if parentcompare:
53 return parentcompare
54 else:
55 return cmp_compat(self.strings[1:], otherstring.strings[1:])
56 elif isinstance(otherstring, six.text_type):
57 return cmp_compat(six.text_type(self), otherstring)
58 elif isinstance(otherstring, bytes):
59 return cmp_compat(self.encode('utf-8'), otherstring)
60 elif isinstance(otherstring, list) and otherstring:
61 return cmp_compat(self, multistring(otherstring))
62 else:
63 return cmp_compat(str(type(self)), str(type(otherstring)))
64
65 def __hash__(self):
66 return hash(''.join(self.strings))
67
68 def __ne__(self, otherstring):
69 return self.__cmp__(otherstring) != 0
70
71 def __eq__(self, otherstring):
72 return self.__cmp__(otherstring) == 0
73
74 def __repr__(self):
75 _repr = "multistring([" + u",".join(self.strings) + "])"
76 return _repr.encode('utf-8') if six.PY2 else _repr
77
78 def replace(self, old, new, count=None):
79 if count is None:
80 newstr = multistring(super(multistring, self).replace(old, new))
81 else:
82 newstr = multistring(super(multistring, self).replace(old, new, count))
83 for s in self.strings[1:]:
84 if count is None:
85 newstr.strings.append(s.replace(old, new))
86 else:
87 newstr.strings.append(s.replace(old, new, count))
88 return newstr
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/translate/misc/multistring.py b/translate/misc/multistring.py
--- a/translate/misc/multistring.py
+++ b/translate/misc/multistring.py
@@ -75,6 +75,11 @@
_repr = "multistring([" + u",".join(self.strings) + "])"
return _repr.encode('utf-8') if six.PY2 else _repr
+ def __str__(self):
+ if six.PY2:
+ return self.encode('utf-8')
+ return super(multistring, self).__str__()
+
def replace(self, old, new, count=None):
if count is None:
newstr = multistring(super(multistring, self).replace(old, new))
| {"golden_diff": "diff --git a/translate/misc/multistring.py b/translate/misc/multistring.py\n--- a/translate/misc/multistring.py\n+++ b/translate/misc/multistring.py\n@@ -75,6 +75,11 @@\n _repr = \"multistring([\" + u\",\".join(self.strings) + \"])\"\n return _repr.encode('utf-8') if six.PY2 else _repr\n \n+ def __str__(self):\n+ if six.PY2:\n+ return self.encode('utf-8')\n+ return super(multistring, self).__str__()\n+\n def replace(self, old, new, count=None):\n if count is None:\n newstr = multistring(super(multistring, self).replace(old, new))\n", "issue": "Multistring doesnt work as a python2 string\nPreviously you were able to so ~~`str(multistring(\"foo\"))`~~ but it seems this no longer works.\n\nEDIT: apologies that was a bad example it should have read something like `str(multistring(\"\u00e9\"))`\n\nIm wondering if we should add py2 compat similar to the one in django (https://docs.djangoproject.com/en/1.9/_modules/django/utils/six/#python_2_unicode_compatible)\n\n@claudep any thoughts on this?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Supports a hybrid Unicode string that can also have a list of alternate\nstrings in the strings attribute\n\"\"\"\n\nimport six\n\n\nclass multistring(six.text_type):\n\n def __new__(newtype, string=u\"\", *args, **kwargs):\n if isinstance(string, list):\n if not string:\n raise ValueError(\"multistring must contain at least one string\")\n mainstring = string[0]\n newstring = multistring.__new__(newtype, string[0])\n newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]]\n else:\n newstring = six.text_type.__new__(newtype, string)\n newstring.strings = [newstring]\n return newstring\n\n def __init__(self, *args, **kwargs):\n super(multistring, self).__init__()\n if not hasattr(self, \"strings\"):\n self.strings = []\n\n def __cmp__(self, otherstring):\n def cmp_compat(s1, s2):\n # Python 3 compatible cmp() equivalent\n return (s1 > s2) - (s1 < s2)\n if isinstance(otherstring, multistring):\n parentcompare = cmp_compat(six.text_type(self), otherstring)\n if parentcompare:\n return parentcompare\n else:\n return cmp_compat(self.strings[1:], otherstring.strings[1:])\n elif isinstance(otherstring, six.text_type):\n return cmp_compat(six.text_type(self), otherstring)\n elif isinstance(otherstring, bytes):\n return cmp_compat(self.encode('utf-8'), otherstring)\n elif isinstance(otherstring, list) and otherstring:\n return cmp_compat(self, multistring(otherstring))\n else:\n return cmp_compat(str(type(self)), str(type(otherstring)))\n\n def __hash__(self):\n return hash(''.join(self.strings))\n\n def __ne__(self, otherstring):\n return self.__cmp__(otherstring) != 0\n\n def __eq__(self, otherstring):\n return self.__cmp__(otherstring) == 0\n\n def __repr__(self):\n 
_repr = \"multistring([\" + u\",\".join(self.strings) + \"])\"\n return _repr.encode('utf-8') if six.PY2 else _repr\n\n def replace(self, old, new, count=None):\n if count is None:\n newstr = multistring(super(multistring, self).replace(old, new))\n else:\n newstr = multistring(super(multistring, self).replace(old, new, count))\n for s in self.strings[1:]:\n if count is None:\n newstr.strings.append(s.replace(old, new))\n else:\n newstr.strings.append(s.replace(old, new, count))\n return newstr\n", "path": "translate/misc/multistring.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Supports a hybrid Unicode string that can also have a list of alternate\nstrings in the strings attribute\n\"\"\"\n\nimport six\n\n\nclass multistring(six.text_type):\n\n def __new__(newtype, string=u\"\", *args, **kwargs):\n if isinstance(string, list):\n if not string:\n raise ValueError(\"multistring must contain at least one string\")\n mainstring = string[0]\n newstring = multistring.__new__(newtype, string[0])\n newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]]\n else:\n newstring = six.text_type.__new__(newtype, string)\n newstring.strings = [newstring]\n return newstring\n\n def __init__(self, *args, **kwargs):\n super(multistring, self).__init__()\n if not hasattr(self, \"strings\"):\n self.strings = []\n\n def __cmp__(self, otherstring):\n def cmp_compat(s1, s2):\n # Python 3 compatible cmp() equivalent\n return (s1 > s2) - (s1 < s2)\n if isinstance(otherstring, multistring):\n parentcompare = cmp_compat(six.text_type(self), otherstring)\n if parentcompare:\n return parentcompare\n else:\n return cmp_compat(self.strings[1:], otherstring.strings[1:])\n elif isinstance(otherstring, six.text_type):\n return cmp_compat(six.text_type(self), otherstring)\n elif isinstance(otherstring, bytes):\n return cmp_compat(self.encode('utf-8'), otherstring)\n elif isinstance(otherstring, list) and otherstring:\n return cmp_compat(self, multistring(otherstring))\n else:\n return cmp_compat(str(type(self)), str(type(otherstring)))\n\n def __hash__(self):\n return hash(''.join(self.strings))\n\n def __ne__(self, otherstring):\n return self.__cmp__(otherstring) != 0\n\n def __eq__(self, otherstring):\n return self.__cmp__(otherstring) == 0\n\n def __repr__(self):\n _repr = \"multistring([\" + u\",\".join(self.strings) + \"])\"\n return _repr.encode('utf-8') if six.PY2 else _repr\n\n def __str__(self):\n if six.PY2:\n return self.encode('utf-8')\n return super(multistring, self).__str__()\n\n def replace(self, old, new, count=None):\n if count is None:\n newstr = multistring(super(multistring, self).replace(old, new))\n else:\n newstr = multistring(super(multistring, self).replace(old, new, count))\n for s in self.strings[1:]:\n if count is 
None:\n newstr.strings.append(s.replace(old, new))\n else:\n newstr.strings.append(s.replace(old, new, count))\n return newstr\n", "path": "translate/misc/multistring.py"}]} | 1,314 | 167 |
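For illustration, a minimal sketch of how `multistring` is expected to behave once the `__str__` method from the golden diff above is applied. The import path matches the patched module; the sample strings and the printed output are assumptions for the example, and the exact `repr` differs between Python 2 and Python 3.

```python
# -*- coding: utf-8 -*-
# Sketch: calling str() on a multistring that holds non-ASCII text.
# With the patched __str__, Python 2 receives a UTF-8 encoded byte string
# instead of raising UnicodeEncodeError; Python 3 behaviour is unchanged.
from translate.misc.multistring import multistring

ms = multistring([u"é", u"alternate"])
s = str(ms)
print(repr(s))  # '\xc3\xa9' on Python 2, 'é' on Python 3
```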
gh_patches_debug_15897 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-563 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve help access and prompts from the CLI
A new user might enter any of the following and expect to be guided to the Help information about using the CLI:
- `cookiecutter`
- `cookiecutter -h`
- `cookiecutter --h`
- `cookiecutter help`
It would be helpful to improve the CLI messages or handle some of the additional cases for a new user.
@audreyr Thanks for the handy tool :bouquet:
<img width="773" alt="screen shot 2015-08-07 at 7 54 57 am" src="https://cloud.githubusercontent.com/assets/2680980/9138629/e64cb42a-3cd9-11e5-8c9c-cb9d726bd1eb.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/cli.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.cli
6 -----------------
7
8 Main `cookiecutter` CLI.
9 """
10
11 import os
12 import sys
13 import logging
14
15 import click
16
17 from cookiecutter import __version__
18 from cookiecutter.main import cookiecutter
19 from cookiecutter.exceptions import (
20 OutputDirExistsException, InvalidModeException
21 )
22
23 logger = logging.getLogger(__name__)
24
25
26 def version_msg():
27 python_version = sys.version[:3]
28 location = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
29 message = u'Cookiecutter %(version)s from {} (Python {})'
30 return message.format(location, python_version)
31
32
33 @click.command()
34 @click.version_option(__version__, u'-V', u'--version', message=version_msg())
35 @click.argument(u'template')
36 @click.option(
37 u'--no-input', is_flag=True,
38 help=u'Do not prompt for parameters and only use cookiecutter.json '
39 u'file content',
40 )
41 @click.option(
42 u'-c', u'--checkout',
43 help=u'branch, tag or commit to checkout after git clone',
44 )
45 @click.option(
46 '-v', '--verbose',
47 is_flag=True, help='Print debug information', default=False
48 )
49 @click.option(
50 u'--replay', is_flag=True,
51 help=u'Do not prompt for parameters and only use information entered '
52 u'previously',
53 )
54 @click.option(
55 u'-f', u'--overwrite-if-exists', is_flag=True,
56 help=u'Overwrite the contents of the output directory if it already exists'
57 )
58 @click.option(
59 u'-o', u'--output-dir', default='.', type=click.Path(),
60 help=u'Where to output the generated project dir into'
61 )
62 def main(template, no_input, checkout, verbose, replay, overwrite_if_exists,
63 output_dir):
64 """Create a project from a Cookiecutter project template (TEMPLATE)."""
65 if verbose:
66 logging.basicConfig(
67 format=u'%(levelname)s %(filename)s: %(message)s',
68 level=logging.DEBUG
69 )
70 else:
71 # Log info and above to console
72 logging.basicConfig(
73 format=u'%(levelname)s: %(message)s',
74 level=logging.INFO
75 )
76
77 try:
78 cookiecutter(
79 template, checkout, no_input,
80 replay=replay,
81 overwrite_if_exists=overwrite_if_exists,
82 output_dir=output_dir
83 )
84 except (OutputDirExistsException, InvalidModeException) as e:
85 click.echo(e)
86 sys.exit(1)
87
88 if __name__ == "__main__":
89 main()
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cookiecutter/cli.py b/cookiecutter/cli.py
--- a/cookiecutter/cli.py
+++ b/cookiecutter/cli.py
@@ -30,7 +30,7 @@
return message.format(location, python_version)
[email protected]()
[email protected](context_settings=dict(help_option_names=[u'-h', u'--help']))
@click.version_option(__version__, u'-V', u'--version', message=version_msg())
@click.argument(u'template')
@click.option(
@@ -75,6 +75,13 @@
)
try:
+
+ # If you _need_ to support a local template in a directory
+ # called 'help', use a qualified path to the directory.
+ if template == u'help':
+ click.echo(click.get_current_context().get_help())
+ sys.exit(0)
+
cookiecutter(
template, checkout, no_input,
replay=replay,
| {"golden_diff": "diff --git a/cookiecutter/cli.py b/cookiecutter/cli.py\n--- a/cookiecutter/cli.py\n+++ b/cookiecutter/cli.py\n@@ -30,7 +30,7 @@\n return message.format(location, python_version)\n \n \[email protected]()\[email protected](context_settings=dict(help_option_names=[u'-h', u'--help']))\n @click.version_option(__version__, u'-V', u'--version', message=version_msg())\n @click.argument(u'template')\n @click.option(\n@@ -75,6 +75,13 @@\n )\n \n try:\n+\n+ # If you _need_ to support a local template in a directory\n+ # called 'help', use a qualified path to the directory.\n+ if template == u'help':\n+ click.echo(click.get_current_context().get_help())\n+ sys.exit(0)\n+\n cookiecutter(\n template, checkout, no_input,\n replay=replay,\n", "issue": "Improve help access and prompts from the CLI\nA new user might enter any of the following and expect to be guided to the Help information about using the CLI:\n- `cookiecutter`\n- `cookiecutter -h`\n- `cookiecutter --h`\n- `cookiecutter help`\n\nIt would be helpful to improve the CLI messages or handle some of the additional cases for a new user.\n\n@audreyr Thanks for the handy tool :bouquet: \n\n<img width=\"773\" alt=\"screen shot 2015-08-07 at 7 54 57 am\" src=\"https://cloud.githubusercontent.com/assets/2680980/9138629/e64cb42a-3cd9-11e5-8c9c-cb9d726bd1eb.png\">\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.cli\n-----------------\n\nMain `cookiecutter` CLI.\n\"\"\"\n\nimport os\nimport sys\nimport logging\n\nimport click\n\nfrom cookiecutter import __version__\nfrom cookiecutter.main import cookiecutter\nfrom cookiecutter.exceptions import (\n OutputDirExistsException, InvalidModeException\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef version_msg():\n python_version = sys.version[:3]\n location = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n message = u'Cookiecutter %(version)s from {} (Python {})'\n return message.format(location, python_version)\n\n\[email protected]()\[email protected]_option(__version__, u'-V', u'--version', message=version_msg())\[email protected](u'template')\[email protected](\n u'--no-input', is_flag=True,\n help=u'Do not prompt for parameters and only use cookiecutter.json '\n u'file content',\n)\[email protected](\n u'-c', u'--checkout',\n help=u'branch, tag or commit to checkout after git clone',\n)\[email protected](\n '-v', '--verbose',\n is_flag=True, help='Print debug information', default=False\n)\[email protected](\n u'--replay', is_flag=True,\n help=u'Do not prompt for parameters and only use information entered '\n u'previously',\n)\[email protected](\n u'-f', u'--overwrite-if-exists', is_flag=True,\n help=u'Overwrite the contents of the output directory if it already exists'\n)\[email protected](\n u'-o', u'--output-dir', default='.', type=click.Path(),\n help=u'Where to output the generated project dir into'\n)\ndef main(template, no_input, checkout, verbose, replay, overwrite_if_exists,\n output_dir):\n \"\"\"Create a project from a Cookiecutter project template (TEMPLATE).\"\"\"\n if verbose:\n logging.basicConfig(\n format=u'%(levelname)s %(filename)s: %(message)s',\n level=logging.DEBUG\n )\n else:\n # Log info and above to console\n logging.basicConfig(\n format=u'%(levelname)s: %(message)s',\n level=logging.INFO\n )\n\n try:\n cookiecutter(\n template, checkout, no_input,\n replay=replay,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir\n )\n except (OutputDirExistsException, 
InvalidModeException) as e:\n click.echo(e)\n sys.exit(1)\n\nif __name__ == \"__main__\":\n main()\n", "path": "cookiecutter/cli.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.cli\n-----------------\n\nMain `cookiecutter` CLI.\n\"\"\"\n\nimport os\nimport sys\nimport logging\n\nimport click\n\nfrom cookiecutter import __version__\nfrom cookiecutter.main import cookiecutter\nfrom cookiecutter.exceptions import (\n OutputDirExistsException, InvalidModeException\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef version_msg():\n python_version = sys.version[:3]\n location = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n message = u'Cookiecutter %(version)s from {} (Python {})'\n return message.format(location, python_version)\n\n\[email protected](context_settings=dict(help_option_names=[u'-h', u'--help']))\[email protected]_option(__version__, u'-V', u'--version', message=version_msg())\[email protected](u'template')\[email protected](\n u'--no-input', is_flag=True,\n help=u'Do not prompt for parameters and only use cookiecutter.json '\n u'file content',\n)\[email protected](\n u'-c', u'--checkout',\n help=u'branch, tag or commit to checkout after git clone',\n)\[email protected](\n '-v', '--verbose',\n is_flag=True, help='Print debug information', default=False\n)\[email protected](\n u'--replay', is_flag=True,\n help=u'Do not prompt for parameters and only use information entered '\n u'previously',\n)\[email protected](\n u'-f', u'--overwrite-if-exists', is_flag=True,\n help=u'Overwrite the contents of the output directory if it already exists'\n)\[email protected](\n u'-o', u'--output-dir', default='.', type=click.Path(),\n help=u'Where to output the generated project dir into'\n)\ndef main(template, no_input, checkout, verbose, replay, overwrite_if_exists,\n output_dir):\n \"\"\"Create a project from a Cookiecutter project template (TEMPLATE).\"\"\"\n if verbose:\n logging.basicConfig(\n format=u'%(levelname)s %(filename)s: %(message)s',\n level=logging.DEBUG\n )\n else:\n # Log info and above to console\n logging.basicConfig(\n format=u'%(levelname)s: %(message)s',\n level=logging.INFO\n )\n\n try:\n\n # If you _need_ to support a local template in a directory\n # called 'help', use a qualified path to the directory.\n if template == u'help':\n click.echo(click.get_current_context().get_help())\n sys.exit(0)\n\n cookiecutter(\n template, checkout, no_input,\n replay=replay,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir\n )\n except (OutputDirExistsException, InvalidModeException) as e:\n click.echo(e)\n sys.exit(1)\n\nif __name__ == \"__main__\":\n main()\n", "path": "cookiecutter/cli.py"}]} | 1,187 | 214 |
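As a standalone illustration of the click pattern the patch above relies on (a sketch, not cookiecutter's actual module; the command body is invented for the example), the snippet registers `-h` as a help alias via `context_settings` and short-circuits a positional argument literally named `help`:

```python
import sys

import click


@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.argument('template')
def main(template):
    """Create a project from a project template (TEMPLATE)."""
    if template == 'help':
        # Mirrors the patched behaviour: print usage and exit cleanly.
        click.echo(click.get_current_context().get_help())
        sys.exit(0)
    click.echo('Would generate a project from {}'.format(template))


if __name__ == '__main__':
    main()
```

With this in place, `prog -h`, `prog --help`, and `prog help` all print the usage text, while a bare `prog` still reports the missing TEMPLATE argument.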
gh_patches_debug_9774 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1214 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NoEcho password warning on a Lambda Function named AccountPassword
*cfn-lint version: 0.25.1*
*Description of issue.*
I'm getting this warning:
```
W2501 Inappropriate map found for password on Resources/AccountPassword
```
on this Resource in my SAM template:
```yaml
AccountPassword:
Type: AWS::Serverless::Function
Properties:
FunctionName: !Sub ${RepositoryName}-account-password-${StageName}
Runtime: nodejs10.x
Layers:
- !Ref LambdaDependencyLayer
CodeUri: dist/account
Handler: password.handler
Description: Reset User Password
Timeout: 20
Events:
GET:
Type: Api
Properties:
RestApiId:
Ref: RestApi
Path: /account/password
Method: get
Role: !GetAtt LambdaExecutionRole.Arn
DeploymentPreference:
Alarms:
- !Ref AccountPasswordLatestVersionErrorMetricGreaterThanZeroAlarm
```
I'm guessing the resolution would be to make sure this check is only done on Parameters and not Resources.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/properties/Password.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import re
6 import six
7 from cfnlint.rules import CloudFormationLintRule
8 from cfnlint.rules import RuleMatch
9 from cfnlint.helpers import REGEX_DYN_REF_SSM, REGEX_DYN_REF
10
11
12 class Password(CloudFormationLintRule):
13 """Check if Password Properties are properly configured"""
14 id = 'W2501'
15 shortdesc = 'Check if Password Properties are correctly configured'
16 description = 'Password properties should be strings and if parameter using NoEcho'
17 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/best-practices.html#creds'
18 tags = ['parameters', 'passwords', 'security']
19
20 def match(self, cfn):
21 """Check CloudFormation Password Parameters"""
22
23 matches = []
24 password_properties = ['AccountPassword', 'AdminPassword', 'ADDomainJoinPassword', 'CrossRealmTrustPrincipalPassword',
25 'KdcAdminPassword', 'Password', 'DbPassword', 'MasterUserPassword', 'PasswordParam']
26
27 parameters = cfn.get_parameter_names()
28 fix_params = []
29 for password_property in password_properties:
30 # Build the list of refs
31 refs = cfn.search_deep_keys(password_property)
32 trees = [tree for tree in refs if tree[0] == 'Resources']
33
34 for tree in trees:
35 obj = tree[-1]
36 if isinstance(obj, (six.string_types)):
37 if re.match(REGEX_DYN_REF, obj):
38 if re.match(REGEX_DYN_REF_SSM, obj):
39 message = 'Password should use a secure dynamic reference for %s' % (
40 '/'.join(map(str, tree[:-1])))
41 matches.append(RuleMatch(tree[:-1], message))
42 else:
43 message = 'Password shouldn\'t be hardcoded for %s' % (
44 '/'.join(map(str, tree[:-1])))
45 matches.append(RuleMatch(tree[:-1], message))
46 elif isinstance(obj, dict):
47 if len(obj) == 1:
48 for key, value in obj.items():
49 if key == 'Ref':
50 if value in parameters:
51 param = cfn.template['Parameters'][value]
52 if 'NoEcho' in param:
53 if not param['NoEcho']:
54 fix_params.append(
55 {'Name': value, 'Use': password_property})
56 else:
57 fix_params.append({'Name': value, 'Use': password_property})
58 else:
59 message = 'Inappropriate map found for password on %s' % (
60 '/'.join(map(str, tree[:-1])))
61 matches.append(RuleMatch(tree[:-1], message))
62
63 for paramname in fix_params:
64 message = 'Parameter {} used as {}, therefore NoEcho should be True'.format(
65 paramname['Name'], paramname['Use'])
66 tree = ['Parameters', paramname['Name']]
67 matches.append(RuleMatch(tree, message))
68 return matches
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/resources/properties/Password.py b/src/cfnlint/rules/resources/properties/Password.py
--- a/src/cfnlint/rules/resources/properties/Password.py
+++ b/src/cfnlint/rules/resources/properties/Password.py
@@ -29,7 +29,11 @@
for password_property in password_properties:
# Build the list of refs
refs = cfn.search_deep_keys(password_property)
- trees = [tree for tree in refs if tree[0] == 'Resources']
+ trees = []
+ for tree in refs:
+ if len(tree) > 2:
+ if tree[0] == 'Resources' and tree[2] == 'Properties':
+ trees.append(tree)
for tree in trees:
obj = tree[-1]
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/Password.py b/src/cfnlint/rules/resources/properties/Password.py\n--- a/src/cfnlint/rules/resources/properties/Password.py\n+++ b/src/cfnlint/rules/resources/properties/Password.py\n@@ -29,7 +29,11 @@\n for password_property in password_properties:\n # Build the list of refs\n refs = cfn.search_deep_keys(password_property)\n- trees = [tree for tree in refs if tree[0] == 'Resources']\n+ trees = []\n+ for tree in refs:\n+ if len(tree) > 2:\n+ if tree[0] == 'Resources' and tree[2] == 'Properties':\n+ trees.append(tree)\n \n for tree in trees:\n obj = tree[-1]\n", "issue": "NoEcho password warning on a Lambda Function named AccountPassword\n*cfn-lint version: 0.25.1*\r\n\r\n*Description of issue.*\r\n\r\nI'm getting this warning:\r\n\r\n```\r\nW2501 Inappropriate map found for password on Resources/AccountPassword\r\n```\r\n\r\non this Resource in my SAM template:\r\n\r\n```yaml\r\n AccountPassword:\r\n Type: AWS::Serverless::Function\r\n Properties:\r\n FunctionName: !Sub ${RepositoryName}-account-password-${StageName}\r\n Runtime: nodejs10.x\r\n Layers:\r\n - !Ref LambdaDependencyLayer\r\n CodeUri: dist/account\r\n Handler: password.handler\r\n Description: Reset User Password\r\n Timeout: 20\r\n Events:\r\n GET:\r\n Type: Api\r\n Properties:\r\n RestApiId:\r\n Ref: RestApi\r\n Path: /account/password\r\n Method: get\r\n Role: !GetAtt LambdaExecutionRole.Arn\r\n DeploymentPreference:\r\n Alarms:\r\n - !Ref AccountPasswordLatestVersionErrorMetricGreaterThanZeroAlarm\r\n```\r\n\r\nI'm guessing the resolution would be to make sure this check is only done on Parameters and not Resouces.\n", "before_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport re\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\nfrom cfnlint.helpers import REGEX_DYN_REF_SSM, REGEX_DYN_REF\n\n\nclass Password(CloudFormationLintRule):\n \"\"\"Check if Password Properties are properly configured\"\"\"\n id = 'W2501'\n shortdesc = 'Check if Password Properties are correctly configured'\n description = 'Password properties should be strings and if parameter using NoEcho'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/best-practices.html#creds'\n tags = ['parameters', 'passwords', 'security']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Password Parameters\"\"\"\n\n matches = []\n password_properties = ['AccountPassword', 'AdminPassword', 'ADDomainJoinPassword', 'CrossRealmTrustPrincipalPassword',\n 'KdcAdminPassword', 'Password', 'DbPassword', 'MasterUserPassword', 'PasswordParam']\n\n parameters = cfn.get_parameter_names()\n fix_params = []\n for password_property in password_properties:\n # Build the list of refs\n refs = cfn.search_deep_keys(password_property)\n trees = [tree for tree in refs if tree[0] == 'Resources']\n\n for tree in trees:\n obj = tree[-1]\n if isinstance(obj, (six.string_types)):\n if re.match(REGEX_DYN_REF, obj):\n if re.match(REGEX_DYN_REF_SSM, obj):\n message = 'Password should use a secure dynamic reference for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n message = 'Password shouldn\\'t be hardcoded for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n elif isinstance(obj, dict):\n if len(obj) == 1:\n for key, value in obj.items():\n if key == 'Ref':\n 
if value in parameters:\n param = cfn.template['Parameters'][value]\n if 'NoEcho' in param:\n if not param['NoEcho']:\n fix_params.append(\n {'Name': value, 'Use': password_property})\n else:\n fix_params.append({'Name': value, 'Use': password_property})\n else:\n message = 'Inappropriate map found for password on %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n for paramname in fix_params:\n message = 'Parameter {} used as {}, therefore NoEcho should be True'.format(\n paramname['Name'], paramname['Use'])\n tree = ['Parameters', paramname['Name']]\n matches.append(RuleMatch(tree, message))\n return matches\n", "path": "src/cfnlint/rules/resources/properties/Password.py"}], "after_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport re\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\nfrom cfnlint.helpers import REGEX_DYN_REF_SSM, REGEX_DYN_REF\n\n\nclass Password(CloudFormationLintRule):\n \"\"\"Check if Password Properties are properly configured\"\"\"\n id = 'W2501'\n shortdesc = 'Check if Password Properties are correctly configured'\n description = 'Password properties should be strings and if parameter using NoEcho'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/best-practices.html#creds'\n tags = ['parameters', 'passwords', 'security']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Password Parameters\"\"\"\n\n matches = []\n password_properties = ['AccountPassword', 'AdminPassword', 'ADDomainJoinPassword', 'CrossRealmTrustPrincipalPassword',\n 'KdcAdminPassword', 'Password', 'DbPassword', 'MasterUserPassword', 'PasswordParam']\n\n parameters = cfn.get_parameter_names()\n fix_params = []\n for password_property in password_properties:\n # Build the list of refs\n refs = cfn.search_deep_keys(password_property)\n trees = []\n for tree in refs:\n if len(tree) > 2:\n if tree[0] == 'Resources' and tree[2] == 'Properties':\n trees.append(tree)\n\n for tree in trees:\n obj = tree[-1]\n if isinstance(obj, (six.string_types)):\n if re.match(REGEX_DYN_REF, obj):\n if re.match(REGEX_DYN_REF_SSM, obj):\n message = 'Password should use a secure dynamic reference for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n message = 'Password shouldn\\'t be hardcoded for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n elif isinstance(obj, dict):\n if len(obj) == 1:\n for key, value in obj.items():\n if key == 'Ref':\n if value in parameters:\n param = cfn.template['Parameters'][value]\n if 'NoEcho' in param:\n if not param['NoEcho']:\n fix_params.append(\n {'Name': value, 'Use': password_property})\n else:\n fix_params.append({'Name': value, 'Use': password_property})\n else:\n message = 'Inappropriate map found for password on %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n for paramname in fix_params:\n message = 'Parameter {} used as {}, therefore NoEcho should be True'.format(\n paramname['Name'], paramname['Use'])\n tree = ['Parameters', paramname['Name']]\n matches.append(RuleMatch(tree, message))\n return matches\n", "path": "src/cfnlint/rules/resources/properties/Password.py"}]} | 1,282 | 177 |
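A minimal sketch of the filtering the patch introduces, using hypothetical path lists shaped like `search_deep_keys` results (path segments followed by the matched value); only matches that sit under a resource's `Properties` block survive, so a resource that is merely named `AccountPassword` no longer triggers W2501:

```python
def filter_password_trees(refs):
    # Keep only deep-key matches of the form
    # ['Resources', <LogicalId>, 'Properties', ..., <value>]
    trees = []
    for tree in refs:
        if len(tree) > 2 and tree[0] == 'Resources' and tree[2] == 'Properties':
            trees.append(tree)
    return trees


refs = [
    # A Lambda function that merely has the logical ID "AccountPassword"
    ['Resources', 'AccountPassword', {'Type': 'AWS::Serverless::Function'}],
    # An actual password property on another resource
    ['Resources', 'MyDatabase', 'Properties', 'MasterUserPassword', {'Ref': 'DbPassword'}],
]
print(filter_password_trees(refs))  # only the MyDatabase tree remains
```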
gh_patches_debug_20732 | rasdani/github-patches | git_diff | ocf__ocfweb-124 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stop emailing us when there's a paper jam
```
A problem was encountered and reported via ocflib:
An exception occured in an ocfweb periodic function:
Traceback (most recent call last):
File "/usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/caching.py", line 56, in cache_lookup_with_fallback
raise KeyError('Forcing miss as requested.')
KeyError: 'Forcing miss as requested.'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/bin/run_periodic_functions.py", line 47, in run_periodic_functions
pf.update()
File "/usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/caching.py", line 214, in update
force_miss=True,
File "/usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/caching.py", line 66, in cache_lookup_with_fallback
result = fallback()
File "/usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/caching.py", line 170, in function_with_timestamp
return (datetime.now(), self.function())
File "/usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/stats/summary.py", line 68, in printers
for printer in PRINTERS
File "/usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/stats/summary.py", line 68, in <genexpr>
for printer in PRINTERS
File "/usr/share/python/ocfweb/lib/python3.4/site-packages/ocflib/lab/printing.py", line 40, in get_toner
for oid in (OID_TONER_CUR, OID_TONER_MAX)
File "/usr/share/python/ocfweb/lib/python3.4/site-packages/ocflib/lab/printing.py", line 40, in <genexpr>
for oid in (OID_TONER_CUR, OID_TONER_MAX)
File "/usr/share/python/ocfweb/lib/python3.4/site-packages/ocflib/lab/printing.py", line 30, in _snmp
'Device {} returned error status: {}'.format(host, err_status),
OSError: Device deforestation returned error status: 5
Periodic function:
* Key: ('2016.01.20.18.11-gitbf0aae55', 'ocfweb.stats.summary#printers', (), ())
* Last Update: 2016-01-21 15:48:30.668738 (61.003647 seconds ago)
* Period: 60
* TTL: 120
The background process will now pause for 30 seconds.
====
Hostname: coma.ocf.berkeley.edu
Callstack:
at /usr/share/python/ocfweb/lib/python3.4/site-packages/ocflib/misc/mail.py:59 (send_problem_report)
by /usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/bin/run_periodic_functions.py:73 (run_periodic_functions)
by /usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/bin/run_periodic_functions.py:116 (main)
by /usr/share/python/ocfweb/bin/ocfweb-run-periodic-functions:9 (<module>)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ocfweb/stats/summary.py`
Content:
```
1 from datetime import date
2 from datetime import datetime
3 from operator import attrgetter
4
5 from django.shortcuts import render
6 from ocflib.constants import CURRENT_SEMESTER_START
7 from ocflib.lab.printing import get_maintkit
8 from ocflib.lab.printing import get_toner
9 from ocflib.lab.printing import PRINTERS
10 from ocflib.lab.stats import list_desktops
11 from ocflib.lab.stats import staff_in_lab as real_staff_in_lab
12 from ocflib.lab.stats import STATS_EPOCH
13 from ocflib.lab.stats import top_staff_alltime as real_top_staff_alltime
14 from ocflib.lab.stats import top_staff_semester as real_top_staff_semester
15 from ocflib.lab.stats import users_in_lab_count as real_users_in_lab_count
16 from ocflib.lab.stats import UtilizationProfile
17
18 from ocfweb.caching import periodic
19 from ocfweb.stats.daily_graph import get_open_close
20
21
22 @periodic(60)
23 def desktop_profiles():
24 open_, close = get_open_close(date.today())
25 now = datetime.today()
26
27 # If the lab has opened, but hasn't closed yet, only count
28 # statistics until the current time. If the lab isn't open
29 # yet, then don't count anything, and if it is closed, show
30 # statistics from when it was open during the day.
31 if now > open_ and now < close:
32 end = now
33 elif now <= open_:
34 end = open_
35 else:
36 end = close
37
38 return sorted(
39 UtilizationProfile.from_hostnames(list_desktops(), open_, end).values(),
40 key=attrgetter('hostname'),
41 )
42
43
44 @periodic(30)
45 def staff_in_lab():
46 return real_staff_in_lab()
47
48
49 @periodic(300)
50 def top_staff_alltime():
51 return real_top_staff_alltime()
52
53
54 @periodic(300)
55 def top_staff_semester():
56 return real_top_staff_semester()
57
58
59 @periodic(30)
60 def users_in_lab_count():
61 return real_users_in_lab_count()
62
63
64 @periodic(60)
65 def printers():
66 return sorted(
67 (printer, get_toner(printer), get_maintkit(printer))
68 for printer in PRINTERS
69 )
70
71
72 def summary(request):
73 return render(
74 request,
75 'summary.html',
76 {
77 'title': 'Lab Statistics',
78 'desktop_profiles': desktop_profiles(),
79 'current_semester_start': CURRENT_SEMESTER_START,
80 'stats_epoch': STATS_EPOCH,
81 'staff_in_lab': staff_in_lab(),
82 'top_staff_alltime': top_staff_alltime()[:10],
83 'top_staff_semester': top_staff_semester()[:10],
84 'users_in_lab_count': users_in_lab_count(),
85 'printers': printers(),
86 },
87 )
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ocfweb/stats/summary.py b/ocfweb/stats/summary.py
--- a/ocfweb/stats/summary.py
+++ b/ocfweb/stats/summary.py
@@ -1,3 +1,4 @@
+import logging
from datetime import date
from datetime import datetime
from operator import attrgetter
@@ -19,6 +20,9 @@
from ocfweb.stats.daily_graph import get_open_close
+_logger = logging.getLogger(__name__)
+
+
@periodic(60)
def desktop_profiles():
open_, close = get_open_close(date.today())
@@ -63,8 +67,17 @@
@periodic(60)
def printers():
+ def silence(f):
+ def inner(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except OSError as ex:
+ _logger.warn('Silencing exception reading printer data: {}'.format(ex))
+ return None
+ return inner
+
return sorted(
- (printer, get_toner(printer), get_maintkit(printer))
+ (printer, silence(get_toner)(printer), silence(get_maintkit)(printer))
for printer in PRINTERS
)
| {"golden_diff": "diff --git a/ocfweb/stats/summary.py b/ocfweb/stats/summary.py\n--- a/ocfweb/stats/summary.py\n+++ b/ocfweb/stats/summary.py\n@@ -1,3 +1,4 @@\n+import logging\n from datetime import date\n from datetime import datetime\n from operator import attrgetter\n@@ -19,6 +20,9 @@\n from ocfweb.stats.daily_graph import get_open_close\n \n \n+_logger = logging.getLogger(__name__)\n+\n+\n @periodic(60)\n def desktop_profiles():\n open_, close = get_open_close(date.today())\n@@ -63,8 +67,17 @@\n \n @periodic(60)\n def printers():\n+ def silence(f):\n+ def inner(*args, **kwargs):\n+ try:\n+ return f(*args, **kwargs)\n+ except OSError as ex:\n+ _logger.warn('Silencing exception reading printer data: {}'.format(ex))\n+ return None\n+ return inner\n+\n return sorted(\n- (printer, get_toner(printer), get_maintkit(printer))\n+ (printer, silence(get_toner)(printer), silence(get_maintkit)(printer))\n for printer in PRINTERS\n )\n", "issue": "Stop emailing us when there's a paper jam\n```\nA problem was encountered and reported via ocflib:\n\nAn exception occured in an ocfweb periodic function:\n\nTraceback (most recent call last):\n File \"/usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/caching.py\", line 56, in cache_lookup_with_fallback\n raise KeyError('Forcing miss as requested.')\nKeyError: 'Forcing miss as requested.'\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/bin/run_periodic_functions.py\", line 47, in run_periodic_functions\n pf.update()\n File \"/usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/caching.py\", line 214, in update\n force_miss=True,\n File \"/usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/caching.py\", line 66, in cache_lookup_with_fallback\n result = fallback()\n File \"/usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/caching.py\", line 170, in function_with_timestamp\n return (datetime.now(), self.function())\n File \"/usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/stats/summary.py\", line 68, in printers\n for printer in PRINTERS\n File \"/usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/stats/summary.py\", line 68, in <genexpr>\n for printer in PRINTERS\n File \"/usr/share/python/ocfweb/lib/python3.4/site-packages/ocflib/lab/printing.py\", line 40, in get_toner\n for oid in (OID_TONER_CUR, OID_TONER_MAX)\n File \"/usr/share/python/ocfweb/lib/python3.4/site-packages/ocflib/lab/printing.py\", line 40, in <genexpr>\n for oid in (OID_TONER_CUR, OID_TONER_MAX)\n File \"/usr/share/python/ocfweb/lib/python3.4/site-packages/ocflib/lab/printing.py\", line 30, in _snmp\n 'Device {} returned error status: {}'.format(host, err_status),\nOSError: Device deforestation returned error status: 5\n\n\nPeriodic function:\n * Key: ('2016.01.20.18.11-gitbf0aae55', 'ocfweb.stats.summary#printers', (), ())\n * Last Update: 2016-01-21 15:48:30.668738 (61.003647 seconds ago)\n\n\n * Period: 60\n * TTL: 120\n\nThe background process will now pause for 30 seconds.\n\n\n====\nHostname: coma.ocf.berkeley.edu\nCallstack:\n at /usr/share/python/ocfweb/lib/python3.4/site-packages/ocflib/misc/mail.py:59 (send_problem_report)\n by /usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/bin/run_periodic_functions.py:73 (run_periodic_functions)\n by /usr/share/python/ocfweb/lib/python3.4/site-packages/ocfweb/bin/run_periodic_functions.py:116 (main)\n by 
/usr/share/python/ocfweb/bin/ocfweb-run-periodic-functions:9 (<module>)\n```\n\n", "before_files": [{"content": "from datetime import date\nfrom datetime import datetime\nfrom operator import attrgetter\n\nfrom django.shortcuts import render\nfrom ocflib.constants import CURRENT_SEMESTER_START\nfrom ocflib.lab.printing import get_maintkit\nfrom ocflib.lab.printing import get_toner\nfrom ocflib.lab.printing import PRINTERS\nfrom ocflib.lab.stats import list_desktops\nfrom ocflib.lab.stats import staff_in_lab as real_staff_in_lab\nfrom ocflib.lab.stats import STATS_EPOCH\nfrom ocflib.lab.stats import top_staff_alltime as real_top_staff_alltime\nfrom ocflib.lab.stats import top_staff_semester as real_top_staff_semester\nfrom ocflib.lab.stats import users_in_lab_count as real_users_in_lab_count\nfrom ocflib.lab.stats import UtilizationProfile\n\nfrom ocfweb.caching import periodic\nfrom ocfweb.stats.daily_graph import get_open_close\n\n\n@periodic(60)\ndef desktop_profiles():\n open_, close = get_open_close(date.today())\n now = datetime.today()\n\n # If the lab has opened, but hasn't closed yet, only count\n # statistics until the current time. If the lab isn't open\n # yet, then don't count anything, and if it is closed, show\n # statistics from when it was open during the day.\n if now > open_ and now < close:\n end = now\n elif now <= open_:\n end = open_\n else:\n end = close\n\n return sorted(\n UtilizationProfile.from_hostnames(list_desktops(), open_, end).values(),\n key=attrgetter('hostname'),\n )\n\n\n@periodic(30)\ndef staff_in_lab():\n return real_staff_in_lab()\n\n\n@periodic(300)\ndef top_staff_alltime():\n return real_top_staff_alltime()\n\n\n@periodic(300)\ndef top_staff_semester():\n return real_top_staff_semester()\n\n\n@periodic(30)\ndef users_in_lab_count():\n return real_users_in_lab_count()\n\n\n@periodic(60)\ndef printers():\n return sorted(\n (printer, get_toner(printer), get_maintkit(printer))\n for printer in PRINTERS\n )\n\n\ndef summary(request):\n return render(\n request,\n 'summary.html',\n {\n 'title': 'Lab Statistics',\n 'desktop_profiles': desktop_profiles(),\n 'current_semester_start': CURRENT_SEMESTER_START,\n 'stats_epoch': STATS_EPOCH,\n 'staff_in_lab': staff_in_lab(),\n 'top_staff_alltime': top_staff_alltime()[:10],\n 'top_staff_semester': top_staff_semester()[:10],\n 'users_in_lab_count': users_in_lab_count(),\n 'printers': printers(),\n },\n )\n", "path": "ocfweb/stats/summary.py"}], "after_files": [{"content": "import logging\nfrom datetime import date\nfrom datetime import datetime\nfrom operator import attrgetter\n\nfrom django.shortcuts import render\nfrom ocflib.constants import CURRENT_SEMESTER_START\nfrom ocflib.lab.printing import get_maintkit\nfrom ocflib.lab.printing import get_toner\nfrom ocflib.lab.printing import PRINTERS\nfrom ocflib.lab.stats import list_desktops\nfrom ocflib.lab.stats import staff_in_lab as real_staff_in_lab\nfrom ocflib.lab.stats import STATS_EPOCH\nfrom ocflib.lab.stats import top_staff_alltime as real_top_staff_alltime\nfrom ocflib.lab.stats import top_staff_semester as real_top_staff_semester\nfrom ocflib.lab.stats import users_in_lab_count as real_users_in_lab_count\nfrom ocflib.lab.stats import UtilizationProfile\n\nfrom ocfweb.caching import periodic\nfrom ocfweb.stats.daily_graph import get_open_close\n\n\n_logger = logging.getLogger(__name__)\n\n\n@periodic(60)\ndef desktop_profiles():\n open_, close = get_open_close(date.today())\n now = datetime.today()\n\n # If the lab has opened, but hasn't closed yet, only 
count\n # statistics until the current time. If the lab isn't open\n # yet, then don't count anything, and if it is closed, show\n # statistics from when it was open during the day.\n if now > open_ and now < close:\n end = now\n elif now <= open_:\n end = open_\n else:\n end = close\n\n return sorted(\n UtilizationProfile.from_hostnames(list_desktops(), open_, end).values(),\n key=attrgetter('hostname'),\n )\n\n\n@periodic(30)\ndef staff_in_lab():\n return real_staff_in_lab()\n\n\n@periodic(300)\ndef top_staff_alltime():\n return real_top_staff_alltime()\n\n\n@periodic(300)\ndef top_staff_semester():\n return real_top_staff_semester()\n\n\n@periodic(30)\ndef users_in_lab_count():\n return real_users_in_lab_count()\n\n\n@periodic(60)\ndef printers():\n def silence(f):\n def inner(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except OSError as ex:\n _logger.warn('Silencing exception reading printer data: {}'.format(ex))\n return None\n return inner\n\n return sorted(\n (printer, silence(get_toner)(printer), silence(get_maintkit)(printer))\n for printer in PRINTERS\n )\n\n\ndef summary(request):\n return render(\n request,\n 'summary.html',\n {\n 'title': 'Lab Statistics',\n 'desktop_profiles': desktop_profiles(),\n 'current_semester_start': CURRENT_SEMESTER_START,\n 'stats_epoch': STATS_EPOCH,\n 'staff_in_lab': staff_in_lab(),\n 'top_staff_alltime': top_staff_alltime()[:10],\n 'top_staff_semester': top_staff_semester()[:10],\n 'users_in_lab_count': users_in_lab_count(),\n 'printers': printers(),\n },\n )\n", "path": "ocfweb/stats/summary.py"}]} | 1,847 | 271 |
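A standalone sketch of the `silence` wrapper added in the patch above; `flaky_get_toner` is a made-up stand-in for ocflib's `get_toner`, which raises `OSError` when a printer's SNMP agent reports an error status (for example during a paper jam):

```python
import logging

logging.basicConfig(level=logging.WARNING)
_logger = logging.getLogger(__name__)


def silence(f):
    def inner(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except OSError as ex:
            _logger.warning('Silencing exception reading printer data: {}'.format(ex))
            return None
    return inner


def flaky_get_toner(printer):
    # Stand-in for ocflib.lab.printing.get_toner failing on an SNMP error.
    raise OSError('Device {} returned error status: 5'.format(printer))


print(silence(flaky_get_toner)('deforestation'))  # logs a warning, prints None
```

Wrapping `get_toner` and `get_maintkit` this way means a single failing printer yields `None` values in the summary instead of aborting the periodic function and emailing a problem report.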
gh_patches_debug_29103 | rasdani/github-patches | git_diff | pyca__cryptography-1236 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MultiBackend doesn't implement TraditionalOpenSSLSerializationBackend
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cryptography/hazmat/backends/multibackend.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 from cryptography import utils
17 from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
18 from cryptography.hazmat.backends.interfaces import (
19 CMACBackend, CipherBackend, DSABackend, EllipticCurveBackend, HMACBackend,
20 HashBackend, PBKDF2HMACBackend, PKCS8SerializationBackend, RSABackend
21 )
22
23
24 @utils.register_interface(CMACBackend)
25 @utils.register_interface(CipherBackend)
26 @utils.register_interface(HashBackend)
27 @utils.register_interface(HMACBackend)
28 @utils.register_interface(PBKDF2HMACBackend)
29 @utils.register_interface(PKCS8SerializationBackend)
30 @utils.register_interface(RSABackend)
31 @utils.register_interface(DSABackend)
32 @utils.register_interface(EllipticCurveBackend)
33 class MultiBackend(object):
34 name = "multibackend"
35
36 def __init__(self, backends):
37 self._backends = backends
38
39 def _filtered_backends(self, interface):
40 for b in self._backends:
41 if isinstance(b, interface):
42 yield b
43
44 def cipher_supported(self, algorithm, mode):
45 return any(
46 b.cipher_supported(algorithm, mode)
47 for b in self._filtered_backends(CipherBackend)
48 )
49
50 def create_symmetric_encryption_ctx(self, algorithm, mode):
51 for b in self._filtered_backends(CipherBackend):
52 try:
53 return b.create_symmetric_encryption_ctx(algorithm, mode)
54 except UnsupportedAlgorithm:
55 pass
56 raise UnsupportedAlgorithm(
57 "cipher {0} in {1} mode is not supported by this backend.".format(
58 algorithm.name, mode.name if mode else mode),
59 _Reasons.UNSUPPORTED_CIPHER
60 )
61
62 def create_symmetric_decryption_ctx(self, algorithm, mode):
63 for b in self._filtered_backends(CipherBackend):
64 try:
65 return b.create_symmetric_decryption_ctx(algorithm, mode)
66 except UnsupportedAlgorithm:
67 pass
68 raise UnsupportedAlgorithm(
69 "cipher {0} in {1} mode is not supported by this backend.".format(
70 algorithm.name, mode.name if mode else mode),
71 _Reasons.UNSUPPORTED_CIPHER
72 )
73
74 def hash_supported(self, algorithm):
75 return any(
76 b.hash_supported(algorithm)
77 for b in self._filtered_backends(HashBackend)
78 )
79
80 def create_hash_ctx(self, algorithm):
81 for b in self._filtered_backends(HashBackend):
82 try:
83 return b.create_hash_ctx(algorithm)
84 except UnsupportedAlgorithm:
85 pass
86 raise UnsupportedAlgorithm(
87 "{0} is not a supported hash on this backend.".format(
88 algorithm.name),
89 _Reasons.UNSUPPORTED_HASH
90 )
91
92 def hmac_supported(self, algorithm):
93 return any(
94 b.hmac_supported(algorithm)
95 for b in self._filtered_backends(HMACBackend)
96 )
97
98 def create_hmac_ctx(self, key, algorithm):
99 for b in self._filtered_backends(HMACBackend):
100 try:
101 return b.create_hmac_ctx(key, algorithm)
102 except UnsupportedAlgorithm:
103 pass
104 raise UnsupportedAlgorithm(
105 "{0} is not a supported hash on this backend.".format(
106 algorithm.name),
107 _Reasons.UNSUPPORTED_HASH
108 )
109
110 def pbkdf2_hmac_supported(self, algorithm):
111 return any(
112 b.pbkdf2_hmac_supported(algorithm)
113 for b in self._filtered_backends(PBKDF2HMACBackend)
114 )
115
116 def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations,
117 key_material):
118 for b in self._filtered_backends(PBKDF2HMACBackend):
119 try:
120 return b.derive_pbkdf2_hmac(
121 algorithm, length, salt, iterations, key_material
122 )
123 except UnsupportedAlgorithm:
124 pass
125 raise UnsupportedAlgorithm(
126 "{0} is not a supported hash on this backend.".format(
127 algorithm.name),
128 _Reasons.UNSUPPORTED_HASH
129 )
130
131 def generate_rsa_private_key(self, public_exponent, key_size):
132 for b in self._filtered_backends(RSABackend):
133 return b.generate_rsa_private_key(public_exponent, key_size)
134 raise UnsupportedAlgorithm("RSA is not supported by the backend.",
135 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
136
137 def generate_rsa_parameters_supported(self, public_exponent, key_size):
138 for b in self._filtered_backends(RSABackend):
139 return b.generate_rsa_parameters_supported(
140 public_exponent, key_size
141 )
142 raise UnsupportedAlgorithm("RSA is not supported by the backend.",
143 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
144
145 def create_rsa_signature_ctx(self, private_key, padding, algorithm):
146 for b in self._filtered_backends(RSABackend):
147 return b.create_rsa_signature_ctx(private_key, padding, algorithm)
148 raise UnsupportedAlgorithm("RSA is not supported by the backend.",
149 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
150
151 def create_rsa_verification_ctx(self, public_key, signature, padding,
152 algorithm):
153 for b in self._filtered_backends(RSABackend):
154 return b.create_rsa_verification_ctx(public_key, signature,
155 padding, algorithm)
156 raise UnsupportedAlgorithm("RSA is not supported by the backend.",
157 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
158
159 def mgf1_hash_supported(self, algorithm):
160 for b in self._filtered_backends(RSABackend):
161 return b.mgf1_hash_supported(algorithm)
162 raise UnsupportedAlgorithm("RSA is not supported by the backend.",
163 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
164
165 def decrypt_rsa(self, private_key, ciphertext, padding):
166 for b in self._filtered_backends(RSABackend):
167 return b.decrypt_rsa(private_key, ciphertext, padding)
168 raise UnsupportedAlgorithm("RSA is not supported by the backend.",
169 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
170
171 def encrypt_rsa(self, public_key, plaintext, padding):
172 for b in self._filtered_backends(RSABackend):
173 return b.encrypt_rsa(public_key, plaintext, padding)
174 raise UnsupportedAlgorithm("RSA is not supported by the backend.",
175 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
176
177 def rsa_padding_supported(self, padding):
178 for b in self._filtered_backends(RSABackend):
179 return b.rsa_padding_supported(padding)
180 raise UnsupportedAlgorithm("RSA is not supported by the backend.",
181 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
182
183 def load_rsa_private_numbers(self, numbers):
184 for b in self._filtered_backends(RSABackend):
185 return b.load_rsa_private_numbers(numbers)
186
187 raise UnsupportedAlgorithm("RSA is not supported by the backend",
188 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
189
190 def load_rsa_public_numbers(self, numbers):
191 for b in self._filtered_backends(RSABackend):
192 return b.load_rsa_public_numbers(numbers)
193
194 raise UnsupportedAlgorithm("RSA is not supported by the backend",
195 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
196
197 def generate_dsa_parameters(self, key_size):
198 for b in self._filtered_backends(DSABackend):
199 return b.generate_dsa_parameters(key_size)
200 raise UnsupportedAlgorithm("DSA is not supported by the backend.",
201 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
202
203 def generate_dsa_private_key(self, parameters):
204 for b in self._filtered_backends(DSABackend):
205 return b.generate_dsa_private_key(parameters)
206 raise UnsupportedAlgorithm("DSA is not supported by the backend.",
207 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
208
209 def generate_dsa_private_key_and_parameters(self, key_size):
210 for b in self._filtered_backends(DSABackend):
211 return b.generate_dsa_private_key_and_parameters(key_size)
212 raise UnsupportedAlgorithm("DSA is not supported by the backend.",
213 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
214
215 def create_dsa_verification_ctx(self, public_key, signature, algorithm):
216 for b in self._filtered_backends(DSABackend):
217 return b.create_dsa_verification_ctx(public_key, signature,
218 algorithm)
219 raise UnsupportedAlgorithm("DSA is not supported by the backend.",
220 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
221
222 def create_dsa_signature_ctx(self, private_key, algorithm):
223 for b in self._filtered_backends(DSABackend):
224 return b.create_dsa_signature_ctx(private_key, algorithm)
225 raise UnsupportedAlgorithm("DSA is not supported by the backend.",
226 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
227
228 def dsa_hash_supported(self, algorithm):
229 for b in self._filtered_backends(DSABackend):
230 return b.dsa_hash_supported(algorithm)
231 raise UnsupportedAlgorithm("DSA is not supported by the backend.",
232 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
233
234 def dsa_parameters_supported(self, p, q, g):
235 for b in self._filtered_backends(DSABackend):
236 return b.dsa_parameters_supported(p, q, g)
237 raise UnsupportedAlgorithm("DSA is not supported by the backend.",
238 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
239
240 def cmac_algorithm_supported(self, algorithm):
241 return any(
242 b.cmac_algorithm_supported(algorithm)
243 for b in self._filtered_backends(CMACBackend)
244 )
245
246 def create_cmac_ctx(self, algorithm):
247 for b in self._filtered_backends(CMACBackend):
248 try:
249 return b.create_cmac_ctx(algorithm)
250 except UnsupportedAlgorithm:
251 pass
252 raise UnsupportedAlgorithm("This backend does not support CMAC.",
253 _Reasons.UNSUPPORTED_CIPHER)
254
255 def elliptic_curve_supported(self, curve):
256 return any(
257 b.elliptic_curve_supported(curve)
258 for b in self._filtered_backends(EllipticCurveBackend)
259 )
260
261 def elliptic_curve_signature_algorithm_supported(
262 self, signature_algorithm, curve
263 ):
264 return any(
265 b.elliptic_curve_signature_algorithm_supported(
266 signature_algorithm, curve
267 )
268 for b in self._filtered_backends(EllipticCurveBackend)
269 )
270
271 def generate_elliptic_curve_private_key(self, curve):
272 for b in self._filtered_backends(EllipticCurveBackend):
273 try:
274 return b.generate_elliptic_curve_private_key(curve)
275 except UnsupportedAlgorithm:
276 continue
277
278 raise UnsupportedAlgorithm(
279 "This backend does not support this elliptic curve.",
280 _Reasons.UNSUPPORTED_ELLIPTIC_CURVE
281 )
282
283 def elliptic_curve_private_key_from_numbers(self, numbers):
284 for b in self._filtered_backends(EllipticCurveBackend):
285 try:
286 return b.elliptic_curve_private_key_from_numbers(numbers)
287 except UnsupportedAlgorithm:
288 continue
289
290 raise UnsupportedAlgorithm(
291 "This backend does not support this elliptic curve.",
292 _Reasons.UNSUPPORTED_ELLIPTIC_CURVE
293 )
294
295 def elliptic_curve_public_key_from_numbers(self, numbers):
296 for b in self._filtered_backends(EllipticCurveBackend):
297 try:
298 return b.elliptic_curve_public_key_from_numbers(numbers)
299 except UnsupportedAlgorithm:
300 continue
301
302 raise UnsupportedAlgorithm(
303 "This backend does not support this elliptic curve.",
304 _Reasons.UNSUPPORTED_ELLIPTIC_CURVE
305 )
306
307 def load_pkcs8_pem_private_key(self, data, password):
308 for b in self._filtered_backends(PKCS8SerializationBackend):
309 return b.load_pkcs8_pem_private_key(data, password)
310
311 raise UnsupportedAlgorithm(
312 "This backend does not support this key serialization.",
313 _Reasons.UNSUPPORTED_SERIALIZATION
314 )
315
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cryptography/hazmat/backends/multibackend.py b/cryptography/hazmat/backends/multibackend.py
--- a/cryptography/hazmat/backends/multibackend.py
+++ b/cryptography/hazmat/backends/multibackend.py
@@ -17,7 +17,8 @@
from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
from cryptography.hazmat.backends.interfaces import (
CMACBackend, CipherBackend, DSABackend, EllipticCurveBackend, HMACBackend,
- HashBackend, PBKDF2HMACBackend, PKCS8SerializationBackend, RSABackend
+ HashBackend, PBKDF2HMACBackend, PKCS8SerializationBackend,
+ RSABackend, TraditionalOpenSSLSerializationBackend
)
@@ -28,6 +29,7 @@
@utils.register_interface(PBKDF2HMACBackend)
@utils.register_interface(PKCS8SerializationBackend)
@utils.register_interface(RSABackend)
[email protected]_interface(TraditionalOpenSSLSerializationBackend)
@utils.register_interface(DSABackend)
@utils.register_interface(EllipticCurveBackend)
class MultiBackend(object):
@@ -312,3 +314,14 @@
"This backend does not support this key serialization.",
_Reasons.UNSUPPORTED_SERIALIZATION
)
+
+ def load_traditional_openssl_pem_private_key(self, data, password):
+ for b in self._filtered_backends(
+ TraditionalOpenSSLSerializationBackend
+ ):
+ return b.load_traditional_openssl_pem_private_key(data, password)
+
+ raise UnsupportedAlgorithm(
+ "This backend does not support this key serialization.",
+ _Reasons.UNSUPPORTED_SERIALIZATION
+ )
| {"golden_diff": "diff --git a/cryptography/hazmat/backends/multibackend.py b/cryptography/hazmat/backends/multibackend.py\n--- a/cryptography/hazmat/backends/multibackend.py\n+++ b/cryptography/hazmat/backends/multibackend.py\n@@ -17,7 +17,8 @@\n from cryptography.exceptions import UnsupportedAlgorithm, _Reasons\n from cryptography.hazmat.backends.interfaces import (\n CMACBackend, CipherBackend, DSABackend, EllipticCurveBackend, HMACBackend,\n- HashBackend, PBKDF2HMACBackend, PKCS8SerializationBackend, RSABackend\n+ HashBackend, PBKDF2HMACBackend, PKCS8SerializationBackend,\n+ RSABackend, TraditionalOpenSSLSerializationBackend\n )\n \n \n@@ -28,6 +29,7 @@\n @utils.register_interface(PBKDF2HMACBackend)\n @utils.register_interface(PKCS8SerializationBackend)\n @utils.register_interface(RSABackend)\[email protected]_interface(TraditionalOpenSSLSerializationBackend)\n @utils.register_interface(DSABackend)\n @utils.register_interface(EllipticCurveBackend)\n class MultiBackend(object):\n@@ -312,3 +314,14 @@\n \"This backend does not support this key serialization.\",\n _Reasons.UNSUPPORTED_SERIALIZATION\n )\n+\n+ def load_traditional_openssl_pem_private_key(self, data, password):\n+ for b in self._filtered_backends(\n+ TraditionalOpenSSLSerializationBackend\n+ ):\n+ return b.load_traditional_openssl_pem_private_key(data, password)\n+\n+ raise UnsupportedAlgorithm(\n+ \"This backend does not support this key serialization.\",\n+ _Reasons.UNSUPPORTED_SERIALIZATION\n+ )\n", "issue": "MultiBackend doesn't implement TraditionalOpenSSLSerializationBackend\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography import utils\nfrom cryptography.exceptions import UnsupportedAlgorithm, _Reasons\nfrom cryptography.hazmat.backends.interfaces import (\n CMACBackend, CipherBackend, DSABackend, EllipticCurveBackend, HMACBackend,\n HashBackend, PBKDF2HMACBackend, PKCS8SerializationBackend, RSABackend\n)\n\n\[email protected]_interface(CMACBackend)\[email protected]_interface(CipherBackend)\[email protected]_interface(HashBackend)\[email protected]_interface(HMACBackend)\[email protected]_interface(PBKDF2HMACBackend)\[email protected]_interface(PKCS8SerializationBackend)\[email protected]_interface(RSABackend)\[email protected]_interface(DSABackend)\[email protected]_interface(EllipticCurveBackend)\nclass MultiBackend(object):\n name = \"multibackend\"\n\n def __init__(self, backends):\n self._backends = backends\n\n def _filtered_backends(self, interface):\n for b in self._backends:\n if isinstance(b, interface):\n yield b\n\n def cipher_supported(self, algorithm, mode):\n return any(\n b.cipher_supported(algorithm, mode)\n for b in self._filtered_backends(CipherBackend)\n )\n\n def create_symmetric_encryption_ctx(self, algorithm, mode):\n for b in self._filtered_backends(CipherBackend):\n try:\n return b.create_symmetric_encryption_ctx(algorithm, mode)\n except UnsupportedAlgorithm:\n pass\n 
raise UnsupportedAlgorithm(\n \"cipher {0} in {1} mode is not supported by this backend.\".format(\n algorithm.name, mode.name if mode else mode),\n _Reasons.UNSUPPORTED_CIPHER\n )\n\n def create_symmetric_decryption_ctx(self, algorithm, mode):\n for b in self._filtered_backends(CipherBackend):\n try:\n return b.create_symmetric_decryption_ctx(algorithm, mode)\n except UnsupportedAlgorithm:\n pass\n raise UnsupportedAlgorithm(\n \"cipher {0} in {1} mode is not supported by this backend.\".format(\n algorithm.name, mode.name if mode else mode),\n _Reasons.UNSUPPORTED_CIPHER\n )\n\n def hash_supported(self, algorithm):\n return any(\n b.hash_supported(algorithm)\n for b in self._filtered_backends(HashBackend)\n )\n\n def create_hash_ctx(self, algorithm):\n for b in self._filtered_backends(HashBackend):\n try:\n return b.create_hash_ctx(algorithm)\n except UnsupportedAlgorithm:\n pass\n raise UnsupportedAlgorithm(\n \"{0} is not a supported hash on this backend.\".format(\n algorithm.name),\n _Reasons.UNSUPPORTED_HASH\n )\n\n def hmac_supported(self, algorithm):\n return any(\n b.hmac_supported(algorithm)\n for b in self._filtered_backends(HMACBackend)\n )\n\n def create_hmac_ctx(self, key, algorithm):\n for b in self._filtered_backends(HMACBackend):\n try:\n return b.create_hmac_ctx(key, algorithm)\n except UnsupportedAlgorithm:\n pass\n raise UnsupportedAlgorithm(\n \"{0} is not a supported hash on this backend.\".format(\n algorithm.name),\n _Reasons.UNSUPPORTED_HASH\n )\n\n def pbkdf2_hmac_supported(self, algorithm):\n return any(\n b.pbkdf2_hmac_supported(algorithm)\n for b in self._filtered_backends(PBKDF2HMACBackend)\n )\n\n def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations,\n key_material):\n for b in self._filtered_backends(PBKDF2HMACBackend):\n try:\n return b.derive_pbkdf2_hmac(\n algorithm, length, salt, iterations, key_material\n )\n except UnsupportedAlgorithm:\n pass\n raise UnsupportedAlgorithm(\n \"{0} is not a supported hash on this backend.\".format(\n algorithm.name),\n _Reasons.UNSUPPORTED_HASH\n )\n\n def generate_rsa_private_key(self, public_exponent, key_size):\n for b in self._filtered_backends(RSABackend):\n return b.generate_rsa_private_key(public_exponent, key_size)\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def generate_rsa_parameters_supported(self, public_exponent, key_size):\n for b in self._filtered_backends(RSABackend):\n return b.generate_rsa_parameters_supported(\n public_exponent, key_size\n )\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def create_rsa_signature_ctx(self, private_key, padding, algorithm):\n for b in self._filtered_backends(RSABackend):\n return b.create_rsa_signature_ctx(private_key, padding, algorithm)\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def create_rsa_verification_ctx(self, public_key, signature, padding,\n algorithm):\n for b in self._filtered_backends(RSABackend):\n return b.create_rsa_verification_ctx(public_key, signature,\n padding, algorithm)\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def mgf1_hash_supported(self, algorithm):\n for b in self._filtered_backends(RSABackend):\n return b.mgf1_hash_supported(algorithm)\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend.\",\n 
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def decrypt_rsa(self, private_key, ciphertext, padding):\n for b in self._filtered_backends(RSABackend):\n return b.decrypt_rsa(private_key, ciphertext, padding)\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def encrypt_rsa(self, public_key, plaintext, padding):\n for b in self._filtered_backends(RSABackend):\n return b.encrypt_rsa(public_key, plaintext, padding)\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def rsa_padding_supported(self, padding):\n for b in self._filtered_backends(RSABackend):\n return b.rsa_padding_supported(padding)\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def load_rsa_private_numbers(self, numbers):\n for b in self._filtered_backends(RSABackend):\n return b.load_rsa_private_numbers(numbers)\n\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def load_rsa_public_numbers(self, numbers):\n for b in self._filtered_backends(RSABackend):\n return b.load_rsa_public_numbers(numbers)\n\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def generate_dsa_parameters(self, key_size):\n for b in self._filtered_backends(DSABackend):\n return b.generate_dsa_parameters(key_size)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def generate_dsa_private_key(self, parameters):\n for b in self._filtered_backends(DSABackend):\n return b.generate_dsa_private_key(parameters)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def generate_dsa_private_key_and_parameters(self, key_size):\n for b in self._filtered_backends(DSABackend):\n return b.generate_dsa_private_key_and_parameters(key_size)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def create_dsa_verification_ctx(self, public_key, signature, algorithm):\n for b in self._filtered_backends(DSABackend):\n return b.create_dsa_verification_ctx(public_key, signature,\n algorithm)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def create_dsa_signature_ctx(self, private_key, algorithm):\n for b in self._filtered_backends(DSABackend):\n return b.create_dsa_signature_ctx(private_key, algorithm)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def dsa_hash_supported(self, algorithm):\n for b in self._filtered_backends(DSABackend):\n return b.dsa_hash_supported(algorithm)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def dsa_parameters_supported(self, p, q, g):\n for b in self._filtered_backends(DSABackend):\n return b.dsa_parameters_supported(p, q, g)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def cmac_algorithm_supported(self, algorithm):\n return any(\n b.cmac_algorithm_supported(algorithm)\n for b in self._filtered_backends(CMACBackend)\n )\n\n def create_cmac_ctx(self, algorithm):\n for b in self._filtered_backends(CMACBackend):\n try:\n 
return b.create_cmac_ctx(algorithm)\n except UnsupportedAlgorithm:\n pass\n raise UnsupportedAlgorithm(\"This backend does not support CMAC.\",\n _Reasons.UNSUPPORTED_CIPHER)\n\n def elliptic_curve_supported(self, curve):\n return any(\n b.elliptic_curve_supported(curve)\n for b in self._filtered_backends(EllipticCurveBackend)\n )\n\n def elliptic_curve_signature_algorithm_supported(\n self, signature_algorithm, curve\n ):\n return any(\n b.elliptic_curve_signature_algorithm_supported(\n signature_algorithm, curve\n )\n for b in self._filtered_backends(EllipticCurveBackend)\n )\n\n def generate_elliptic_curve_private_key(self, curve):\n for b in self._filtered_backends(EllipticCurveBackend):\n try:\n return b.generate_elliptic_curve_private_key(curve)\n except UnsupportedAlgorithm:\n continue\n\n raise UnsupportedAlgorithm(\n \"This backend does not support this elliptic curve.\",\n _Reasons.UNSUPPORTED_ELLIPTIC_CURVE\n )\n\n def elliptic_curve_private_key_from_numbers(self, numbers):\n for b in self._filtered_backends(EllipticCurveBackend):\n try:\n return b.elliptic_curve_private_key_from_numbers(numbers)\n except UnsupportedAlgorithm:\n continue\n\n raise UnsupportedAlgorithm(\n \"This backend does not support this elliptic curve.\",\n _Reasons.UNSUPPORTED_ELLIPTIC_CURVE\n )\n\n def elliptic_curve_public_key_from_numbers(self, numbers):\n for b in self._filtered_backends(EllipticCurveBackend):\n try:\n return b.elliptic_curve_public_key_from_numbers(numbers)\n except UnsupportedAlgorithm:\n continue\n\n raise UnsupportedAlgorithm(\n \"This backend does not support this elliptic curve.\",\n _Reasons.UNSUPPORTED_ELLIPTIC_CURVE\n )\n\n def load_pkcs8_pem_private_key(self, data, password):\n for b in self._filtered_backends(PKCS8SerializationBackend):\n return b.load_pkcs8_pem_private_key(data, password)\n\n raise UnsupportedAlgorithm(\n \"This backend does not support this key serialization.\",\n _Reasons.UNSUPPORTED_SERIALIZATION\n )\n", "path": "cryptography/hazmat/backends/multibackend.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography import utils\nfrom cryptography.exceptions import UnsupportedAlgorithm, _Reasons\nfrom cryptography.hazmat.backends.interfaces import (\n CMACBackend, CipherBackend, DSABackend, EllipticCurveBackend, HMACBackend,\n HashBackend, PBKDF2HMACBackend, PKCS8SerializationBackend,\n RSABackend, TraditionalOpenSSLSerializationBackend\n)\n\n\[email protected]_interface(CMACBackend)\[email protected]_interface(CipherBackend)\[email protected]_interface(HashBackend)\[email protected]_interface(HMACBackend)\[email protected]_interface(PBKDF2HMACBackend)\[email protected]_interface(PKCS8SerializationBackend)\[email protected]_interface(RSABackend)\[email protected]_interface(TraditionalOpenSSLSerializationBackend)\[email protected]_interface(DSABackend)\[email protected]_interface(EllipticCurveBackend)\nclass MultiBackend(object):\n name = 
\"multibackend\"\n\n def __init__(self, backends):\n self._backends = backends\n\n def _filtered_backends(self, interface):\n for b in self._backends:\n if isinstance(b, interface):\n yield b\n\n def cipher_supported(self, algorithm, mode):\n return any(\n b.cipher_supported(algorithm, mode)\n for b in self._filtered_backends(CipherBackend)\n )\n\n def create_symmetric_encryption_ctx(self, algorithm, mode):\n for b in self._filtered_backends(CipherBackend):\n try:\n return b.create_symmetric_encryption_ctx(algorithm, mode)\n except UnsupportedAlgorithm:\n pass\n raise UnsupportedAlgorithm(\n \"cipher {0} in {1} mode is not supported by this backend.\".format(\n algorithm.name, mode.name if mode else mode),\n _Reasons.UNSUPPORTED_CIPHER\n )\n\n def create_symmetric_decryption_ctx(self, algorithm, mode):\n for b in self._filtered_backends(CipherBackend):\n try:\n return b.create_symmetric_decryption_ctx(algorithm, mode)\n except UnsupportedAlgorithm:\n pass\n raise UnsupportedAlgorithm(\n \"cipher {0} in {1} mode is not supported by this backend.\".format(\n algorithm.name, mode.name if mode else mode),\n _Reasons.UNSUPPORTED_CIPHER\n )\n\n def hash_supported(self, algorithm):\n return any(\n b.hash_supported(algorithm)\n for b in self._filtered_backends(HashBackend)\n )\n\n def create_hash_ctx(self, algorithm):\n for b in self._filtered_backends(HashBackend):\n try:\n return b.create_hash_ctx(algorithm)\n except UnsupportedAlgorithm:\n pass\n raise UnsupportedAlgorithm(\n \"{0} is not a supported hash on this backend.\".format(\n algorithm.name),\n _Reasons.UNSUPPORTED_HASH\n )\n\n def hmac_supported(self, algorithm):\n return any(\n b.hmac_supported(algorithm)\n for b in self._filtered_backends(HMACBackend)\n )\n\n def create_hmac_ctx(self, key, algorithm):\n for b in self._filtered_backends(HMACBackend):\n try:\n return b.create_hmac_ctx(key, algorithm)\n except UnsupportedAlgorithm:\n pass\n raise UnsupportedAlgorithm(\n \"{0} is not a supported hash on this backend.\".format(\n algorithm.name),\n _Reasons.UNSUPPORTED_HASH\n )\n\n def pbkdf2_hmac_supported(self, algorithm):\n return any(\n b.pbkdf2_hmac_supported(algorithm)\n for b in self._filtered_backends(PBKDF2HMACBackend)\n )\n\n def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations,\n key_material):\n for b in self._filtered_backends(PBKDF2HMACBackend):\n try:\n return b.derive_pbkdf2_hmac(\n algorithm, length, salt, iterations, key_material\n )\n except UnsupportedAlgorithm:\n pass\n raise UnsupportedAlgorithm(\n \"{0} is not a supported hash on this backend.\".format(\n algorithm.name),\n _Reasons.UNSUPPORTED_HASH\n )\n\n def generate_rsa_private_key(self, public_exponent, key_size):\n for b in self._filtered_backends(RSABackend):\n return b.generate_rsa_private_key(public_exponent, key_size)\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def generate_rsa_parameters_supported(self, public_exponent, key_size):\n for b in self._filtered_backends(RSABackend):\n return b.generate_rsa_parameters_supported(\n public_exponent, key_size\n )\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def create_rsa_signature_ctx(self, private_key, padding, algorithm):\n for b in self._filtered_backends(RSABackend):\n return b.create_rsa_signature_ctx(private_key, padding, algorithm)\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend.\",\n 
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def create_rsa_verification_ctx(self, public_key, signature, padding,\n algorithm):\n for b in self._filtered_backends(RSABackend):\n return b.create_rsa_verification_ctx(public_key, signature,\n padding, algorithm)\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def mgf1_hash_supported(self, algorithm):\n for b in self._filtered_backends(RSABackend):\n return b.mgf1_hash_supported(algorithm)\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def decrypt_rsa(self, private_key, ciphertext, padding):\n for b in self._filtered_backends(RSABackend):\n return b.decrypt_rsa(private_key, ciphertext, padding)\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def encrypt_rsa(self, public_key, plaintext, padding):\n for b in self._filtered_backends(RSABackend):\n return b.encrypt_rsa(public_key, plaintext, padding)\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def rsa_padding_supported(self, padding):\n for b in self._filtered_backends(RSABackend):\n return b.rsa_padding_supported(padding)\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def load_rsa_private_numbers(self, numbers):\n for b in self._filtered_backends(RSABackend):\n return b.load_rsa_private_numbers(numbers)\n\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def load_rsa_public_numbers(self, numbers):\n for b in self._filtered_backends(RSABackend):\n return b.load_rsa_public_numbers(numbers)\n\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def generate_dsa_parameters(self, key_size):\n for b in self._filtered_backends(DSABackend):\n return b.generate_dsa_parameters(key_size)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def generate_dsa_private_key(self, parameters):\n for b in self._filtered_backends(DSABackend):\n return b.generate_dsa_private_key(parameters)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def generate_dsa_private_key_and_parameters(self, key_size):\n for b in self._filtered_backends(DSABackend):\n return b.generate_dsa_private_key_and_parameters(key_size)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def create_dsa_verification_ctx(self, public_key, signature, algorithm):\n for b in self._filtered_backends(DSABackend):\n return b.create_dsa_verification_ctx(public_key, signature,\n algorithm)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def create_dsa_signature_ctx(self, private_key, algorithm):\n for b in self._filtered_backends(DSABackend):\n return b.create_dsa_signature_ctx(private_key, algorithm)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def dsa_hash_supported(self, algorithm):\n for b in self._filtered_backends(DSABackend):\n return b.dsa_hash_supported(algorithm)\n raise UnsupportedAlgorithm(\"DSA is not 
supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def dsa_parameters_supported(self, p, q, g):\n for b in self._filtered_backends(DSABackend):\n return b.dsa_parameters_supported(p, q, g)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def cmac_algorithm_supported(self, algorithm):\n return any(\n b.cmac_algorithm_supported(algorithm)\n for b in self._filtered_backends(CMACBackend)\n )\n\n def create_cmac_ctx(self, algorithm):\n for b in self._filtered_backends(CMACBackend):\n try:\n return b.create_cmac_ctx(algorithm)\n except UnsupportedAlgorithm:\n pass\n raise UnsupportedAlgorithm(\"This backend does not support CMAC.\",\n _Reasons.UNSUPPORTED_CIPHER)\n\n def elliptic_curve_supported(self, curve):\n return any(\n b.elliptic_curve_supported(curve)\n for b in self._filtered_backends(EllipticCurveBackend)\n )\n\n def elliptic_curve_signature_algorithm_supported(\n self, signature_algorithm, curve\n ):\n return any(\n b.elliptic_curve_signature_algorithm_supported(\n signature_algorithm, curve\n )\n for b in self._filtered_backends(EllipticCurveBackend)\n )\n\n def generate_elliptic_curve_private_key(self, curve):\n for b in self._filtered_backends(EllipticCurveBackend):\n try:\n return b.generate_elliptic_curve_private_key(curve)\n except UnsupportedAlgorithm:\n continue\n\n raise UnsupportedAlgorithm(\n \"This backend does not support this elliptic curve.\",\n _Reasons.UNSUPPORTED_ELLIPTIC_CURVE\n )\n\n def elliptic_curve_private_key_from_numbers(self, numbers):\n for b in self._filtered_backends(EllipticCurveBackend):\n try:\n return b.elliptic_curve_private_key_from_numbers(numbers)\n except UnsupportedAlgorithm:\n continue\n\n raise UnsupportedAlgorithm(\n \"This backend does not support this elliptic curve.\",\n _Reasons.UNSUPPORTED_ELLIPTIC_CURVE\n )\n\n def elliptic_curve_public_key_from_numbers(self, numbers):\n for b in self._filtered_backends(EllipticCurveBackend):\n try:\n return b.elliptic_curve_public_key_from_numbers(numbers)\n except UnsupportedAlgorithm:\n continue\n\n raise UnsupportedAlgorithm(\n \"This backend does not support this elliptic curve.\",\n _Reasons.UNSUPPORTED_ELLIPTIC_CURVE\n )\n\n def load_pkcs8_pem_private_key(self, data, password):\n for b in self._filtered_backends(PKCS8SerializationBackend):\n return b.load_pkcs8_pem_private_key(data, password)\n\n raise UnsupportedAlgorithm(\n \"This backend does not support this key serialization.\",\n _Reasons.UNSUPPORTED_SERIALIZATION\n )\n\n def load_traditional_openssl_pem_private_key(self, data, password):\n for b in self._filtered_backends(\n TraditionalOpenSSLSerializationBackend\n ):\n return b.load_traditional_openssl_pem_private_key(data, password)\n\n raise UnsupportedAlgorithm(\n \"This backend does not support this key serialization.\",\n _Reasons.UNSUPPORTED_SERIALIZATION\n )\n", "path": "cryptography/hazmat/backends/multibackend.py"}]} | 3,797 | 394 |
gh_patches_debug_29381 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-193 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bundle system - copy opensearch-tar-install script into bundle
The [opensearch-tar-install](https://github.com/opensearch-project/opensearch-build/blob/main/release/tar/linux/opensearch-tar-install.sh) script needs to be included with assembled bundles similar to [this](https://github.com/opensearch-project/opensearch-build/blob/main/release/tar/linux/opensearch-tar-build.sh#L156) shell script.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bundle-workflow/python/assemble.py`
Content:
```
1 #!/usr/bin/env python
2
3 # Copyright OpenSearch Contributors.
4 # SPDX-License-Identifier: Apache-2.0
5
6 import os
7 import tempfile
8 import argparse
9 from assemble_workflow.bundle import Bundle
10 from assemble_workflow.bundle_recorder import BundleRecorder
11 from manifests.build_manifest import BuildManifest
12
13 parser = argparse.ArgumentParser(description = "Assemble an OpenSearch Bundle")
14 parser.add_argument('manifest', type = argparse.FileType('r'), help="Manifest file.")
15 args = parser.parse_args()
16
17 build_manifest = BuildManifest.from_file(args.manifest)
18 build = build_manifest.build
19 artifacts_dir = os.path.dirname(os.path.realpath(args.manifest.name))
20 output_dir = os.path.join(os.getcwd(), 'bundle')
21 os.makedirs(output_dir, exist_ok=True)
22
23 with tempfile.TemporaryDirectory() as work_dir:
24 print(f'Bundling {build.name} ({build.architecture}) into {output_dir} ...')
25
26 os.chdir(work_dir)
27
28 bundle_recorder = BundleRecorder(build, output_dir, artifacts_dir)
29 bundle = Bundle(build_manifest, artifacts_dir, bundle_recorder)
30
31 bundle.install_plugins()
32 print(f'Installed plugins: {bundle.installed_plugins}')
33
34 # Save a copy of the manifest inside of the tar
35 bundle_recorder.write_manifest(bundle.archive_path)
36 bundle.build_tar(output_dir)
37
38 bundle_recorder.write_manifest(output_dir)
39
40 print(f'Done.')
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bundle-workflow/python/assemble.py b/bundle-workflow/python/assemble.py
--- a/bundle-workflow/python/assemble.py
+++ b/bundle-workflow/python/assemble.py
@@ -6,6 +6,7 @@
import os
import tempfile
import argparse
+import shutil
from assemble_workflow.bundle import Bundle
from assemble_workflow.bundle_recorder import BundleRecorder
from manifests.build_manifest import BuildManifest
@@ -14,6 +15,12 @@
parser.add_argument('manifest', type = argparse.FileType('r'), help="Manifest file.")
args = parser.parse_args()
+tarball_installation_script = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../release/tar/linux/opensearch-tar-install.sh')
+
+if not os.path.isfile(tarball_installation_script):
+ print(f'No installation script found at path: {tarball_installation_script}')
+ exit(1)
+
build_manifest = BuildManifest.from_file(args.manifest)
build = build_manifest.build
artifacts_dir = os.path.dirname(os.path.realpath(args.manifest.name))
@@ -31,6 +38,9 @@
bundle.install_plugins()
print(f'Installed plugins: {bundle.installed_plugins}')
+ # Copy the tar installation script into the bundle
+ shutil.copyfile(tarball_installation_script, os.path.join(bundle.archive_path, os.path.basename(tarball_installation_script)))
+
# Save a copy of the manifest inside of the tar
bundle_recorder.write_manifest(bundle.archive_path)
bundle.build_tar(output_dir)
| {"golden_diff": "diff --git a/bundle-workflow/python/assemble.py b/bundle-workflow/python/assemble.py\n--- a/bundle-workflow/python/assemble.py\n+++ b/bundle-workflow/python/assemble.py\n@@ -6,6 +6,7 @@\n import os\n import tempfile\n import argparse\n+import shutil\n from assemble_workflow.bundle import Bundle\n from assemble_workflow.bundle_recorder import BundleRecorder\n from manifests.build_manifest import BuildManifest\n@@ -14,6 +15,12 @@\n parser.add_argument('manifest', type = argparse.FileType('r'), help=\"Manifest file.\")\n args = parser.parse_args()\n \n+tarball_installation_script = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../release/tar/linux/opensearch-tar-install.sh')\n+\n+if not os.path.isfile(tarball_installation_script):\n+ print(f'No installation script found at path: {tarball_installation_script}')\n+ exit(1)\n+\n build_manifest = BuildManifest.from_file(args.manifest)\n build = build_manifest.build\n artifacts_dir = os.path.dirname(os.path.realpath(args.manifest.name))\n@@ -31,6 +38,9 @@\n bundle.install_plugins()\n print(f'Installed plugins: {bundle.installed_plugins}')\n \n+ # Copy the tar installation script into the bundle\n+ shutil.copyfile(tarball_installation_script, os.path.join(bundle.archive_path, os.path.basename(tarball_installation_script)))\n+\n # Save a copy of the manifest inside of the tar\n bundle_recorder.write_manifest(bundle.archive_path)\n bundle.build_tar(output_dir)\n", "issue": "Bundle system - copy opensearch-tar-install script into bundle\nThe [opensearch-tar-install](https://github.com/opensearch-project/opensearch-build/blob/main/release/tar/linux/opensearch-tar-install.sh) script needs to be included with assembled bundles similar to [this](https://github.com/opensearch-project/opensearch-build/blob/main/release/tar/linux/opensearch-tar-build.sh#L156) shell script.\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Copyright OpenSearch Contributors.\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport tempfile\nimport argparse\nfrom assemble_workflow.bundle import Bundle\nfrom assemble_workflow.bundle_recorder import BundleRecorder\nfrom manifests.build_manifest import BuildManifest\n\nparser = argparse.ArgumentParser(description = \"Assemble an OpenSearch Bundle\")\nparser.add_argument('manifest', type = argparse.FileType('r'), help=\"Manifest file.\")\nargs = parser.parse_args()\n\nbuild_manifest = BuildManifest.from_file(args.manifest)\nbuild = build_manifest.build\nartifacts_dir = os.path.dirname(os.path.realpath(args.manifest.name))\noutput_dir = os.path.join(os.getcwd(), 'bundle')\nos.makedirs(output_dir, exist_ok=True)\n\nwith tempfile.TemporaryDirectory() as work_dir:\n print(f'Bundling {build.name} ({build.architecture}) into {output_dir} ...')\n\n os.chdir(work_dir)\n\n bundle_recorder = BundleRecorder(build, output_dir, artifacts_dir)\n bundle = Bundle(build_manifest, artifacts_dir, bundle_recorder)\n\n bundle.install_plugins()\n print(f'Installed plugins: {bundle.installed_plugins}')\n\n # Save a copy of the manifest inside of the tar\n bundle_recorder.write_manifest(bundle.archive_path)\n bundle.build_tar(output_dir)\n\n bundle_recorder.write_manifest(output_dir)\n\nprint(f'Done.')\n", "path": "bundle-workflow/python/assemble.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# Copyright OpenSearch Contributors.\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport tempfile\nimport argparse\nimport shutil\nfrom assemble_workflow.bundle import Bundle\nfrom 
assemble_workflow.bundle_recorder import BundleRecorder\nfrom manifests.build_manifest import BuildManifest\n\nparser = argparse.ArgumentParser(description = \"Assemble an OpenSearch Bundle\")\nparser.add_argument('manifest', type = argparse.FileType('r'), help=\"Manifest file.\")\nargs = parser.parse_args()\n\ntarball_installation_script = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../release/tar/linux/opensearch-tar-install.sh')\n\nif not os.path.isfile(tarball_installation_script):\n print(f'No installation script found at path: {tarball_installation_script}')\n exit(1)\n\nbuild_manifest = BuildManifest.from_file(args.manifest)\nbuild = build_manifest.build\nartifacts_dir = os.path.dirname(os.path.realpath(args.manifest.name))\noutput_dir = os.path.join(os.getcwd(), 'bundle')\nos.makedirs(output_dir, exist_ok=True)\n\nwith tempfile.TemporaryDirectory() as work_dir:\n print(f'Bundling {build.name} ({build.architecture}) into {output_dir} ...')\n\n os.chdir(work_dir)\n\n bundle_recorder = BundleRecorder(build, output_dir, artifacts_dir)\n bundle = Bundle(build_manifest, artifacts_dir, bundle_recorder)\n\n bundle.install_plugins()\n print(f'Installed plugins: {bundle.installed_plugins}')\n\n # Copy the tar installation script into the bundle\n shutil.copyfile(tarball_installation_script, os.path.join(bundle.archive_path, os.path.basename(tarball_installation_script)))\n\n # Save a copy of the manifest inside of the tar\n bundle_recorder.write_manifest(bundle.archive_path)\n bundle.build_tar(output_dir)\n\n bundle_recorder.write_manifest(output_dir)\n\nprint(f'Done.')\n", "path": "bundle-workflow/python/assemble.py"}]} | 722 | 339 |
gh_patches_debug_13741 | rasdani/github-patches | git_diff | crytic__slither-161 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error when parsing type which refers to an aliased import
The following contracts will cause slither to error:
Main.sol:
```
import {Dependency as x} from "./Dependency.sol";
contract Main {
x y;
}
```
Dependency.sol:
```
contract Dependency {
uint y;
}
```
Error:
```
ERROR:TypeParsing:Type not found x
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/solc_parsing/solidity_types/type_parsing.py`
Content:
```
1 import logging
2
3 from slither.core.solidity_types.elementary_type import ElementaryType, ElementaryTypeName
4 from slither.core.solidity_types.user_defined_type import UserDefinedType
5 from slither.core.solidity_types.array_type import ArrayType
6 from slither.core.solidity_types.mapping_type import MappingType
7 from slither.core.solidity_types.function_type import FunctionType
8
9 from slither.core.variables.function_type_variable import FunctionTypeVariable
10
11 from slither.core.declarations.contract import Contract
12 from slither.core.declarations.function import Function
13
14 from slither.core.expressions.literal import Literal
15
16 import re
17
18 logger = logging.getLogger('TypeParsing')
19
20 class UnknownType:
21 def __init__(self, name):
22 self._name = name
23
24 @property
25 def name(self):
26 return self._name
27
28 def _find_from_type_name(name, contract, contracts, structures, enums):
29 name_elementary = name.split(' ')[0]
30 if '[' in name_elementary:
31 name_elementary = name_elementary[0:name_elementary.find('[')]
32 if name_elementary in ElementaryTypeName:
33 depth = name.count('[')
34 if depth:
35 return ArrayType(ElementaryType(name_elementary), Literal(depth))
36 else:
37 return ElementaryType(name_elementary)
38 # We first look for contract
39 # To avoid collision
40 # Ex: a structure with the name of a contract
41 name_contract = name
42 if name_contract.startswith('contract '):
43 name_contract = name_contract[len('contract '):]
44 if name_contract.startswith('library '):
45 name_contract = name_contract[len('library '):]
46 var_type = next((c for c in contracts if c.name == name_contract), None)
47
48 if not var_type:
49 var_type = next((st for st in structures if st.name == name), None)
50 if not var_type:
51 var_type = next((e for e in enums if e.name == name), None)
52 if not var_type:
53 # any contract can refer to another contract's enum
54 enum_name = name
55 if enum_name.startswith('enum '):
56 enum_name = enum_name[len('enum '):]
57 all_enums = [c.enums for c in contracts]
58 all_enums = [item for sublist in all_enums for item in sublist]
59 var_type = next((e for e in all_enums if e.name == enum_name), None)
60 if not var_type:
61 var_type = next((e for e in all_enums if e.contract.name+"."+e.name == enum_name), None)
62 if not var_type:
63 # any contract can refer to another contract's structure
64 name_struct = name
65 if name_struct.startswith('struct '):
66 name_struct = name_struct[len('struct '):]
67 name_struct = name_struct.split(' ')[0] # remove stuff like storage pointer at the end
68 all_structures = [c.structures for c in contracts]
69 all_structures = [item for sublist in all_structures for item in sublist]
70 var_type = next((st for st in all_structures if st.name == name_struct), None)
71 if not var_type:
72 var_type = next((st for st in all_structures if st.contract.name+"."+st.name == name_struct), None)
73 # case where struct xxx.xx[] where not well formed in the AST
74 if not var_type:
75 depth = 0
76 while name_struct.endswith('[]'):
77 name_struct = name_struct[0:-2]
78 depth+=1
79 var_type = next((st for st in all_structures if st.contract.name+"."+st.name == name_struct), None)
80 if var_type:
81 return ArrayType(UserDefinedType(var_type), Literal(depth))
82
83 if not var_type:
84 var_type = next((f for f in contract.functions if f.name == name), None)
85 if not var_type:
86 if name.startswith('function '):
87 found = re.findall('function \(([ ()a-zA-Z0-9\.,]*)\) returns \(([a-zA-Z0-9\.,]*)\)', name)
88 assert len(found) == 1
89 params = found[0][0].split(',')
90 return_values = found[0][1].split(',')
91 params = [_find_from_type_name(p, contract, contracts, structures, enums) for p in params]
92 return_values = [_find_from_type_name(r, contract, contracts, structures, enums) for r in return_values]
93 params_vars = []
94 return_vars = []
95 for p in params:
96 var = FunctionTypeVariable()
97 var.set_type(p)
98 params_vars.append(var)
99 for r in return_values:
100 var = FunctionTypeVariable()
101 var.set_type(r)
102 return_vars.append(var)
103 return FunctionType(params_vars, return_vars)
104 if not var_type:
105 if name.startswith('mapping('):
106 # nested mapping declared with var
107 if name.count('mapping(') == 1 :
108 found = re.findall('mapping\(([a-zA-Z0-9\.]*) => ([a-zA-Z0-9\.\[\]]*)\)', name)
109 else:
110 found = re.findall('mapping\(([a-zA-Z0-9\.]*) => (mapping\([=> a-zA-Z0-9\.\[\]]*\))\)', name)
111 assert len(found) == 1
112 from_ = found[0][0]
113 to_ = found[0][1]
114
115 from_type = _find_from_type_name(from_, contract, contracts, structures, enums)
116 to_type = _find_from_type_name(to_, contract, contracts, structures, enums)
117
118 return MappingType(from_type, to_type)
119
120 if not var_type:
121 logger.error('Type not found '+str(name))
122 exit(-1)
123 return UserDefinedType(var_type)
124
125
126
127 def parse_type(t, caller_context):
128 # local import to avoid circular dependency
129 from slither.solc_parsing.expressions.expression_parsing import parse_expression
130 from slither.solc_parsing.variables.function_type_variable import FunctionTypeVariableSolc
131
132 if isinstance(caller_context, Contract):
133 contract = caller_context
134 elif isinstance(caller_context, Function):
135 contract = caller_context.contract
136 else:
137 logger.error('Incorrect caller context')
138 exit(-1)
139
140
141 is_compact_ast = caller_context.is_compact_ast
142
143 if is_compact_ast:
144 key = 'nodeType'
145 else:
146 key = 'name'
147
148 structures = contract.structures
149 enums = contract.enums
150 contracts = contract.slither.contracts
151
152 if isinstance(t, UnknownType):
153 return _find_from_type_name(t.name, contract, contracts, structures, enums)
154
155 elif t[key] == 'ElementaryTypeName':
156 if is_compact_ast:
157 return ElementaryType(t['name'])
158 return ElementaryType(t['attributes'][key])
159
160 elif t[key] == 'UserDefinedTypeName':
161 if is_compact_ast:
162 return _find_from_type_name(t['typeDescriptions']['typeString'], contract, contracts, structures, enums)
163 return _find_from_type_name(t['attributes'][key], contract, contracts, structures, enums)
164
165 elif t[key] == 'ArrayTypeName':
166 length = None
167 if is_compact_ast:
168 if t['length']:
169 length = parse_expression(t['length'], caller_context)
170 array_type = parse_type(t['baseType'], contract)
171 else:
172 if len(t['children']) == 2:
173 length = parse_expression(t['children'][1], caller_context)
174 else:
175 assert len(t['children']) == 1
176 array_type = parse_type(t['children'][0], contract)
177 return ArrayType(array_type, length)
178
179 elif t[key] == 'Mapping':
180
181 if is_compact_ast:
182 mappingFrom = parse_type(t['keyType'], contract)
183 mappingTo = parse_type(t['valueType'], contract)
184 else:
185 assert len(t['children']) == 2
186
187 mappingFrom = parse_type(t['children'][0], contract)
188 mappingTo = parse_type(t['children'][1], contract)
189
190 return MappingType(mappingFrom, mappingTo)
191
192 elif t[key] == 'FunctionTypeName':
193
194 if is_compact_ast:
195 params = t['parameterTypes']
196 return_values = t['returnParameterTypes']
197 index = 'parameters'
198 else:
199 assert len(t['children']) == 2
200 params = t['children'][0]
201 return_values = t['children'][1]
202 index = 'children'
203
204 assert params[key] == 'ParameterList'
205 assert return_values[key] == 'ParameterList'
206
207 params_vars = []
208 return_values_vars = []
209 for p in params[index]:
210 var = FunctionTypeVariableSolc(p)
211 var.set_offset(p['src'], caller_context.slither)
212 var.analyze(caller_context)
213 params_vars.append(var)
214 for p in return_values[index]:
215 var = FunctionTypeVariableSolc(p)
216
217 var.set_offset(p['src'], caller_context.slither)
218 var.analyze(caller_context)
219 return_values_vars.append(var)
220
221 return FunctionType(params_vars, return_values_vars)
222
223 logger.error('Type name not found '+str(t))
224 exit(-1)
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/slither/solc_parsing/solidity_types/type_parsing.py b/slither/solc_parsing/solidity_types/type_parsing.py
--- a/slither/solc_parsing/solidity_types/type_parsing.py
+++ b/slither/solc_parsing/solidity_types/type_parsing.py
@@ -160,7 +160,10 @@
elif t[key] == 'UserDefinedTypeName':
if is_compact_ast:
return _find_from_type_name(t['typeDescriptions']['typeString'], contract, contracts, structures, enums)
- return _find_from_type_name(t['attributes'][key], contract, contracts, structures, enums)
+
+ # Determine if we have a type node (otherwise we use the name node, as some older solc did not have 'type').
+ type_name_key = 'type' if 'type' in t['attributes'] else key
+ return _find_from_type_name(t['attributes'][type_name_key], contract, contracts, structures, enums)
elif t[key] == 'ArrayTypeName':
length = None
| {"golden_diff": "diff --git a/slither/solc_parsing/solidity_types/type_parsing.py b/slither/solc_parsing/solidity_types/type_parsing.py\n--- a/slither/solc_parsing/solidity_types/type_parsing.py\n+++ b/slither/solc_parsing/solidity_types/type_parsing.py\n@@ -160,7 +160,10 @@\n elif t[key] == 'UserDefinedTypeName':\n if is_compact_ast:\n return _find_from_type_name(t['typeDescriptions']['typeString'], contract, contracts, structures, enums)\n- return _find_from_type_name(t['attributes'][key], contract, contracts, structures, enums)\n+\n+ # Determine if we have a type node (otherwise we use the name node, as some older solc did not have 'type').\n+ type_name_key = 'type' if 'type' in t['attributes'] else key\n+ return _find_from_type_name(t['attributes'][type_name_key], contract, contracts, structures, enums)\n \n elif t[key] == 'ArrayTypeName':\n length = None\n", "issue": "Error when parsing type which refers to an aliased import\nThe following contracts will cause slither to error:\r\n\r\nMain.sol:\r\n```\r\nimport {Dependency as x} from \"./Dependency.sol\";\r\n\r\ncontract Main {\r\n x y;\r\n}\r\n```\r\n\r\nDependency.sol:\r\n```\r\ncontract Dependency {\r\n uint y;\r\n}\r\n```\r\n\r\nError:\r\n```\r\nERROR:TypeParsing:Type not found x\r\n```\n", "before_files": [{"content": "import logging\n\nfrom slither.core.solidity_types.elementary_type import ElementaryType, ElementaryTypeName\nfrom slither.core.solidity_types.user_defined_type import UserDefinedType\nfrom slither.core.solidity_types.array_type import ArrayType\nfrom slither.core.solidity_types.mapping_type import MappingType\nfrom slither.core.solidity_types.function_type import FunctionType\n\nfrom slither.core.variables.function_type_variable import FunctionTypeVariable\n\nfrom slither.core.declarations.contract import Contract\nfrom slither.core.declarations.function import Function\n\nfrom slither.core.expressions.literal import Literal\n\nimport re\n\nlogger = logging.getLogger('TypeParsing')\n\nclass UnknownType:\n def __init__(self, name):\n self._name = name\n\n @property\n def name(self):\n return self._name\n\ndef _find_from_type_name(name, contract, contracts, structures, enums):\n name_elementary = name.split(' ')[0]\n if '[' in name_elementary:\n name_elementary = name_elementary[0:name_elementary.find('[')]\n if name_elementary in ElementaryTypeName:\n depth = name.count('[')\n if depth:\n return ArrayType(ElementaryType(name_elementary), Literal(depth))\n else:\n return ElementaryType(name_elementary)\n # We first look for contract\n # To avoid collision \n # Ex: a structure with the name of a contract\n name_contract = name\n if name_contract.startswith('contract '):\n name_contract = name_contract[len('contract '):]\n if name_contract.startswith('library '):\n name_contract = name_contract[len('library '):]\n var_type = next((c for c in contracts if c.name == name_contract), None)\n\n if not var_type:\n var_type = next((st for st in structures if st.name == name), None)\n if not var_type:\n var_type = next((e for e in enums if e.name == name), None)\n if not var_type:\n # any contract can refer to another contract's enum\n enum_name = name\n if enum_name.startswith('enum '):\n enum_name = enum_name[len('enum '):]\n all_enums = [c.enums for c in contracts]\n all_enums = [item for sublist in all_enums for item in sublist]\n var_type = next((e for e in all_enums if e.name == enum_name), None)\n if not var_type:\n var_type = next((e for e in all_enums if e.contract.name+\".\"+e.name == enum_name), None)\n if not 
var_type:\n # any contract can refer to another contract's structure\n name_struct = name\n if name_struct.startswith('struct '):\n name_struct = name_struct[len('struct '):]\n name_struct = name_struct.split(' ')[0] # remove stuff like storage pointer at the end\n all_structures = [c.structures for c in contracts]\n all_structures = [item for sublist in all_structures for item in sublist]\n var_type = next((st for st in all_structures if st.name == name_struct), None)\n if not var_type:\n var_type = next((st for st in all_structures if st.contract.name+\".\"+st.name == name_struct), None)\n # case where struct xxx.xx[] where not well formed in the AST\n if not var_type:\n depth = 0\n while name_struct.endswith('[]'):\n name_struct = name_struct[0:-2]\n depth+=1\n var_type = next((st for st in all_structures if st.contract.name+\".\"+st.name == name_struct), None)\n if var_type:\n return ArrayType(UserDefinedType(var_type), Literal(depth))\n\n if not var_type:\n var_type = next((f for f in contract.functions if f.name == name), None)\n if not var_type:\n if name.startswith('function '):\n found = re.findall('function \\(([ ()a-zA-Z0-9\\.,]*)\\) returns \\(([a-zA-Z0-9\\.,]*)\\)', name)\n assert len(found) == 1\n params = found[0][0].split(',')\n return_values = found[0][1].split(',')\n params = [_find_from_type_name(p, contract, contracts, structures, enums) for p in params]\n return_values = [_find_from_type_name(r, contract, contracts, structures, enums) for r in return_values]\n params_vars = []\n return_vars = []\n for p in params:\n var = FunctionTypeVariable()\n var.set_type(p)\n params_vars.append(var)\n for r in return_values:\n var = FunctionTypeVariable()\n var.set_type(r)\n return_vars.append(var)\n return FunctionType(params_vars, return_vars)\n if not var_type:\n if name.startswith('mapping('):\n # nested mapping declared with var\n if name.count('mapping(') == 1 :\n found = re.findall('mapping\\(([a-zA-Z0-9\\.]*) => ([a-zA-Z0-9\\.\\[\\]]*)\\)', name)\n else:\n found = re.findall('mapping\\(([a-zA-Z0-9\\.]*) => (mapping\\([=> a-zA-Z0-9\\.\\[\\]]*\\))\\)', name)\n assert len(found) == 1\n from_ = found[0][0]\n to_ = found[0][1]\n \n from_type = _find_from_type_name(from_, contract, contracts, structures, enums)\n to_type = _find_from_type_name(to_, contract, contracts, structures, enums)\n\n return MappingType(from_type, to_type)\n\n if not var_type:\n logger.error('Type not found '+str(name))\n exit(-1)\n return UserDefinedType(var_type)\n\n\n\ndef parse_type(t, caller_context):\n # local import to avoid circular dependency \n from slither.solc_parsing.expressions.expression_parsing import parse_expression\n from slither.solc_parsing.variables.function_type_variable import FunctionTypeVariableSolc\n\n if isinstance(caller_context, Contract):\n contract = caller_context\n elif isinstance(caller_context, Function):\n contract = caller_context.contract\n else:\n logger.error('Incorrect caller context')\n exit(-1)\n\n\n is_compact_ast = caller_context.is_compact_ast\n\n if is_compact_ast:\n key = 'nodeType'\n else:\n key = 'name'\n\n structures = contract.structures\n enums = contract.enums\n contracts = contract.slither.contracts\n\n if isinstance(t, UnknownType):\n return _find_from_type_name(t.name, contract, contracts, structures, enums)\n\n elif t[key] == 'ElementaryTypeName':\n if is_compact_ast:\n return ElementaryType(t['name'])\n return ElementaryType(t['attributes'][key])\n\n elif t[key] == 'UserDefinedTypeName':\n if is_compact_ast:\n return 
_find_from_type_name(t['typeDescriptions']['typeString'], contract, contracts, structures, enums)\n return _find_from_type_name(t['attributes'][key], contract, contracts, structures, enums)\n\n elif t[key] == 'ArrayTypeName':\n length = None\n if is_compact_ast:\n if t['length']:\n length = parse_expression(t['length'], caller_context)\n array_type = parse_type(t['baseType'], contract)\n else:\n if len(t['children']) == 2:\n length = parse_expression(t['children'][1], caller_context)\n else:\n assert len(t['children']) == 1\n array_type = parse_type(t['children'][0], contract)\n return ArrayType(array_type, length)\n\n elif t[key] == 'Mapping':\n\n if is_compact_ast:\n mappingFrom = parse_type(t['keyType'], contract)\n mappingTo = parse_type(t['valueType'], contract)\n else:\n assert len(t['children']) == 2\n\n mappingFrom = parse_type(t['children'][0], contract)\n mappingTo = parse_type(t['children'][1], contract)\n\n return MappingType(mappingFrom, mappingTo)\n\n elif t[key] == 'FunctionTypeName':\n\n if is_compact_ast:\n params = t['parameterTypes']\n return_values = t['returnParameterTypes']\n index = 'parameters'\n else:\n assert len(t['children']) == 2\n params = t['children'][0]\n return_values = t['children'][1]\n index = 'children'\n\n assert params[key] == 'ParameterList'\n assert return_values[key] == 'ParameterList'\n\n params_vars = []\n return_values_vars = []\n for p in params[index]:\n var = FunctionTypeVariableSolc(p)\n var.set_offset(p['src'], caller_context.slither)\n var.analyze(caller_context)\n params_vars.append(var)\n for p in return_values[index]:\n var = FunctionTypeVariableSolc(p)\n\n var.set_offset(p['src'], caller_context.slither)\n var.analyze(caller_context)\n return_values_vars.append(var)\n\n return FunctionType(params_vars, return_values_vars)\n\n logger.error('Type name not found '+str(t))\n exit(-1)\n", "path": "slither/solc_parsing/solidity_types/type_parsing.py"}], "after_files": [{"content": "import logging\n\nfrom slither.core.solidity_types.elementary_type import ElementaryType, ElementaryTypeName\nfrom slither.core.solidity_types.user_defined_type import UserDefinedType\nfrom slither.core.solidity_types.array_type import ArrayType\nfrom slither.core.solidity_types.mapping_type import MappingType\nfrom slither.core.solidity_types.function_type import FunctionType\n\nfrom slither.core.variables.function_type_variable import FunctionTypeVariable\n\nfrom slither.core.declarations.contract import Contract\nfrom slither.core.declarations.function import Function\n\nfrom slither.core.expressions.literal import Literal\n\nimport re\n\nlogger = logging.getLogger('TypeParsing')\n\nclass UnknownType:\n def __init__(self, name):\n self._name = name\n\n @property\n def name(self):\n return self._name\n\ndef _find_from_type_name(name, contract, contracts, structures, enums):\n name_elementary = name.split(' ')[0]\n if '[' in name_elementary:\n name_elementary = name_elementary[0:name_elementary.find('[')]\n if name_elementary in ElementaryTypeName:\n depth = name.count('[')\n if depth:\n return ArrayType(ElementaryType(name_elementary), Literal(depth))\n else:\n return ElementaryType(name_elementary)\n # We first look for contract\n # To avoid collision \n # Ex: a structure with the name of a contract\n name_contract = name\n if name_contract.startswith('contract '):\n name_contract = name_contract[len('contract '):]\n if name_contract.startswith('library '):\n name_contract = name_contract[len('library '):]\n var_type = next((c for c in contracts if c.name == 
name_contract), None)\n\n if not var_type:\n var_type = next((st for st in structures if st.name == name), None)\n if not var_type:\n var_type = next((e for e in enums if e.name == name), None)\n if not var_type:\n # any contract can refer to another contract's enum\n enum_name = name\n if enum_name.startswith('enum '):\n enum_name = enum_name[len('enum '):]\n all_enums = [c.enums for c in contracts]\n all_enums = [item for sublist in all_enums for item in sublist]\n var_type = next((e for e in all_enums if e.name == enum_name), None)\n if not var_type:\n var_type = next((e for e in all_enums if e.contract.name+\".\"+e.name == enum_name), None)\n if not var_type:\n # any contract can refer to another contract's structure\n name_struct = name\n if name_struct.startswith('struct '):\n name_struct = name_struct[len('struct '):]\n name_struct = name_struct.split(' ')[0] # remove stuff like storage pointer at the end\n all_structures = [c.structures for c in contracts]\n all_structures = [item for sublist in all_structures for item in sublist]\n var_type = next((st for st in all_structures if st.name == name_struct), None)\n if not var_type:\n var_type = next((st for st in all_structures if st.contract.name+\".\"+st.name == name_struct), None)\n # case where struct xxx.xx[] where not well formed in the AST\n if not var_type:\n depth = 0\n while name_struct.endswith('[]'):\n name_struct = name_struct[0:-2]\n depth+=1\n var_type = next((st for st in all_structures if st.contract.name+\".\"+st.name == name_struct), None)\n if var_type:\n return ArrayType(UserDefinedType(var_type), Literal(depth))\n\n if not var_type:\n var_type = next((f for f in contract.functions if f.name == name), None)\n if not var_type:\n if name.startswith('function '):\n found = re.findall('function \\(([ ()a-zA-Z0-9\\.,]*)\\) returns \\(([a-zA-Z0-9\\.,]*)\\)', name)\n assert len(found) == 1\n params = found[0][0].split(',')\n return_values = found[0][1].split(',')\n params = [_find_from_type_name(p, contract, contracts, structures, enums) for p in params]\n return_values = [_find_from_type_name(r, contract, contracts, structures, enums) for r in return_values]\n params_vars = []\n return_vars = []\n for p in params:\n var = FunctionTypeVariable()\n var.set_type(p)\n params_vars.append(var)\n for r in return_values:\n var = FunctionTypeVariable()\n var.set_type(r)\n return_vars.append(var)\n return FunctionType(params_vars, return_vars)\n if not var_type:\n if name.startswith('mapping('):\n # nested mapping declared with var\n if name.count('mapping(') == 1 :\n found = re.findall('mapping\\(([a-zA-Z0-9\\.]*) => ([a-zA-Z0-9\\.\\[\\]]*)\\)', name)\n else:\n found = re.findall('mapping\\(([a-zA-Z0-9\\.]*) => (mapping\\([=> a-zA-Z0-9\\.\\[\\]]*\\))\\)', name)\n assert len(found) == 1\n from_ = found[0][0]\n to_ = found[0][1]\n \n from_type = _find_from_type_name(from_, contract, contracts, structures, enums)\n to_type = _find_from_type_name(to_, contract, contracts, structures, enums)\n\n return MappingType(from_type, to_type)\n\n if not var_type:\n logger.error('Type not found '+str(name))\n exit(-1)\n return UserDefinedType(var_type)\n\n\n\ndef parse_type(t, caller_context):\n # local import to avoid circular dependency \n from slither.solc_parsing.expressions.expression_parsing import parse_expression\n from slither.solc_parsing.variables.function_type_variable import FunctionTypeVariableSolc\n\n if isinstance(caller_context, Contract):\n contract = caller_context\n elif isinstance(caller_context, Function):\n contract = 
caller_context.contract\n else:\n logger.error('Incorrect caller context')\n exit(-1)\n\n\n is_compact_ast = caller_context.is_compact_ast\n\n if is_compact_ast:\n key = 'nodeType'\n else:\n key = 'name'\n\n structures = contract.structures\n enums = contract.enums\n contracts = contract.slither.contracts\n\n if isinstance(t, UnknownType):\n return _find_from_type_name(t.name, contract, contracts, structures, enums)\n\n elif t[key] == 'ElementaryTypeName':\n if is_compact_ast:\n return ElementaryType(t['name'])\n return ElementaryType(t['attributes'][key])\n\n elif t[key] == 'UserDefinedTypeName':\n if is_compact_ast:\n return _find_from_type_name(t['typeDescriptions']['typeString'], contract, contracts, structures, enums)\n\n # Determine if we have a type node (otherwise we use the name node, as some older solc did not have 'type').\n type_name_key = 'type' if 'type' in t['attributes'] else key\n return _find_from_type_name(t['attributes'][type_name_key], contract, contracts, structures, enums)\n\n elif t[key] == 'ArrayTypeName':\n length = None\n if is_compact_ast:\n if t['length']:\n length = parse_expression(t['length'], caller_context)\n array_type = parse_type(t['baseType'], contract)\n else:\n if len(t['children']) == 2:\n length = parse_expression(t['children'][1], caller_context)\n else:\n assert len(t['children']) == 1\n array_type = parse_type(t['children'][0], contract)\n return ArrayType(array_type, length)\n\n elif t[key] == 'Mapping':\n\n if is_compact_ast:\n mappingFrom = parse_type(t['keyType'], contract)\n mappingTo = parse_type(t['valueType'], contract)\n else:\n assert len(t['children']) == 2\n\n mappingFrom = parse_type(t['children'][0], contract)\n mappingTo = parse_type(t['children'][1], contract)\n\n return MappingType(mappingFrom, mappingTo)\n\n elif t[key] == 'FunctionTypeName':\n\n if is_compact_ast:\n params = t['parameterTypes']\n return_values = t['returnParameterTypes']\n index = 'parameters'\n else:\n assert len(t['children']) == 2\n params = t['children'][0]\n return_values = t['children'][1]\n index = 'children'\n\n assert params[key] == 'ParameterList'\n assert return_values[key] == 'ParameterList'\n\n params_vars = []\n return_values_vars = []\n for p in params[index]:\n var = FunctionTypeVariableSolc(p)\n var.set_offset(p['src'], caller_context.slither)\n var.analyze(caller_context)\n params_vars.append(var)\n for p in return_values[index]:\n var = FunctionTypeVariableSolc(p)\n\n var.set_offset(p['src'], caller_context.slither)\n var.analyze(caller_context)\n return_values_vars.append(var)\n\n return FunctionType(params_vars, return_values_vars)\n\n logger.error('Type name not found '+str(t))\n exit(-1)\n", "path": "slither/solc_parsing/solidity_types/type_parsing.py"}]} | 2,915 | 241 |
gh_patches_debug_2525 | rasdani/github-patches | git_diff | kornia__kornia-679 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enable typing support
## 🚀 Feature
Enable typing support for `kornia` so other packages can benefit from the type hints.
## Motivation
Currently `kornia` only uses the type hints to check for internal consistency. For other packages you get the following error message when running `mypy`:
```python
import kornia
```
```
error: Cannot find implementation or library stub for module named 'kornia' [import]
```
## Pitch
To enable typing for other packages that depend on `kornia` we just need to put an empty file called `py.typed` in the root of the `kornia` package. See [`PEP561`](https://www.python.org/dev/peps/pep-0561/#packaging-type-information) for details.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Welcome to the Kornia setup.py.
2 #
3
4 import os
5 from setuptools import setup, find_packages
6 import subprocess
7 import distutils.command.clean
8
9
10 ################
11 # The variables below define the current version under
12 # development and the current pytorch supported verions.
13 # WARNING: Becareful and do not touch those variables,
14 # unless you are a maintainer. Otherwise, could brake
15 # the package backward compatibility.
16
17 # NOTE(maintainers): modify this variable each time you do a release
18
19 version = '0.4.1' # this a tag for the current development version
20
21
22 # NOTE(maintainers): update this dictionary each time you do a release
23 # When multiple pytorch versions are associated with a single version of kornia,
24 # the oldest one is the requirement. The versions should be inequalities.
25 # Once a pytorch version (in the future) breaks a kornia version, we could just
26 # add a maximal version.
27 kornia_pt_dependencies = {
28 '0.4.0': '>=1.6.0,<1.7.0',
29 '0.3.2': '>=1.5.0,<1.6.0',
30 '0.3.1': '>=1.5.0',
31 '0.2.2': '>=1.4.0',
32 '0.1.4': '>=1.2.0',
33 }
34
35
36 # version can be overiden eg with KORNIA_BUILD_VERSION so we map each possible kornia version to the dictionary keys
37 def dep_version(version):
38 compatible_versions = [v for v in kornia_pt_dependencies.keys() if v >= version]
39 compatible_versions += [sorted(kornia_pt_dependencies)[-1]]
40 return min(compatible_versions)
41
42
43 #################################
44
45 sha = 'Unknown'
46 package_name = 'kornia'
47
48 cwd = os.path.dirname(os.path.abspath(__file__))
49
50 try:
51 sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()
52 except Exception:
53 pass
54
55 if os.getenv('KORNIA_BUILD_VERSION'):
56 version = os.getenv('KORNIA_BUILD_VERSION')
57 elif sha != 'Unknown':
58 version += '+' + sha[:7]
59 print("Building wheel {}-{}".format(package_name, version))
60
61
62 def write_version_file():
63 version_path = os.path.join(cwd, 'kornia', 'version.py')
64 with open(version_path, 'w') as f:
65 f.write("__version__ = '{}'\n".format(version))
66 f.write("git_version = {}\n".format(repr(sha)))
67
68
69 def read(*names, **kwargs):
70 with io.open(
71 os.path.join(os.path.dirname(__file__), *names),
72 encoding=kwargs.get("encoding", "utf8")
73 ) as fp:
74 return fp.read()
75
76
77 # open readme file and remove logo
78 readme = open('README.rst').read()
79 long_description = '\n'.join(readme.split('\n')[7:])
80
81
82 class clean(distutils.command.clean.clean):
83 def run(self):
84 with open('.gitignore', 'r') as f:
85 ignores = f.read()
86 for wildcard in filter(None, ignores.split('\n')):
87 for filename in glob.glob(wildcard):
88 try:
89 os.remove(filename)
90 except OSError:
91 shutil.rmtree(filename, ignore_errors=True)
92
93 # It's an old-style class in Python 2.7...
94 distutils.command.clean.clean.run(self)
95 # remove compiled and temporary files
96 subprocess.call(['rm -rf dist/ build/ kornia.egg*'], shell=True)
97
98 requirements = [
99 'numpy',
100 'torch' + kornia_pt_dependencies[dep_version(version)],
101 ]
102
103
104 if __name__ == '__main__':
105 write_version_file()
106 setup(
107 # Metadata
108 name=package_name,
109 version=version,
110 author='Edgar Riba',
111 author_email='[email protected]',
112 url='https://github.com/kornia/kornia',
113 description='Open Source Differentiable Computer Vision Library for PyTorch',
114 long_description=long_description,
115 license='Apache License 2.0',
116 python_requires='>=3.6',
117
118 # Test
119 setup_requires=['pytest-runner'],
120 tests_require=['pytest'],
121
122 # Package info
123 packages=find_packages(exclude=('docs', 'test', 'examples',)),
124
125 zip_safe=True,
126 install_requires=requirements,
127 classifiers=[
128 'Intended Audience :: Developers',
129 'Intended Audience :: Education',
130 'Intended Audience :: Science/Research',
131 'Operating System :: POSIX :: Linux',
132 'Programming Language :: Python :: 3 :: Only',
133 'License :: OSI Approved :: Apache Software License',
134 'Topic :: Scientific/Engineering :: Image Recognition',
135 'Topic :: Software Development :: Libraries',
136 ],
137 )
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -122,6 +122,10 @@
# Package info
packages=find_packages(exclude=('docs', 'test', 'examples',)),
+ package_data={
+ "kornia": ["py.typed"],
+ },
+
zip_safe=True,
install_requires=requirements,
classifiers=[
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -122,6 +122,10 @@\n # Package info\n packages=find_packages(exclude=('docs', 'test', 'examples',)),\n \n+ package_data={\n+ \"kornia\": [\"py.typed\"],\n+ },\n+\n zip_safe=True,\n install_requires=requirements,\n classifiers=[\n", "issue": "Enable typing support\n## \ud83d\ude80 Feature\r\n\r\nEnable typing support for `kornia` so other packages can benefit from the type hints.\r\n\r\n## Motivation\r\n\r\nCurrently `kornia` only uses the type hints to check for internal consistency. For other packages you get the following error message when running `mypy`:\r\n\r\n```python\r\nimport kornia\r\n```\r\n\r\n```\r\nerror: Cannot find implementation or library stub for module named 'kornia' [import]\r\n```\r\n\r\n## Pitch\r\n\r\nTo enable typing for other packages that depend on `kornia` we just need to put an empty file called `py.typed` in the root of the `kornia` package. See [`PEP561`](https://www.python.org/dev/peps/pep-0561/#packaging-type-information) for details.\r\n\r\n\n", "before_files": [{"content": "# Welcome to the Kornia setup.py.\n#\n\nimport os\nfrom setuptools import setup, find_packages\nimport subprocess\nimport distutils.command.clean\n\n\n################\n# The variables below define the current version under\n# development and the current pytorch supported verions.\n# WARNING: Becareful and do not touch those variables,\n# unless you are a maintainer. Otherwise, could brake\n# the package backward compatibility.\n\n# NOTE(maintainers): modify this variable each time you do a release\n\nversion = '0.4.1' # this a tag for the current development version\n\n\n# NOTE(maintainers): update this dictionary each time you do a release\n# When multiple pytorch versions are associated with a single version of kornia,\n# the oldest one is the requirement. 
The versions should be inequalities.\n# Once a pytorch version (in the future) breaks a kornia version, we could just\n# add a maximal version.\nkornia_pt_dependencies = {\n '0.4.0': '>=1.6.0,<1.7.0',\n '0.3.2': '>=1.5.0,<1.6.0',\n '0.3.1': '>=1.5.0',\n '0.2.2': '>=1.4.0',\n '0.1.4': '>=1.2.0',\n}\n\n\n# version can be overiden eg with KORNIA_BUILD_VERSION so we map each possible kornia version to the dictionary keys\ndef dep_version(version):\n compatible_versions = [v for v in kornia_pt_dependencies.keys() if v >= version]\n compatible_versions += [sorted(kornia_pt_dependencies)[-1]]\n return min(compatible_versions)\n\n\n#################################\n\nsha = 'Unknown'\npackage_name = 'kornia'\n\ncwd = os.path.dirname(os.path.abspath(__file__))\n\ntry:\n sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()\nexcept Exception:\n pass\n\nif os.getenv('KORNIA_BUILD_VERSION'):\n version = os.getenv('KORNIA_BUILD_VERSION')\nelif sha != 'Unknown':\n version += '+' + sha[:7]\nprint(\"Building wheel {}-{}\".format(package_name, version))\n\n\ndef write_version_file():\n version_path = os.path.join(cwd, 'kornia', 'version.py')\n with open(version_path, 'w') as f:\n f.write(\"__version__ = '{}'\\n\".format(version))\n f.write(\"git_version = {}\\n\".format(repr(sha)))\n\n\ndef read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\")\n ) as fp:\n return fp.read()\n\n\n# open readme file and remove logo\nreadme = open('README.rst').read()\nlong_description = '\\n'.join(readme.split('\\n')[7:])\n\n\nclass clean(distutils.command.clean.clean):\n def run(self):\n with open('.gitignore', 'r') as f:\n ignores = f.read()\n for wildcard in filter(None, ignores.split('\\n')):\n for filename in glob.glob(wildcard):\n try:\n os.remove(filename)\n except OSError:\n shutil.rmtree(filename, ignore_errors=True)\n\n # It's an old-style class in Python 2.7...\n distutils.command.clean.clean.run(self)\n # remove compiled and temporary files\n subprocess.call(['rm -rf dist/ build/ kornia.egg*'], shell=True)\n\nrequirements = [\n 'numpy',\n 'torch' + kornia_pt_dependencies[dep_version(version)],\n]\n\n\nif __name__ == '__main__':\n write_version_file()\n setup(\n # Metadata\n name=package_name,\n version=version,\n author='Edgar Riba',\n author_email='[email protected]',\n url='https://github.com/kornia/kornia',\n description='Open Source Differentiable Computer Vision Library for PyTorch',\n long_description=long_description,\n license='Apache License 2.0',\n python_requires='>=3.6',\n\n # Test\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n\n # Package info\n packages=find_packages(exclude=('docs', 'test', 'examples',)),\n\n zip_safe=True,\n install_requires=requirements,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3 :: Only',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Scientific/Engineering :: Image Recognition',\n 'Topic :: Software Development :: Libraries',\n ],\n )\n", "path": "setup.py"}], "after_files": [{"content": "# Welcome to the Kornia setup.py.\n#\n\nimport os\nfrom setuptools import setup, find_packages\nimport subprocess\nimport distutils.command.clean\n\n\n################\n# The variables below define the current version under\n# development and the current pytorch 
supported verions.\n# WARNING: Becareful and do not touch those variables,\n# unless you are a maintainer. Otherwise, could brake\n# the package backward compatibility.\n\n# NOTE(maintainers): modify this variable each time you do a release\n\nversion = '0.4.1' # this a tag for the current development version\n\n\n# NOTE(maintainers): update this dictionary each time you do a release\n# When multiple pytorch versions are associated with a single version of kornia,\n# the oldest one is the requirement. The versions should be inequalities.\n# Once a pytorch version (in the future) breaks a kornia version, we could just\n# add a maximal version.\nkornia_pt_dependencies = {\n '0.4.0': '>=1.6.0,<1.7.0',\n '0.3.2': '>=1.5.0,<1.6.0',\n '0.3.1': '>=1.5.0',\n '0.2.2': '>=1.4.0',\n '0.1.4': '>=1.2.0',\n}\n\n\n# version can be overiden eg with KORNIA_BUILD_VERSION so we map each possible kornia version to the dictionary keys\ndef dep_version(version):\n compatible_versions = [v for v in kornia_pt_dependencies.keys() if v >= version]\n compatible_versions += [sorted(kornia_pt_dependencies)[-1]]\n return min(compatible_versions)\n\n\n#################################\n\nsha = 'Unknown'\npackage_name = 'kornia'\n\ncwd = os.path.dirname(os.path.abspath(__file__))\n\ntry:\n sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()\nexcept Exception:\n pass\n\nif os.getenv('KORNIA_BUILD_VERSION'):\n version = os.getenv('KORNIA_BUILD_VERSION')\nelif sha != 'Unknown':\n version += '+' + sha[:7]\nprint(\"Building wheel {}-{}\".format(package_name, version))\n\n\ndef write_version_file():\n version_path = os.path.join(cwd, 'kornia', 'version.py')\n with open(version_path, 'w') as f:\n f.write(\"__version__ = '{}'\\n\".format(version))\n f.write(\"git_version = {}\\n\".format(repr(sha)))\n\n\ndef read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\")\n ) as fp:\n return fp.read()\n\n\n# open readme file and remove logo\nreadme = open('README.rst').read()\nlong_description = '\\n'.join(readme.split('\\n')[7:])\n\n\nclass clean(distutils.command.clean.clean):\n def run(self):\n with open('.gitignore', 'r') as f:\n ignores = f.read()\n for wildcard in filter(None, ignores.split('\\n')):\n for filename in glob.glob(wildcard):\n try:\n os.remove(filename)\n except OSError:\n shutil.rmtree(filename, ignore_errors=True)\n\n # It's an old-style class in Python 2.7...\n distutils.command.clean.clean.run(self)\n # remove compiled and temporary files\n subprocess.call(['rm -rf dist/ build/ kornia.egg*'], shell=True)\n\nrequirements = [\n 'numpy',\n 'torch' + kornia_pt_dependencies[dep_version(version)],\n]\n\n\nif __name__ == '__main__':\n write_version_file()\n setup(\n # Metadata\n name=package_name,\n version=version,\n author='Edgar Riba',\n author_email='[email protected]',\n url='https://github.com/kornia/kornia',\n description='Open Source Differentiable Computer Vision Library for PyTorch',\n long_description=long_description,\n license='Apache License 2.0',\n python_requires='>=3.6',\n\n # Test\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n\n # Package info\n packages=find_packages(exclude=('docs', 'test', 'examples',)),\n\n package_data={\n \"kornia\": [\"py.typed\"],\n },\n\n zip_safe=True,\n install_requires=requirements,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Operating System :: 
POSIX :: Linux',\n 'Programming Language :: Python :: 3 :: Only',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Scientific/Engineering :: Image Recognition',\n 'Topic :: Software Development :: Libraries',\n ],\n )\n", "path": "setup.py"}]} | 1,789 | 92 |
gh_patches_debug_5960 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-402 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
${aws:username} now fails
*cfn-lint version: 0.8
E1029 Found an embedded parameter outside of an "Fn::Sub" at Resources/ForceMFAPolicy/Properties/PolicyDocument/Statement/3/Resource/0/arn:aws:iam::*:mfa/${aws:username}
It looks like the IAM variables are now being detected as embedded parameters in cfn-lint.
Effect: "Allow"
Action:
- "iam:UploadSSHPublicKey"
Resource: "arn:aws:iam::*:user/${aws:username}"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/functions/SubNeeded.py`
Content:
```
1 """
2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import re
18 from cfnlint import CloudFormationLintRule
19 from cfnlint import RuleMatch
20
21 class SubNeeded(CloudFormationLintRule):
22 """Check if a substitution string exists without a substitution function"""
23 id = 'E1029'
24 shortdesc = 'Sub is required if a variable is used in a string'
25 description = 'If a substitution variable exists in a string but isn\'t wrapped with the Fn::Sub function the deployment will fail.'
26 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'
27 tags = ['functions', 'sub']
28
29 # Free-form text properties to exclude from this rule
30 excludes = ['UserData', 'ZipFile']
31
32 def _match_values(self, searchRegex, cfnelem, path):
33 """Recursively search for values matching the searchRegex"""
34 values = []
35 if isinstance(cfnelem, dict):
36 for key in cfnelem:
37 pathprop = path[:]
38 pathprop.append(key)
39 values.extend(self._match_values(searchRegex, cfnelem[key], pathprop))
40 elif isinstance(cfnelem, list):
41 for index, item in enumerate(cfnelem):
42 pathprop = path[:]
43 pathprop.append(index)
44 values.extend(self._match_values(searchRegex, item, pathprop))
45 else:
46 # Leaf node
47 if isinstance(cfnelem, str) and re.match(searchRegex, cfnelem):
48 values.append(path + [cfnelem])
49
50 return values
51
52 def match_values(self, searchRegex, cfn):
53 """
54 Search for values in all parts of the templates that match the searchRegex
55 """
56 results = []
57 results.extend(self._match_values(searchRegex, cfn.template, []))
58 # Globals are removed during a transform. They need to be checked manually
59 results.extend(self._match_values(searchRegex, cfn.template.get('Globals', {}), []))
60 return results
61
62 def match(self, cfn):
63 """Basic Rule Matching"""
64
65 matches = []
66
67 # Generic regex to match a string containing at least one ${parameter}
68 parameter_search = re.compile(r'^(.*\$\{.*\}.*(\$\{.*\}.*)*)$')
69
70 # Get a list of paths to every leaf node string containing at least one ${parameter}
71 parameter_string_paths = self.match_values(parameter_search, cfn)
72
73 # We want to search all of the paths to check if each one contains an 'Fn::Sub'
74 for parameter_string_path in parameter_string_paths:
75 found_sub = False
76
77 # Does the path contain an 'Fn::Sub'?
78 for step in parameter_string_path:
79 if step == 'Fn::Sub' or step in self.excludes:
80 found_sub = True
81
82 # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly
83 if not found_sub:
84 message = 'Found an embedded parameter outside of an "Fn::Sub" at {}'.format('/'.join(map(str, parameter_string_path)))
85 matches.append(RuleMatch(parameter_string_path, message))
86
87 return matches
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/functions/SubNeeded.py b/src/cfnlint/rules/functions/SubNeeded.py
--- a/src/cfnlint/rules/functions/SubNeeded.py
+++ b/src/cfnlint/rules/functions/SubNeeded.py
@@ -27,7 +27,7 @@
tags = ['functions', 'sub']
# Free-form text properties to exclude from this rule
- excludes = ['UserData', 'ZipFile']
+ excludes = ['UserData', 'ZipFile', 'Resource', 'Condition']
def _match_values(self, searchRegex, cfnelem, path):
"""Recursively search for values matching the searchRegex"""
| {"golden_diff": "diff --git a/src/cfnlint/rules/functions/SubNeeded.py b/src/cfnlint/rules/functions/SubNeeded.py\n--- a/src/cfnlint/rules/functions/SubNeeded.py\n+++ b/src/cfnlint/rules/functions/SubNeeded.py\n@@ -27,7 +27,7 @@\n tags = ['functions', 'sub']\n \n # Free-form text properties to exclude from this rule\n- excludes = ['UserData', 'ZipFile']\n+ excludes = ['UserData', 'ZipFile', 'Resource', 'Condition']\n \n def _match_values(self, searchRegex, cfnelem, path):\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\n", "issue": "${aws:username} now fails\n*cfn-lint version: 0.8\r\n\r\nE1029 Found an embedded parameter outside of an \"Fn::Sub\" at Resources/ForceMFAPolicy/Properties/PolicyDocument/Statement/3/Resource/0/arn:aws:iam::*:mfa/${aws:username}\r\n\r\nIt looks like the IAM variables are now being detected as embedded parameters in cfn-lint.\r\n\r\n Effect: \"Allow\"\r\n Action:\r\n - \"iam:UploadSSHPublicKey\"\r\n Resource: \"arn:aws:iam::*:user/${aws:username}\"\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\nclass SubNeeded(CloudFormationLintRule):\n \"\"\"Check if a substitution string exists without a substitution function\"\"\"\n id = 'E1029'\n shortdesc = 'Sub is required if a variable is used in a string'\n description = 'If a substitution variable exists in a string but isn\\'t wrapped with the Fn::Sub function the deployment will fail.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n # Free-form text properties to exclude from this rule\n excludes = ['UserData', 'ZipFile']\n\n def _match_values(self, searchRegex, cfnelem, path):\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\n values = []\n if isinstance(cfnelem, dict):\n for key in cfnelem:\n pathprop = path[:]\n pathprop.append(key)\n values.extend(self._match_values(searchRegex, cfnelem[key], pathprop))\n elif isinstance(cfnelem, list):\n for index, item in enumerate(cfnelem):\n pathprop = path[:]\n pathprop.append(index)\n values.extend(self._match_values(searchRegex, item, pathprop))\n else:\n # Leaf node\n if isinstance(cfnelem, str) and re.match(searchRegex, cfnelem):\n values.append(path + [cfnelem])\n\n return values\n\n def match_values(self, searchRegex, cfn):\n \"\"\"\n Search for values in all parts of the templates that match the searchRegex\n \"\"\"\n results = []\n results.extend(self._match_values(searchRegex, cfn.template, 
[]))\n # Globals are removed during a transform. They need to be checked manually\n results.extend(self._match_values(searchRegex, cfn.template.get('Globals', {}), []))\n return results\n\n def match(self, cfn):\n \"\"\"Basic Rule Matching\"\"\"\n\n matches = []\n\n # Generic regex to match a string containing at least one ${parameter}\n parameter_search = re.compile(r'^(.*\\$\\{.*\\}.*(\\$\\{.*\\}.*)*)$')\n\n # Get a list of paths to every leaf node string containing at least one ${parameter}\n parameter_string_paths = self.match_values(parameter_search, cfn)\n\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n for parameter_string_path in parameter_string_paths:\n found_sub = False\n\n # Does the path contain an 'Fn::Sub'?\n for step in parameter_string_path:\n if step == 'Fn::Sub' or step in self.excludes:\n found_sub = True\n\n # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly\n if not found_sub:\n message = 'Found an embedded parameter outside of an \"Fn::Sub\" at {}'.format('/'.join(map(str, parameter_string_path)))\n matches.append(RuleMatch(parameter_string_path, message))\n\n return matches\n", "path": "src/cfnlint/rules/functions/SubNeeded.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\nclass SubNeeded(CloudFormationLintRule):\n \"\"\"Check if a substitution string exists without a substitution function\"\"\"\n id = 'E1029'\n shortdesc = 'Sub is required if a variable is used in a string'\n description = 'If a substitution variable exists in a string but isn\\'t wrapped with the Fn::Sub function the deployment will fail.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n # Free-form text properties to exclude from this rule\n excludes = ['UserData', 'ZipFile', 'Resource', 'Condition']\n\n def _match_values(self, searchRegex, cfnelem, path):\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\n values = []\n if isinstance(cfnelem, dict):\n for key in cfnelem:\n pathprop = path[:]\n pathprop.append(key)\n values.extend(self._match_values(searchRegex, cfnelem[key], pathprop))\n elif isinstance(cfnelem, list):\n for index, item in enumerate(cfnelem):\n pathprop = path[:]\n pathprop.append(index)\n values.extend(self._match_values(searchRegex, item, pathprop))\n else:\n # Leaf node\n if isinstance(cfnelem, str) and re.match(searchRegex, cfnelem):\n values.append(path + [cfnelem])\n\n return values\n\n def match_values(self, searchRegex, cfn):\n \"\"\"\n Search for values in all parts of the templates that match the searchRegex\n \"\"\"\n results = []\n results.extend(self._match_values(searchRegex, cfn.template, []))\n # Globals are removed during a transform. They need to be checked manually\n results.extend(self._match_values(searchRegex, cfn.template.get('Globals', {}), []))\n return results\n\n def match(self, cfn):\n \"\"\"Basic Rule Matching\"\"\"\n\n matches = []\n\n # Generic regex to match a string containing at least one ${parameter}\n parameter_search = re.compile(r'^(.*\\$\\{.*\\}.*(\\$\\{.*\\}.*)*)$')\n\n # Get a list of paths to every leaf node string containing at least one ${parameter}\n parameter_string_paths = self.match_values(parameter_search, cfn)\n\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n for parameter_string_path in parameter_string_paths:\n found_sub = False\n\n # Does the path contain an 'Fn::Sub'?\n for step in parameter_string_path:\n if step == 'Fn::Sub' or step in self.excludes:\n found_sub = True\n\n # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly\n if not found_sub:\n message = 'Found an embedded parameter outside of an \"Fn::Sub\" at {}'.format('/'.join(map(str, parameter_string_path)))\n matches.append(RuleMatch(parameter_string_path, message))\n\n return matches\n", "path": "src/cfnlint/rules/functions/SubNeeded.py"}]} | 1,445 | 139 |
gh_patches_debug_13558 | rasdani/github-patches | git_diff | pypa__pip-11779 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip 23.0: `pip list` fails with error "ValueError: not enough values to unpack" for archives installed from URL
### Description
When a project contains installed archives installed by specifying URL (e.g. spaCy models), `pip list` does not display the list of installed packages, because it crashes.
Probable cause: https://github.com/pypa/pip/commit/38681f3d6669754c7e919f0eb051b12931bfb0f2
### Expected behavior
_No response_
### pip version
pip 23.0
### Python version
Python 3.8.16
### OS
Ubuntu 20.04.5 LTS (Focal Fossa)
### How to Reproduce
```
pyenv virtualenv 3.8.16 pip-issue
pyenv local pip-issue
```
```
cat << EOF > ./pyproject.toml
[tool.poetry]
name = "pip_issue"
version = "0.0.1"
authors = [ "Foo Bar <[email protected]>" ]
description = "pip issue"
[tool.poetry.dependencies]
python = ">=3.8, <3.11"
en_core_web_sm = { url = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0.tar.gz" }
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
EOF
```
```
pip install --upgrade poetry
poetry install
```
```
# ✅ previous version works fine
pip list
```
```
# 💥 latest version crashes
python3.8 -m pip install --upgrade pip
pip list
```
### Output
```
$ pip list
ERROR: Exception:
Traceback (most recent call last):
File "/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/cli/base_command.py", line 160, in exc_logging_wrapper
status = run_func(*args)
File "/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/commands/list.py", line 192, in run
self.output_package_listing(packages, options)
File "/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/commands/list.py", line 273, in output_package_listing
data, header = format_for_columns(packages, options)
File "/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/commands/list.py", line 316, in format_for_columns
has_editables = any(x.editable for x in pkgs)
File "/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/commands/list.py", line 316, in <genexpr>
has_editables = any(x.editable for x in pkgs)
File "/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/metadata/base.py", line 338, in editable
return bool(self.editable_project_location)
File "/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/metadata/base.py", line 176, in editable_project_location
direct_url = self.direct_url
File "/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/metadata/base.py", line 306, in direct_url
return DirectUrl.from_json(content)
File "/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/models/direct_url.py", line 217, in from_json
return cls.from_dict(json.loads(s))
File "/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/models/direct_url.py", line 200, in from_dict
ArchiveInfo._from_dict(_get(d, dict, "archive_info")),
File "/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/models/direct_url.py", line 124, in _from_dict
return cls(hash=_get(d, str, "hash"), hashes=_get(d, dict, "hashes"))
File "/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/models/direct_url.py", line 111, in __init__
hash_name, hash_value = hash.split("=", 1)
ValueError: not enough values to unpack (expected 2, got 1)
```
### Code of Conduct
- [X] I agree to follow the [PSF Code of Conduct](https://www.python.org/psf/conduct/).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/models/direct_url.py`
Content:
```
1 """ PEP 610 """
2 import json
3 import re
4 import urllib.parse
5 from typing import Any, Dict, Iterable, Optional, Type, TypeVar, Union
6
7 __all__ = [
8 "DirectUrl",
9 "DirectUrlValidationError",
10 "DirInfo",
11 "ArchiveInfo",
12 "VcsInfo",
13 ]
14
15 T = TypeVar("T")
16
17 DIRECT_URL_METADATA_NAME = "direct_url.json"
18 ENV_VAR_RE = re.compile(r"^\$\{[A-Za-z0-9-_]+\}(:\$\{[A-Za-z0-9-_]+\})?$")
19
20
21 class DirectUrlValidationError(Exception):
22 pass
23
24
25 def _get(
26 d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None
27 ) -> Optional[T]:
28 """Get value from dictionary and verify expected type."""
29 if key not in d:
30 return default
31 value = d[key]
32 if not isinstance(value, expected_type):
33 raise DirectUrlValidationError(
34 "{!r} has unexpected type for {} (expected {})".format(
35 value, key, expected_type
36 )
37 )
38 return value
39
40
41 def _get_required(
42 d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None
43 ) -> T:
44 value = _get(d, expected_type, key, default)
45 if value is None:
46 raise DirectUrlValidationError(f"{key} must have a value")
47 return value
48
49
50 def _exactly_one_of(infos: Iterable[Optional["InfoType"]]) -> "InfoType":
51 infos = [info for info in infos if info is not None]
52 if not infos:
53 raise DirectUrlValidationError(
54 "missing one of archive_info, dir_info, vcs_info"
55 )
56 if len(infos) > 1:
57 raise DirectUrlValidationError(
58 "more than one of archive_info, dir_info, vcs_info"
59 )
60 assert infos[0] is not None
61 return infos[0]
62
63
64 def _filter_none(**kwargs: Any) -> Dict[str, Any]:
65 """Make dict excluding None values."""
66 return {k: v for k, v in kwargs.items() if v is not None}
67
68
69 class VcsInfo:
70 name = "vcs_info"
71
72 def __init__(
73 self,
74 vcs: str,
75 commit_id: str,
76 requested_revision: Optional[str] = None,
77 ) -> None:
78 self.vcs = vcs
79 self.requested_revision = requested_revision
80 self.commit_id = commit_id
81
82 @classmethod
83 def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["VcsInfo"]:
84 if d is None:
85 return None
86 return cls(
87 vcs=_get_required(d, str, "vcs"),
88 commit_id=_get_required(d, str, "commit_id"),
89 requested_revision=_get(d, str, "requested_revision"),
90 )
91
92 def _to_dict(self) -> Dict[str, Any]:
93 return _filter_none(
94 vcs=self.vcs,
95 requested_revision=self.requested_revision,
96 commit_id=self.commit_id,
97 )
98
99
100 class ArchiveInfo:
101 name = "archive_info"
102
103 def __init__(
104 self,
105 hash: Optional[str] = None,
106 hashes: Optional[Dict[str, str]] = None,
107 ) -> None:
108 if hash is not None:
109 # Auto-populate the hashes key to upgrade to the new format automatically.
110 # We don't back-populate the legacy hash key.
111 hash_name, hash_value = hash.split("=", 1)
112 if hashes is None:
113 hashes = {hash_name: hash_value}
114 elif hash_name not in hash:
115 hashes = hashes.copy()
116 hashes[hash_name] = hash_value
117 self.hash = hash
118 self.hashes = hashes
119
120 @classmethod
121 def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["ArchiveInfo"]:
122 if d is None:
123 return None
124 return cls(hash=_get(d, str, "hash"), hashes=_get(d, dict, "hashes"))
125
126 def _to_dict(self) -> Dict[str, Any]:
127 return _filter_none(hash=self.hash, hashes=self.hashes)
128
129
130 class DirInfo:
131 name = "dir_info"
132
133 def __init__(
134 self,
135 editable: bool = False,
136 ) -> None:
137 self.editable = editable
138
139 @classmethod
140 def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["DirInfo"]:
141 if d is None:
142 return None
143 return cls(editable=_get_required(d, bool, "editable", default=False))
144
145 def _to_dict(self) -> Dict[str, Any]:
146 return _filter_none(editable=self.editable or None)
147
148
149 InfoType = Union[ArchiveInfo, DirInfo, VcsInfo]
150
151
152 class DirectUrl:
153 def __init__(
154 self,
155 url: str,
156 info: InfoType,
157 subdirectory: Optional[str] = None,
158 ) -> None:
159 self.url = url
160 self.info = info
161 self.subdirectory = subdirectory
162
163 def _remove_auth_from_netloc(self, netloc: str) -> str:
164 if "@" not in netloc:
165 return netloc
166 user_pass, netloc_no_user_pass = netloc.split("@", 1)
167 if (
168 isinstance(self.info, VcsInfo)
169 and self.info.vcs == "git"
170 and user_pass == "git"
171 ):
172 return netloc
173 if ENV_VAR_RE.match(user_pass):
174 return netloc
175 return netloc_no_user_pass
176
177 @property
178 def redacted_url(self) -> str:
179 """url with user:password part removed unless it is formed with
180 environment variables as specified in PEP 610, or it is ``git``
181 in the case of a git URL.
182 """
183 purl = urllib.parse.urlsplit(self.url)
184 netloc = self._remove_auth_from_netloc(purl.netloc)
185 surl = urllib.parse.urlunsplit(
186 (purl.scheme, netloc, purl.path, purl.query, purl.fragment)
187 )
188 return surl
189
190 def validate(self) -> None:
191 self.from_dict(self.to_dict())
192
193 @classmethod
194 def from_dict(cls, d: Dict[str, Any]) -> "DirectUrl":
195 return DirectUrl(
196 url=_get_required(d, str, "url"),
197 subdirectory=_get(d, str, "subdirectory"),
198 info=_exactly_one_of(
199 [
200 ArchiveInfo._from_dict(_get(d, dict, "archive_info")),
201 DirInfo._from_dict(_get(d, dict, "dir_info")),
202 VcsInfo._from_dict(_get(d, dict, "vcs_info")),
203 ]
204 ),
205 )
206
207 def to_dict(self) -> Dict[str, Any]:
208 res = _filter_none(
209 url=self.redacted_url,
210 subdirectory=self.subdirectory,
211 )
212 res[self.info.name] = self.info._to_dict()
213 return res
214
215 @classmethod
216 def from_json(cls, s: str) -> "DirectUrl":
217 return cls.from_dict(json.loads(s))
218
219 def to_json(self) -> str:
220 return json.dumps(self.to_dict(), sort_keys=True)
221
222 def is_local_editable(self) -> bool:
223 return isinstance(self.info, DirInfo) and self.info.editable
224
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_internal/models/direct_url.py b/src/pip/_internal/models/direct_url.py
--- a/src/pip/_internal/models/direct_url.py
+++ b/src/pip/_internal/models/direct_url.py
@@ -108,7 +108,12 @@
if hash is not None:
# Auto-populate the hashes key to upgrade to the new format automatically.
# We don't back-populate the legacy hash key.
- hash_name, hash_value = hash.split("=", 1)
+ try:
+ hash_name, hash_value = hash.split("=", 1)
+ except ValueError:
+ raise DirectUrlValidationError(
+ f"invalid archive_info.hash format: {hash!r}"
+ )
if hashes is None:
hashes = {hash_name: hash_value}
elif hash_name not in hash:
| {"golden_diff": "diff --git a/src/pip/_internal/models/direct_url.py b/src/pip/_internal/models/direct_url.py\n--- a/src/pip/_internal/models/direct_url.py\n+++ b/src/pip/_internal/models/direct_url.py\n@@ -108,7 +108,12 @@\n if hash is not None:\n # Auto-populate the hashes key to upgrade to the new format automatically.\n # We don't back-populate the legacy hash key.\n- hash_name, hash_value = hash.split(\"=\", 1)\n+ try:\n+ hash_name, hash_value = hash.split(\"=\", 1)\n+ except ValueError:\n+ raise DirectUrlValidationError(\n+ f\"invalid archive_info.hash format: {hash!r}\"\n+ )\n if hashes is None:\n hashes = {hash_name: hash_value}\n elif hash_name not in hash:\n", "issue": "pip 23.0: `pip list` fails with error \"ValueError: not enough values to unpack\" for archives installed from URL\n### Description\r\n\r\nWhen a project contains installed archives installed by specifying URL (e.g. spaCy models), `pip list` does not display the list of installed packages, because it crashes.\r\n\r\nProbable cause: https://github.com/pypa/pip/commit/38681f3d6669754c7e919f0eb051b12931bfb0f2\r\n\r\n### Expected behavior\r\n\r\n_No response_\r\n\r\n### pip version\r\n\r\npip 23.0\r\n\r\n### Python version\r\n\r\nPython 3.8.16\r\n\r\n### OS\r\n\r\nUbuntu 20.04.5 LTS (Focal Fossa)\r\n\r\n### How to Reproduce\r\n\r\n```\r\npyenv virtualenv 3.8.16 pip-issue\r\npyenv local pip-issue\r\n```\r\n\r\n```\r\ncat << EOF > ./pyproject.toml\r\n[tool.poetry]\r\nname = \"pip_issue\"\r\nversion = \"0.0.1\"\r\nauthors = [ \"Foo Bar <[email protected]>\" ]\r\ndescription = \"pip issue\"\r\n\r\n[tool.poetry.dependencies]\r\npython = \">=3.8, <3.11\"\r\nen_core_web_sm = { url = \"https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0.tar.gz\" }\r\n\r\n[build-system]\r\nrequires = [\"poetry-core>=1.0.0\"]\r\nbuild-backend = \"poetry.core.masonry.api\"\r\nEOF\r\n```\r\n\r\n```\r\npip install --upgrade poetry\r\npoetry install\r\n```\r\n\r\n```\r\n# \u2705 previous version works fine\r\npip list \r\n```\r\n\r\n```\r\n# \ud83d\udca5 latest version crashes\r\npython3.8 -m pip install --upgrade pip\r\npip list\r\n```\r\n\r\n### Output\r\n\r\n```\r\n$ pip list \r\nERROR: Exception:\r\nTraceback (most recent call last):\r\n File \"/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/cli/base_command.py\", line 160, in exc_logging_wrapper\r\n status = run_func(*args)\r\n File \"/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/commands/list.py\", line 192, in run\r\n self.output_package_listing(packages, options)\r\n File \"/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/commands/list.py\", line 273, in output_package_listing\r\n data, header = format_for_columns(packages, options)\r\n File \"/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/commands/list.py\", line 316, in format_for_columns\r\n has_editables = any(x.editable for x in pkgs)\r\n File \"/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/commands/list.py\", line 316, in <genexpr>\r\n has_editables = any(x.editable for x in pkgs)\r\n File \"/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/metadata/base.py\", line 338, in editable\r\n return bool(self.editable_project_location)\r\n File \"/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/metadata/base.py\", line 176, in 
editable_project_location\r\n direct_url = self.direct_url\r\n File \"/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/metadata/base.py\", line 306, in direct_url\r\n return DirectUrl.from_json(content)\r\n File \"/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/models/direct_url.py\", line 217, in from_json\r\n return cls.from_dict(json.loads(s))\r\n File \"/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/models/direct_url.py\", line 200, in from_dict\r\n ArchiveInfo._from_dict(_get(d, dict, \"archive_info\")),\r\n File \"/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/models/direct_url.py\", line 124, in _from_dict\r\n return cls(hash=_get(d, str, \"hash\"), hashes=_get(d, dict, \"hashes\"))\r\n File \"/home/users/foo/.pyenv/versions/pip-issue/lib/python3.8/site-packages/pip/_internal/models/direct_url.py\", line 111, in __init__\r\n hash_name, hash_value = hash.split(\"=\", 1)\r\nValueError: not enough values to unpack (expected 2, got 1)\r\n```\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the [PSF Code of Conduct](https://www.python.org/psf/conduct/).\n", "before_files": [{"content": "\"\"\" PEP 610 \"\"\"\nimport json\nimport re\nimport urllib.parse\nfrom typing import Any, Dict, Iterable, Optional, Type, TypeVar, Union\n\n__all__ = [\n \"DirectUrl\",\n \"DirectUrlValidationError\",\n \"DirInfo\",\n \"ArchiveInfo\",\n \"VcsInfo\",\n]\n\nT = TypeVar(\"T\")\n\nDIRECT_URL_METADATA_NAME = \"direct_url.json\"\nENV_VAR_RE = re.compile(r\"^\\$\\{[A-Za-z0-9-_]+\\}(:\\$\\{[A-Za-z0-9-_]+\\})?$\")\n\n\nclass DirectUrlValidationError(Exception):\n pass\n\n\ndef _get(\n d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None\n) -> Optional[T]:\n \"\"\"Get value from dictionary and verify expected type.\"\"\"\n if key not in d:\n return default\n value = d[key]\n if not isinstance(value, expected_type):\n raise DirectUrlValidationError(\n \"{!r} has unexpected type for {} (expected {})\".format(\n value, key, expected_type\n )\n )\n return value\n\n\ndef _get_required(\n d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None\n) -> T:\n value = _get(d, expected_type, key, default)\n if value is None:\n raise DirectUrlValidationError(f\"{key} must have a value\")\n return value\n\n\ndef _exactly_one_of(infos: Iterable[Optional[\"InfoType\"]]) -> \"InfoType\":\n infos = [info for info in infos if info is not None]\n if not infos:\n raise DirectUrlValidationError(\n \"missing one of archive_info, dir_info, vcs_info\"\n )\n if len(infos) > 1:\n raise DirectUrlValidationError(\n \"more than one of archive_info, dir_info, vcs_info\"\n )\n assert infos[0] is not None\n return infos[0]\n\n\ndef _filter_none(**kwargs: Any) -> Dict[str, Any]:\n \"\"\"Make dict excluding None values.\"\"\"\n return {k: v for k, v in kwargs.items() if v is not None}\n\n\nclass VcsInfo:\n name = \"vcs_info\"\n\n def __init__(\n self,\n vcs: str,\n commit_id: str,\n requested_revision: Optional[str] = None,\n ) -> None:\n self.vcs = vcs\n self.requested_revision = requested_revision\n self.commit_id = commit_id\n\n @classmethod\n def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional[\"VcsInfo\"]:\n if d is None:\n return None\n return cls(\n vcs=_get_required(d, str, \"vcs\"),\n commit_id=_get_required(d, str, \"commit_id\"),\n requested_revision=_get(d, str, \"requested_revision\"),\n )\n\n def _to_dict(self) -> 
Dict[str, Any]:\n return _filter_none(\n vcs=self.vcs,\n requested_revision=self.requested_revision,\n commit_id=self.commit_id,\n )\n\n\nclass ArchiveInfo:\n name = \"archive_info\"\n\n def __init__(\n self,\n hash: Optional[str] = None,\n hashes: Optional[Dict[str, str]] = None,\n ) -> None:\n if hash is not None:\n # Auto-populate the hashes key to upgrade to the new format automatically.\n # We don't back-populate the legacy hash key.\n hash_name, hash_value = hash.split(\"=\", 1)\n if hashes is None:\n hashes = {hash_name: hash_value}\n elif hash_name not in hash:\n hashes = hashes.copy()\n hashes[hash_name] = hash_value\n self.hash = hash\n self.hashes = hashes\n\n @classmethod\n def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional[\"ArchiveInfo\"]:\n if d is None:\n return None\n return cls(hash=_get(d, str, \"hash\"), hashes=_get(d, dict, \"hashes\"))\n\n def _to_dict(self) -> Dict[str, Any]:\n return _filter_none(hash=self.hash, hashes=self.hashes)\n\n\nclass DirInfo:\n name = \"dir_info\"\n\n def __init__(\n self,\n editable: bool = False,\n ) -> None:\n self.editable = editable\n\n @classmethod\n def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional[\"DirInfo\"]:\n if d is None:\n return None\n return cls(editable=_get_required(d, bool, \"editable\", default=False))\n\n def _to_dict(self) -> Dict[str, Any]:\n return _filter_none(editable=self.editable or None)\n\n\nInfoType = Union[ArchiveInfo, DirInfo, VcsInfo]\n\n\nclass DirectUrl:\n def __init__(\n self,\n url: str,\n info: InfoType,\n subdirectory: Optional[str] = None,\n ) -> None:\n self.url = url\n self.info = info\n self.subdirectory = subdirectory\n\n def _remove_auth_from_netloc(self, netloc: str) -> str:\n if \"@\" not in netloc:\n return netloc\n user_pass, netloc_no_user_pass = netloc.split(\"@\", 1)\n if (\n isinstance(self.info, VcsInfo)\n and self.info.vcs == \"git\"\n and user_pass == \"git\"\n ):\n return netloc\n if ENV_VAR_RE.match(user_pass):\n return netloc\n return netloc_no_user_pass\n\n @property\n def redacted_url(self) -> str:\n \"\"\"url with user:password part removed unless it is formed with\n environment variables as specified in PEP 610, or it is ``git``\n in the case of a git URL.\n \"\"\"\n purl = urllib.parse.urlsplit(self.url)\n netloc = self._remove_auth_from_netloc(purl.netloc)\n surl = urllib.parse.urlunsplit(\n (purl.scheme, netloc, purl.path, purl.query, purl.fragment)\n )\n return surl\n\n def validate(self) -> None:\n self.from_dict(self.to_dict())\n\n @classmethod\n def from_dict(cls, d: Dict[str, Any]) -> \"DirectUrl\":\n return DirectUrl(\n url=_get_required(d, str, \"url\"),\n subdirectory=_get(d, str, \"subdirectory\"),\n info=_exactly_one_of(\n [\n ArchiveInfo._from_dict(_get(d, dict, \"archive_info\")),\n DirInfo._from_dict(_get(d, dict, \"dir_info\")),\n VcsInfo._from_dict(_get(d, dict, \"vcs_info\")),\n ]\n ),\n )\n\n def to_dict(self) -> Dict[str, Any]:\n res = _filter_none(\n url=self.redacted_url,\n subdirectory=self.subdirectory,\n )\n res[self.info.name] = self.info._to_dict()\n return res\n\n @classmethod\n def from_json(cls, s: str) -> \"DirectUrl\":\n return cls.from_dict(json.loads(s))\n\n def to_json(self) -> str:\n return json.dumps(self.to_dict(), sort_keys=True)\n\n def is_local_editable(self) -> bool:\n return isinstance(self.info, DirInfo) and self.info.editable\n", "path": "src/pip/_internal/models/direct_url.py"}], "after_files": [{"content": "\"\"\" PEP 610 \"\"\"\nimport json\nimport re\nimport urllib.parse\nfrom typing import Any, Dict, 
Iterable, Optional, Type, TypeVar, Union\n\n__all__ = [\n \"DirectUrl\",\n \"DirectUrlValidationError\",\n \"DirInfo\",\n \"ArchiveInfo\",\n \"VcsInfo\",\n]\n\nT = TypeVar(\"T\")\n\nDIRECT_URL_METADATA_NAME = \"direct_url.json\"\nENV_VAR_RE = re.compile(r\"^\\$\\{[A-Za-z0-9-_]+\\}(:\\$\\{[A-Za-z0-9-_]+\\})?$\")\n\n\nclass DirectUrlValidationError(Exception):\n pass\n\n\ndef _get(\n d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None\n) -> Optional[T]:\n \"\"\"Get value from dictionary and verify expected type.\"\"\"\n if key not in d:\n return default\n value = d[key]\n if not isinstance(value, expected_type):\n raise DirectUrlValidationError(\n \"{!r} has unexpected type for {} (expected {})\".format(\n value, key, expected_type\n )\n )\n return value\n\n\ndef _get_required(\n d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None\n) -> T:\n value = _get(d, expected_type, key, default)\n if value is None:\n raise DirectUrlValidationError(f\"{key} must have a value\")\n return value\n\n\ndef _exactly_one_of(infos: Iterable[Optional[\"InfoType\"]]) -> \"InfoType\":\n infos = [info for info in infos if info is not None]\n if not infos:\n raise DirectUrlValidationError(\n \"missing one of archive_info, dir_info, vcs_info\"\n )\n if len(infos) > 1:\n raise DirectUrlValidationError(\n \"more than one of archive_info, dir_info, vcs_info\"\n )\n assert infos[0] is not None\n return infos[0]\n\n\ndef _filter_none(**kwargs: Any) -> Dict[str, Any]:\n \"\"\"Make dict excluding None values.\"\"\"\n return {k: v for k, v in kwargs.items() if v is not None}\n\n\nclass VcsInfo:\n name = \"vcs_info\"\n\n def __init__(\n self,\n vcs: str,\n commit_id: str,\n requested_revision: Optional[str] = None,\n ) -> None:\n self.vcs = vcs\n self.requested_revision = requested_revision\n self.commit_id = commit_id\n\n @classmethod\n def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional[\"VcsInfo\"]:\n if d is None:\n return None\n return cls(\n vcs=_get_required(d, str, \"vcs\"),\n commit_id=_get_required(d, str, \"commit_id\"),\n requested_revision=_get(d, str, \"requested_revision\"),\n )\n\n def _to_dict(self) -> Dict[str, Any]:\n return _filter_none(\n vcs=self.vcs,\n requested_revision=self.requested_revision,\n commit_id=self.commit_id,\n )\n\n\nclass ArchiveInfo:\n name = \"archive_info\"\n\n def __init__(\n self,\n hash: Optional[str] = None,\n hashes: Optional[Dict[str, str]] = None,\n ) -> None:\n if hash is not None:\n # Auto-populate the hashes key to upgrade to the new format automatically.\n # We don't back-populate the legacy hash key.\n try:\n hash_name, hash_value = hash.split(\"=\", 1)\n except ValueError:\n raise DirectUrlValidationError(\n f\"invalid archive_info.hash format: {hash!r}\"\n )\n if hashes is None:\n hashes = {hash_name: hash_value}\n elif hash_name not in hash:\n hashes = hashes.copy()\n hashes[hash_name] = hash_value\n self.hash = hash\n self.hashes = hashes\n\n @classmethod\n def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional[\"ArchiveInfo\"]:\n if d is None:\n return None\n return cls(hash=_get(d, str, \"hash\"), hashes=_get(d, dict, \"hashes\"))\n\n def _to_dict(self) -> Dict[str, Any]:\n return _filter_none(hash=self.hash, hashes=self.hashes)\n\n\nclass DirInfo:\n name = \"dir_info\"\n\n def __init__(\n self,\n editable: bool = False,\n ) -> None:\n self.editable = editable\n\n @classmethod\n def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional[\"DirInfo\"]:\n if d is None:\n return None\n return 
cls(editable=_get_required(d, bool, \"editable\", default=False))\n\n def _to_dict(self) -> Dict[str, Any]:\n return _filter_none(editable=self.editable or None)\n\n\nInfoType = Union[ArchiveInfo, DirInfo, VcsInfo]\n\n\nclass DirectUrl:\n def __init__(\n self,\n url: str,\n info: InfoType,\n subdirectory: Optional[str] = None,\n ) -> None:\n self.url = url\n self.info = info\n self.subdirectory = subdirectory\n\n def _remove_auth_from_netloc(self, netloc: str) -> str:\n if \"@\" not in netloc:\n return netloc\n user_pass, netloc_no_user_pass = netloc.split(\"@\", 1)\n if (\n isinstance(self.info, VcsInfo)\n and self.info.vcs == \"git\"\n and user_pass == \"git\"\n ):\n return netloc\n if ENV_VAR_RE.match(user_pass):\n return netloc\n return netloc_no_user_pass\n\n @property\n def redacted_url(self) -> str:\n \"\"\"url with user:password part removed unless it is formed with\n environment variables as specified in PEP 610, or it is ``git``\n in the case of a git URL.\n \"\"\"\n purl = urllib.parse.urlsplit(self.url)\n netloc = self._remove_auth_from_netloc(purl.netloc)\n surl = urllib.parse.urlunsplit(\n (purl.scheme, netloc, purl.path, purl.query, purl.fragment)\n )\n return surl\n\n def validate(self) -> None:\n self.from_dict(self.to_dict())\n\n @classmethod\n def from_dict(cls, d: Dict[str, Any]) -> \"DirectUrl\":\n return DirectUrl(\n url=_get_required(d, str, \"url\"),\n subdirectory=_get(d, str, \"subdirectory\"),\n info=_exactly_one_of(\n [\n ArchiveInfo._from_dict(_get(d, dict, \"archive_info\")),\n DirInfo._from_dict(_get(d, dict, \"dir_info\")),\n VcsInfo._from_dict(_get(d, dict, \"vcs_info\")),\n ]\n ),\n )\n\n def to_dict(self) -> Dict[str, Any]:\n res = _filter_none(\n url=self.redacted_url,\n subdirectory=self.subdirectory,\n )\n res[self.info.name] = self.info._to_dict()\n return res\n\n @classmethod\n def from_json(cls, s: str) -> \"DirectUrl\":\n return cls.from_dict(json.loads(s))\n\n def to_json(self) -> str:\n return json.dumps(self.to_dict(), sort_keys=True)\n\n def is_local_editable(self) -> bool:\n return isinstance(self.info, DirInfo) and self.info.editable\n", "path": "src/pip/_internal/models/direct_url.py"}]} | 3,577 | 191 |
gh_patches_debug_12717 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-2764 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Schema URL is null for some messages and "" for others
While working on the getting started guide, I noticed that the console exporter sometimes outputs an empty string and sometimes `null` for the schema URL:
```json
{
"resource_metrics": [
{
"resource": {
"attributes": {
"telemetry.sdk.language": "python",
"telemetry.sdk.name": "opentelemetry",
"telemetry.sdk.version": "1.12.0rc1",
"telemetry.auto.version": "0.31b0",
"service.name": "unknown_service"
},
"schema_url": ""
},
"scope_metrics": [
{
"scope": {
"name": "app",
"version": "",
"schema_url": null
},
"metrics": [
{
"name": "roll_counter",
"description": "The number of rolls by roll value",
"unit": "",
"data": {
"data_points": [
{
"attributes": {
"roll.value": 4
},
"start_time_unix_nano": 1654790325350232600,
"time_unix_nano": 1654790332211598800,
"value": 3
},
{
"attributes": {
"roll.value": 6
},
"start_time_unix_nano": 1654790325350232600,
"time_unix_nano": 1654790332211598800,
"value": 4
},
{
"attributes": {
"roll.value": 5
},
"start_time_unix_nano": 1654790325350232600,
"time_unix_nano": 1654790332211598800,
"value": 1
},
{
"attributes": {
"roll.value": 1
},
"start_time_unix_nano": 1654790325350232600,
"time_unix_nano": 1654790332211598800,
"value": 2
},
{
"attributes": {
"roll.value": 3
},
"start_time_unix_nano": 1654790325350232600,
"time_unix_nano": 1654790332211598800,
"value": 1
}
],
"aggregation_temporality": 2,
"is_monotonic": true
}
}
],
"schema_url": null
}
],
"schema_url": ""
}
]
}
```
Note that in the OTLP protobuf representation these two are equivalent. I still think the SDK should treat the empty value consistently for all of them, though.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-sdk/src/opentelemetry/sdk/util/instrumentation.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from json import dumps
15 from typing import Optional
16
17 from deprecated import deprecated
18
19
20 class InstrumentationInfo:
21 """Immutable information about an instrumentation library module.
22
23 See `opentelemetry.trace.TracerProvider.get_tracer` for the meaning of these
24 properties.
25 """
26
27 __slots__ = ("_name", "_version", "_schema_url")
28
29 @deprecated(version="1.11.1", reason="You should use InstrumentationScope")
30 def __init__(
31 self,
32 name: str,
33 version: Optional[str] = None,
34 schema_url: Optional[str] = None,
35 ):
36 self._name = name
37 self._version = version
38 self._schema_url = schema_url
39
40 def __repr__(self):
41 return f"{type(self).__name__}({self._name}, {self._version}, {self._schema_url})"
42
43 def __hash__(self):
44 return hash((self._name, self._version, self._schema_url))
45
46 def __eq__(self, value):
47 return type(value) is type(self) and (
48 self._name,
49 self._version,
50 self._schema_url,
51 ) == (value._name, value._version, value._schema_url)
52
53 def __lt__(self, value):
54 if type(value) is not type(self):
55 return NotImplemented
56 return (self._name, self._version, self._schema_url) < (
57 value._name,
58 value._version,
59 value._schema_url,
60 )
61
62 @property
63 def schema_url(self) -> Optional[str]:
64 return self._schema_url
65
66 @property
67 def version(self) -> Optional[str]:
68 return self._version
69
70 @property
71 def name(self) -> str:
72 return self._name
73
74
75 class InstrumentationScope:
76 """A logical unit of the application code with which the emitted telemetry can be
77 associated.
78
79 See `opentelemetry.trace.TracerProvider.get_tracer` for the meaning of these
80 properties.
81 """
82
83 __slots__ = ("_name", "_version", "_schema_url")
84
85 def __init__(
86 self,
87 name: str,
88 version: Optional[str] = None,
89 schema_url: Optional[str] = None,
90 ) -> None:
91 self._name = name
92 self._version = version
93 self._schema_url = schema_url
94
95 def __repr__(self) -> str:
96 return f"{type(self).__name__}({self._name}, {self._version}, {self._schema_url})"
97
98 def __hash__(self) -> int:
99 return hash((self._name, self._version, self._schema_url))
100
101 def __eq__(self, value: object) -> bool:
102 if not isinstance(value, InstrumentationScope):
103 return NotImplemented
104 return (self._name, self._version, self._schema_url) == (
105 value._name,
106 value._version,
107 value._schema_url,
108 )
109
110 def __lt__(self, value: object) -> bool:
111 if not isinstance(value, InstrumentationScope):
112 return NotImplemented
113 return (self._name, self._version, self._schema_url) < (
114 value._name,
115 value._version,
116 value._schema_url,
117 )
118
119 @property
120 def schema_url(self) -> Optional[str]:
121 return self._schema_url
122
123 @property
124 def version(self) -> Optional[str]:
125 return self._version
126
127 @property
128 def name(self) -> str:
129 return self._name
130
131 def to_json(self, indent=4) -> str:
132 return dumps(
133 {
134 "name": self._name,
135 "version": self._version,
136 "schema_url": self._schema_url,
137 },
138 indent=indent,
139 )
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/util/instrumentation.py b/opentelemetry-sdk/src/opentelemetry/sdk/util/instrumentation.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/util/instrumentation.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/util/instrumentation.py
@@ -35,6 +35,8 @@
):
self._name = name
self._version = version
+ if schema_url is None:
+ schema_url = ""
self._schema_url = schema_url
def __repr__(self):
@@ -90,6 +92,8 @@
) -> None:
self._name = name
self._version = version
+ if schema_url is None:
+ schema_url = ""
self._schema_url = schema_url
def __repr__(self) -> str:
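The patch above makes both constructors coerce a missing schema URL to an empty string. As a quick sanity check of the intended behavior, here is a minimal sketch (assuming the patched `InstrumentationScope` from `opentelemetry.sdk.util.instrumentation`; the expected values are read off the diff, not copied from a real run):

```python
# Sketch: a missing schema_url should now normalize to "" instead of staying None.
from opentelemetry.sdk.util.instrumentation import InstrumentationScope

scope = InstrumentationScope(name="app")  # no schema_url passed
assert scope.schema_url == ""             # was None before the patch

explicit = InstrumentationScope(name="app", version="1.0", schema_url="https://example.com/schema")
assert explicit.schema_url == "https://example.com/schema"

# to_json() should consequently serialize "" rather than null for the missing case.
print(scope.to_json())
```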
| {"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/util/instrumentation.py b/opentelemetry-sdk/src/opentelemetry/sdk/util/instrumentation.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/util/instrumentation.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/util/instrumentation.py\n@@ -35,6 +35,8 @@\n ):\n self._name = name\n self._version = version\n+ if schema_url is None:\n+ schema_url = \"\"\n self._schema_url = schema_url\n \n def __repr__(self):\n@@ -90,6 +92,8 @@\n ) -> None:\n self._name = name\n self._version = version\n+ if schema_url is None:\n+ schema_url = \"\"\n self._schema_url = schema_url\n \n def __repr__(self) -> str:\n", "issue": "Schema URL is null for some messages and \"\" for others\nWhile working on the getting started guide, I notice the console exporter will sometimes output empty string and sometimes `null` for schema URL:\r\n\r\n```json\r\n{\r\n \"resource_metrics\": [\r\n {\r\n \"resource\": {\r\n \"attributes\": {\r\n \"telemetry.sdk.language\": \"python\",\r\n \"telemetry.sdk.name\": \"opentelemetry\",\r\n \"telemetry.sdk.version\": \"1.12.0rc1\",\r\n \"telemetry.auto.version\": \"0.31b0\",\r\n \"service.name\": \"unknown_service\"\r\n },\r\n \"schema_url\": \"\"\r\n },\r\n \"scope_metrics\": [\r\n {\r\n \"scope\": {\r\n \"name\": \"app\",\r\n \"version\": \"\",\r\n \"schema_url\": null\r\n },\r\n \"metrics\": [\r\n {\r\n \"name\": \"roll_counter\",\r\n \"description\": \"The number of rolls by roll value\",\r\n \"unit\": \"\",\r\n \"data\": {\r\n \"data_points\": [\r\n {\r\n \"attributes\": {\r\n \"roll.value\": 4\r\n },\r\n \"start_time_unix_nano\": 1654790325350232600,\r\n \"time_unix_nano\": 1654790332211598800,\r\n \"value\": 3\r\n },\r\n {\r\n \"attributes\": {\r\n \"roll.value\": 6\r\n },\r\n \"start_time_unix_nano\": 1654790325350232600,\r\n \"time_unix_nano\": 1654790332211598800,\r\n \"value\": 4\r\n },\r\n {\r\n \"attributes\": {\r\n \"roll.value\": 5\r\n },\r\n \"start_time_unix_nano\": 1654790325350232600,\r\n \"time_unix_nano\": 1654790332211598800,\r\n \"value\": 1\r\n },\r\n {\r\n \"attributes\": {\r\n \"roll.value\": 1\r\n },\r\n \"start_time_unix_nano\": 1654790325350232600,\r\n \"time_unix_nano\": 1654790332211598800,\r\n \"value\": 2\r\n },\r\n {\r\n \"attributes\": {\r\n \"roll.value\": 3\r\n },\r\n \"start_time_unix_nano\": 1654790325350232600,\r\n \"time_unix_nano\": 1654790332211598800,\r\n \"value\": 1\r\n }\r\n ],\r\n \"aggregation_temporality\": 2,\r\n \"is_monotonic\": true\r\n }\r\n }\r\n ],\r\n \"schema_url\": null\r\n }\r\n ],\r\n \"schema_url\": \"\"\r\n }\r\n ]\r\n}\r\n```\r\n\r\nNote, in OTLP protobuf these two are equivalent. 
I think we should have consistent treatment of the empty value for these in the SDK though.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom json import dumps\nfrom typing import Optional\n\nfrom deprecated import deprecated\n\n\nclass InstrumentationInfo:\n \"\"\"Immutable information about an instrumentation library module.\n\n See `opentelemetry.trace.TracerProvider.get_tracer` for the meaning of these\n properties.\n \"\"\"\n\n __slots__ = (\"_name\", \"_version\", \"_schema_url\")\n\n @deprecated(version=\"1.11.1\", reason=\"You should use InstrumentationScope\")\n def __init__(\n self,\n name: str,\n version: Optional[str] = None,\n schema_url: Optional[str] = None,\n ):\n self._name = name\n self._version = version\n self._schema_url = schema_url\n\n def __repr__(self):\n return f\"{type(self).__name__}({self._name}, {self._version}, {self._schema_url})\"\n\n def __hash__(self):\n return hash((self._name, self._version, self._schema_url))\n\n def __eq__(self, value):\n return type(value) is type(self) and (\n self._name,\n self._version,\n self._schema_url,\n ) == (value._name, value._version, value._schema_url)\n\n def __lt__(self, value):\n if type(value) is not type(self):\n return NotImplemented\n return (self._name, self._version, self._schema_url) < (\n value._name,\n value._version,\n value._schema_url,\n )\n\n @property\n def schema_url(self) -> Optional[str]:\n return self._schema_url\n\n @property\n def version(self) -> Optional[str]:\n return self._version\n\n @property\n def name(self) -> str:\n return self._name\n\n\nclass InstrumentationScope:\n \"\"\"A logical unit of the application code with which the emitted telemetry can be\n associated.\n\n See `opentelemetry.trace.TracerProvider.get_tracer` for the meaning of these\n properties.\n \"\"\"\n\n __slots__ = (\"_name\", \"_version\", \"_schema_url\")\n\n def __init__(\n self,\n name: str,\n version: Optional[str] = None,\n schema_url: Optional[str] = None,\n ) -> None:\n self._name = name\n self._version = version\n self._schema_url = schema_url\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}({self._name}, {self._version}, {self._schema_url})\"\n\n def __hash__(self) -> int:\n return hash((self._name, self._version, self._schema_url))\n\n def __eq__(self, value: object) -> bool:\n if not isinstance(value, InstrumentationScope):\n return NotImplemented\n return (self._name, self._version, self._schema_url) == (\n value._name,\n value._version,\n value._schema_url,\n )\n\n def __lt__(self, value: object) -> bool:\n if not isinstance(value, InstrumentationScope):\n return NotImplemented\n return (self._name, self._version, self._schema_url) < (\n value._name,\n value._version,\n value._schema_url,\n )\n\n @property\n def schema_url(self) -> Optional[str]:\n return self._schema_url\n\n @property\n def version(self) -> Optional[str]:\n return self._version\n\n @property\n def name(self) -> str:\n return self._name\n\n def 
to_json(self, indent=4) -> str:\n return dumps(\n {\n \"name\": self._name,\n \"version\": self._version,\n \"schema_url\": self._schema_url,\n },\n indent=indent,\n )\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/util/instrumentation.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom json import dumps\nfrom typing import Optional\n\nfrom deprecated import deprecated\n\n\nclass InstrumentationInfo:\n \"\"\"Immutable information about an instrumentation library module.\n\n See `opentelemetry.trace.TracerProvider.get_tracer` for the meaning of these\n properties.\n \"\"\"\n\n __slots__ = (\"_name\", \"_version\", \"_schema_url\")\n\n @deprecated(version=\"1.11.1\", reason=\"You should use InstrumentationScope\")\n def __init__(\n self,\n name: str,\n version: Optional[str] = None,\n schema_url: Optional[str] = None,\n ):\n self._name = name\n self._version = version\n if schema_url is None:\n schema_url = \"\"\n self._schema_url = schema_url\n\n def __repr__(self):\n return f\"{type(self).__name__}({self._name}, {self._version}, {self._schema_url})\"\n\n def __hash__(self):\n return hash((self._name, self._version, self._schema_url))\n\n def __eq__(self, value):\n return type(value) is type(self) and (\n self._name,\n self._version,\n self._schema_url,\n ) == (value._name, value._version, value._schema_url)\n\n def __lt__(self, value):\n if type(value) is not type(self):\n return NotImplemented\n return (self._name, self._version, self._schema_url) < (\n value._name,\n value._version,\n value._schema_url,\n )\n\n @property\n def schema_url(self) -> Optional[str]:\n return self._schema_url\n\n @property\n def version(self) -> Optional[str]:\n return self._version\n\n @property\n def name(self) -> str:\n return self._name\n\n\nclass InstrumentationScope:\n \"\"\"A logical unit of the application code with which the emitted telemetry can be\n associated.\n\n See `opentelemetry.trace.TracerProvider.get_tracer` for the meaning of these\n properties.\n \"\"\"\n\n __slots__ = (\"_name\", \"_version\", \"_schema_url\")\n\n def __init__(\n self,\n name: str,\n version: Optional[str] = None,\n schema_url: Optional[str] = None,\n ) -> None:\n self._name = name\n self._version = version\n if schema_url is None:\n schema_url = \"\"\n self._schema_url = schema_url\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}({self._name}, {self._version}, {self._schema_url})\"\n\n def __hash__(self) -> int:\n return hash((self._name, self._version, self._schema_url))\n\n def __eq__(self, value: object) -> bool:\n if not isinstance(value, InstrumentationScope):\n return NotImplemented\n return (self._name, self._version, self._schema_url) == (\n value._name,\n value._version,\n value._schema_url,\n )\n\n def __lt__(self, value: object) -> bool:\n if not isinstance(value, InstrumentationScope):\n return NotImplemented\n return (self._name, self._version, self._schema_url) < (\n value._name,\n value._version,\n 
value._schema_url,\n )\n\n @property\n def schema_url(self) -> Optional[str]:\n return self._schema_url\n\n @property\n def version(self) -> Optional[str]:\n return self._version\n\n @property\n def name(self) -> str:\n return self._name\n\n def to_json(self, indent=4) -> str:\n return dumps(\n {\n \"name\": self._name,\n \"version\": self._version,\n \"schema_url\": self._schema_url,\n },\n indent=indent,\n )\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/util/instrumentation.py"}]} | 2,247 | 190 |
gh_patches_debug_231 | rasdani/github-patches | git_diff | nipy__nipype-3199 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
REL: 1.5.0
## Summary
Prep for new feature release 1.5.0, targeting release on Monday, February 24.
Given that we just released 1.4.2 about a week ago, I'm inclined to hold this one off for any feature PRs that would like to shoot for inclusion.
A merge or postponement decision is required for all issues/PRs in https://github.com/nipy/nipype/milestone/38:
* [x] ENH: Add a ``Bandpass`` filter interface under ``algorithms.filters`` #2915 (@oesteban)
* [x] [WIP/ENH] Adds a new interface for AFNI's ``3dMEMA`` command #2953 (@JesseyWright / @oesteban)
* [x] ENH: Add interface for fslorient #2955 (@felixsc1)
* [x] [FIX] Mrtrix3 usedefault issue (#3004) (@matteomancini)
* [x] [DOC] SelectFiles docstring corrected #3041 (@AKSoo)
* [ ] FIX Ants N4BiasFieldCorrection rescale_intensities bug #3139 (@salma1601)
* [x] CI: Test Python 3.8 #3154 (@effigies)
* [x] ENH: Detect values for EulerNumber interface #3173 (@mgxd)
Will try to review the open PRs tomorrow and see if anything is close enough to push on.
## Release checklist
* [ ] Merge pending PRs
* [x] Update changelog
* [x] Update .mailmap
* [x] Update .zenodo.json
* [x] Set release number in `nipype/info.py`
* [x] Update `doc/interfaces.rst` with previous releases
* [x] Check conda-forge feedstock build (conda-forge/nipype-feedstock#67)
* [ ] Tutorial tests (https://circleci.com/workflow-run/be312bea-8273-47cf-9e52-54257d969422)
## Uncredited authors
The following authors have contributed, but not added themselves to the [`.zenodo.json`](https://github.com/nipy/nipype/blob/master/.zenodo.json) file. If you would like to be an author on Zenodo releases, please add yourself or comment with your preferred publication name, affiliation and [ORCID](https://orcid.org/). If you would like to stop being spammed whenever I'm the one doing releases, let me know, and I'll add you to a blacklist.
No entry to sort: cdla (@cdla)
No entry to sort: Gio Piantoni (@gpiantoni)
No entry to sort: Victor Férat (@vferat)
No entry to sort: Niklas Förster (@niklasfoe)
~~No entry to sort: Adam Kimbler (@adamkimbler)~~
No entry to sort: Kirstie Whitaker (@KirstieJane)
No entry to sort: Pablo Polosecki (@polosecki)
No entry to sort: Ami Tsuchida
No entry to sort: Daniel Brenner (@brennerd11)
No entry to sort: Isaiah Norton (@ihnorton)
No entry to sort: Kevin Sitek (@sitek)
No entry to sort: Luke Bloy (@bloyl)
No entry to sort: Martin Luessi (@mluessi)
No entry to sort: steve (@steve19922)
No entry to sort: Charl Linssen (@turingbirds)
No entry to sort: Félix C. Morency (@fmorency)
~~No entry to sort: Jonathan R. Williford (@williford)~~
No entry to sort: Michiel Cottaar (@MichielCottaar)
No entry to sort: Regina Kim (@reginakim)
No entry to sort: Valentin Haenel (@esc)
No entry to sort: Xu Wang
No entry to sort: maedoc (@maedoc)
I am unable to find GitHub handles for Ami Tsuchida or Xu Wang.
Apologies also to anybody who may have been missed.
## Acknowledgment
- [x] (Mandatory) I acknowledge that this contribution will be available under the Apache 2 license.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nipype/info.py`
Content:
```
1 """ This file contains defines parameters for nipy that we use to fill
2 settings in setup.py, the nipy top-level docstring, and for building the
3 docs. In setup.py in particular, we exec this file, so it cannot import nipy
4 """
5
6 # nipype version information
7 # Remove -dev for release
8 __version__ = "1.5.0-rc1.post-dev"
9
10
11 def get_nipype_gitversion():
12 """Nipype version as reported by the last commit in git
13
14 Returns
15 -------
16 None or str
17 Version of Nipype according to git.
18 """
19 import os
20 import subprocess
21
22 try:
23 import nipype
24
25 gitpath = os.path.realpath(
26 os.path.join(os.path.dirname(nipype.__file__), os.path.pardir)
27 )
28 except:
29 gitpath = os.getcwd()
30 gitpathgit = os.path.join(gitpath, ".git")
31 if not os.path.exists(gitpathgit):
32 return None
33 ver = None
34 try:
35 o, _ = subprocess.Popen(
36 "git describe", shell=True, cwd=gitpath, stdout=subprocess.PIPE
37 ).communicate()
38 except Exception:
39 pass
40 else:
41 ver = o.decode().strip().split("-")[-1]
42 return ver
43
44
45 if __version__.endswith("-dev"):
46 gitversion = get_nipype_gitversion()
47 if gitversion:
48 __version__ = "{}+{}".format(__version__, gitversion)
49
50 CLASSIFIERS = [
51 "Development Status :: 5 - Production/Stable",
52 "Environment :: Console",
53 "Intended Audience :: Science/Research",
54 "License :: OSI Approved :: Apache Software License",
55 "Operating System :: MacOS :: MacOS X",
56 "Operating System :: POSIX :: Linux",
57 "Programming Language :: Python :: 3.6",
58 "Programming Language :: Python :: 3.7",
59 "Programming Language :: Python :: 3.8",
60 "Topic :: Scientific/Engineering",
61 ]
62 PYTHON_REQUIRES = ">= 3.6"
63
64 description = "Neuroimaging in Python: Pipelines and Interfaces"
65
66 # Note: this long_description is actually a copy/paste from the top-level
67 # README.txt, so that it shows up nicely on PyPI. So please remember to edit
68 # it only in one place and sync it correctly.
69 long_description = """========================================================
70 NIPYPE: Neuroimaging in Python: Pipelines and Interfaces
71 ========================================================
72
73 Current neuroimaging software offer users an incredible opportunity to
74 analyze data using a variety of different algorithms. However, this has
75 resulted in a heterogeneous collection of specialized applications
76 without transparent interoperability or a uniform operating interface.
77
78 *Nipype*, an open-source, community-developed initiative under the
79 umbrella of `NiPy <http://nipy.org>`_, is a Python project that provides a
80 uniform interface to existing neuroimaging software and facilitates interaction
81 between these packages within a single workflow. Nipype provides an environment
82 that encourages interactive exploration of algorithms from different
83 packages (e.g., AFNI, ANTS, BRAINS, BrainSuite, Camino, FreeSurfer, FSL, MNE,
84 MRtrix, MNE, Nipy, Slicer, SPM), eases the design of workflows within and
85 between packages, and reduces the learning curve necessary to use different \
86 packages. Nipype is creating a collaborative platform for neuroimaging \
87 software development in a high-level language and addressing limitations of \
88 existing pipeline systems.
89
90 *Nipype* allows you to:
91
92 * easily interact with tools from different software packages
93 * combine processing steps from different software packages
94 * develop new workflows faster by reusing common steps from old ones
95 * process data faster by running it in parallel on many cores/machines
96 * make your research easily reproducible
97 * share your processing workflows with the community
98 """
99
100 # versions
101 NIBABEL_MIN_VERSION = "2.1.0"
102 NETWORKX_MIN_VERSION = "1.9"
103 NUMPY_MIN_VERSION = "1.13"
104 # Numpy bug in python 3.7:
105 # https://www.opensourceanswers.com/blog/you-shouldnt-use-python-37-for-data-science-right-now.html
106 NUMPY_MIN_VERSION_37 = "1.15.3"
107 SCIPY_MIN_VERSION = "0.14"
108 TRAITS_MIN_VERSION = "4.6"
109 DATEUTIL_MIN_VERSION = "2.2"
110 FUTURE_MIN_VERSION = "0.16.0"
111 SIMPLEJSON_MIN_VERSION = "3.8.0"
112 PROV_VERSION = "1.5.2"
113 RDFLIB_MIN_VERSION = "5.0.0"
114 CLICK_MIN_VERSION = "6.6.0"
115 PYDOT_MIN_VERSION = "1.2.3"
116
117 NAME = "nipype"
118 MAINTAINER = "nipype developers"
119 MAINTAINER_EMAIL = "[email protected]"
120 DESCRIPTION = description
121 LONG_DESCRIPTION = long_description
122 URL = "http://nipy.org/nipype"
123 DOWNLOAD_URL = "http://github.com/nipy/nipype/archives/master"
124 LICENSE = "Apache License, 2.0"
125 AUTHOR = "nipype developers"
126 AUTHOR_EMAIL = "[email protected]"
127 PLATFORMS = "OS Independent"
128 MAJOR = __version__.split(".")[0]
129 MINOR = __version__.split(".")[1]
130 MICRO = __version__.replace("-", ".").split(".")[2]
131 ISRELEASE = (
132 len(__version__.replace("-", ".").split(".")) == 3
133 or "post" in __version__.replace("-", ".").split(".")[-1]
134 )
135 VERSION = __version__
136 PROVIDES = ["nipype"]
137 REQUIRES = [
138 "click>=%s" % CLICK_MIN_VERSION,
139 "networkx>=%s" % NETWORKX_MIN_VERSION,
140 "nibabel>=%s" % NIBABEL_MIN_VERSION,
141 'numpy>=%s ; python_version < "3.7"' % NUMPY_MIN_VERSION,
142 'numpy>=%s ; python_version >= "3.7"' % NUMPY_MIN_VERSION_37,
143 "packaging",
144 "prov>=%s" % PROV_VERSION,
145 "pydot>=%s" % PYDOT_MIN_VERSION,
146 "pydotplus",
147 "python-dateutil>=%s" % DATEUTIL_MIN_VERSION,
148 "rdflib>=%s" % RDFLIB_MIN_VERSION,
149 "scipy>=%s" % SCIPY_MIN_VERSION,
150 "simplejson>=%s" % SIMPLEJSON_MIN_VERSION,
151 "traits>=%s,!=5.0" % TRAITS_MIN_VERSION,
152 "filelock>=3.0.0",
153 "etelemetry>=0.2.0",
154 ]
155
156 TESTS_REQUIRES = [
157 "codecov",
158 "coverage<5",
159 "pytest",
160 "pytest-cov",
161 "pytest-env",
162 "pytest-timeout",
163 ]
164
165 EXTRA_REQUIRES = {
166 "data": ["datalad"],
167 "doc": [
168 "dipy",
169 "ipython",
170 "matplotlib",
171 "nbsphinx",
172 "sphinx-argparse",
173 "sphinx>=2.1.2",
174 "sphinxcontrib-apidoc",
175 "sphinxcontrib-napoleon",
176 ],
177 "duecredit": ["duecredit"],
178 "nipy": ["nitime", "nilearn", "dipy", "nipy", "matplotlib"],
179 "profiler": ["psutil>=5.0"],
180 "pybids": ["pybids>=0.7.0"],
181 "specs": ["black"],
182 "ssh": ["paramiko"],
183 "tests": TESTS_REQUIRES,
184 "xvfbwrapper": ["xvfbwrapper"],
185 # 'mesh': ['mayavi'] # Enable when it works
186 }
187
188
189 def _list_union(iterable):
190 return list(set(sum(iterable, [])))
191
192
193 # Enable a handle to install all extra dependencies at once
194 EXTRA_REQUIRES["all"] = _list_union(EXTRA_REQUIRES.values())
195 # dev = doc + tests + specs
196 EXTRA_REQUIRES["dev"] = _list_union(
197 val for key, val in EXTRA_REQUIRES.items() if key in ("doc", "tests", "specs")
198 )
199
200 STATUS = "stable"
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nipype/info.py b/nipype/info.py
--- a/nipype/info.py
+++ b/nipype/info.py
@@ -5,7 +5,7 @@
# nipype version information
# Remove -dev for release
-__version__ = "1.5.0-rc1.post-dev"
+__version__ = "1.5.0"
def get_nipype_gitversion():
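For context on the version bump itself, the following standalone sketch mirrors the `MAJOR`/`MINOR`/`MICRO`/`ISRELEASE` parsing from `nipype/info.py` shown above and illustrates why the `-rc1.post-dev` suffix has to be dropped for the release (the commented results are what the expressions evaluate to, not output captured from nipype):

```python
# Sketch mirroring the version parsing in nipype/info.py (not imported from nipype).
def parse(version):
    parts = version.replace("-", ".").split(".")
    isrelease = len(parts) == 3 or "post" in parts[-1]
    return parts[0], parts[1], parts[2], isrelease  # MAJOR, MINOR, MICRO, ISRELEASE

print(parse("1.5.0-rc1.post-dev"))  # ('1', '5', '0', False) -> still treated as a dev version
print(parse("1.5.0"))               # ('1', '5', '0', True)  -> a proper release
```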
| {"golden_diff": "diff --git a/nipype/info.py b/nipype/info.py\n--- a/nipype/info.py\n+++ b/nipype/info.py\n@@ -5,7 +5,7 @@\n \n # nipype version information\n # Remove -dev for release\n-__version__ = \"1.5.0-rc1.post-dev\"\n+__version__ = \"1.5.0\"\n \n \n def get_nipype_gitversion():\n", "issue": "REL: 1.5.0\n## Summary\r\n\r\nPrep for new feature release 1.5.0, targeting release on Monday, February 24.\r\n\r\nGiven that we just released 1.4.2 about a week ago, I'm inclined to hold this one off for any feature PRs that would like to shoot for inclusion.\r\n\r\nRequire merge or postponement decision on all issues/PRs in https://github.com/nipy/nipype/milestone/38:\r\n\r\n* [x] ENH: Add a ``Bandpass`` filter interface under ``algorithms.filters`` #2915 (@oesteban)\r\n* [x] [WIP/ENH] Adds a new interface for AFNI's ``3dMEMA`` command #2953 (@JesseyWright / @oesteban)\r\n* [x] ENH: Add interface for fslorient #2955 (@felixsc1)\r\n* [x] [FIX] Mrtrix3 usedefault issue (#3004) (@matteomancini)\r\n* [x] [DOC] SelectFiles docstring corrected #3041 (@AKSoo)\r\n* [ ] FIX Ants N4BiasFieldCorrection rescale_intensities bug #3139 (@salma1601)\r\n* [x] CI: Test Python 3.8 #3154 (@effigies)\r\n* [x] ENH: Detect values for EulerNumber interface #3173 (@mgxd)\r\n\r\nWill try review the open PRs and see if anything is close enough to push on, tomorrow.\r\n\r\n## Release checklist\r\n\r\n* [ ] Merge pending PRs\r\n* [x] Update changelog\r\n* [x] Update .mailmap\r\n* [x] Update .zenodo.json\r\n* [x] Set release number in `nipype/info.py`\r\n* [x] Update `doc/interfaces.rst` with previous releases\r\n* [x] Check conda-forge feedstock build (conda-forge/nipype-feedstock#67)\r\n* [ ] Tutorial tests (https://circleci.com/workflow-run/be312bea-8273-47cf-9e52-54257d969422)\r\n\r\n## Uncredited authors\r\n\r\nThe following authors have contributed, but not added themselves to the [`.zenodo.json`](https://github.com/nipy/nipype/blob/master/.zenodo.json) file. If you would like to be an author on Zenodo releases, please add yourself or comment with your preferred publication name, affiliation and [ORCID](https://orcid.org/). If you would like to stop being spammed whenever I'm the one doing releases, let me know, and I'll add you to a blacklist.\r\n\r\nNo entry to sort: cdla (@cdla)\r\nNo entry to sort: Gio Piantoni (@gpiantoni)\r\nNo entry to sort: Victor F\u00e9rat (@vferat)\r\nNo entry to sort: Niklas F\u00f6rster (@niklasfoe)\r\n~~No entry to sort: Adam Kimbler (@adamkimbler)~~\r\nNo entry to sort: Kirstie Whitaker (@KirstieJane)\r\nNo entry to sort: Pablo Polosecki (@polosecki)\r\nNo entry to sort: Ami Tsuchida\r\nNo entry to sort: Daniel Brenner (@brennerd11)\r\nNo entry to sort: Isaiah Norton (@ihnorton)\r\nNo entry to sort: Kevin Sitek (@sitek)\r\nNo entry to sort: Luke Bloy (@bloyl)\r\nNo entry to sort: Martin Luessi (@mluessi)\r\nNo entry to sort: steve (@steve19922)\r\nNo entry to sort: Charl Linssen (@turingbirds)\r\nNo entry to sort: F\u00e9lix C. Morency (@fmorency)\r\n~~No entry to sort: Jonathan R. 
Williford (@williford)~~\r\nNo entry to sort: Michiel Cottaar (@MichielCottaar)\r\nNo entry to sort: Regina Kim (@reginakim)\r\nNo entry to sort: Valentin Haenel (@esc)\r\nNo entry to sort: Xu Wang\r\nNo entry to sort: maedoc (@maedoc)\r\n\r\nI am unable to find GitHub handles for Ami Tsuchida or Xu Wang.\r\n\r\nApologies also to anybody who may have \r\n\r\n## Acknowledgment\r\n\r\n- [x] \\(Mandatory\\) I acknowledge that this contribution will be available under the Apache 2 license.\n", "before_files": [{"content": "\"\"\" This file contains defines parameters for nipy that we use to fill\nsettings in setup.py, the nipy top-level docstring, and for building the\ndocs. In setup.py in particular, we exec this file, so it cannot import nipy\n\"\"\"\n\n# nipype version information\n# Remove -dev for release\n__version__ = \"1.5.0-rc1.post-dev\"\n\n\ndef get_nipype_gitversion():\n \"\"\"Nipype version as reported by the last commit in git\n\n Returns\n -------\n None or str\n Version of Nipype according to git.\n \"\"\"\n import os\n import subprocess\n\n try:\n import nipype\n\n gitpath = os.path.realpath(\n os.path.join(os.path.dirname(nipype.__file__), os.path.pardir)\n )\n except:\n gitpath = os.getcwd()\n gitpathgit = os.path.join(gitpath, \".git\")\n if not os.path.exists(gitpathgit):\n return None\n ver = None\n try:\n o, _ = subprocess.Popen(\n \"git describe\", shell=True, cwd=gitpath, stdout=subprocess.PIPE\n ).communicate()\n except Exception:\n pass\n else:\n ver = o.decode().strip().split(\"-\")[-1]\n return ver\n\n\nif __version__.endswith(\"-dev\"):\n gitversion = get_nipype_gitversion()\n if gitversion:\n __version__ = \"{}+{}\".format(__version__, gitversion)\n\nCLASSIFIERS = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering\",\n]\nPYTHON_REQUIRES = \">= 3.6\"\n\ndescription = \"Neuroimaging in Python: Pipelines and Interfaces\"\n\n# Note: this long_description is actually a copy/paste from the top-level\n# README.txt, so that it shows up nicely on PyPI. So please remember to edit\n# it only in one place and sync it correctly.\nlong_description = \"\"\"========================================================\nNIPYPE: Neuroimaging in Python: Pipelines and Interfaces\n========================================================\n\nCurrent neuroimaging software offer users an incredible opportunity to\nanalyze data using a variety of different algorithms. However, this has\nresulted in a heterogeneous collection of specialized applications\nwithout transparent interoperability or a uniform operating interface.\n\n*Nipype*, an open-source, community-developed initiative under the\numbrella of `NiPy <http://nipy.org>`_, is a Python project that provides a\nuniform interface to existing neuroimaging software and facilitates interaction\nbetween these packages within a single workflow. 
Nipype provides an environment\nthat encourages interactive exploration of algorithms from different\npackages (e.g., AFNI, ANTS, BRAINS, BrainSuite, Camino, FreeSurfer, FSL, MNE,\nMRtrix, MNE, Nipy, Slicer, SPM), eases the design of workflows within and\nbetween packages, and reduces the learning curve necessary to use different \\\npackages. Nipype is creating a collaborative platform for neuroimaging \\\nsoftware development in a high-level language and addressing limitations of \\\nexisting pipeline systems.\n\n*Nipype* allows you to:\n\n* easily interact with tools from different software packages\n* combine processing steps from different software packages\n* develop new workflows faster by reusing common steps from old ones\n* process data faster by running it in parallel on many cores/machines\n* make your research easily reproducible\n* share your processing workflows with the community\n\"\"\"\n\n# versions\nNIBABEL_MIN_VERSION = \"2.1.0\"\nNETWORKX_MIN_VERSION = \"1.9\"\nNUMPY_MIN_VERSION = \"1.13\"\n# Numpy bug in python 3.7:\n# https://www.opensourceanswers.com/blog/you-shouldnt-use-python-37-for-data-science-right-now.html\nNUMPY_MIN_VERSION_37 = \"1.15.3\"\nSCIPY_MIN_VERSION = \"0.14\"\nTRAITS_MIN_VERSION = \"4.6\"\nDATEUTIL_MIN_VERSION = \"2.2\"\nFUTURE_MIN_VERSION = \"0.16.0\"\nSIMPLEJSON_MIN_VERSION = \"3.8.0\"\nPROV_VERSION = \"1.5.2\"\nRDFLIB_MIN_VERSION = \"5.0.0\"\nCLICK_MIN_VERSION = \"6.6.0\"\nPYDOT_MIN_VERSION = \"1.2.3\"\n\nNAME = \"nipype\"\nMAINTAINER = \"nipype developers\"\nMAINTAINER_EMAIL = \"[email protected]\"\nDESCRIPTION = description\nLONG_DESCRIPTION = long_description\nURL = \"http://nipy.org/nipype\"\nDOWNLOAD_URL = \"http://github.com/nipy/nipype/archives/master\"\nLICENSE = \"Apache License, 2.0\"\nAUTHOR = \"nipype developers\"\nAUTHOR_EMAIL = \"[email protected]\"\nPLATFORMS = \"OS Independent\"\nMAJOR = __version__.split(\".\")[0]\nMINOR = __version__.split(\".\")[1]\nMICRO = __version__.replace(\"-\", \".\").split(\".\")[2]\nISRELEASE = (\n len(__version__.replace(\"-\", \".\").split(\".\")) == 3\n or \"post\" in __version__.replace(\"-\", \".\").split(\".\")[-1]\n)\nVERSION = __version__\nPROVIDES = [\"nipype\"]\nREQUIRES = [\n \"click>=%s\" % CLICK_MIN_VERSION,\n \"networkx>=%s\" % NETWORKX_MIN_VERSION,\n \"nibabel>=%s\" % NIBABEL_MIN_VERSION,\n 'numpy>=%s ; python_version < \"3.7\"' % NUMPY_MIN_VERSION,\n 'numpy>=%s ; python_version >= \"3.7\"' % NUMPY_MIN_VERSION_37,\n \"packaging\",\n \"prov>=%s\" % PROV_VERSION,\n \"pydot>=%s\" % PYDOT_MIN_VERSION,\n \"pydotplus\",\n \"python-dateutil>=%s\" % DATEUTIL_MIN_VERSION,\n \"rdflib>=%s\" % RDFLIB_MIN_VERSION,\n \"scipy>=%s\" % SCIPY_MIN_VERSION,\n \"simplejson>=%s\" % SIMPLEJSON_MIN_VERSION,\n \"traits>=%s,!=5.0\" % TRAITS_MIN_VERSION,\n \"filelock>=3.0.0\",\n \"etelemetry>=0.2.0\",\n]\n\nTESTS_REQUIRES = [\n \"codecov\",\n \"coverage<5\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-env\",\n \"pytest-timeout\",\n]\n\nEXTRA_REQUIRES = {\n \"data\": [\"datalad\"],\n \"doc\": [\n \"dipy\",\n \"ipython\",\n \"matplotlib\",\n \"nbsphinx\",\n \"sphinx-argparse\",\n \"sphinx>=2.1.2\",\n \"sphinxcontrib-apidoc\",\n \"sphinxcontrib-napoleon\",\n ],\n \"duecredit\": [\"duecredit\"],\n \"nipy\": [\"nitime\", \"nilearn\", \"dipy\", \"nipy\", \"matplotlib\"],\n \"profiler\": [\"psutil>=5.0\"],\n \"pybids\": [\"pybids>=0.7.0\"],\n \"specs\": [\"black\"],\n \"ssh\": [\"paramiko\"],\n \"tests\": TESTS_REQUIRES,\n \"xvfbwrapper\": [\"xvfbwrapper\"],\n # 'mesh': ['mayavi'] # Enable when it works\n}\n\n\ndef 
_list_union(iterable):\n return list(set(sum(iterable, [])))\n\n\n# Enable a handle to install all extra dependencies at once\nEXTRA_REQUIRES[\"all\"] = _list_union(EXTRA_REQUIRES.values())\n# dev = doc + tests + specs\nEXTRA_REQUIRES[\"dev\"] = _list_union(\n val for key, val in EXTRA_REQUIRES.items() if key in (\"doc\", \"tests\", \"specs\")\n)\n\nSTATUS = \"stable\"\n", "path": "nipype/info.py"}], "after_files": [{"content": "\"\"\" This file contains defines parameters for nipy that we use to fill\nsettings in setup.py, the nipy top-level docstring, and for building the\ndocs. In setup.py in particular, we exec this file, so it cannot import nipy\n\"\"\"\n\n# nipype version information\n# Remove -dev for release\n__version__ = \"1.5.0\"\n\n\ndef get_nipype_gitversion():\n \"\"\"Nipype version as reported by the last commit in git\n\n Returns\n -------\n None or str\n Version of Nipype according to git.\n \"\"\"\n import os\n import subprocess\n\n try:\n import nipype\n\n gitpath = os.path.realpath(\n os.path.join(os.path.dirname(nipype.__file__), os.path.pardir)\n )\n except:\n gitpath = os.getcwd()\n gitpathgit = os.path.join(gitpath, \".git\")\n if not os.path.exists(gitpathgit):\n return None\n ver = None\n try:\n o, _ = subprocess.Popen(\n \"git describe\", shell=True, cwd=gitpath, stdout=subprocess.PIPE\n ).communicate()\n except Exception:\n pass\n else:\n ver = o.decode().strip().split(\"-\")[-1]\n return ver\n\n\nif __version__.endswith(\"-dev\"):\n gitversion = get_nipype_gitversion()\n if gitversion:\n __version__ = \"{}+{}\".format(__version__, gitversion)\n\nCLASSIFIERS = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering\",\n]\nPYTHON_REQUIRES = \">= 3.6\"\n\ndescription = \"Neuroimaging in Python: Pipelines and Interfaces\"\n\n# Note: this long_description is actually a copy/paste from the top-level\n# README.txt, so that it shows up nicely on PyPI. So please remember to edit\n# it only in one place and sync it correctly.\nlong_description = \"\"\"========================================================\nNIPYPE: Neuroimaging in Python: Pipelines and Interfaces\n========================================================\n\nCurrent neuroimaging software offer users an incredible opportunity to\nanalyze data using a variety of different algorithms. However, this has\nresulted in a heterogeneous collection of specialized applications\nwithout transparent interoperability or a uniform operating interface.\n\n*Nipype*, an open-source, community-developed initiative under the\numbrella of `NiPy <http://nipy.org>`_, is a Python project that provides a\nuniform interface to existing neuroimaging software and facilitates interaction\nbetween these packages within a single workflow. Nipype provides an environment\nthat encourages interactive exploration of algorithms from different\npackages (e.g., AFNI, ANTS, BRAINS, BrainSuite, Camino, FreeSurfer, FSL, MNE,\nMRtrix, MNE, Nipy, Slicer, SPM), eases the design of workflows within and\nbetween packages, and reduces the learning curve necessary to use different \\\npackages. 
Nipype is creating a collaborative platform for neuroimaging \\\nsoftware development in a high-level language and addressing limitations of \\\nexisting pipeline systems.\n\n*Nipype* allows you to:\n\n* easily interact with tools from different software packages\n* combine processing steps from different software packages\n* develop new workflows faster by reusing common steps from old ones\n* process data faster by running it in parallel on many cores/machines\n* make your research easily reproducible\n* share your processing workflows with the community\n\"\"\"\n\n# versions\nNIBABEL_MIN_VERSION = \"2.1.0\"\nNETWORKX_MIN_VERSION = \"1.9\"\nNUMPY_MIN_VERSION = \"1.13\"\n# Numpy bug in python 3.7:\n# https://www.opensourceanswers.com/blog/you-shouldnt-use-python-37-for-data-science-right-now.html\nNUMPY_MIN_VERSION_37 = \"1.15.3\"\nSCIPY_MIN_VERSION = \"0.14\"\nTRAITS_MIN_VERSION = \"4.6\"\nDATEUTIL_MIN_VERSION = \"2.2\"\nFUTURE_MIN_VERSION = \"0.16.0\"\nSIMPLEJSON_MIN_VERSION = \"3.8.0\"\nPROV_VERSION = \"1.5.2\"\nRDFLIB_MIN_VERSION = \"5.0.0\"\nCLICK_MIN_VERSION = \"6.6.0\"\nPYDOT_MIN_VERSION = \"1.2.3\"\n\nNAME = \"nipype\"\nMAINTAINER = \"nipype developers\"\nMAINTAINER_EMAIL = \"[email protected]\"\nDESCRIPTION = description\nLONG_DESCRIPTION = long_description\nURL = \"http://nipy.org/nipype\"\nDOWNLOAD_URL = \"http://github.com/nipy/nipype/archives/master\"\nLICENSE = \"Apache License, 2.0\"\nAUTHOR = \"nipype developers\"\nAUTHOR_EMAIL = \"[email protected]\"\nPLATFORMS = \"OS Independent\"\nMAJOR = __version__.split(\".\")[0]\nMINOR = __version__.split(\".\")[1]\nMICRO = __version__.replace(\"-\", \".\").split(\".\")[2]\nISRELEASE = (\n len(__version__.replace(\"-\", \".\").split(\".\")) == 3\n or \"post\" in __version__.replace(\"-\", \".\").split(\".\")[-1]\n)\nVERSION = __version__\nPROVIDES = [\"nipype\"]\nREQUIRES = [\n \"click>=%s\" % CLICK_MIN_VERSION,\n \"networkx>=%s\" % NETWORKX_MIN_VERSION,\n \"nibabel>=%s\" % NIBABEL_MIN_VERSION,\n 'numpy>=%s ; python_version < \"3.7\"' % NUMPY_MIN_VERSION,\n 'numpy>=%s ; python_version >= \"3.7\"' % NUMPY_MIN_VERSION_37,\n \"packaging\",\n \"prov>=%s\" % PROV_VERSION,\n \"pydot>=%s\" % PYDOT_MIN_VERSION,\n \"pydotplus\",\n \"python-dateutil>=%s\" % DATEUTIL_MIN_VERSION,\n \"rdflib>=%s\" % RDFLIB_MIN_VERSION,\n \"scipy>=%s\" % SCIPY_MIN_VERSION,\n \"simplejson>=%s\" % SIMPLEJSON_MIN_VERSION,\n \"traits>=%s,!=5.0\" % TRAITS_MIN_VERSION,\n \"filelock>=3.0.0\",\n \"etelemetry>=0.2.0\",\n]\n\nTESTS_REQUIRES = [\n \"codecov\",\n \"coverage<5\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-env\",\n \"pytest-timeout\",\n]\n\nEXTRA_REQUIRES = {\n \"data\": [\"datalad\"],\n \"doc\": [\n \"dipy\",\n \"ipython\",\n \"matplotlib\",\n \"nbsphinx\",\n \"sphinx-argparse\",\n \"sphinx>=2.1.2\",\n \"sphinxcontrib-apidoc\",\n \"sphinxcontrib-napoleon\",\n ],\n \"duecredit\": [\"duecredit\"],\n \"nipy\": [\"nitime\", \"nilearn\", \"dipy\", \"nipy\", \"matplotlib\"],\n \"profiler\": [\"psutil>=5.0\"],\n \"pybids\": [\"pybids>=0.7.0\"],\n \"specs\": [\"black\"],\n \"ssh\": [\"paramiko\"],\n \"tests\": TESTS_REQUIRES,\n \"xvfbwrapper\": [\"xvfbwrapper\"],\n # 'mesh': ['mayavi'] # Enable when it works\n}\n\n\ndef _list_union(iterable):\n return list(set(sum(iterable, [])))\n\n\n# Enable a handle to install all extra dependencies at once\nEXTRA_REQUIRES[\"all\"] = _list_union(EXTRA_REQUIRES.values())\n# dev = doc + tests + specs\nEXTRA_REQUIRES[\"dev\"] = _list_union(\n val for key, val in EXTRA_REQUIRES.items() if key in (\"doc\", \"tests\", 
\"specs\")\n)\n\nSTATUS = \"stable\"\n", "path": "nipype/info.py"}]} | 3,490 | 94 |
gh_patches_debug_40055 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-587 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for weights paths to the SEMANTIC_SEGMENTATION_BACKBONES registry
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flash/image/segmentation/heads.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from functools import partial
15 from typing import Callable
16
17 from flash.core.registry import FlashRegistry
18 from flash.core.utilities.imports import _SEGMENTATION_MODELS_AVAILABLE
19
20 if _SEGMENTATION_MODELS_AVAILABLE:
21 import segmentation_models_pytorch as smp
22
23 SMP_MODEL_CLASS = [
24 smp.Unet, smp.UnetPlusPlus, smp.MAnet, smp.Linknet, smp.FPN, smp.PSPNet, smp.DeepLabV3, smp.DeepLabV3Plus,
25 smp.PAN
26 ]
27 SMP_MODELS = {a.__name__.lower(): a for a in SMP_MODEL_CLASS}
28
29 SEMANTIC_SEGMENTATION_HEADS = FlashRegistry("backbones")
30
31 if _SEGMENTATION_MODELS_AVAILABLE:
32
33 def _load_smp_head(
34 head: str,
35 backbone: str,
36 pretrained: bool = True,
37 num_classes: int = 1,
38 in_channels: int = 3,
39 **kwargs,
40 ) -> Callable:
41
42 if head not in SMP_MODELS:
43 raise NotImplementedError(f"{head} is not implemented! Supported heads -> {SMP_MODELS.keys()}")
44
45 encoder_weights = None
46 if pretrained:
47 encoder_weights = "imagenet"
48
49 return smp.create_model(
50 arch=head,
51 encoder_name=backbone,
52 encoder_weights=encoder_weights,
53 classes=num_classes,
54 in_channels=in_channels,
55 **kwargs,
56 )
57
58 for model_name in SMP_MODELS:
59 SEMANTIC_SEGMENTATION_HEADS(
60 partial(_load_smp_head, head=model_name),
61 name=model_name,
62 namespace="image/segmentation",
63 package="segmentation_models.pytorch"
64 )
65
```
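The registry entries above are plain callables, so a head can also be built directly from the registry outside of the task. A minimal usage sketch (assuming `segmentation-models-pytorch` is installed; the encoder name `"resnet50"` and the class count are illustrative choices, not values from the issue):

```python
# Sketch: build an FPN head with a ResNet-50 encoder through the registry defined above.
from flash.image.segmentation.heads import SEMANTIC_SEGMENTATION_HEADS

fpn_builder = SEMANTIC_SEGMENTATION_HEADS.get("fpn")  # partial(_load_smp_head, head="fpn")
fpn = fpn_builder(backbone="resnet50", pretrained=True, num_classes=21, in_channels=3)
print(type(fpn))  # a segmentation_models_pytorch FPN module with a pretrained encoder
```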
Path: `flash/image/segmentation/model.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Type, Union
15
16 import torch
17 from torch import nn
18 from torch.nn import functional as F
19 from torchmetrics import IoU, Metric
20
21 from flash.core.classification import ClassificationTask
22 from flash.core.data.data_source import DefaultDataKeys
23 from flash.core.data.process import Postprocess, Serializer
24 from flash.core.registry import FlashRegistry
25 from flash.core.utilities.imports import _KORNIA_AVAILABLE
26 from flash.image.segmentation.backbones import SEMANTIC_SEGMENTATION_BACKBONES
27 from flash.image.segmentation.heads import SEMANTIC_SEGMENTATION_HEADS
28 from flash.image.segmentation.serialization import SegmentationLabels
29
30 if _KORNIA_AVAILABLE:
31 import kornia as K
32
33
34 class SemanticSegmentationPostprocess(Postprocess):
35
36 def per_sample_transform(self, sample: Any) -> Any:
37 resize = K.geometry.Resize(sample[DefaultDataKeys.METADATA]["size"][-2:], interpolation='bilinear')
38 sample[DefaultDataKeys.PREDS] = resize(torch.stack(sample[DefaultDataKeys.PREDS]))
39 sample[DefaultDataKeys.INPUT] = resize(torch.stack(sample[DefaultDataKeys.INPUT]))
40 return super().per_sample_transform(sample)
41
42
43 class SemanticSegmentation(ClassificationTask):
44 """``SemanticSegmentation`` is a :class:`~flash.Task` for semantic segmentation of images. For more details, see
45 :ref:`semantic_segmentation`.
46
47 Args:
48 num_classes: Number of classes to classify.
49 backbone: A string or model to use to compute image features.
50 backbone_kwargs: Additional arguments for the backbone configuration.
51 head: A string or (model, num_features) tuple to use to compute image features.
52 head_kwargs: Additional arguments for the head configuration.
53 pretrained: Use a pretrained backbone.
54 loss_fn: Loss function for training.
55 optimizer: Optimizer to use for training.
56 metrics: Metrics to compute for training and evaluation. Can either be an metric from the `torchmetrics`
57 package, a custom metric inherenting from `torchmetrics.Metric`, a callable function or a list/dict
58 containing a combination of the aforementioned. In all cases, each metric needs to have the signature
59 `metric(preds,target)` and return a single scalar tensor. Defaults to :class:`torchmetrics.IOU`.
60 learning_rate: Learning rate to use for training.
61 multi_label: Whether the targets are multi-label or not.
62 serializer: The :class:`~flash.core.data.process.Serializer` to use when serializing prediction outputs.
63 """
64
65 postprocess_cls = SemanticSegmentationPostprocess
66
67 backbones: FlashRegistry = SEMANTIC_SEGMENTATION_BACKBONES
68
69 heads: FlashRegistry = SEMANTIC_SEGMENTATION_HEADS
70
71 required_extras: str = "image"
72
73 def __init__(
74 self,
75 num_classes: int,
76 backbone: Union[str, nn.Module] = "resnet50",
77 backbone_kwargs: Optional[Dict] = None,
78 head: str = "fpn",
79 head_kwargs: Optional[Dict] = None,
80 pretrained: bool = True,
81 loss_fn: Optional[Callable] = None,
82 optimizer: Type[torch.optim.Optimizer] = torch.optim.AdamW,
83 metrics: Union[Metric, Callable, Mapping, Sequence, None] = None,
84 learning_rate: float = 1e-3,
85 multi_label: bool = False,
86 serializer: Optional[Union[Serializer, Mapping[str, Serializer]]] = None,
87 postprocess: Optional[Postprocess] = None,
88 ) -> None:
89 if metrics is None:
90 metrics = IoU(num_classes=num_classes)
91
92 if loss_fn is None:
93 loss_fn = F.cross_entropy
94
95 # TODO: need to check for multi_label
96 if multi_label:
97 raise NotImplementedError("Multi-label not supported yet.")
98
99 super().__init__(
100 model=None,
101 loss_fn=loss_fn,
102 optimizer=optimizer,
103 metrics=metrics,
104 learning_rate=learning_rate,
105 serializer=serializer or SegmentationLabels(),
106 postprocess=postprocess or self.postprocess_cls()
107 )
108
109 self.save_hyperparameters()
110
111 if not backbone_kwargs:
112 backbone_kwargs = {}
113
114 if not head_kwargs:
115 head_kwargs = {}
116
117 if isinstance(backbone, nn.Module):
118 self.backbone = backbone
119 else:
120 self.backbone = self.backbones.get(backbone)(**backbone_kwargs)
121
122 self.head: nn.Module = self.heads.get(head)(
123 backbone=self.backbone, num_classes=num_classes, pretrained=pretrained, **head_kwargs
124 )
125 self.backbone = self.head.encoder
126
127 def training_step(self, batch: Any, batch_idx: int) -> Any:
128 batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])
129 return super().training_step(batch, batch_idx)
130
131 def validation_step(self, batch: Any, batch_idx: int) -> Any:
132 batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])
133 return super().validation_step(batch, batch_idx)
134
135 def test_step(self, batch: Any, batch_idx: int) -> Any:
136 batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])
137 return super().test_step(batch, batch_idx)
138
139 def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:
140 batch_input = (batch[DefaultDataKeys.INPUT])
141 batch[DefaultDataKeys.PREDS] = super().predict_step(batch_input, batch_idx, dataloader_idx=dataloader_idx)
142 return batch
143
144 def forward(self, x) -> torch.Tensor:
145 res = self.head(x)
146
147 # some frameworks like torchvision return a dict.
148 # In particular, torchvision segmentation models return the output logits
149 # in the key `out`.
150 if torch.jit.isinstance(res, Dict[str, torch.Tensor]):
151 out = res['out']
152 elif torch.is_tensor(res):
153 out = res
154 else:
155 raise NotImplementedError(f"Unsupported output type: {type(res)}")
156
157 return out
158
159 @staticmethod
160 def _ci_benchmark_fn(history: List[Dict[str, Any]]):
161 """
162 This function is used only for debugging usage with CI
163 """
164 assert history[-1]["val_iou"] > 0.2
165
```
Path: `flash/image/segmentation/backbones.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from functools import partial
15
16 from flash.core.registry import FlashRegistry
17 from flash.core.utilities.imports import _SEGMENTATION_MODELS_AVAILABLE
18
19 if _SEGMENTATION_MODELS_AVAILABLE:
20 import segmentation_models_pytorch as smp
21
22 SEMANTIC_SEGMENTATION_BACKBONES = FlashRegistry("backbones")
23
24 if _SEGMENTATION_MODELS_AVAILABLE:
25
26 ENCODERS = smp.encoders.get_encoder_names()
27
28 def _load_smp_backbone(backbone: str, **_) -> str:
29 return backbone
30
31 for encoder_name in ENCODERS:
32 short_name = encoder_name
33 if short_name.startswith("timm-"):
34 short_name = encoder_name[5:]
35 SEMANTIC_SEGMENTATION_BACKBONES(
36 partial(_load_smp_backbone, backbone=encoder_name), name=short_name, namespace="image/segmentation"
37 )
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flash/image/segmentation/backbones.py b/flash/image/segmentation/backbones.py
--- a/flash/image/segmentation/backbones.py
+++ b/flash/image/segmentation/backbones.py
@@ -32,6 +32,11 @@
short_name = encoder_name
if short_name.startswith("timm-"):
short_name = encoder_name[5:]
+
+ available_weights = smp.encoders.encoders[encoder_name]["pretrained_settings"].keys()
SEMANTIC_SEGMENTATION_BACKBONES(
- partial(_load_smp_backbone, backbone=encoder_name), name=short_name, namespace="image/segmentation"
+ partial(_load_smp_backbone, backbone=encoder_name),
+ name=short_name,
+ namespace="image/segmentation",
+ weights_paths=available_weights,
)
diff --git a/flash/image/segmentation/heads.py b/flash/image/segmentation/heads.py
--- a/flash/image/segmentation/heads.py
+++ b/flash/image/segmentation/heads.py
@@ -12,7 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
-from typing import Callable
+from typing import Union
+
+from torch import nn
from flash.core.registry import FlashRegistry
from flash.core.utilities.imports import _SEGMENTATION_MODELS_AVAILABLE
@@ -33,17 +35,19 @@
def _load_smp_head(
head: str,
backbone: str,
- pretrained: bool = True,
+ pretrained: Union[bool, str] = True,
num_classes: int = 1,
in_channels: int = 3,
**kwargs,
- ) -> Callable:
+ ) -> nn.Module:
if head not in SMP_MODELS:
raise NotImplementedError(f"{head} is not implemented! Supported heads -> {SMP_MODELS.keys()}")
encoder_weights = None
- if pretrained:
+ if isinstance(pretrained, str):
+ encoder_weights = pretrained
+ elif pretrained:
encoder_weights = "imagenet"
return smp.create_model(
diff --git a/flash/image/segmentation/model.py b/flash/image/segmentation/model.py
--- a/flash/image/segmentation/model.py
+++ b/flash/image/segmentation/model.py
@@ -77,7 +77,7 @@
backbone_kwargs: Optional[Dict] = None,
head: str = "fpn",
head_kwargs: Optional[Dict] = None,
- pretrained: bool = True,
+ pretrained: Union[bool, str] = True,
loss_fn: Optional[Callable] = None,
optimizer: Type[torch.optim.Optimizer] = torch.optim.AdamW,
metrics: Union[Metric, Callable, Mapping, Sequence, None] = None,
@@ -156,6 +156,16 @@
return out
+ @classmethod
+ def available_pretrained_weights(cls, backbone: str):
+ result = cls.backbones.get(backbone, with_metadata=True)
+ pretrained_weights = None
+
+ if "weights_paths" in result["metadata"]:
+ pretrained_weights = list(result["metadata"]["weights_paths"])
+
+ return pretrained_weights
+
@staticmethod
def _ci_benchmark_fn(history: List[Dict[str, Any]]):
"""
| {"golden_diff": "diff --git a/flash/image/segmentation/backbones.py b/flash/image/segmentation/backbones.py\n--- a/flash/image/segmentation/backbones.py\n+++ b/flash/image/segmentation/backbones.py\n@@ -32,6 +32,11 @@\n short_name = encoder_name\n if short_name.startswith(\"timm-\"):\n short_name = encoder_name[5:]\n+\n+ available_weights = smp.encoders.encoders[encoder_name][\"pretrained_settings\"].keys()\n SEMANTIC_SEGMENTATION_BACKBONES(\n- partial(_load_smp_backbone, backbone=encoder_name), name=short_name, namespace=\"image/segmentation\"\n+ partial(_load_smp_backbone, backbone=encoder_name),\n+ name=short_name,\n+ namespace=\"image/segmentation\",\n+ weights_paths=available_weights,\n )\ndiff --git a/flash/image/segmentation/heads.py b/flash/image/segmentation/heads.py\n--- a/flash/image/segmentation/heads.py\n+++ b/flash/image/segmentation/heads.py\n@@ -12,7 +12,9 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n from functools import partial\n-from typing import Callable\n+from typing import Union\n+\n+from torch import nn\n \n from flash.core.registry import FlashRegistry\n from flash.core.utilities.imports import _SEGMENTATION_MODELS_AVAILABLE\n@@ -33,17 +35,19 @@\n def _load_smp_head(\n head: str,\n backbone: str,\n- pretrained: bool = True,\n+ pretrained: Union[bool, str] = True,\n num_classes: int = 1,\n in_channels: int = 3,\n **kwargs,\n- ) -> Callable:\n+ ) -> nn.Module:\n \n if head not in SMP_MODELS:\n raise NotImplementedError(f\"{head} is not implemented! Supported heads -> {SMP_MODELS.keys()}\")\n \n encoder_weights = None\n- if pretrained:\n+ if isinstance(pretrained, str):\n+ encoder_weights = pretrained\n+ elif pretrained:\n encoder_weights = \"imagenet\"\n \n return smp.create_model(\ndiff --git a/flash/image/segmentation/model.py b/flash/image/segmentation/model.py\n--- a/flash/image/segmentation/model.py\n+++ b/flash/image/segmentation/model.py\n@@ -77,7 +77,7 @@\n backbone_kwargs: Optional[Dict] = None,\n head: str = \"fpn\",\n head_kwargs: Optional[Dict] = None,\n- pretrained: bool = True,\n+ pretrained: Union[bool, str] = True,\n loss_fn: Optional[Callable] = None,\n optimizer: Type[torch.optim.Optimizer] = torch.optim.AdamW,\n metrics: Union[Metric, Callable, Mapping, Sequence, None] = None,\n@@ -156,6 +156,16 @@\n \n return out\n \n+ @classmethod\n+ def available_pretrained_weights(cls, backbone: str):\n+ result = cls.backbones.get(backbone, with_metadata=True)\n+ pretrained_weights = None\n+\n+ if \"weights_paths\" in result[\"metadata\"]:\n+ pretrained_weights = list(result[\"metadata\"][\"weights_paths\"])\n+\n+ return pretrained_weights\n+\n @staticmethod\n def _ci_benchmark_fn(history: List[Dict[str, Any]]):\n \"\"\"\n", "issue": "Add support for weights paths to the SEMANTIC_SEGMENTATION_BACKBONES registry\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom functools import partial\nfrom typing import 
Callable\n\nfrom flash.core.registry import FlashRegistry\nfrom flash.core.utilities.imports import _SEGMENTATION_MODELS_AVAILABLE\n\nif _SEGMENTATION_MODELS_AVAILABLE:\n import segmentation_models_pytorch as smp\n\n SMP_MODEL_CLASS = [\n smp.Unet, smp.UnetPlusPlus, smp.MAnet, smp.Linknet, smp.FPN, smp.PSPNet, smp.DeepLabV3, smp.DeepLabV3Plus,\n smp.PAN\n ]\n SMP_MODELS = {a.__name__.lower(): a for a in SMP_MODEL_CLASS}\n\nSEMANTIC_SEGMENTATION_HEADS = FlashRegistry(\"backbones\")\n\nif _SEGMENTATION_MODELS_AVAILABLE:\n\n def _load_smp_head(\n head: str,\n backbone: str,\n pretrained: bool = True,\n num_classes: int = 1,\n in_channels: int = 3,\n **kwargs,\n ) -> Callable:\n\n if head not in SMP_MODELS:\n raise NotImplementedError(f\"{head} is not implemented! Supported heads -> {SMP_MODELS.keys()}\")\n\n encoder_weights = None\n if pretrained:\n encoder_weights = \"imagenet\"\n\n return smp.create_model(\n arch=head,\n encoder_name=backbone,\n encoder_weights=encoder_weights,\n classes=num_classes,\n in_channels=in_channels,\n **kwargs,\n )\n\n for model_name in SMP_MODELS:\n SEMANTIC_SEGMENTATION_HEADS(\n partial(_load_smp_head, head=model_name),\n name=model_name,\n namespace=\"image/segmentation\",\n package=\"segmentation_models.pytorch\"\n )\n", "path": "flash/image/segmentation/heads.py"}, {"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Type, Union\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torchmetrics import IoU, Metric\n\nfrom flash.core.classification import ClassificationTask\nfrom flash.core.data.data_source import DefaultDataKeys\nfrom flash.core.data.process import Postprocess, Serializer\nfrom flash.core.registry import FlashRegistry\nfrom flash.core.utilities.imports import _KORNIA_AVAILABLE\nfrom flash.image.segmentation.backbones import SEMANTIC_SEGMENTATION_BACKBONES\nfrom flash.image.segmentation.heads import SEMANTIC_SEGMENTATION_HEADS\nfrom flash.image.segmentation.serialization import SegmentationLabels\n\nif _KORNIA_AVAILABLE:\n import kornia as K\n\n\nclass SemanticSegmentationPostprocess(Postprocess):\n\n def per_sample_transform(self, sample: Any) -> Any:\n resize = K.geometry.Resize(sample[DefaultDataKeys.METADATA][\"size\"][-2:], interpolation='bilinear')\n sample[DefaultDataKeys.PREDS] = resize(torch.stack(sample[DefaultDataKeys.PREDS]))\n sample[DefaultDataKeys.INPUT] = resize(torch.stack(sample[DefaultDataKeys.INPUT]))\n return super().per_sample_transform(sample)\n\n\nclass SemanticSegmentation(ClassificationTask):\n \"\"\"``SemanticSegmentation`` is a :class:`~flash.Task` for semantic segmentation of images. 
For more details, see\n :ref:`semantic_segmentation`.\n\n Args:\n num_classes: Number of classes to classify.\n backbone: A string or model to use to compute image features.\n backbone_kwargs: Additional arguments for the backbone configuration.\n head: A string or (model, num_features) tuple to use to compute image features.\n head_kwargs: Additional arguments for the head configuration.\n pretrained: Use a pretrained backbone.\n loss_fn: Loss function for training.\n optimizer: Optimizer to use for training.\n metrics: Metrics to compute for training and evaluation. Can either be an metric from the `torchmetrics`\n package, a custom metric inherenting from `torchmetrics.Metric`, a callable function or a list/dict\n containing a combination of the aforementioned. In all cases, each metric needs to have the signature\n `metric(preds,target)` and return a single scalar tensor. Defaults to :class:`torchmetrics.IOU`.\n learning_rate: Learning rate to use for training.\n multi_label: Whether the targets are multi-label or not.\n serializer: The :class:`~flash.core.data.process.Serializer` to use when serializing prediction outputs.\n \"\"\"\n\n postprocess_cls = SemanticSegmentationPostprocess\n\n backbones: FlashRegistry = SEMANTIC_SEGMENTATION_BACKBONES\n\n heads: FlashRegistry = SEMANTIC_SEGMENTATION_HEADS\n\n required_extras: str = \"image\"\n\n def __init__(\n self,\n num_classes: int,\n backbone: Union[str, nn.Module] = \"resnet50\",\n backbone_kwargs: Optional[Dict] = None,\n head: str = \"fpn\",\n head_kwargs: Optional[Dict] = None,\n pretrained: bool = True,\n loss_fn: Optional[Callable] = None,\n optimizer: Type[torch.optim.Optimizer] = torch.optim.AdamW,\n metrics: Union[Metric, Callable, Mapping, Sequence, None] = None,\n learning_rate: float = 1e-3,\n multi_label: bool = False,\n serializer: Optional[Union[Serializer, Mapping[str, Serializer]]] = None,\n postprocess: Optional[Postprocess] = None,\n ) -> None:\n if metrics is None:\n metrics = IoU(num_classes=num_classes)\n\n if loss_fn is None:\n loss_fn = F.cross_entropy\n\n # TODO: need to check for multi_label\n if multi_label:\n raise NotImplementedError(\"Multi-label not supported yet.\")\n\n super().__init__(\n model=None,\n loss_fn=loss_fn,\n optimizer=optimizer,\n metrics=metrics,\n learning_rate=learning_rate,\n serializer=serializer or SegmentationLabels(),\n postprocess=postprocess or self.postprocess_cls()\n )\n\n self.save_hyperparameters()\n\n if not backbone_kwargs:\n backbone_kwargs = {}\n\n if not head_kwargs:\n head_kwargs = {}\n\n if isinstance(backbone, nn.Module):\n self.backbone = backbone\n else:\n self.backbone = self.backbones.get(backbone)(**backbone_kwargs)\n\n self.head: nn.Module = self.heads.get(head)(\n backbone=self.backbone, num_classes=num_classes, pretrained=pretrained, **head_kwargs\n )\n self.backbone = self.head.encoder\n\n def training_step(self, batch: Any, batch_idx: int) -> Any:\n batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])\n return super().training_step(batch, batch_idx)\n\n def validation_step(self, batch: Any, batch_idx: int) -> Any:\n batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])\n return super().validation_step(batch, batch_idx)\n\n def test_step(self, batch: Any, batch_idx: int) -> Any:\n batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])\n return super().test_step(batch, batch_idx)\n\n def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:\n batch_input = 
(batch[DefaultDataKeys.INPUT])\n batch[DefaultDataKeys.PREDS] = super().predict_step(batch_input, batch_idx, dataloader_idx=dataloader_idx)\n return batch\n\n def forward(self, x) -> torch.Tensor:\n res = self.head(x)\n\n # some frameworks like torchvision return a dict.\n # In particular, torchvision segmentation models return the output logits\n # in the key `out`.\n if torch.jit.isinstance(res, Dict[str, torch.Tensor]):\n out = res['out']\n elif torch.is_tensor(res):\n out = res\n else:\n raise NotImplementedError(f\"Unsupported output type: {type(res)}\")\n\n return out\n\n @staticmethod\n def _ci_benchmark_fn(history: List[Dict[str, Any]]):\n \"\"\"\n This function is used only for debugging usage with CI\n \"\"\"\n assert history[-1][\"val_iou\"] > 0.2\n", "path": "flash/image/segmentation/model.py"}, {"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom functools import partial\n\nfrom flash.core.registry import FlashRegistry\nfrom flash.core.utilities.imports import _SEGMENTATION_MODELS_AVAILABLE\n\nif _SEGMENTATION_MODELS_AVAILABLE:\n import segmentation_models_pytorch as smp\n\nSEMANTIC_SEGMENTATION_BACKBONES = FlashRegistry(\"backbones\")\n\nif _SEGMENTATION_MODELS_AVAILABLE:\n\n ENCODERS = smp.encoders.get_encoder_names()\n\n def _load_smp_backbone(backbone: str, **_) -> str:\n return backbone\n\n for encoder_name in ENCODERS:\n short_name = encoder_name\n if short_name.startswith(\"timm-\"):\n short_name = encoder_name[5:]\n SEMANTIC_SEGMENTATION_BACKBONES(\n partial(_load_smp_backbone, backbone=encoder_name), name=short_name, namespace=\"image/segmentation\"\n )\n", "path": "flash/image/segmentation/backbones.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom functools import partial\nfrom typing import Union\n\nfrom torch import nn\n\nfrom flash.core.registry import FlashRegistry\nfrom flash.core.utilities.imports import _SEGMENTATION_MODELS_AVAILABLE\n\nif _SEGMENTATION_MODELS_AVAILABLE:\n import segmentation_models_pytorch as smp\n\n SMP_MODEL_CLASS = [\n smp.Unet, smp.UnetPlusPlus, smp.MAnet, smp.Linknet, smp.FPN, smp.PSPNet, smp.DeepLabV3, smp.DeepLabV3Plus,\n smp.PAN\n ]\n SMP_MODELS = {a.__name__.lower(): a for a in SMP_MODEL_CLASS}\n\nSEMANTIC_SEGMENTATION_HEADS = FlashRegistry(\"backbones\")\n\nif _SEGMENTATION_MODELS_AVAILABLE:\n\n def _load_smp_head(\n head: str,\n backbone: str,\n pretrained: Union[bool, str] = True,\n num_classes: 
int = 1,\n in_channels: int = 3,\n **kwargs,\n ) -> nn.Module:\n\n if head not in SMP_MODELS:\n raise NotImplementedError(f\"{head} is not implemented! Supported heads -> {SMP_MODELS.keys()}\")\n\n encoder_weights = None\n if isinstance(pretrained, str):\n encoder_weights = pretrained\n elif pretrained:\n encoder_weights = \"imagenet\"\n\n return smp.create_model(\n arch=head,\n encoder_name=backbone,\n encoder_weights=encoder_weights,\n classes=num_classes,\n in_channels=in_channels,\n **kwargs,\n )\n\n for model_name in SMP_MODELS:\n SEMANTIC_SEGMENTATION_HEADS(\n partial(_load_smp_head, head=model_name),\n name=model_name,\n namespace=\"image/segmentation\",\n package=\"segmentation_models.pytorch\"\n )\n", "path": "flash/image/segmentation/heads.py"}, {"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Type, Union\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torchmetrics import IoU, Metric\n\nfrom flash.core.classification import ClassificationTask\nfrom flash.core.data.data_source import DefaultDataKeys\nfrom flash.core.data.process import Postprocess, Serializer\nfrom flash.core.registry import FlashRegistry\nfrom flash.core.utilities.imports import _KORNIA_AVAILABLE\nfrom flash.image.segmentation.backbones import SEMANTIC_SEGMENTATION_BACKBONES\nfrom flash.image.segmentation.heads import SEMANTIC_SEGMENTATION_HEADS\nfrom flash.image.segmentation.serialization import SegmentationLabels\n\nif _KORNIA_AVAILABLE:\n import kornia as K\n\n\nclass SemanticSegmentationPostprocess(Postprocess):\n\n def per_sample_transform(self, sample: Any) -> Any:\n resize = K.geometry.Resize(sample[DefaultDataKeys.METADATA][\"size\"][-2:], interpolation='bilinear')\n sample[DefaultDataKeys.PREDS] = resize(torch.stack(sample[DefaultDataKeys.PREDS]))\n sample[DefaultDataKeys.INPUT] = resize(torch.stack(sample[DefaultDataKeys.INPUT]))\n return super().per_sample_transform(sample)\n\n\nclass SemanticSegmentation(ClassificationTask):\n \"\"\"``SemanticSegmentation`` is a :class:`~flash.Task` for semantic segmentation of images. For more details, see\n :ref:`semantic_segmentation`.\n\n Args:\n num_classes: Number of classes to classify.\n backbone: A string or model to use to compute image features.\n backbone_kwargs: Additional arguments for the backbone configuration.\n head: A string or (model, num_features) tuple to use to compute image features.\n head_kwargs: Additional arguments for the head configuration.\n pretrained: Use a pretrained backbone.\n loss_fn: Loss function for training.\n optimizer: Optimizer to use for training.\n metrics: Metrics to compute for training and evaluation. Can either be an metric from the `torchmetrics`\n package, a custom metric inherenting from `torchmetrics.Metric`, a callable function or a list/dict\n containing a combination of the aforementioned. 
In all cases, each metric needs to have the signature\n `metric(preds,target)` and return a single scalar tensor. Defaults to :class:`torchmetrics.IOU`.\n learning_rate: Learning rate to use for training.\n multi_label: Whether the targets are multi-label or not.\n serializer: The :class:`~flash.core.data.process.Serializer` to use when serializing prediction outputs.\n \"\"\"\n\n postprocess_cls = SemanticSegmentationPostprocess\n\n backbones: FlashRegistry = SEMANTIC_SEGMENTATION_BACKBONES\n\n heads: FlashRegistry = SEMANTIC_SEGMENTATION_HEADS\n\n required_extras: str = \"image\"\n\n def __init__(\n self,\n num_classes: int,\n backbone: Union[str, nn.Module] = \"resnet50\",\n backbone_kwargs: Optional[Dict] = None,\n head: str = \"fpn\",\n head_kwargs: Optional[Dict] = None,\n pretrained: Union[bool, str] = True,\n loss_fn: Optional[Callable] = None,\n optimizer: Type[torch.optim.Optimizer] = torch.optim.AdamW,\n metrics: Union[Metric, Callable, Mapping, Sequence, None] = None,\n learning_rate: float = 1e-3,\n multi_label: bool = False,\n serializer: Optional[Union[Serializer, Mapping[str, Serializer]]] = None,\n postprocess: Optional[Postprocess] = None,\n ) -> None:\n if metrics is None:\n metrics = IoU(num_classes=num_classes)\n\n if loss_fn is None:\n loss_fn = F.cross_entropy\n\n # TODO: need to check for multi_label\n if multi_label:\n raise NotImplementedError(\"Multi-label not supported yet.\")\n\n super().__init__(\n model=None,\n loss_fn=loss_fn,\n optimizer=optimizer,\n metrics=metrics,\n learning_rate=learning_rate,\n serializer=serializer or SegmentationLabels(),\n postprocess=postprocess or self.postprocess_cls()\n )\n\n self.save_hyperparameters()\n\n if not backbone_kwargs:\n backbone_kwargs = {}\n\n if not head_kwargs:\n head_kwargs = {}\n\n if isinstance(backbone, nn.Module):\n self.backbone = backbone\n else:\n self.backbone = self.backbones.get(backbone)(**backbone_kwargs)\n\n self.head: nn.Module = self.heads.get(head)(\n backbone=self.backbone, num_classes=num_classes, pretrained=pretrained, **head_kwargs\n )\n self.backbone = self.head.encoder\n\n def training_step(self, batch: Any, batch_idx: int) -> Any:\n batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])\n return super().training_step(batch, batch_idx)\n\n def validation_step(self, batch: Any, batch_idx: int) -> Any:\n batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])\n return super().validation_step(batch, batch_idx)\n\n def test_step(self, batch: Any, batch_idx: int) -> Any:\n batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])\n return super().test_step(batch, batch_idx)\n\n def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:\n batch_input = (batch[DefaultDataKeys.INPUT])\n batch[DefaultDataKeys.PREDS] = super().predict_step(batch_input, batch_idx, dataloader_idx=dataloader_idx)\n return batch\n\n def forward(self, x) -> torch.Tensor:\n res = self.head(x)\n\n # some frameworks like torchvision return a dict.\n # In particular, torchvision segmentation models return the output logits\n # in the key `out`.\n if torch.jit.isinstance(res, Dict[str, torch.Tensor]):\n out = res['out']\n elif torch.is_tensor(res):\n out = res\n else:\n raise NotImplementedError(f\"Unsupported output type: {type(res)}\")\n\n return out\n\n @classmethod\n def available_pretrained_weights(cls, backbone: str):\n result = cls.backbones.get(backbone, with_metadata=True)\n pretrained_weights = None\n\n if \"weights_paths\" in result[\"metadata\"]:\n 
pretrained_weights = list(result[\"metadata\"][\"weights_paths\"])\n\n return pretrained_weights\n\n @staticmethod\n def _ci_benchmark_fn(history: List[Dict[str, Any]]):\n \"\"\"\n This function is used only for debugging usage with CI\n \"\"\"\n assert history[-1][\"val_iou\"] > 0.2\n", "path": "flash/image/segmentation/model.py"}, {"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom functools import partial\n\nfrom flash.core.registry import FlashRegistry\nfrom flash.core.utilities.imports import _SEGMENTATION_MODELS_AVAILABLE\n\nif _SEGMENTATION_MODELS_AVAILABLE:\n import segmentation_models_pytorch as smp\n\nSEMANTIC_SEGMENTATION_BACKBONES = FlashRegistry(\"backbones\")\n\nif _SEGMENTATION_MODELS_AVAILABLE:\n\n ENCODERS = smp.encoders.get_encoder_names()\n\n def _load_smp_backbone(backbone: str, **_) -> str:\n return backbone\n\n for encoder_name in ENCODERS:\n short_name = encoder_name\n if short_name.startswith(\"timm-\"):\n short_name = encoder_name[5:]\n\n available_weights = smp.encoders.encoders[encoder_name][\"pretrained_settings\"].keys()\n SEMANTIC_SEGMENTATION_BACKBONES(\n partial(_load_smp_backbone, backbone=encoder_name),\n name=short_name,\n namespace=\"image/segmentation\",\n weights_paths=available_weights,\n )\n", "path": "flash/image/segmentation/backbones.py"}]} | 3,205 | 751 |
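
For context on the segmentation patch above, a minimal usage sketch of the changed API surface; the import path `flash.image.SemanticSegmentation`, the `resnet50`/`imagenet` names, and `num_classes=21` are illustrative assumptions rather than values taken from this row:

```python
from flash.image import SemanticSegmentation

# The patched backbone registry stores the available pretrained weight
# identifiers (taken from segmentation_models_pytorch's pretrained_settings)
# as registry metadata, so they can be listed per backbone.
weights = SemanticSegmentation.available_pretrained_weights("resnet50")
print(weights)  # e.g. ['imagenet'] — the exact names depend on the encoder

# `pretrained` now accepts either a bool or a specific weights name.
model = SemanticSegmentation(
    num_classes=21,          # illustrative value
    backbone="resnet50",
    head="fpn",
    pretrained="imagenet",   # forwarded to smp as encoder_weights when a str
)
```
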
gh_patches_debug_15881 | rasdani/github-patches | git_diff | svthalia__concrexit-2496 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add album to event foreign key to api
Feature request (I'm making this via the Github app so the template doesn't work)
See title, Julian wants this :)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/photos/api/v2/serializers/album.py`
Content:
```
1 from rest_framework import serializers
2
3 from photos import services
4 from photos.api.v2.serializers.photo import PhotoSerializer, PhotoListSerializer
5 from photos.models import Album
6 from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (
7 CleanedModelSerializer,
8 )
9
10
11 class AlbumSerializer(CleanedModelSerializer):
12 """API serializer for albums."""
13
14 class Meta:
15 """Meta class for the serializer."""
16
17 model = Album
18 fields = ("slug", "title", "accessible", "shareable", "cover", "photos")
19
20 cover = PhotoSerializer()
21 accessible = serializers.SerializerMethodField("_accessible")
22 photos = PhotoListSerializer(source="photo_set", many=True)
23
24 def _accessible(self, obj):
25 return services.is_album_accessible(self.context["request"], obj)
26
27
28 class AlbumListSerializer(AlbumSerializer):
29 class Meta:
30 """Meta class for the serializer."""
31
32 model = Album
33 fields = ("slug", "title", "accessible", "shareable", "cover")
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/photos/api/v2/serializers/album.py b/website/photos/api/v2/serializers/album.py
--- a/website/photos/api/v2/serializers/album.py
+++ b/website/photos/api/v2/serializers/album.py
@@ -15,7 +15,15 @@
"""Meta class for the serializer."""
model = Album
- fields = ("slug", "title", "accessible", "shareable", "cover", "photos")
+ fields = (
+ "slug",
+ "title",
+ "accessible",
+ "shareable",
+ "cover",
+ "photos",
+ "event",
+ )
cover = PhotoSerializer()
accessible = serializers.SerializerMethodField("_accessible")
@@ -30,4 +38,4 @@
"""Meta class for the serializer."""
model = Album
- fields = ("slug", "title", "accessible", "shareable", "cover")
+ fields = ("slug", "title", "accessible", "shareable", "cover", "event")
| {"golden_diff": "diff --git a/website/photos/api/v2/serializers/album.py b/website/photos/api/v2/serializers/album.py\n--- a/website/photos/api/v2/serializers/album.py\n+++ b/website/photos/api/v2/serializers/album.py\n@@ -15,7 +15,15 @@\n \"\"\"Meta class for the serializer.\"\"\"\n \n model = Album\n- fields = (\"slug\", \"title\", \"accessible\", \"shareable\", \"cover\", \"photos\")\n+ fields = (\n+ \"slug\",\n+ \"title\",\n+ \"accessible\",\n+ \"shareable\",\n+ \"cover\",\n+ \"photos\",\n+ \"event\",\n+ )\n \n cover = PhotoSerializer()\n accessible = serializers.SerializerMethodField(\"_accessible\")\n@@ -30,4 +38,4 @@\n \"\"\"Meta class for the serializer.\"\"\"\n \n model = Album\n- fields = (\"slug\", \"title\", \"accessible\", \"shareable\", \"cover\")\n+ fields = (\"slug\", \"title\", \"accessible\", \"shareable\", \"cover\", \"event\")\n", "issue": "Add album to event foreign key to api\nFeature request (I'm making this via the Github app so the template doesn't work) \n\n\nSee title, Julian wants this :)\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom photos import services\nfrom photos.api.v2.serializers.photo import PhotoSerializer, PhotoListSerializer\nfrom photos.models import Album\nfrom thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n)\n\n\nclass AlbumSerializer(CleanedModelSerializer):\n \"\"\"API serializer for albums.\"\"\"\n\n class Meta:\n \"\"\"Meta class for the serializer.\"\"\"\n\n model = Album\n fields = (\"slug\", \"title\", \"accessible\", \"shareable\", \"cover\", \"photos\")\n\n cover = PhotoSerializer()\n accessible = serializers.SerializerMethodField(\"_accessible\")\n photos = PhotoListSerializer(source=\"photo_set\", many=True)\n\n def _accessible(self, obj):\n return services.is_album_accessible(self.context[\"request\"], obj)\n\n\nclass AlbumListSerializer(AlbumSerializer):\n class Meta:\n \"\"\"Meta class for the serializer.\"\"\"\n\n model = Album\n fields = (\"slug\", \"title\", \"accessible\", \"shareable\", \"cover\")\n", "path": "website/photos/api/v2/serializers/album.py"}], "after_files": [{"content": "from rest_framework import serializers\n\nfrom photos import services\nfrom photos.api.v2.serializers.photo import PhotoSerializer, PhotoListSerializer\nfrom photos.models import Album\nfrom thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n)\n\n\nclass AlbumSerializer(CleanedModelSerializer):\n \"\"\"API serializer for albums.\"\"\"\n\n class Meta:\n \"\"\"Meta class for the serializer.\"\"\"\n\n model = Album\n fields = (\n \"slug\",\n \"title\",\n \"accessible\",\n \"shareable\",\n \"cover\",\n \"photos\",\n \"event\",\n )\n\n cover = PhotoSerializer()\n accessible = serializers.SerializerMethodField(\"_accessible\")\n photos = PhotoListSerializer(source=\"photo_set\", many=True)\n\n def _accessible(self, obj):\n return services.is_album_accessible(self.context[\"request\"], obj)\n\n\nclass AlbumListSerializer(AlbumSerializer):\n class Meta:\n \"\"\"Meta class for the serializer.\"\"\"\n\n model = Album\n fields = (\"slug\", \"title\", \"accessible\", \"shareable\", \"cover\", \"event\")\n", "path": "website/photos/api/v2/serializers/album.py"}]} | 574 | 240 |
gh_patches_debug_532 | rasdani/github-patches | git_diff | saleor__saleor-1949 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Product variant without name or attrs displays as blank space in dashboard's breadcrumbs
Seems like `__str__` on our `ProductVariant` may produce empty string if ProductVariant has no name or attributes set. Our titles seem to fallback to SKU if this is the case, but not our breadcrumbs:
<img width="362" alt="zrzut ekranu 2018-03-01 o 13 58 23" src="https://user-images.githubusercontent.com/750553/36845921-d21bf616-1d58-11e8-851c-763c32356818.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/product/models.py`
Content:
```
1 import datetime
2 from decimal import Decimal
3
4 from django.conf import settings
5 from django.contrib.postgres.fields import HStoreField
6 from django.core.validators import MinValueValidator, RegexValidator
7 from django.db import models
8 from django.db.models import F, Max, Q
9 from django.urls import reverse
10 from django.utils.encoding import smart_text
11 from django.utils.text import slugify
12 from django.utils.translation import pgettext_lazy
13 from django_prices.models import MoneyField
14 from mptt.managers import TreeManager
15 from mptt.models import MPTTModel
16 from prices import Money, TaxedMoney, TaxedMoneyRange
17 from text_unidecode import unidecode
18 from versatileimagefield.fields import PPOIField, VersatileImageField
19
20 from ..core.exceptions import InsufficientStock
21 from ..discount.utils import calculate_discounted_price
22 from ..seo.models import SeoModel
23
24
25 class Category(MPTTModel, SeoModel):
26 name = models.CharField(max_length=128)
27 slug = models.SlugField(max_length=128)
28 description = models.TextField(blank=True)
29 parent = models.ForeignKey(
30 'self', null=True, blank=True, related_name='children',
31 on_delete=models.CASCADE)
32 background_image = VersatileImageField(
33 upload_to='category-backgrounds', blank=True, null=True)
34
35 objects = models.Manager()
36 tree = TreeManager()
37
38 class Meta:
39 app_label = 'product'
40 permissions = (
41 ('view_category',
42 pgettext_lazy('Permission description', 'Can view categories')),
43 ('edit_category',
44 pgettext_lazy('Permission description', 'Can edit categories')))
45
46 def __str__(self):
47 return self.name
48
49 def get_absolute_url(self, ancestors=None):
50 return reverse('product:category',
51 kwargs={'path': self.get_full_path(ancestors),
52 'category_id': self.id})
53
54 def get_full_path(self, ancestors=None):
55 if not self.parent_id:
56 return self.slug
57 if not ancestors:
58 ancestors = self.get_ancestors()
59 nodes = [node for node in ancestors] + [self]
60 return '/'.join([node.slug for node in nodes])
61
62
63 class ProductType(models.Model):
64 name = models.CharField(max_length=128)
65 has_variants = models.BooleanField(default=True)
66 product_attributes = models.ManyToManyField(
67 'ProductAttribute', related_name='product_types', blank=True)
68 variant_attributes = models.ManyToManyField(
69 'ProductAttribute', related_name='product_variant_types', blank=True)
70 is_shipping_required = models.BooleanField(default=False)
71
72 class Meta:
73 app_label = 'product'
74
75 def __str__(self):
76 return self.name
77
78 def __repr__(self):
79 class_ = type(self)
80 return '<%s.%s(pk=%r, name=%r)>' % (
81 class_.__module__, class_.__name__, self.pk, self.name)
82
83
84 class ProductQuerySet(models.QuerySet):
85 def available_products(self):
86 today = datetime.date.today()
87 return self.filter(
88 Q(available_on__lte=today) | Q(available_on__isnull=True),
89 Q(is_published=True))
90
91
92 class Product(SeoModel):
93 product_type = models.ForeignKey(
94 ProductType, related_name='products', on_delete=models.CASCADE)
95 name = models.CharField(max_length=128)
96 description = models.TextField()
97 category = models.ForeignKey(
98 Category, related_name='products', on_delete=models.CASCADE)
99 price = MoneyField(
100 currency=settings.DEFAULT_CURRENCY, max_digits=12,
101 decimal_places=settings.DEFAULT_DECIMAL_PLACES)
102 available_on = models.DateField(blank=True, null=True)
103 is_published = models.BooleanField(default=True)
104 attributes = HStoreField(default={}, blank=True)
105 updated_at = models.DateTimeField(auto_now=True, null=True)
106 is_featured = models.BooleanField(default=False)
107
108 objects = ProductQuerySet.as_manager()
109
110 class Meta:
111 app_label = 'product'
112 permissions = (
113 ('view_product',
114 pgettext_lazy('Permission description', 'Can view products')),
115 ('edit_product',
116 pgettext_lazy('Permission description', 'Can edit products')),
117 ('view_properties',
118 pgettext_lazy(
119 'Permission description', 'Can view product properties')),
120 ('edit_properties',
121 pgettext_lazy(
122 'Permission description', 'Can edit product properties')))
123
124 def __iter__(self):
125 if not hasattr(self, '__variants'):
126 setattr(self, '__variants', self.variants.all())
127 return iter(getattr(self, '__variants'))
128
129 def __repr__(self):
130 class_ = type(self)
131 return '<%s.%s(pk=%r, name=%r)>' % (
132 class_.__module__, class_.__name__, self.pk, self.name)
133
134 def __str__(self):
135 return self.name
136
137 def get_absolute_url(self):
138 return reverse(
139 'product:details',
140 kwargs={'slug': self.get_slug(), 'product_id': self.id})
141
142 def get_slug(self):
143 return slugify(smart_text(unidecode(self.name)))
144
145 def is_in_stock(self):
146 return any(variant.is_in_stock() for variant in self)
147
148 def is_available(self):
149 today = datetime.date.today()
150 return self.available_on is None or self.available_on <= today
151
152 def get_first_image(self):
153 first_image = self.images.first()
154 return first_image.image if first_image else None
155
156 def get_price_per_item(self, item, discounts=None):
157 return item.get_price_per_item(discounts)
158
159 def get_price_range(self, discounts=None):
160 if self.variants.exists():
161 prices = [
162 self.get_price_per_item(variant, discounts=discounts)
163 for variant in self]
164 return TaxedMoneyRange(min(prices), max(prices))
165 price = TaxedMoney(net=self.price, gross=self.price)
166 discounted_price = calculate_discounted_price(
167 self, price, discounts)
168 return TaxedMoneyRange(start=discounted_price, stop=discounted_price)
169
170 def get_gross_price_range(self, discounts=None):
171 grosses = [
172 self.get_price_per_item(variant, discounts=discounts)
173 for variant in self]
174 if not grosses:
175 return None
176 grosses = sorted(grosses, key=lambda x: x.tax)
177 return TaxedMoneyRange(min(grosses), max(grosses))
178
179
180 class ProductVariant(models.Model):
181 sku = models.CharField(max_length=32, unique=True)
182 name = models.CharField(max_length=255, blank=True)
183 price_override = MoneyField(
184 currency=settings.DEFAULT_CURRENCY, max_digits=12,
185 decimal_places=settings.DEFAULT_DECIMAL_PLACES, blank=True, null=True)
186 product = models.ForeignKey(
187 Product, related_name='variants', on_delete=models.CASCADE)
188 attributes = HStoreField(default={}, blank=True)
189 images = models.ManyToManyField('ProductImage', through='VariantImage')
190 quantity = models.IntegerField(
191 validators=[MinValueValidator(0)], default=Decimal(1))
192 quantity_allocated = models.IntegerField(
193 validators=[MinValueValidator(0)], default=Decimal(0))
194 cost_price = MoneyField(
195 currency=settings.DEFAULT_CURRENCY, max_digits=12,
196 decimal_places=settings.DEFAULT_DECIMAL_PLACES, blank=True, null=True)
197
198 class Meta:
199 app_label = 'product'
200
201 def __str__(self):
202 return self.name
203
204 @property
205 def quantity_available(self):
206 return max(self.quantity - self.quantity_allocated, 0)
207
208 def get_total(self):
209 if self.cost_price:
210 return TaxedMoney(net=self.cost_price, gross=self.cost_price)
211
212 def check_quantity(self, quantity):
213 if quantity > self.quantity_available:
214 raise InsufficientStock(self)
215
216 def get_price_per_item(self, discounts=None):
217 price = self.price_override or self.product.price
218 price = TaxedMoney(net=price, gross=price)
219 price = calculate_discounted_price(self.product, price, discounts)
220 return price
221
222 def get_absolute_url(self):
223 slug = self.product.get_slug()
224 product_id = self.product.id
225 return reverse('product:details',
226 kwargs={'slug': slug, 'product_id': product_id})
227
228 def as_data(self):
229 return {
230 'product_name': str(self),
231 'product_id': self.product.pk,
232 'variant_id': self.pk,
233 'unit_price': str(self.get_price_per_item().gross)}
234
235 def is_shipping_required(self):
236 return self.product.product_type.is_shipping_required
237
238 def is_in_stock(self):
239 return self.quantity_available > 0
240
241 def display_product(self):
242 variant_display = str(self)
243 product_display = (
244 '%s (%s)' % (self.product, variant_display)
245 if variant_display else str(self.product))
246 return smart_text(product_display)
247
248 def get_first_image(self):
249 return self.product.get_first_image()
250
251
252 class ProductAttribute(models.Model):
253 slug = models.SlugField(max_length=50, unique=True)
254 name = models.CharField(max_length=100)
255
256 class Meta:
257 ordering = ('slug', )
258
259 def __str__(self):
260 return self.name
261
262 def get_formfield_name(self):
263 return slugify('attribute-%s' % self.slug, allow_unicode=True)
264
265 def has_values(self):
266 return self.values.exists()
267
268
269 class AttributeChoiceValue(models.Model):
270 name = models.CharField(max_length=100)
271 slug = models.SlugField(max_length=100)
272 color = models.CharField(
273 max_length=7, blank=True,
274 validators=[RegexValidator('^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$')])
275 attribute = models.ForeignKey(
276 ProductAttribute, related_name='values', on_delete=models.CASCADE)
277
278 class Meta:
279 unique_together = ('name', 'attribute')
280
281 def __str__(self):
282 return self.name
283
284
285 class ProductImage(models.Model):
286 product = models.ForeignKey(
287 Product, related_name='images', on_delete=models.CASCADE)
288 image = VersatileImageField(
289 upload_to='products', ppoi_field='ppoi', blank=False)
290 ppoi = PPOIField()
291 alt = models.CharField(max_length=128, blank=True)
292 order = models.PositiveIntegerField(editable=False)
293
294 class Meta:
295 ordering = ('order', )
296 app_label = 'product'
297
298 def get_ordering_queryset(self):
299 return self.product.images.all()
300
301 def save(self, *args, **kwargs):
302 if self.order is None:
303 qs = self.get_ordering_queryset()
304 existing_max = qs.aggregate(Max('order'))
305 existing_max = existing_max.get('order__max')
306 self.order = 0 if existing_max is None else existing_max + 1
307 super().save(*args, **kwargs)
308
309 def delete(self, *args, **kwargs):
310 qs = self.get_ordering_queryset()
311 qs.filter(order__gt=self.order).update(order=F('order') - 1)
312 super().delete(*args, **kwargs)
313
314
315 class VariantImage(models.Model):
316 variant = models.ForeignKey(
317 'ProductVariant', related_name='variant_images',
318 on_delete=models.CASCADE)
319 image = models.ForeignKey(
320 ProductImage, related_name='variant_images', on_delete=models.CASCADE)
321
322
323 class Collection(SeoModel):
324 name = models.CharField(max_length=128, unique=True)
325 slug = models.SlugField(max_length=128)
326 products = models.ManyToManyField(
327 Product, blank=True, related_name='collections')
328 background_image = VersatileImageField(
329 upload_to='collection-backgrounds', blank=True, null=True)
330
331 class Meta:
332 ordering = ['pk']
333
334 def __str__(self):
335 return self.name
336
337 def get_absolute_url(self):
338 return reverse(
339 'product:collection',
340 kwargs={'pk': self.id, 'slug': self.slug})
341
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/product/models.py b/saleor/product/models.py
--- a/saleor/product/models.py
+++ b/saleor/product/models.py
@@ -199,7 +199,7 @@
app_label = 'product'
def __str__(self):
- return self.name
+ return self.name or self.sku
@property
def quantity_available(self):
| {"golden_diff": "diff --git a/saleor/product/models.py b/saleor/product/models.py\n--- a/saleor/product/models.py\n+++ b/saleor/product/models.py\n@@ -199,7 +199,7 @@\n app_label = 'product'\n \n def __str__(self):\n- return self.name\n+ return self.name or self.sku\n \n @property\n def quantity_available(self):\n", "issue": "Product variant without name or attrs displays as blank space in dashboard's breadcrumbs\nSeems like `__str__` on our `ProductVariant` may produce empty string if ProductVariant has no name or attributes set. Our titles seem to fallback to SKU if this is the case, but not our breadcrumbs:\r\n\r\n<img width=\"362\" alt=\"zrzut ekranu 2018-03-01 o 13 58 23\" src=\"https://user-images.githubusercontent.com/750553/36845921-d21bf616-1d58-11e8-851c-763c32356818.png\">\r\n\n", "before_files": [{"content": "import datetime\nfrom decimal import Decimal\n\nfrom django.conf import settings\nfrom django.contrib.postgres.fields import HStoreField\nfrom django.core.validators import MinValueValidator, RegexValidator\nfrom django.db import models\nfrom django.db.models import F, Max, Q\nfrom django.urls import reverse\nfrom django.utils.encoding import smart_text\nfrom django.utils.text import slugify\nfrom django.utils.translation import pgettext_lazy\nfrom django_prices.models import MoneyField\nfrom mptt.managers import TreeManager\nfrom mptt.models import MPTTModel\nfrom prices import Money, TaxedMoney, TaxedMoneyRange\nfrom text_unidecode import unidecode\nfrom versatileimagefield.fields import PPOIField, VersatileImageField\n\nfrom ..core.exceptions import InsufficientStock\nfrom ..discount.utils import calculate_discounted_price\nfrom ..seo.models import SeoModel\n\n\nclass Category(MPTTModel, SeoModel):\n name = models.CharField(max_length=128)\n slug = models.SlugField(max_length=128)\n description = models.TextField(blank=True)\n parent = models.ForeignKey(\n 'self', null=True, blank=True, related_name='children',\n on_delete=models.CASCADE)\n background_image = VersatileImageField(\n upload_to='category-backgrounds', blank=True, null=True)\n\n objects = models.Manager()\n tree = TreeManager()\n\n class Meta:\n app_label = 'product'\n permissions = (\n ('view_category',\n pgettext_lazy('Permission description', 'Can view categories')),\n ('edit_category',\n pgettext_lazy('Permission description', 'Can edit categories')))\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self, ancestors=None):\n return reverse('product:category',\n kwargs={'path': self.get_full_path(ancestors),\n 'category_id': self.id})\n\n def get_full_path(self, ancestors=None):\n if not self.parent_id:\n return self.slug\n if not ancestors:\n ancestors = self.get_ancestors()\n nodes = [node for node in ancestors] + [self]\n return '/'.join([node.slug for node in nodes])\n\n\nclass ProductType(models.Model):\n name = models.CharField(max_length=128)\n has_variants = models.BooleanField(default=True)\n product_attributes = models.ManyToManyField(\n 'ProductAttribute', related_name='product_types', blank=True)\n variant_attributes = models.ManyToManyField(\n 'ProductAttribute', related_name='product_variant_types', blank=True)\n is_shipping_required = models.BooleanField(default=False)\n\n class Meta:\n app_label = 'product'\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n class_ = type(self)\n return '<%s.%s(pk=%r, name=%r)>' % (\n class_.__module__, class_.__name__, self.pk, self.name)\n\n\nclass ProductQuerySet(models.QuerySet):\n def available_products(self):\n 
today = datetime.date.today()\n return self.filter(\n Q(available_on__lte=today) | Q(available_on__isnull=True),\n Q(is_published=True))\n\n\nclass Product(SeoModel):\n product_type = models.ForeignKey(\n ProductType, related_name='products', on_delete=models.CASCADE)\n name = models.CharField(max_length=128)\n description = models.TextField()\n category = models.ForeignKey(\n Category, related_name='products', on_delete=models.CASCADE)\n price = MoneyField(\n currency=settings.DEFAULT_CURRENCY, max_digits=12,\n decimal_places=settings.DEFAULT_DECIMAL_PLACES)\n available_on = models.DateField(blank=True, null=True)\n is_published = models.BooleanField(default=True)\n attributes = HStoreField(default={}, blank=True)\n updated_at = models.DateTimeField(auto_now=True, null=True)\n is_featured = models.BooleanField(default=False)\n\n objects = ProductQuerySet.as_manager()\n\n class Meta:\n app_label = 'product'\n permissions = (\n ('view_product',\n pgettext_lazy('Permission description', 'Can view products')),\n ('edit_product',\n pgettext_lazy('Permission description', 'Can edit products')),\n ('view_properties',\n pgettext_lazy(\n 'Permission description', 'Can view product properties')),\n ('edit_properties',\n pgettext_lazy(\n 'Permission description', 'Can edit product properties')))\n\n def __iter__(self):\n if not hasattr(self, '__variants'):\n setattr(self, '__variants', self.variants.all())\n return iter(getattr(self, '__variants'))\n\n def __repr__(self):\n class_ = type(self)\n return '<%s.%s(pk=%r, name=%r)>' % (\n class_.__module__, class_.__name__, self.pk, self.name)\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse(\n 'product:details',\n kwargs={'slug': self.get_slug(), 'product_id': self.id})\n\n def get_slug(self):\n return slugify(smart_text(unidecode(self.name)))\n\n def is_in_stock(self):\n return any(variant.is_in_stock() for variant in self)\n\n def is_available(self):\n today = datetime.date.today()\n return self.available_on is None or self.available_on <= today\n\n def get_first_image(self):\n first_image = self.images.first()\n return first_image.image if first_image else None\n\n def get_price_per_item(self, item, discounts=None):\n return item.get_price_per_item(discounts)\n\n def get_price_range(self, discounts=None):\n if self.variants.exists():\n prices = [\n self.get_price_per_item(variant, discounts=discounts)\n for variant in self]\n return TaxedMoneyRange(min(prices), max(prices))\n price = TaxedMoney(net=self.price, gross=self.price)\n discounted_price = calculate_discounted_price(\n self, price, discounts)\n return TaxedMoneyRange(start=discounted_price, stop=discounted_price)\n\n def get_gross_price_range(self, discounts=None):\n grosses = [\n self.get_price_per_item(variant, discounts=discounts)\n for variant in self]\n if not grosses:\n return None\n grosses = sorted(grosses, key=lambda x: x.tax)\n return TaxedMoneyRange(min(grosses), max(grosses))\n\n\nclass ProductVariant(models.Model):\n sku = models.CharField(max_length=32, unique=True)\n name = models.CharField(max_length=255, blank=True)\n price_override = MoneyField(\n currency=settings.DEFAULT_CURRENCY, max_digits=12,\n decimal_places=settings.DEFAULT_DECIMAL_PLACES, blank=True, null=True)\n product = models.ForeignKey(\n Product, related_name='variants', on_delete=models.CASCADE)\n attributes = HStoreField(default={}, blank=True)\n images = models.ManyToManyField('ProductImage', through='VariantImage')\n quantity = models.IntegerField(\n 
validators=[MinValueValidator(0)], default=Decimal(1))\n quantity_allocated = models.IntegerField(\n validators=[MinValueValidator(0)], default=Decimal(0))\n cost_price = MoneyField(\n currency=settings.DEFAULT_CURRENCY, max_digits=12,\n decimal_places=settings.DEFAULT_DECIMAL_PLACES, blank=True, null=True)\n\n class Meta:\n app_label = 'product'\n\n def __str__(self):\n return self.name\n\n @property\n def quantity_available(self):\n return max(self.quantity - self.quantity_allocated, 0)\n\n def get_total(self):\n if self.cost_price:\n return TaxedMoney(net=self.cost_price, gross=self.cost_price)\n\n def check_quantity(self, quantity):\n if quantity > self.quantity_available:\n raise InsufficientStock(self)\n\n def get_price_per_item(self, discounts=None):\n price = self.price_override or self.product.price\n price = TaxedMoney(net=price, gross=price)\n price = calculate_discounted_price(self.product, price, discounts)\n return price\n\n def get_absolute_url(self):\n slug = self.product.get_slug()\n product_id = self.product.id\n return reverse('product:details',\n kwargs={'slug': slug, 'product_id': product_id})\n\n def as_data(self):\n return {\n 'product_name': str(self),\n 'product_id': self.product.pk,\n 'variant_id': self.pk,\n 'unit_price': str(self.get_price_per_item().gross)}\n\n def is_shipping_required(self):\n return self.product.product_type.is_shipping_required\n\n def is_in_stock(self):\n return self.quantity_available > 0\n\n def display_product(self):\n variant_display = str(self)\n product_display = (\n '%s (%s)' % (self.product, variant_display)\n if variant_display else str(self.product))\n return smart_text(product_display)\n\n def get_first_image(self):\n return self.product.get_first_image()\n\n\nclass ProductAttribute(models.Model):\n slug = models.SlugField(max_length=50, unique=True)\n name = models.CharField(max_length=100)\n\n class Meta:\n ordering = ('slug', )\n\n def __str__(self):\n return self.name\n\n def get_formfield_name(self):\n return slugify('attribute-%s' % self.slug, allow_unicode=True)\n\n def has_values(self):\n return self.values.exists()\n\n\nclass AttributeChoiceValue(models.Model):\n name = models.CharField(max_length=100)\n slug = models.SlugField(max_length=100)\n color = models.CharField(\n max_length=7, blank=True,\n validators=[RegexValidator('^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$')])\n attribute = models.ForeignKey(\n ProductAttribute, related_name='values', on_delete=models.CASCADE)\n\n class Meta:\n unique_together = ('name', 'attribute')\n\n def __str__(self):\n return self.name\n\n\nclass ProductImage(models.Model):\n product = models.ForeignKey(\n Product, related_name='images', on_delete=models.CASCADE)\n image = VersatileImageField(\n upload_to='products', ppoi_field='ppoi', blank=False)\n ppoi = PPOIField()\n alt = models.CharField(max_length=128, blank=True)\n order = models.PositiveIntegerField(editable=False)\n\n class Meta:\n ordering = ('order', )\n app_label = 'product'\n\n def get_ordering_queryset(self):\n return self.product.images.all()\n\n def save(self, *args, **kwargs):\n if self.order is None:\n qs = self.get_ordering_queryset()\n existing_max = qs.aggregate(Max('order'))\n existing_max = existing_max.get('order__max')\n self.order = 0 if existing_max is None else existing_max + 1\n super().save(*args, **kwargs)\n\n def delete(self, *args, **kwargs):\n qs = self.get_ordering_queryset()\n qs.filter(order__gt=self.order).update(order=F('order') - 1)\n super().delete(*args, **kwargs)\n\n\nclass 
VariantImage(models.Model):\n variant = models.ForeignKey(\n 'ProductVariant', related_name='variant_images',\n on_delete=models.CASCADE)\n image = models.ForeignKey(\n ProductImage, related_name='variant_images', on_delete=models.CASCADE)\n\n\nclass Collection(SeoModel):\n name = models.CharField(max_length=128, unique=True)\n slug = models.SlugField(max_length=128)\n products = models.ManyToManyField(\n Product, blank=True, related_name='collections')\n background_image = VersatileImageField(\n upload_to='collection-backgrounds', blank=True, null=True)\n\n class Meta:\n ordering = ['pk']\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse(\n 'product:collection',\n kwargs={'pk': self.id, 'slug': self.slug})\n", "path": "saleor/product/models.py"}], "after_files": [{"content": "import datetime\nfrom decimal import Decimal\n\nfrom django.conf import settings\nfrom django.contrib.postgres.fields import HStoreField\nfrom django.core.validators import MinValueValidator, RegexValidator\nfrom django.db import models\nfrom django.db.models import F, Max, Q\nfrom django.urls import reverse\nfrom django.utils.encoding import smart_text\nfrom django.utils.text import slugify\nfrom django.utils.translation import pgettext_lazy\nfrom django_prices.models import MoneyField\nfrom mptt.managers import TreeManager\nfrom mptt.models import MPTTModel\nfrom prices import Money, TaxedMoney, TaxedMoneyRange\nfrom text_unidecode import unidecode\nfrom versatileimagefield.fields import PPOIField, VersatileImageField\n\nfrom ..core.exceptions import InsufficientStock\nfrom ..discount.utils import calculate_discounted_price\nfrom ..seo.models import SeoModel\n\n\nclass Category(MPTTModel, SeoModel):\n name = models.CharField(max_length=128)\n slug = models.SlugField(max_length=128)\n description = models.TextField(blank=True)\n parent = models.ForeignKey(\n 'self', null=True, blank=True, related_name='children',\n on_delete=models.CASCADE)\n background_image = VersatileImageField(\n upload_to='category-backgrounds', blank=True, null=True)\n\n objects = models.Manager()\n tree = TreeManager()\n\n class Meta:\n app_label = 'product'\n permissions = (\n ('view_category',\n pgettext_lazy('Permission description', 'Can view categories')),\n ('edit_category',\n pgettext_lazy('Permission description', 'Can edit categories')))\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self, ancestors=None):\n return reverse('product:category',\n kwargs={'path': self.get_full_path(ancestors),\n 'category_id': self.id})\n\n def get_full_path(self, ancestors=None):\n if not self.parent_id:\n return self.slug\n if not ancestors:\n ancestors = self.get_ancestors()\n nodes = [node for node in ancestors] + [self]\n return '/'.join([node.slug for node in nodes])\n\n\nclass ProductType(models.Model):\n name = models.CharField(max_length=128)\n has_variants = models.BooleanField(default=True)\n product_attributes = models.ManyToManyField(\n 'ProductAttribute', related_name='product_types', blank=True)\n variant_attributes = models.ManyToManyField(\n 'ProductAttribute', related_name='product_variant_types', blank=True)\n is_shipping_required = models.BooleanField(default=False)\n\n class Meta:\n app_label = 'product'\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n class_ = type(self)\n return '<%s.%s(pk=%r, name=%r)>' % (\n class_.__module__, class_.__name__, self.pk, self.name)\n\n\nclass ProductQuerySet(models.QuerySet):\n def available_products(self):\n today = 
datetime.date.today()\n return self.filter(\n Q(available_on__lte=today) | Q(available_on__isnull=True),\n Q(is_published=True))\n\n\nclass Product(SeoModel):\n product_type = models.ForeignKey(\n ProductType, related_name='products', on_delete=models.CASCADE)\n name = models.CharField(max_length=128)\n description = models.TextField()\n category = models.ForeignKey(\n Category, related_name='products', on_delete=models.CASCADE)\n price = MoneyField(\n currency=settings.DEFAULT_CURRENCY, max_digits=12,\n decimal_places=settings.DEFAULT_DECIMAL_PLACES)\n available_on = models.DateField(blank=True, null=True)\n is_published = models.BooleanField(default=True)\n attributes = HStoreField(default={}, blank=True)\n updated_at = models.DateTimeField(auto_now=True, null=True)\n is_featured = models.BooleanField(default=False)\n\n objects = ProductQuerySet.as_manager()\n\n class Meta:\n app_label = 'product'\n permissions = (\n ('view_product',\n pgettext_lazy('Permission description', 'Can view products')),\n ('edit_product',\n pgettext_lazy('Permission description', 'Can edit products')),\n ('view_properties',\n pgettext_lazy(\n 'Permission description', 'Can view product properties')),\n ('edit_properties',\n pgettext_lazy(\n 'Permission description', 'Can edit product properties')))\n\n def __iter__(self):\n if not hasattr(self, '__variants'):\n setattr(self, '__variants', self.variants.all())\n return iter(getattr(self, '__variants'))\n\n def __repr__(self):\n class_ = type(self)\n return '<%s.%s(pk=%r, name=%r)>' % (\n class_.__module__, class_.__name__, self.pk, self.name)\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse(\n 'product:details',\n kwargs={'slug': self.get_slug(), 'product_id': self.id})\n\n def get_slug(self):\n return slugify(smart_text(unidecode(self.name)))\n\n def is_in_stock(self):\n return any(variant.is_in_stock() for variant in self)\n\n def is_available(self):\n today = datetime.date.today()\n return self.available_on is None or self.available_on <= today\n\n def get_first_image(self):\n first_image = self.images.first()\n return first_image.image if first_image else None\n\n def get_price_per_item(self, item, discounts=None):\n return item.get_price_per_item(discounts)\n\n def get_price_range(self, discounts=None):\n if self.variants.exists():\n prices = [\n self.get_price_per_item(variant, discounts=discounts)\n for variant in self]\n return TaxedMoneyRange(min(prices), max(prices))\n price = TaxedMoney(net=self.price, gross=self.price)\n discounted_price = calculate_discounted_price(\n self, price, discounts)\n return TaxedMoneyRange(start=discounted_price, stop=discounted_price)\n\n def get_gross_price_range(self, discounts=None):\n grosses = [\n self.get_price_per_item(variant, discounts=discounts)\n for variant in self]\n if not grosses:\n return None\n grosses = sorted(grosses, key=lambda x: x.tax)\n return TaxedMoneyRange(min(grosses), max(grosses))\n\n\nclass ProductVariant(models.Model):\n sku = models.CharField(max_length=32, unique=True)\n name = models.CharField(max_length=255, blank=True)\n price_override = MoneyField(\n currency=settings.DEFAULT_CURRENCY, max_digits=12,\n decimal_places=settings.DEFAULT_DECIMAL_PLACES, blank=True, null=True)\n product = models.ForeignKey(\n Product, related_name='variants', on_delete=models.CASCADE)\n attributes = HStoreField(default={}, blank=True)\n images = models.ManyToManyField('ProductImage', through='VariantImage')\n quantity = models.IntegerField(\n 
validators=[MinValueValidator(0)], default=Decimal(1))\n quantity_allocated = models.IntegerField(\n validators=[MinValueValidator(0)], default=Decimal(0))\n cost_price = MoneyField(\n currency=settings.DEFAULT_CURRENCY, max_digits=12,\n decimal_places=settings.DEFAULT_DECIMAL_PLACES, blank=True, null=True)\n\n class Meta:\n app_label = 'product'\n\n def __str__(self):\n return self.name or self.sku\n\n @property\n def quantity_available(self):\n return max(self.quantity - self.quantity_allocated, 0)\n\n def get_total(self):\n if self.cost_price:\n return TaxedMoney(net=self.cost_price, gross=self.cost_price)\n\n def check_quantity(self, quantity):\n if quantity > self.quantity_available:\n raise InsufficientStock(self)\n\n def get_price_per_item(self, discounts=None):\n price = self.price_override or self.product.price\n price = TaxedMoney(net=price, gross=price)\n price = calculate_discounted_price(self.product, price, discounts)\n return price\n\n def get_absolute_url(self):\n slug = self.product.get_slug()\n product_id = self.product.id\n return reverse('product:details',\n kwargs={'slug': slug, 'product_id': product_id})\n\n def as_data(self):\n return {\n 'product_name': str(self),\n 'product_id': self.product.pk,\n 'variant_id': self.pk,\n 'unit_price': str(self.get_price_per_item().gross)}\n\n def is_shipping_required(self):\n return self.product.product_type.is_shipping_required\n\n def is_in_stock(self):\n return self.quantity_available > 0\n\n def display_product(self):\n variant_display = str(self)\n product_display = (\n '%s (%s)' % (self.product, variant_display)\n if variant_display else str(self.product))\n return smart_text(product_display)\n\n def get_first_image(self):\n return self.product.get_first_image()\n\n\nclass ProductAttribute(models.Model):\n slug = models.SlugField(max_length=50, unique=True)\n name = models.CharField(max_length=100)\n\n class Meta:\n ordering = ('slug', )\n\n def __str__(self):\n return self.name\n\n def get_formfield_name(self):\n return slugify('attribute-%s' % self.slug, allow_unicode=True)\n\n def has_values(self):\n return self.values.exists()\n\n\nclass AttributeChoiceValue(models.Model):\n name = models.CharField(max_length=100)\n slug = models.SlugField(max_length=100)\n color = models.CharField(\n max_length=7, blank=True,\n validators=[RegexValidator('^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$')])\n attribute = models.ForeignKey(\n ProductAttribute, related_name='values', on_delete=models.CASCADE)\n\n class Meta:\n unique_together = ('name', 'attribute')\n\n def __str__(self):\n return self.name\n\n\nclass ProductImage(models.Model):\n product = models.ForeignKey(\n Product, related_name='images', on_delete=models.CASCADE)\n image = VersatileImageField(\n upload_to='products', ppoi_field='ppoi', blank=False)\n ppoi = PPOIField()\n alt = models.CharField(max_length=128, blank=True)\n order = models.PositiveIntegerField(editable=False)\n\n class Meta:\n ordering = ('order', )\n app_label = 'product'\n\n def get_ordering_queryset(self):\n return self.product.images.all()\n\n def save(self, *args, **kwargs):\n if self.order is None:\n qs = self.get_ordering_queryset()\n existing_max = qs.aggregate(Max('order'))\n existing_max = existing_max.get('order__max')\n self.order = 0 if existing_max is None else existing_max + 1\n super().save(*args, **kwargs)\n\n def delete(self, *args, **kwargs):\n qs = self.get_ordering_queryset()\n qs.filter(order__gt=self.order).update(order=F('order') - 1)\n super().delete(*args, **kwargs)\n\n\nclass 
VariantImage(models.Model):\n variant = models.ForeignKey(\n 'ProductVariant', related_name='variant_images',\n on_delete=models.CASCADE)\n image = models.ForeignKey(\n ProductImage, related_name='variant_images', on_delete=models.CASCADE)\n\n\nclass Collection(SeoModel):\n name = models.CharField(max_length=128, unique=True)\n slug = models.SlugField(max_length=128)\n products = models.ManyToManyField(\n Product, blank=True, related_name='collections')\n background_image = VersatileImageField(\n upload_to='collection-backgrounds', blank=True, null=True)\n\n class Meta:\n ordering = ['pk']\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse(\n 'product:collection',\n kwargs={'pk': self.id, 'slug': self.slug})\n", "path": "saleor/product/models.py"}]} | 3,918 | 91 |
gh_patches_debug_35760 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1406 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[PS-1] RPC service `push_gradient` async-SGD implementation
Async apply gradients to non-embedding parameters. Updating embedding parameters will be supported in issue #1306 .
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/ps/parameters.py`
Content:
```
1 import tensorflow as tf
2
3 from elasticdl.python.common.tensor import tensor_pb_to_ndarray
4 from elasticdl.python.ps.embedding_table import create_embedding_table
5
6
7 class Parameters(object):
8 """
9 There are two kinds of parameters:
10
11 1. non-embedding parameters, or dense tensor parameters. We save it
12 in a hashmap `non-embedding_params`, the key is the parameter name,
13 the value is a tf.Variable` object.
14 2. embedding parameters, or row-sparse parameters. We save it in a
15 hashmap `embedding_params`, the key is the embedding layer name,
16 the value is an `EmbeddingTable` object.
17
18 """
19
20 def __init__(self):
21 self.version = 0
22 self.init_status = False
23 self.non_embedding_params = {}
24 self.embedding_params = {}
25
26 def get_embedding_param(self, name, indices):
27 if name not in self.embedding_params:
28 raise ValueError(
29 "Please initialize embedding param %s first!", name
30 )
31 return self.embedding_params[name].get(indices)
32
33 def set_embedding_param(self, name, indices, values):
34 if name not in self.embedding_params:
35 raise ValueError(
36 "Please initialize embedding param %s first!", name
37 )
38 self.embedding_params[name].set(indices, values)
39
40 def init_from_model_pb(self, model_pb):
41 if not self.init_status:
42 tensors_pb = model_pb.param
43 embeddings_pb = model_pb.embedding_table_info
44 self._init_non_embedding_params(tensors_pb)
45 self._init_embedding_params(embeddings_pb)
46 self.version = model_pb.version
47 self.init_status = True
48
49 def _init_non_embedding_params(self, tensors_pb):
50 for pb in tensors_pb:
51 name = pb.name
52 arr = tensor_pb_to_ndarray(pb)
53 var = tf.Variable(name=name, initial_value=arr, trainable=True)
54 self.non_embedding_params[name] = var
55
56 def _init_embedding_params(self, embeddings_pb):
57 for pb in embeddings_pb:
58 self.embedding_params[pb.name] = create_embedding_table(pb)
59
```
Path: `elasticdl/python/ps/servicer.py`
Content:
```
1 import threading
2
3 from google.protobuf import empty_pb2
4
5 from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc
6 from elasticdl.python.common.dtypes import dtype_numpy_to_tensor
7 from elasticdl.python.common.tensor import Tensor, serialize_tensor
8
9
10 class PserverServicer(elasticdl_pb2_grpc.PserverServicer):
11 """PS service implementation"""
12
13 def __init__(
14 self,
15 parameters,
16 grads_to_wait,
17 optimizer,
18 lr_staleness_modulation=False,
19 use_async=False,
20 ):
21 self._parameters = parameters
22 self._grads_to_wait = grads_to_wait
23 self._optimizer = optimizer
24 self._lr_staleness_modulation = lr_staleness_modulation
25 self._use_async = use_async
26 self._version = 0
27 self._lock = threading.Lock()
28
29 def pull_variable(self, request, _):
30 """
31 Response with all non-embedding parameters if initialized.
32 """
33 res = elasticdl_pb2.PullVariableResponse()
34 if not self._parameters.init_status:
35 res.model_init_status = False
36 return res
37
38 # Only sync-SGD needs lock
39 # TODO: use a read-write lock to support multiple concurrent reads
40 if not self._use_async:
41 self._lock.acquire()
42 res.model.version = self._parameters.version
43 for name, var in self._parameters.non_embedding_params.items():
44 tensor = res.model.param.add()
45 tensor.name = name
46 tensor.dim.extend(var.shape.as_list())
47 var_values = var.numpy()
48 tensor.content = var_values.tobytes()
49 tensor.dtype = dtype_numpy_to_tensor(var_values.dtype)
50 if not self._use_async:
51 self._lock.release()
52 res.model_init_status = True
53 return res
54
55 def pull_embedding_vector(self, request, _):
56 ret = elasticdl_pb2.Tensor()
57 if not request.ids:
58 return ret
59 embedding_vectors = self._parameters.get_embedding_param(
60 request.name, request.ids
61 )
62 tensor = Tensor(values=embedding_vectors)
63 serialize_tensor(tensor, ret)
64 return ret
65
66 def push_model(self, request, _):
67 with self._lock:
68 self._parameters.init_from_model_pb(request)
69 return empty_pb2.Empty()
70
71 def push_gradient(self, request, _):
72 # TODO: implement this RPC service
73 return elasticdl_pb2.PushGradientResponse()
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/ps/parameters.py b/elasticdl/python/ps/parameters.py
--- a/elasticdl/python/ps/parameters.py
+++ b/elasticdl/python/ps/parameters.py
@@ -23,6 +23,9 @@
self.non_embedding_params = {}
self.embedding_params = {}
+ def get_non_embedding_param(self, name, default_value=None):
+ return self.non_embedding_params.get(name, default_value)
+
def get_embedding_param(self, name, indices):
if name not in self.embedding_params:
raise ValueError(
diff --git a/elasticdl/python/ps/servicer.py b/elasticdl/python/ps/servicer.py
--- a/elasticdl/python/ps/servicer.py
+++ b/elasticdl/python/ps/servicer.py
@@ -4,6 +4,7 @@
from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc
from elasticdl.python.common.dtypes import dtype_numpy_to_tensor
+from elasticdl.python.common.log_utils import default_logger as logger
from elasticdl.python.common.tensor import Tensor, serialize_tensor
@@ -23,7 +24,7 @@
self._optimizer = optimizer
self._lr_staleness_modulation = lr_staleness_modulation
self._use_async = use_async
- self._version = 0
+ self._version_lock = threading.Lock()
self._lock = threading.Lock()
def pull_variable(self, request, _):
@@ -69,5 +70,29 @@
return empty_pb2.Empty()
def push_gradient(self, request, _):
- # TODO: implement this RPC service
+ if self._use_async:
+ grad_vars = []
+ for pb in request.gradients:
+ tensor = Tensor.from_tensor_pb(pb)
+ var = self._parameters.get_non_embedding_param(tensor.name)
+ if var is None:
+ logger.warning(
+ "Gradients with invalid name %s" % tensor.name
+ )
+ continue
+ grad = tensor.to_tf_tensor()
+ grad_vars.append((grad, var))
+
+ self._optimizer.apply_gradients(grad_vars)
+ with self._version_lock:
+ self._parameters.version += 1
+
+ res = elasticdl_pb2.PushGradientResponse()
+ res.accepted = True
+ res.model_version = self._parameters.version
+ return res
+
+ raise NotImplementedError(
+ "Updating parameters synchronously is not implemented."
+ )
return elasticdl_pb2.PushGradientResponse()
| {"golden_diff": "diff --git a/elasticdl/python/ps/parameters.py b/elasticdl/python/ps/parameters.py\n--- a/elasticdl/python/ps/parameters.py\n+++ b/elasticdl/python/ps/parameters.py\n@@ -23,6 +23,9 @@\n self.non_embedding_params = {}\n self.embedding_params = {}\n \n+ def get_non_embedding_param(self, name, default_value=None):\n+ return self.non_embedding_params.get(name, default_value)\n+\n def get_embedding_param(self, name, indices):\n if name not in self.embedding_params:\n raise ValueError(\ndiff --git a/elasticdl/python/ps/servicer.py b/elasticdl/python/ps/servicer.py\n--- a/elasticdl/python/ps/servicer.py\n+++ b/elasticdl/python/ps/servicer.py\n@@ -4,6 +4,7 @@\n \n from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n from elasticdl.python.common.dtypes import dtype_numpy_to_tensor\n+from elasticdl.python.common.log_utils import default_logger as logger\n from elasticdl.python.common.tensor import Tensor, serialize_tensor\n \n \n@@ -23,7 +24,7 @@\n self._optimizer = optimizer\n self._lr_staleness_modulation = lr_staleness_modulation\n self._use_async = use_async\n- self._version = 0\n+ self._version_lock = threading.Lock()\n self._lock = threading.Lock()\n \n def pull_variable(self, request, _):\n@@ -69,5 +70,29 @@\n return empty_pb2.Empty()\n \n def push_gradient(self, request, _):\n- # TODO: implement this RPC service\n+ if self._use_async:\n+ grad_vars = []\n+ for pb in request.gradients:\n+ tensor = Tensor.from_tensor_pb(pb)\n+ var = self._parameters.get_non_embedding_param(tensor.name)\n+ if var is None:\n+ logger.warning(\n+ \"Gradients with invalid name %s\" % tensor.name\n+ )\n+ continue\n+ grad = tensor.to_tf_tensor()\n+ grad_vars.append((grad, var))\n+\n+ self._optimizer.apply_gradients(grad_vars)\n+ with self._version_lock:\n+ self._parameters.version += 1\n+\n+ res = elasticdl_pb2.PushGradientResponse()\n+ res.accepted = True\n+ res.model_version = self._parameters.version\n+ return res\n+\n+ raise NotImplementedError(\n+ \"Updating parameters synchronously is not implemented.\"\n+ )\n return elasticdl_pb2.PushGradientResponse()\n", "issue": "[PS-1] RPC service `push_gradient` async-SGD implementation\nAsync apply gradients to non-embedding parameters. Updating embedding parameters will be supported in issue #1306 .\n", "before_files": [{"content": "import tensorflow as tf\n\nfrom elasticdl.python.common.tensor import tensor_pb_to_ndarray\nfrom elasticdl.python.ps.embedding_table import create_embedding_table\n\n\nclass Parameters(object):\n \"\"\"\n There are two kinds of parameters:\n\n 1. non-embedding parameters, or dense tensor parameters. We save it\n in a hashmap `non-embedding_params`, the key is the parameter name,\n the value is a tf.Variable` object.\n 2. embedding parameters, or row-sparse parameters. 
We save it in a\n hashmap `embedding_params`, the key is the embedding layer name,\n the value is an `EmbeddingTable` object.\n\n \"\"\"\n\n def __init__(self):\n self.version = 0\n self.init_status = False\n self.non_embedding_params = {}\n self.embedding_params = {}\n\n def get_embedding_param(self, name, indices):\n if name not in self.embedding_params:\n raise ValueError(\n \"Please initialize embedding param %s first!\", name\n )\n return self.embedding_params[name].get(indices)\n\n def set_embedding_param(self, name, indices, values):\n if name not in self.embedding_params:\n raise ValueError(\n \"Please initialize embedding param %s first!\", name\n )\n self.embedding_params[name].set(indices, values)\n\n def init_from_model_pb(self, model_pb):\n if not self.init_status:\n tensors_pb = model_pb.param\n embeddings_pb = model_pb.embedding_table_info\n self._init_non_embedding_params(tensors_pb)\n self._init_embedding_params(embeddings_pb)\n self.version = model_pb.version\n self.init_status = True\n\n def _init_non_embedding_params(self, tensors_pb):\n for pb in tensors_pb:\n name = pb.name\n arr = tensor_pb_to_ndarray(pb)\n var = tf.Variable(name=name, initial_value=arr, trainable=True)\n self.non_embedding_params[name] = var\n\n def _init_embedding_params(self, embeddings_pb):\n for pb in embeddings_pb:\n self.embedding_params[pb.name] = create_embedding_table(pb)\n", "path": "elasticdl/python/ps/parameters.py"}, {"content": "import threading\n\nfrom google.protobuf import empty_pb2\n\nfrom elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\nfrom elasticdl.python.common.dtypes import dtype_numpy_to_tensor\nfrom elasticdl.python.common.tensor import Tensor, serialize_tensor\n\n\nclass PserverServicer(elasticdl_pb2_grpc.PserverServicer):\n \"\"\"PS service implementation\"\"\"\n\n def __init__(\n self,\n parameters,\n grads_to_wait,\n optimizer,\n lr_staleness_modulation=False,\n use_async=False,\n ):\n self._parameters = parameters\n self._grads_to_wait = grads_to_wait\n self._optimizer = optimizer\n self._lr_staleness_modulation = lr_staleness_modulation\n self._use_async = use_async\n self._version = 0\n self._lock = threading.Lock()\n\n def pull_variable(self, request, _):\n \"\"\"\n Response with all non-embedding parameters if initialized.\n \"\"\"\n res = elasticdl_pb2.PullVariableResponse()\n if not self._parameters.init_status:\n res.model_init_status = False\n return res\n\n # Only sync-SGD needs lock\n # TODO: use a read-write lock to support multiple concurrent reads\n if not self._use_async:\n self._lock.acquire()\n res.model.version = self._parameters.version\n for name, var in self._parameters.non_embedding_params.items():\n tensor = res.model.param.add()\n tensor.name = name\n tensor.dim.extend(var.shape.as_list())\n var_values = var.numpy()\n tensor.content = var_values.tobytes()\n tensor.dtype = dtype_numpy_to_tensor(var_values.dtype)\n if not self._use_async:\n self._lock.release()\n res.model_init_status = True\n return res\n\n def pull_embedding_vector(self, request, _):\n ret = elasticdl_pb2.Tensor()\n if not request.ids:\n return ret\n embedding_vectors = self._parameters.get_embedding_param(\n request.name, request.ids\n )\n tensor = Tensor(values=embedding_vectors)\n serialize_tensor(tensor, ret)\n return ret\n\n def push_model(self, request, _):\n with self._lock:\n self._parameters.init_from_model_pb(request)\n return empty_pb2.Empty()\n\n def push_gradient(self, request, _):\n # TODO: implement this RPC service\n return 
elasticdl_pb2.PushGradientResponse()\n", "path": "elasticdl/python/ps/servicer.py"}], "after_files": [{"content": "import tensorflow as tf\n\nfrom elasticdl.python.common.tensor import tensor_pb_to_ndarray\nfrom elasticdl.python.ps.embedding_table import create_embedding_table\n\n\nclass Parameters(object):\n \"\"\"\n There are two kinds of parameters:\n\n 1. non-embedding parameters, or dense tensor parameters. We save it\n in a hashmap `non-embedding_params`, the key is the parameter name,\n the value is a tf.Variable` object.\n 2. embedding parameters, or row-sparse parameters. We save it in a\n hashmap `embedding_params`, the key is the embedding layer name,\n the value is an `EmbeddingTable` object.\n\n \"\"\"\n\n def __init__(self):\n self.version = 0\n self.init_status = False\n self.non_embedding_params = {}\n self.embedding_params = {}\n\n def get_non_embedding_param(self, name, default_value=None):\n return self.non_embedding_params.get(name, default_value)\n\n def get_embedding_param(self, name, indices):\n if name not in self.embedding_params:\n raise ValueError(\n \"Please initialize embedding param %s first!\", name\n )\n return self.embedding_params[name].get(indices)\n\n def set_embedding_param(self, name, indices, values):\n if name not in self.embedding_params:\n raise ValueError(\n \"Please initialize embedding param %s first!\", name\n )\n self.embedding_params[name].set(indices, values)\n\n def init_from_model_pb(self, model_pb):\n if not self.init_status:\n tensors_pb = model_pb.param\n embeddings_pb = model_pb.embedding_table_info\n self._init_non_embedding_params(tensors_pb)\n self._init_embedding_params(embeddings_pb)\n self.version = model_pb.version\n self.init_status = True\n\n def _init_non_embedding_params(self, tensors_pb):\n for pb in tensors_pb:\n name = pb.name\n arr = tensor_pb_to_ndarray(pb)\n var = tf.Variable(name=name, initial_value=arr, trainable=True)\n self.non_embedding_params[name] = var\n\n def _init_embedding_params(self, embeddings_pb):\n for pb in embeddings_pb:\n self.embedding_params[pb.name] = create_embedding_table(pb)\n", "path": "elasticdl/python/ps/parameters.py"}, {"content": "import threading\n\nfrom google.protobuf import empty_pb2\n\nfrom elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\nfrom elasticdl.python.common.dtypes import dtype_numpy_to_tensor\nfrom elasticdl.python.common.log_utils import default_logger as logger\nfrom elasticdl.python.common.tensor import Tensor, serialize_tensor\n\n\nclass PserverServicer(elasticdl_pb2_grpc.PserverServicer):\n \"\"\"PS service implementation\"\"\"\n\n def __init__(\n self,\n parameters,\n grads_to_wait,\n optimizer,\n lr_staleness_modulation=False,\n use_async=False,\n ):\n self._parameters = parameters\n self._grads_to_wait = grads_to_wait\n self._optimizer = optimizer\n self._lr_staleness_modulation = lr_staleness_modulation\n self._use_async = use_async\n self._version_lock = threading.Lock()\n self._lock = threading.Lock()\n\n def pull_variable(self, request, _):\n \"\"\"\n Response with all non-embedding parameters if initialized.\n \"\"\"\n res = elasticdl_pb2.PullVariableResponse()\n if not self._parameters.init_status:\n res.model_init_status = False\n return res\n\n # Only sync-SGD needs lock\n # TODO: use a read-write lock to support multiple concurrent reads\n if not self._use_async:\n self._lock.acquire()\n res.model.version = self._parameters.version\n for name, var in self._parameters.non_embedding_params.items():\n tensor = res.model.param.add()\n tensor.name = 
name\n tensor.dim.extend(var.shape.as_list())\n var_values = var.numpy()\n tensor.content = var_values.tobytes()\n tensor.dtype = dtype_numpy_to_tensor(var_values.dtype)\n if not self._use_async:\n self._lock.release()\n res.model_init_status = True\n return res\n\n def pull_embedding_vector(self, request, _):\n ret = elasticdl_pb2.Tensor()\n if not request.ids:\n return ret\n embedding_vectors = self._parameters.get_embedding_param(\n request.name, request.ids\n )\n tensor = Tensor(values=embedding_vectors)\n serialize_tensor(tensor, ret)\n return ret\n\n def push_model(self, request, _):\n with self._lock:\n self._parameters.init_from_model_pb(request)\n return empty_pb2.Empty()\n\n def push_gradient(self, request, _):\n if self._use_async:\n grad_vars = []\n for pb in request.gradients:\n tensor = Tensor.from_tensor_pb(pb)\n var = self._parameters.get_non_embedding_param(tensor.name)\n if var is None:\n logger.warning(\n \"Gradients with invalid name %s\" % tensor.name\n )\n continue\n grad = tensor.to_tf_tensor()\n grad_vars.append((grad, var))\n\n self._optimizer.apply_gradients(grad_vars)\n with self._version_lock:\n self._parameters.version += 1\n\n res = elasticdl_pb2.PushGradientResponse()\n res.accepted = True\n res.model_version = self._parameters.version\n return res\n\n raise NotImplementedError(\n \"Updating parameters synchronously is not implemented.\"\n )\n return elasticdl_pb2.PushGradientResponse()\n", "path": "elasticdl/python/ps/servicer.py"}]} | 1,532 | 567 |
gh_patches_debug_39766 | rasdani/github-patches | git_diff | SeldonIO__MLServer-381 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make the model's URI relative to the model-settings.json file
Currently, the model URI is expected to be relative to where the `mlserver` command is running from. However, this can be unintuitive for users, who may expect that path to be relative to where it's specified, i.e. the `model-settings.json` file.
It would be good to consider whether changing the reference for the path could make this more intuitive for users and what could its side-effects.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/repository.py`
Content:
```
1 import os
2 import glob
3
4 from typing import List
5
6 from .settings import ModelParameters, ModelSettings
7 from .errors import ModelNotFound
8 from .logging import logger
9
10 DEFAULT_MODEL_SETTINGS_FILENAME = "model-settings.json"
11
12
13 class ModelRepository:
14 """
15 Model repository, responsible of the discovery of models which can be
16 loaded onto the model registry.
17 """
18
19 def __init__(self, root: str = None):
20 self._root = root
21
22 async def list(self) -> List[ModelSettings]:
23 all_model_settings = []
24
25 # TODO: Use an async alternative for filesys ops
26 if self._root:
27 pattern = os.path.join(self._root, "**", DEFAULT_MODEL_SETTINGS_FILENAME)
28 matches = glob.glob(pattern, recursive=True)
29
30 for model_settings_path in matches:
31 model_settings = self._load_model_settings(model_settings_path)
32 all_model_settings.append(model_settings)
33
34 # If there were no matches, try to load model from environment
35 if not all_model_settings:
36 # return default
37 model_settings = ModelSettings()
38 model_settings.parameters = ModelParameters()
39 all_model_settings.append(model_settings)
40
41 return all_model_settings
42
43 def _load_model_settings(self, model_settings_path: str) -> ModelSettings:
44 model_settings = ModelSettings.parse_file(model_settings_path)
45
46 # If name not present, default to folder name
47 default_model_name = os.path.basename(os.path.dirname(model_settings_path))
48 if model_settings.name:
49 if model_settings.name != default_model_name:
50 # Raise warning if name is different than folder's name
51 logger.warning(
52 f"Model name '{model_settings.name}' is different than "
53 f"model's folder name '{default_model_name}'."
54 )
55 else:
56 model_settings.name = default_model_name
57
58 if not model_settings.parameters:
59 model_settings.parameters = ModelParameters()
60
61 if not model_settings.parameters.uri:
62 # If not specified, default to its own folder
63 default_model_uri = os.path.dirname(model_settings_path)
64 model_settings.parameters.uri = default_model_uri
65
66 return model_settings
67
68 async def find(self, name: str) -> ModelSettings:
69 all_settings = await self.list()
70 for model_settings in all_settings:
71 if model_settings.name == name:
72 # TODO: Implement version policy
73 return model_settings
74
75 raise ModelNotFound(name)
76
```
Path: `mlserver/utils.py`
Content:
```
1 import os
2 import uuid
3
4 from typing import Callable, List
5
6 from .settings import ModelSettings
7 from .errors import InvalidModelURI
8
9
10 async def get_model_uri(
11 settings: ModelSettings, wellknown_filenames: List[str] = []
12 ) -> str:
13 if not settings.parameters:
14 raise InvalidModelURI(settings.name)
15
16 model_uri = settings.parameters.uri
17
18 if not model_uri:
19 raise InvalidModelURI(settings.name)
20
21 if os.path.isfile(model_uri):
22 return model_uri
23
24 if os.path.isdir(model_uri):
25 # If model_uri is a folder, search for a well-known model filename
26 for fname in wellknown_filenames:
27 model_path = os.path.join(model_uri, fname)
28 if os.path.isfile(model_path):
29 return model_path
30
31 # If none, return the folder
32 return model_uri
33
34 # Otherwise, the uri is neither a file nor a folder
35 raise InvalidModelURI(settings.name, model_uri)
36
37
38 def get_wrapped_method(f: Callable) -> Callable:
39 while hasattr(f, "__wrapped__"):
40 f = f.__wrapped__ # type: ignore
41
42 return f
43
44
45 def generate_uuid() -> str:
46 return str(uuid.uuid4())
47
```
Path: `mlserver/settings.py`
Content:
```
1 from typing import List, Optional
2 from pydantic import BaseSettings, PyObject
3
4 from .version import __version__
5 from .types import MetadataTensor
6
7 ENV_PREFIX_SETTINGS = "MLSERVER_"
8 ENV_PREFIX_MODEL_SETTINGS = "MLSERVER_MODEL_"
9
10
11 class Settings(BaseSettings):
12 class Config:
13 env_prefix = ENV_PREFIX_SETTINGS
14
15 debug: bool = True
16
17 # Model repository settings
18 """Root of the model repository, where we will search for models."""
19 model_repository_root: str = "."
20 """Flag to load all available models automatically at startup."""
21 load_models_at_startup: bool = True
22
23 # Server metadata
24 """Name of the server."""
25 server_name: str = "mlserver"
26 """Version of the server."""
27 server_version: str = __version__
28 """Server extensions loaded."""
29 extensions: List[str] = []
30
31 # Server settings
32 """Host where to listen for connections."""
33 host: str = "0.0.0.0"
34 """Port where to listen for HTTP / REST connections."""
35 http_port: int = 8080
36 """Port where to listen for gRPC connections."""
37 grpc_port: int = 8081
38 """Maximum length (i.e. size) of gRPC payloads."""
39 grpc_max_message_length: Optional[int] = None
40
41
42 class ModelParameters(BaseSettings):
43 """
44 Parameters that apply only to a particular instance of a model.
45 This can include things like model weights, or arbitrary ``extra``
46 parameters particular to the underlying inference runtime.
47 The main difference with respect to ``ModelSettings`` is that parameters
48 can change on each instance (e.g. each version) of the model.
49 """
50
51 class Config:
52 env_prefix = ENV_PREFIX_MODEL_SETTINGS
53
54 """
55 URI where the model artifacts can be found.
56 This path must be either absolute or relative to where MLServer is running.
57 """
58 uri: Optional[str] = None
59 """Version of the model."""
60 version: Optional[str] = None
61 """Format of the model (only available on certain runtimes)."""
62 format: Optional[str] = None
63 """Default content type to use for requests and responses."""
64 content_type: Optional[str] = None
65 """Arbitrary settings, dependent on the inference runtime
66 implementation."""
67 extra: Optional[dict] = {}
68
69
70 class ModelSettings(BaseSettings):
71 class Config:
72 env_prefix = ENV_PREFIX_MODEL_SETTINGS
73
74 """Name of the model."""
75 name: str = ""
76
77 # Model metadata
78 """Framework used to train and serialise the model (e.g. sklearn)."""
79 platform: str = ""
80 """Versions of dependencies used to train the model (e.g.
81 sklearn/0.20.1)."""
82 versions: List[str] = []
83 """Metadata about the inputs accepted by the model."""
84 inputs: List[MetadataTensor] = []
85 """Metadata about the outputs returned by the model."""
86 outputs: List[MetadataTensor] = []
87
88 # Parallel settings
89 """When parallel inference is enabled, number of workers to run inference
90 across."""
91 parallel_workers: int = 4
92
93 # Adaptive Batching settings (disabled by default)
94 """When adaptive batching is enabled, maximum number of requests to group
95 together in a single batch."""
96 max_batch_size: int = 0
97 """When adaptive batching is enabled, maximum amount of time (in seconds)
98 to wait for enough requests to build a full batch."""
99 max_batch_time: float = 0.0
100
101 # Custom model class implementation
102 """*Python path* to the inference runtime to use to serve this model (e.g.
103 ``mlserver_sklearn.SKLearnModel``)."""
104 implementation: PyObject = "mlserver.model.MLModel" # type: ignore
105
106 # Model parameters are meant to be set directly by the MLServer runtime.
107 # However, it's also possible to override them manually.
108 """Extra parameters for each instance of this model."""
109 parameters: Optional[ModelParameters] = None
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlserver/repository.py b/mlserver/repository.py
--- a/mlserver/repository.py
+++ b/mlserver/repository.py
@@ -42,9 +42,11 @@
def _load_model_settings(self, model_settings_path: str) -> ModelSettings:
model_settings = ModelSettings.parse_file(model_settings_path)
+ model_settings._source = model_settings_path
# If name not present, default to folder name
- default_model_name = os.path.basename(os.path.dirname(model_settings_path))
+ model_settings_folder = os.path.dirname(model_settings_path)
+ default_model_name = os.path.basename(model_settings_folder)
if model_settings.name:
if model_settings.name != default_model_name:
# Raise warning if name is different than folder's name
diff --git a/mlserver/settings.py b/mlserver/settings.py
--- a/mlserver/settings.py
+++ b/mlserver/settings.py
@@ -70,6 +70,10 @@
class ModelSettings(BaseSettings):
class Config:
env_prefix = ENV_PREFIX_MODEL_SETTINGS
+ underscore_attrs_are_private = True
+
+ # Source points to the file where model settings were loaded from
+ _source: Optional[str] = None
"""Name of the model."""
name: str = ""
diff --git a/mlserver/utils.py b/mlserver/utils.py
--- a/mlserver/utils.py
+++ b/mlserver/utils.py
@@ -1,7 +1,7 @@
import os
import uuid
-from typing import Callable, List
+from typing import Callable, Optional, List
from .settings import ModelSettings
from .errors import InvalidModelURI
@@ -14,25 +14,36 @@
raise InvalidModelURI(settings.name)
model_uri = settings.parameters.uri
-
if not model_uri:
raise InvalidModelURI(settings.name)
- if os.path.isfile(model_uri):
- return model_uri
+ full_model_uri = _to_absolute_path(settings._source, model_uri)
+ if os.path.isfile(full_model_uri):
+ return full_model_uri
- if os.path.isdir(model_uri):
- # If model_uri is a folder, search for a well-known model filename
+ if os.path.isdir(full_model_uri):
+ # If full_model_uri is a folder, search for a well-known model filename
for fname in wellknown_filenames:
- model_path = os.path.join(model_uri, fname)
+ model_path = os.path.join(full_model_uri, fname)
if os.path.isfile(model_path):
return model_path
# If none, return the folder
- return model_uri
+ return full_model_uri
# Otherwise, the uri is neither a file nor a folder
- raise InvalidModelURI(settings.name, model_uri)
+ raise InvalidModelURI(settings.name, full_model_uri)
+
+
+def _to_absolute_path(source: Optional[str], model_uri: str) -> str:
+ if source is None:
+ # Treat path as either absolute or relative to the working directory of
+ # the MLServer instance
+ return model_uri
+
+ parent_folder = os.path.dirname(source)
+ unnormalised = os.path.join(parent_folder, model_uri)
+ return os.path.normpath(unnormalised)
def get_wrapped_method(f: Callable) -> Callable:
| {"golden_diff": "diff --git a/mlserver/repository.py b/mlserver/repository.py\n--- a/mlserver/repository.py\n+++ b/mlserver/repository.py\n@@ -42,9 +42,11 @@\n \n def _load_model_settings(self, model_settings_path: str) -> ModelSettings:\n model_settings = ModelSettings.parse_file(model_settings_path)\n+ model_settings._source = model_settings_path\n \n # If name not present, default to folder name\n- default_model_name = os.path.basename(os.path.dirname(model_settings_path))\n+ model_settings_folder = os.path.dirname(model_settings_path)\n+ default_model_name = os.path.basename(model_settings_folder)\n if model_settings.name:\n if model_settings.name != default_model_name:\n # Raise warning if name is different than folder's name\ndiff --git a/mlserver/settings.py b/mlserver/settings.py\n--- a/mlserver/settings.py\n+++ b/mlserver/settings.py\n@@ -70,6 +70,10 @@\n class ModelSettings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n+ underscore_attrs_are_private = True\n+\n+ # Source points to the file where model settings were loaded from\n+ _source: Optional[str] = None\n \n \"\"\"Name of the model.\"\"\"\n name: str = \"\"\ndiff --git a/mlserver/utils.py b/mlserver/utils.py\n--- a/mlserver/utils.py\n+++ b/mlserver/utils.py\n@@ -1,7 +1,7 @@\n import os\n import uuid\n \n-from typing import Callable, List\n+from typing import Callable, Optional, List\n \n from .settings import ModelSettings\n from .errors import InvalidModelURI\n@@ -14,25 +14,36 @@\n raise InvalidModelURI(settings.name)\n \n model_uri = settings.parameters.uri\n-\n if not model_uri:\n raise InvalidModelURI(settings.name)\n \n- if os.path.isfile(model_uri):\n- return model_uri\n+ full_model_uri = _to_absolute_path(settings._source, model_uri)\n+ if os.path.isfile(full_model_uri):\n+ return full_model_uri\n \n- if os.path.isdir(model_uri):\n- # If model_uri is a folder, search for a well-known model filename\n+ if os.path.isdir(full_model_uri):\n+ # If full_model_uri is a folder, search for a well-known model filename\n for fname in wellknown_filenames:\n- model_path = os.path.join(model_uri, fname)\n+ model_path = os.path.join(full_model_uri, fname)\n if os.path.isfile(model_path):\n return model_path\n \n # If none, return the folder\n- return model_uri\n+ return full_model_uri\n \n # Otherwise, the uri is neither a file nor a folder\n- raise InvalidModelURI(settings.name, model_uri)\n+ raise InvalidModelURI(settings.name, full_model_uri)\n+\n+\n+def _to_absolute_path(source: Optional[str], model_uri: str) -> str:\n+ if source is None:\n+ # Treat path as either absolute or relative to the working directory of\n+ # the MLServer instance\n+ return model_uri\n+\n+ parent_folder = os.path.dirname(source)\n+ unnormalised = os.path.join(parent_folder, model_uri)\n+ return os.path.normpath(unnormalised)\n \n \n def get_wrapped_method(f: Callable) -> Callable:\n", "issue": "Make the model's URI relative to the model-settings.json file\nCurrently, the model URI is expected to be relative to where the `mlserver` command is running from. However, this can be unintuitive for users, who may expect that path to be relative to where it's specified, i.e. 
the `model-settings.json` file.\n\nIt would be good to consider whether changing the reference for the path could make this more intuitive for users and what could its side-effects.\n", "before_files": [{"content": "import os\nimport glob\n\nfrom typing import List\n\nfrom .settings import ModelParameters, ModelSettings\nfrom .errors import ModelNotFound\nfrom .logging import logger\n\nDEFAULT_MODEL_SETTINGS_FILENAME = \"model-settings.json\"\n\n\nclass ModelRepository:\n \"\"\"\n Model repository, responsible of the discovery of models which can be\n loaded onto the model registry.\n \"\"\"\n\n def __init__(self, root: str = None):\n self._root = root\n\n async def list(self) -> List[ModelSettings]:\n all_model_settings = []\n\n # TODO: Use an async alternative for filesys ops\n if self._root:\n pattern = os.path.join(self._root, \"**\", DEFAULT_MODEL_SETTINGS_FILENAME)\n matches = glob.glob(pattern, recursive=True)\n\n for model_settings_path in matches:\n model_settings = self._load_model_settings(model_settings_path)\n all_model_settings.append(model_settings)\n\n # If there were no matches, try to load model from environment\n if not all_model_settings:\n # return default\n model_settings = ModelSettings()\n model_settings.parameters = ModelParameters()\n all_model_settings.append(model_settings)\n\n return all_model_settings\n\n def _load_model_settings(self, model_settings_path: str) -> ModelSettings:\n model_settings = ModelSettings.parse_file(model_settings_path)\n\n # If name not present, default to folder name\n default_model_name = os.path.basename(os.path.dirname(model_settings_path))\n if model_settings.name:\n if model_settings.name != default_model_name:\n # Raise warning if name is different than folder's name\n logger.warning(\n f\"Model name '{model_settings.name}' is different than \"\n f\"model's folder name '{default_model_name}'.\"\n )\n else:\n model_settings.name = default_model_name\n\n if not model_settings.parameters:\n model_settings.parameters = ModelParameters()\n\n if not model_settings.parameters.uri:\n # If not specified, default to its own folder\n default_model_uri = os.path.dirname(model_settings_path)\n model_settings.parameters.uri = default_model_uri\n\n return model_settings\n\n async def find(self, name: str) -> ModelSettings:\n all_settings = await self.list()\n for model_settings in all_settings:\n if model_settings.name == name:\n # TODO: Implement version policy\n return model_settings\n\n raise ModelNotFound(name)\n", "path": "mlserver/repository.py"}, {"content": "import os\nimport uuid\n\nfrom typing import Callable, List\n\nfrom .settings import ModelSettings\nfrom .errors import InvalidModelURI\n\n\nasync def get_model_uri(\n settings: ModelSettings, wellknown_filenames: List[str] = []\n) -> str:\n if not settings.parameters:\n raise InvalidModelURI(settings.name)\n\n model_uri = settings.parameters.uri\n\n if not model_uri:\n raise InvalidModelURI(settings.name)\n\n if os.path.isfile(model_uri):\n return model_uri\n\n if os.path.isdir(model_uri):\n # If model_uri is a folder, search for a well-known model filename\n for fname in wellknown_filenames:\n model_path = os.path.join(model_uri, fname)\n if os.path.isfile(model_path):\n return model_path\n\n # If none, return the folder\n return model_uri\n\n # Otherwise, the uri is neither a file nor a folder\n raise InvalidModelURI(settings.name, model_uri)\n\n\ndef get_wrapped_method(f: Callable) -> Callable:\n while hasattr(f, \"__wrapped__\"):\n f = f.__wrapped__ # type: ignore\n\n return 
f\n\n\ndef generate_uuid() -> str:\n return str(uuid.uuid4())\n", "path": "mlserver/utils.py"}, {"content": "from typing import List, Optional\nfrom pydantic import BaseSettings, PyObject\n\nfrom .version import __version__\nfrom .types import MetadataTensor\n\nENV_PREFIX_SETTINGS = \"MLSERVER_\"\nENV_PREFIX_MODEL_SETTINGS = \"MLSERVER_MODEL_\"\n\n\nclass Settings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_SETTINGS\n\n debug: bool = True\n\n # Model repository settings\n \"\"\"Root of the model repository, where we will search for models.\"\"\"\n model_repository_root: str = \".\"\n \"\"\"Flag to load all available models automatically at startup.\"\"\"\n load_models_at_startup: bool = True\n\n # Server metadata\n \"\"\"Name of the server.\"\"\"\n server_name: str = \"mlserver\"\n \"\"\"Version of the server.\"\"\"\n server_version: str = __version__\n \"\"\"Server extensions loaded.\"\"\"\n extensions: List[str] = []\n\n # Server settings\n \"\"\"Host where to listen for connections.\"\"\"\n host: str = \"0.0.0.0\"\n \"\"\"Port where to listen for HTTP / REST connections.\"\"\"\n http_port: int = 8080\n \"\"\"Port where to listen for gRPC connections.\"\"\"\n grpc_port: int = 8081\n \"\"\"Maximum length (i.e. size) of gRPC payloads.\"\"\"\n grpc_max_message_length: Optional[int] = None\n\n\nclass ModelParameters(BaseSettings):\n \"\"\"\n Parameters that apply only to a particular instance of a model.\n This can include things like model weights, or arbitrary ``extra``\n parameters particular to the underlying inference runtime.\n The main difference with respect to ``ModelSettings`` is that parameters\n can change on each instance (e.g. each version) of the model.\n \"\"\"\n\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n \"\"\"\n URI where the model artifacts can be found.\n This path must be either absolute or relative to where MLServer is running.\n \"\"\"\n uri: Optional[str] = None\n \"\"\"Version of the model.\"\"\"\n version: Optional[str] = None\n \"\"\"Format of the model (only available on certain runtimes).\"\"\"\n format: Optional[str] = None\n \"\"\"Default content type to use for requests and responses.\"\"\"\n content_type: Optional[str] = None\n \"\"\"Arbitrary settings, dependent on the inference runtime\n implementation.\"\"\"\n extra: Optional[dict] = {}\n\n\nclass ModelSettings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n \"\"\"Name of the model.\"\"\"\n name: str = \"\"\n\n # Model metadata\n \"\"\"Framework used to train and serialise the model (e.g. 
sklearn).\"\"\"\n platform: str = \"\"\n \"\"\"Versions of dependencies used to train the model (e.g.\n sklearn/0.20.1).\"\"\"\n versions: List[str] = []\n \"\"\"Metadata about the inputs accepted by the model.\"\"\"\n inputs: List[MetadataTensor] = []\n \"\"\"Metadata about the outputs returned by the model.\"\"\"\n outputs: List[MetadataTensor] = []\n\n # Parallel settings\n \"\"\"When parallel inference is enabled, number of workers to run inference\n across.\"\"\"\n parallel_workers: int = 4\n\n # Adaptive Batching settings (disabled by default)\n \"\"\"When adaptive batching is enabled, maximum number of requests to group\n together in a single batch.\"\"\"\n max_batch_size: int = 0\n \"\"\"When adaptive batching is enabled, maximum amount of time (in seconds)\n to wait for enough requests to build a full batch.\"\"\"\n max_batch_time: float = 0.0\n\n # Custom model class implementation\n \"\"\"*Python path* to the inference runtime to use to serve this model (e.g.\n ``mlserver_sklearn.SKLearnModel``).\"\"\"\n implementation: PyObject = \"mlserver.model.MLModel\" # type: ignore\n\n # Model parameters are meant to be set directly by the MLServer runtime.\n # However, it's also possible to override them manually.\n \"\"\"Extra parameters for each instance of this model.\"\"\"\n parameters: Optional[ModelParameters] = None\n", "path": "mlserver/settings.py"}], "after_files": [{"content": "import os\nimport glob\n\nfrom typing import List\n\nfrom .settings import ModelParameters, ModelSettings\nfrom .errors import ModelNotFound\nfrom .logging import logger\n\nDEFAULT_MODEL_SETTINGS_FILENAME = \"model-settings.json\"\n\n\nclass ModelRepository:\n \"\"\"\n Model repository, responsible of the discovery of models which can be\n loaded onto the model registry.\n \"\"\"\n\n def __init__(self, root: str = None):\n self._root = root\n\n async def list(self) -> List[ModelSettings]:\n all_model_settings = []\n\n # TODO: Use an async alternative for filesys ops\n if self._root:\n pattern = os.path.join(self._root, \"**\", DEFAULT_MODEL_SETTINGS_FILENAME)\n matches = glob.glob(pattern, recursive=True)\n\n for model_settings_path in matches:\n model_settings = self._load_model_settings(model_settings_path)\n all_model_settings.append(model_settings)\n\n # If there were no matches, try to load model from environment\n if not all_model_settings:\n # return default\n model_settings = ModelSettings()\n model_settings.parameters = ModelParameters()\n all_model_settings.append(model_settings)\n\n return all_model_settings\n\n def _load_model_settings(self, model_settings_path: str) -> ModelSettings:\n model_settings = ModelSettings.parse_file(model_settings_path)\n model_settings._source = model_settings_path\n\n # If name not present, default to folder name\n model_settings_folder = os.path.dirname(model_settings_path)\n default_model_name = os.path.basename(model_settings_folder)\n if model_settings.name:\n if model_settings.name != default_model_name:\n # Raise warning if name is different than folder's name\n logger.warning(\n f\"Model name '{model_settings.name}' is different than \"\n f\"model's folder name '{default_model_name}'.\"\n )\n else:\n model_settings.name = default_model_name\n\n if not model_settings.parameters:\n model_settings.parameters = ModelParameters()\n\n if not model_settings.parameters.uri:\n # If not specified, default to its own folder\n default_model_uri = os.path.dirname(model_settings_path)\n model_settings.parameters.uri = default_model_uri\n\n return model_settings\n\n 
async def find(self, name: str) -> ModelSettings:\n all_settings = await self.list()\n for model_settings in all_settings:\n if model_settings.name == name:\n # TODO: Implement version policy\n return model_settings\n\n raise ModelNotFound(name)\n", "path": "mlserver/repository.py"}, {"content": "import os\nimport uuid\n\nfrom typing import Callable, Optional, List\n\nfrom .settings import ModelSettings\nfrom .errors import InvalidModelURI\n\n\nasync def get_model_uri(\n settings: ModelSettings, wellknown_filenames: List[str] = []\n) -> str:\n if not settings.parameters:\n raise InvalidModelURI(settings.name)\n\n model_uri = settings.parameters.uri\n if not model_uri:\n raise InvalidModelURI(settings.name)\n\n full_model_uri = _to_absolute_path(settings._source, model_uri)\n if os.path.isfile(full_model_uri):\n return full_model_uri\n\n if os.path.isdir(full_model_uri):\n # If full_model_uri is a folder, search for a well-known model filename\n for fname in wellknown_filenames:\n model_path = os.path.join(full_model_uri, fname)\n if os.path.isfile(model_path):\n return model_path\n\n # If none, return the folder\n return full_model_uri\n\n # Otherwise, the uri is neither a file nor a folder\n raise InvalidModelURI(settings.name, full_model_uri)\n\n\ndef _to_absolute_path(source: Optional[str], model_uri: str) -> str:\n if source is None:\n # Treat path as either absolute or relative to the working directory of\n # the MLServer instance\n return model_uri\n\n parent_folder = os.path.dirname(source)\n unnormalised = os.path.join(parent_folder, model_uri)\n return os.path.normpath(unnormalised)\n\n\ndef get_wrapped_method(f: Callable) -> Callable:\n while hasattr(f, \"__wrapped__\"):\n f = f.__wrapped__ # type: ignore\n\n return f\n\n\ndef generate_uuid() -> str:\n return str(uuid.uuid4())\n", "path": "mlserver/utils.py"}, {"content": "from typing import List, Optional\nfrom pydantic import BaseSettings, PyObject\n\nfrom .version import __version__\nfrom .types import MetadataTensor\n\nENV_PREFIX_SETTINGS = \"MLSERVER_\"\nENV_PREFIX_MODEL_SETTINGS = \"MLSERVER_MODEL_\"\n\n\nclass Settings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_SETTINGS\n\n debug: bool = True\n\n # Model repository settings\n \"\"\"Root of the model repository, where we will search for models.\"\"\"\n model_repository_root: str = \".\"\n \"\"\"Flag to load all available models automatically at startup.\"\"\"\n load_models_at_startup: bool = True\n\n # Server metadata\n \"\"\"Name of the server.\"\"\"\n server_name: str = \"mlserver\"\n \"\"\"Version of the server.\"\"\"\n server_version: str = __version__\n \"\"\"Server extensions loaded.\"\"\"\n extensions: List[str] = []\n\n # Server settings\n \"\"\"Host where to listen for connections.\"\"\"\n host: str = \"0.0.0.0\"\n \"\"\"Port where to listen for HTTP / REST connections.\"\"\"\n http_port: int = 8080\n \"\"\"Port where to listen for gRPC connections.\"\"\"\n grpc_port: int = 8081\n \"\"\"Maximum length (i.e. size) of gRPC payloads.\"\"\"\n grpc_max_message_length: Optional[int] = None\n\n\nclass ModelParameters(BaseSettings):\n \"\"\"\n Parameters that apply only to a particular instance of a model.\n This can include things like model weights, or arbitrary ``extra``\n parameters particular to the underlying inference runtime.\n The main difference with respect to ``ModelSettings`` is that parameters\n can change on each instance (e.g. 
each version) of the model.\n \"\"\"\n\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n \"\"\"\n URI where the model artifacts can be found.\n This path must be either absolute or relative to where MLServer is running.\n \"\"\"\n uri: Optional[str] = None\n \"\"\"Version of the model.\"\"\"\n version: Optional[str] = None\n \"\"\"Format of the model (only available on certain runtimes).\"\"\"\n format: Optional[str] = None\n \"\"\"Default content type to use for requests and responses.\"\"\"\n content_type: Optional[str] = None\n \"\"\"Arbitrary settings, dependent on the inference runtime\n implementation.\"\"\"\n extra: Optional[dict] = {}\n\n\nclass ModelSettings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n underscore_attrs_are_private = True\n\n # Source points to the file where model settings were loaded from\n _source: Optional[str] = None\n\n \"\"\"Name of the model.\"\"\"\n name: str = \"\"\n\n # Model metadata\n \"\"\"Framework used to train and serialise the model (e.g. sklearn).\"\"\"\n platform: str = \"\"\n \"\"\"Versions of dependencies used to train the model (e.g.\n sklearn/0.20.1).\"\"\"\n versions: List[str] = []\n \"\"\"Metadata about the inputs accepted by the model.\"\"\"\n inputs: List[MetadataTensor] = []\n \"\"\"Metadata about the outputs returned by the model.\"\"\"\n outputs: List[MetadataTensor] = []\n\n # Parallel settings\n \"\"\"When parallel inference is enabled, number of workers to run inference\n across.\"\"\"\n parallel_workers: int = 4\n\n # Adaptive Batching settings (disabled by default)\n \"\"\"When adaptive batching is enabled, maximum number of requests to group\n together in a single batch.\"\"\"\n max_batch_size: int = 0\n \"\"\"When adaptive batching is enabled, maximum amount of time (in seconds)\n to wait for enough requests to build a full batch.\"\"\"\n max_batch_time: float = 0.0\n\n # Custom model class implementation\n \"\"\"*Python path* to the inference runtime to use to serve this model (e.g.\n ``mlserver_sklearn.SKLearnModel``).\"\"\"\n implementation: PyObject = \"mlserver.model.MLModel\" # type: ignore\n\n # Model parameters are meant to be set directly by the MLServer runtime.\n # However, it's also possible to override them manually.\n \"\"\"Extra parameters for each instance of this model.\"\"\"\n parameters: Optional[ModelParameters] = None\n", "path": "mlserver/settings.py"}]} | 2,488 | 720 |
gh_patches_debug_5111 | rasdani/github-patches | git_diff | pytorch__TensorRT-1004 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
🐛 [Bug] Failed to use stoi to get the engine ID in TRTEngine function
## Bug Description
Got this error:
```
INFO: [Torch-TensorRT - Debug Build] - [MemUsageChange] TensorRT-managed allocation in IExecutionContext creation: CPU +0, GPU +132, now: CPU 0, GPU 540 (MiB)
terminate called after throwing an instance of 'std::invalid_argument'
what(): stoi
Aborted (core dumped)
```
Debugged and found that it fails at this line: https://github.com/NVIDIA/Torch-TensorRT/blob/c95229144432d96f4bdaa71fb1d242242d42bc29/core/runtime/TRTEngine.cpp#L63
## To Reproduce
Mask R-CNN model from detectron2.
## Expected behavior
We should figure out why a number cannot be parsed from this engine.
--- END ISSUE ---
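
An illustrative sketch of the failure mode described above (an editorial addition, not part of the original issue): the runtime apparently derives an input/output index from each TensorRT binding name and expects names of the form `[symbol].[index]` (e.g. `x.0`), which is the convention documented in the fix further down in this record. A binding whose name carries no trailing numeric index would make the `std::stoi` call throw `std::invalid_argument`, matching the reported abort. The helper below is hypothetical and only mirrors that parse in Python.

```python
# Hypothetical sketch mirroring the assumed index parse in TRTEngine.cpp; not the actual C++ code.
def parse_binding_index(binding_name: str) -> int:
    # Expected binding name format per the fix: "<symbol>.<index>", e.g. "x.0" -> index 0
    _symbol, _, index = binding_name.rpartition(".")
    if not index.isdigit():
        # Analogous to std::stoi raising std::invalid_argument for names like "input_tensor"
        raise ValueError(f"no numeric index in binding name '{binding_name}'")
    return int(index)

assert parse_binding_index("x.0") == 0
assert parse_binding_index("y.2") == 2
# parse_binding_index("input_tensor")  # raises ValueError, analogous to the reported crash
```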
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py/torch_tensorrt/ts/_compiler.py`
Content:
```
1 from typing import List, Dict, Any
2 import torch
3 from torch import nn
4
5 import torch_tensorrt._C.ts as _C
6 from torch_tensorrt import _enums
7 from torch_tensorrt.ts._compile_spec import _parse_compile_spec, _parse_device
8 from torch_tensorrt._Device import Device
9 from types import FunctionType
10
11
12 def compile(module: torch.jit.ScriptModule,
13 inputs=[],
14 device=Device._current_device(),
15 disable_tf32=False,
16 sparse_weights=False,
17 enabled_precisions=set(),
18 refit=False,
19 debug=False,
20 capability=_enums.EngineCapability.default,
21 num_min_timing_iters=2,
22 num_avg_timing_iters=1,
23 workspace_size=0,
24 calibrator=None,
25 truncate_long_and_double=False,
26 require_full_compilation=False,
27 min_block_size=3,
28 torch_executed_ops=[],
29 torch_executed_modules=[]) -> torch.jit.ScriptModule:
30 """Compile a TorchScript module for NVIDIA GPUs using TensorRT
31
32 Takes a existing TorchScript module and a set of settings to configure the compiler
33 and will convert methods to JIT Graphs which call equivalent TensorRT engines
34
35 Converts specifically the forward method of a TorchScript Module
36
37 Arguments:
38 module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch
39 ``torch.nn.Module``
40
41 Keyword Arguments:
42 inputs (List[Union(torch_tensorrt.Input, torch.Tensor)]): **Required** List of specifications of input shape, dtype and memory layout for inputs to the module. This argument is required. Input Sizes can be specified as torch sizes, tuples or lists. dtypes can be specified using
43 torch datatypes or torch_tensorrt datatypes and you can use either torch devices or the torch_tensorrt device type enum
44 to select device type. ::
45
46 input=[
47 torch_tensorrt.Input((1, 3, 224, 224)), # Static NCHW input shape for input #1
48 torch_tensorrt.Input(
49 min_shape=(1, 224, 224, 3),
50 opt_shape=(1, 512, 512, 3),
51 max_shape=(1, 1024, 1024, 3),
52 dtype=torch.int32
53 format=torch.channel_last
54 ), # Dynamic input shape for input #2
55 torch.randn((1, 3, 224, 244)) # Use an example tensor and let torch_tensorrt infer settings
56 ]
57
58 device (Union(torch_tensorrt.Device, torch.device, dict)): Target device for TensorRT engines to run on ::
59
60 device=torch_tensorrt.Device("dla:1", allow_gpu_fallback=True)
61
62 disable_tf32 (bool): Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas
63 sparse_weights (bool): Enable sparsity for convolution and fully connected layers.
64 enabled_precision (Set(Union(torch.dtype, torch_tensorrt.dtype))): The set of datatypes that TensorRT can use when selecting kernels
65 refit (bool): Enable refitting
66 debug (bool): Enable debuggable engine
67 capability (torch_tensorrt.EngineCapability): Restrict kernel selection to safe gpu kernels or safe dla kernels
68 num_min_timing_iters (int): Number of minimization timing iterations used to select kernels
69 num_avg_timing_iters (int): Number of averaging timing iterations used to select kernels
70 workspace_size (int): Maximum size of workspace given to TensorRT
71 truncate_long_and_double (bool): Truncate weights provided in int64 or double (float64) to int32 and float32
72 calibrator (Union(torch_tensorrt._C.IInt8Calibrator, tensorrt.IInt8Calibrator)): Calibrator object which will provide data to the PTQ system for INT8 Calibration
73 require_full_compilation (bool): Require modules to be compiled end to end or return an error as opposed to returning a hybrid graph where operations that cannot be run in TensorRT are run in PyTorch
74 min_block_size (int): The minimum number of contiguous TensorRT convertable operations in order to run a set of operations in TensorRT
75 torch_executed_ops (List[str]): List of aten operators that must be run in PyTorch. An error will be thrown if this list is not empty but ``require_full_compilation`` is True
76 torch_executed_modules (List[str]): List of modules that must be run in PyTorch. An error will be thrown if this list is not empty but ``require_full_compilation`` is True
77
78 Returns:
79 torch.jit.ScriptModule: Compiled TorchScript Module, when run it will execute via TensorRT
80 """
81
82 if isinstance(module, torch.jit.ScriptFunction):
83 raise TypeError(
84 "torch.jit.ScriptFunction currently is not directly supported, wrap the function in a module to compile")
85
86 if require_full_compilation and (len(torch_executed_modules) > 0 or len(torch_executed_ops) > 0):
87 raise ValueError(
88 "require_full_compilation is enabled however the list of modules and ops to run in torch is not empty. Found: torch_executed_ops: "
89 + torch_executed_ops + ", torch_executed_modules: " + torch_executed_modules)
90
91 spec = {
92 "inputs": inputs,
93 "device": device,
94 "disable_tf32": disable_tf32, # Force FP32 layers to use traditional as FP32 format
95 "sparse_weights": sparse_weights, #Enable sparsity for convolution and fully connected layers.
96 "enabled_precisions": enabled_precisions, # Enabling FP16 kernels
97 "refit": refit, # enable refit
98 "debug": debug, # enable debuggable engine
99 "capability": capability, # Restrict kernel selection to safe gpu kernels or safe dla kernels
100 "num_min_timing_iters": num_min_timing_iters, # Number of minimization timing iterations used to select kernels
101 "num_avg_timing_iters": num_avg_timing_iters, # Number of averaging timing iterations used to select kernels
102 "workspace_size": workspace_size, # Maximum size of workspace given to TensorRT
103 "calibrator": calibrator,
104 "truncate_long_and_double": truncate_long_and_double,
105 "torch_fallback": {
106 "enabled": not require_full_compilation,
107 "forced_fallback_ops": torch_executed_ops,
108 "forced_fallback_modules": torch_executed_modules,
109 "min_block_size": min_block_size
110 }
111 }
112
113 compiled_cpp_mod = _C.compile_graph(module._c, _parse_compile_spec(spec))
114 compiled_module = torch.jit._recursive.wrap_cpp_module(compiled_cpp_mod)
115 return compiled_module
116
117
118 def convert_method_to_trt_engine(module: torch.jit.ScriptModule,
119 method_name: str,
120 inputs=[],
121 device=Device._current_device(),
122 disable_tf32=False,
123 sparse_weights=False,
124 enabled_precisions=set(),
125 refit=False,
126 debug=False,
127 capability=_enums.EngineCapability.default,
128 num_min_timing_iters=2,
129 num_avg_timing_iters=1,
130 workspace_size=0,
131 truncate_long_and_double=False,
132 calibrator=None) -> str:
133 """Convert a TorchScript module method to a serialized TensorRT engine
134
135 Converts a specified method of a module to a serialized TensorRT engine given a dictionary of conversion settings
136
137 Arguments:
138 module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch
139 ``torch.nn.Module``
140 method_name (str): Name of method to convert
141
142 Keyword Args:
143 inputs (List[Union(torch_tensorrt.Input, torch.Tensor)]): **Required** List of specifications of input shape, dtype and memory layout for inputs to the module. This argument is required. Input Sizes can be specified as torch sizes, tuples or lists. dtypes can be specified using
144 torch datatypes or torch_tensorrt datatypes and you can use either torch devices or the torch_tensorrt device type enum
145 to select device type. ::
146
147 input=[
148 torch_tensorrt.Input((1, 3, 224, 224)), # Static NCHW input shape for input #1
149 torch_tensorrt.Input(
150 min_shape=(1, 224, 224, 3),
151 opt_shape=(1, 512, 512, 3),
152 max_shape=(1, 1024, 1024, 3),
153 dtype=torch.int32
154 format=torch.channel_last
155 ), # Dynamic input shape for input #2
156 torch.randn((1, 3, 224, 244)) # Use an example tensor and let torch_tensorrt infer settings
157 ]
158
159 device (Union(torch_tensorrt.Device, torch.device, dict)): Target device for TensorRT engines to run on ::
160
161 device=torch_tensorrt.Device("dla:1", allow_gpu_fallback=True)
162
163 disable_tf32 (bool): Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas
164 sparse_weights (bool): Enable sparsity for convolution and fully connected layers.
165 enabled_precision (Set(Union(torch.dtype, torch_tensorrt.dtype))): The set of datatypes that TensorRT can use when selecting kernels
166 refit (bool): Enable refitting
167 debug (bool): Enable debuggable engine
168 capability (torch_tensorrt.EngineCapability): Restrict kernel selection to safe gpu kernels or safe dla kernels
169 num_min_timing_iters (int): Number of minimization timing iterations used to select kernels
170 num_avg_timing_iters (int): Number of averaging timing iterations used to select kernels
171 workspace_size (int): Maximum size of workspace given to TensorRT
172 truncate_long_and_double (bool): Truncate weights provided in int64 or double (float64) to int32 and float32
173 calibrator (Union(torch_tensorrt._C.IInt8Calibrator, tensorrt.IInt8Calibrator)): Calibrator object which will provide data to the PTQ system for INT8 Calibration
174
175 Returns:
176 bytes: Serialized TensorRT engine, can either be saved to a file or deserialized via TensorRT APIs
177 """
178 if isinstance(module, torch.jit.ScriptFunction):
179 raise TypeError(
180 "torch.jit.ScriptFunctions currently are not directly supported, wrap the function in a module to compile")
181
182 compile_spec = {
183 "inputs": inputs,
184 "device": device,
185 "disable_tf32":
186 disable_tf32, # Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas
187 "sparse_weights": sparse_weights, #Enable sparsity for convolution and fully connected layers.
188 "enabled_precisions": enabled_precisions, # Enabling FP16 kernels
189 "refit": refit, # enable refit
190 "debug": debug, # enable debuggable engine
191 "capability": capability, # Restrict kernel selection to safe gpu kernels or safe dla kernels
192 "num_min_timing_iters": num_min_timing_iters, # Number of minimization timing iterations used to select kernels
193 "num_avg_timing_iters": num_avg_timing_iters, # Number of averaging timing iterations used to select kernels
194 "workspace_size": workspace_size, # Maximum size of workspace given to TensorRT
195 "calibrator": calibrator,
196 "truncate_long_and_double": truncate_long_and_double
197 }
198
199 return _C.convert_graph_to_trt_engine(module._c, method_name, _parse_compile_spec(compile_spec))
200
201
202 def embed_engine_in_new_module(serialized_engine: bytes, device=Device._current_device()) -> torch.jit.ScriptModule:
203 """Takes a pre-built serialized TensorRT engine and embeds it within a TorchScript module
204
205 Takes a pre-built serialied TensorRT engine (as bytes) and embeds it within a TorchScript module.
206 Registers the forward method to execute the TensorRT engine with the function signature:
207
208 forward(Tensor[]) -> Tensor[]
209
210 Module can be save with engine embedded with torch.jit.save and moved / loaded according to torch_tensorrt portability rules
211
212 Arguments:
213 serialized_engine (bytes): Serialized TensorRT engine from either torch_tensorrt or TensorRT APIs
214
215 Keyword Arguments:
216 device (Union(torch_tensorrt.Device, torch.device, dict)): Target device to run engine on. Must be compatible with engine provided. Default: Current active device
217
218 Returns:
219 torch.jit.ScriptModule: New TorchScript module with engine embedded
220 """
221 cpp_mod = _C.embed_engine_in_new_module(serialized_engine, _parse_device(device))
222 return torch.jit._recursive.wrap_cpp_module(cpp_mod)
223
224
225 def check_method_op_support(module: torch.jit.ScriptModule, method_name: str) -> bool:
226 """Checks to see if a method is fully supported by torch_tensorrt
227
228 Checks if a method of a TorchScript module can be compiled by torch_tensorrt, if not, a list of operators
229 that are not supported are printed out and the function returns false, else true.
230
231 Arguments:
232 module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch
233 ``torch.nn.Module``
234 method_name (str): Name of method to check
235
236 Returns:
237 bool: True if supported Method
238 """
239 return _C.check_method_op_support(module._c, method_name)
240
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/py/torch_tensorrt/ts/_compiler.py b/py/torch_tensorrt/ts/_compiler.py
--- a/py/torch_tensorrt/ts/_compiler.py
+++ b/py/torch_tensorrt/ts/_compiler.py
@@ -207,6 +207,11 @@
forward(Tensor[]) -> Tensor[]
+ TensorRT bindings must have names with the following format:
+ - [symbol].[index in input / output array]
+ ex.
+ - [x.0, x.1, x.2] -> [y.0]
+
Module can be save with engine embedded with torch.jit.save and moved / loaded according to torch_tensorrt portability rules
Arguments:
| {"golden_diff": "diff --git a/py/torch_tensorrt/ts/_compiler.py b/py/torch_tensorrt/ts/_compiler.py\n--- a/py/torch_tensorrt/ts/_compiler.py\n+++ b/py/torch_tensorrt/ts/_compiler.py\n@@ -207,6 +207,11 @@\n \n forward(Tensor[]) -> Tensor[]\n \n+ TensorRT bindings must have names with the following format:\n+ - [symbol].[index in input / output array]\n+ ex.\n+ - [x.0, x.1, x.2] -> [y.0]\n+\n Module can be save with engine embedded with torch.jit.save and moved / loaded according to torch_tensorrt portability rules\n \n Arguments:\n", "issue": "\ud83d\udc1b [Bug] Failed to use stoi to get the engine ID in TRTEngine function\n## Bug Description\r\n\r\nGot this error: \r\n```\r\nINFO: [Torch-TensorRT - Debug Build] - [MemUsageChange] TensorRT-managed allocation in IExecutionContext creation: CPU +0, GPU +132, now: CPU 0, GPU 540 (MiB)\r\nterminate called after throwing an instance of 'std::invalid_argument'\r\n what(): stoi\r\nAborted (core dumped)\r\n```\r\n\r\nDebugged and found that it fails at this line: https://github.com/NVIDIA/Torch-TensorRT/blob/c95229144432d96f4bdaa71fb1d242242d42bc29/core/runtime/TRTEngine.cpp#L63 \r\n\r\n## To Reproduce\r\n\r\n MaskRCNN model from detectron2. \r\n\r\n## Expected behavior\r\n\r\nshould figure out why we cannot find a number from this engine. \r\n\r\n\r\n\n", "before_files": [{"content": "from typing import List, Dict, Any\nimport torch\nfrom torch import nn\n\nimport torch_tensorrt._C.ts as _C\nfrom torch_tensorrt import _enums\nfrom torch_tensorrt.ts._compile_spec import _parse_compile_spec, _parse_device\nfrom torch_tensorrt._Device import Device\nfrom types import FunctionType\n\n\ndef compile(module: torch.jit.ScriptModule,\n inputs=[],\n device=Device._current_device(),\n disable_tf32=False,\n sparse_weights=False,\n enabled_precisions=set(),\n refit=False,\n debug=False,\n capability=_enums.EngineCapability.default,\n num_min_timing_iters=2,\n num_avg_timing_iters=1,\n workspace_size=0,\n calibrator=None,\n truncate_long_and_double=False,\n require_full_compilation=False,\n min_block_size=3,\n torch_executed_ops=[],\n torch_executed_modules=[]) -> torch.jit.ScriptModule:\n \"\"\"Compile a TorchScript module for NVIDIA GPUs using TensorRT\n\n Takes a existing TorchScript module and a set of settings to configure the compiler\n and will convert methods to JIT Graphs which call equivalent TensorRT engines\n\n Converts specifically the forward method of a TorchScript Module\n\n Arguments:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n\n Keyword Arguments:\n inputs (List[Union(torch_tensorrt.Input, torch.Tensor)]): **Required** List of specifications of input shape, dtype and memory layout for inputs to the module. This argument is required. Input Sizes can be specified as torch sizes, tuples or lists. dtypes can be specified using\n torch datatypes or torch_tensorrt datatypes and you can use either torch devices or the torch_tensorrt device type enum\n to select device type. 
::\n\n input=[\n torch_tensorrt.Input((1, 3, 224, 224)), # Static NCHW input shape for input #1\n torch_tensorrt.Input(\n min_shape=(1, 224, 224, 3),\n opt_shape=(1, 512, 512, 3),\n max_shape=(1, 1024, 1024, 3),\n dtype=torch.int32\n format=torch.channel_last\n ), # Dynamic input shape for input #2\n torch.randn((1, 3, 224, 244)) # Use an example tensor and let torch_tensorrt infer settings\n ]\n\n device (Union(torch_tensorrt.Device, torch.device, dict)): Target device for TensorRT engines to run on ::\n\n device=torch_tensorrt.Device(\"dla:1\", allow_gpu_fallback=True)\n\n disable_tf32 (bool): Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas\n sparse_weights (bool): Enable sparsity for convolution and fully connected layers.\n enabled_precision (Set(Union(torch.dtype, torch_tensorrt.dtype))): The set of datatypes that TensorRT can use when selecting kernels\n refit (bool): Enable refitting\n debug (bool): Enable debuggable engine\n capability (torch_tensorrt.EngineCapability): Restrict kernel selection to safe gpu kernels or safe dla kernels\n num_min_timing_iters (int): Number of minimization timing iterations used to select kernels\n num_avg_timing_iters (int): Number of averaging timing iterations used to select kernels\n workspace_size (int): Maximum size of workspace given to TensorRT\n truncate_long_and_double (bool): Truncate weights provided in int64 or double (float64) to int32 and float32\n calibrator (Union(torch_tensorrt._C.IInt8Calibrator, tensorrt.IInt8Calibrator)): Calibrator object which will provide data to the PTQ system for INT8 Calibration\n require_full_compilation (bool): Require modules to be compiled end to end or return an error as opposed to returning a hybrid graph where operations that cannot be run in TensorRT are run in PyTorch\n min_block_size (int): The minimum number of contiguous TensorRT convertable operations in order to run a set of operations in TensorRT\n torch_executed_ops (List[str]): List of aten operators that must be run in PyTorch. An error will be thrown if this list is not empty but ``require_full_compilation`` is True\n torch_executed_modules (List[str]): List of modules that must be run in PyTorch. An error will be thrown if this list is not empty but ``require_full_compilation`` is True\n\n Returns:\n torch.jit.ScriptModule: Compiled TorchScript Module, when run it will execute via TensorRT\n \"\"\"\n\n if isinstance(module, torch.jit.ScriptFunction):\n raise TypeError(\n \"torch.jit.ScriptFunction currently is not directly supported, wrap the function in a module to compile\")\n\n if require_full_compilation and (len(torch_executed_modules) > 0 or len(torch_executed_ops) > 0):\n raise ValueError(\n \"require_full_compilation is enabled however the list of modules and ops to run in torch is not empty. 
Found: torch_executed_ops: \"\n + torch_executed_ops + \", torch_executed_modules: \" + torch_executed_modules)\n\n spec = {\n \"inputs\": inputs,\n \"device\": device,\n \"disable_tf32\": disable_tf32, # Force FP32 layers to use traditional as FP32 format\n \"sparse_weights\": sparse_weights, #Enable sparsity for convolution and fully connected layers.\n \"enabled_precisions\": enabled_precisions, # Enabling FP16 kernels\n \"refit\": refit, # enable refit\n \"debug\": debug, # enable debuggable engine\n \"capability\": capability, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": num_min_timing_iters, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": num_avg_timing_iters, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": workspace_size, # Maximum size of workspace given to TensorRT\n \"calibrator\": calibrator,\n \"truncate_long_and_double\": truncate_long_and_double,\n \"torch_fallback\": {\n \"enabled\": not require_full_compilation,\n \"forced_fallback_ops\": torch_executed_ops,\n \"forced_fallback_modules\": torch_executed_modules,\n \"min_block_size\": min_block_size\n }\n }\n\n compiled_cpp_mod = _C.compile_graph(module._c, _parse_compile_spec(spec))\n compiled_module = torch.jit._recursive.wrap_cpp_module(compiled_cpp_mod)\n return compiled_module\n\n\ndef convert_method_to_trt_engine(module: torch.jit.ScriptModule,\n method_name: str,\n inputs=[],\n device=Device._current_device(),\n disable_tf32=False,\n sparse_weights=False,\n enabled_precisions=set(),\n refit=False,\n debug=False,\n capability=_enums.EngineCapability.default,\n num_min_timing_iters=2,\n num_avg_timing_iters=1,\n workspace_size=0,\n truncate_long_and_double=False,\n calibrator=None) -> str:\n \"\"\"Convert a TorchScript module method to a serialized TensorRT engine\n\n Converts a specified method of a module to a serialized TensorRT engine given a dictionary of conversion settings\n\n Arguments:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n method_name (str): Name of method to convert\n\n Keyword Args:\n inputs (List[Union(torch_tensorrt.Input, torch.Tensor)]): **Required** List of specifications of input shape, dtype and memory layout for inputs to the module. This argument is required. Input Sizes can be specified as torch sizes, tuples or lists. dtypes can be specified using\n torch datatypes or torch_tensorrt datatypes and you can use either torch devices or the torch_tensorrt device type enum\n to select device type. 
::\n\n input=[\n torch_tensorrt.Input((1, 3, 224, 224)), # Static NCHW input shape for input #1\n torch_tensorrt.Input(\n min_shape=(1, 224, 224, 3),\n opt_shape=(1, 512, 512, 3),\n max_shape=(1, 1024, 1024, 3),\n dtype=torch.int32\n format=torch.channel_last\n ), # Dynamic input shape for input #2\n torch.randn((1, 3, 224, 244)) # Use an example tensor and let torch_tensorrt infer settings\n ]\n\n device (Union(torch_tensorrt.Device, torch.device, dict)): Target device for TensorRT engines to run on ::\n\n device=torch_tensorrt.Device(\"dla:1\", allow_gpu_fallback=True)\n\n disable_tf32 (bool): Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas\n sparse_weights (bool): Enable sparsity for convolution and fully connected layers.\n enabled_precision (Set(Union(torch.dtype, torch_tensorrt.dtype))): The set of datatypes that TensorRT can use when selecting kernels\n refit (bool): Enable refitting\n debug (bool): Enable debuggable engine\n capability (torch_tensorrt.EngineCapability): Restrict kernel selection to safe gpu kernels or safe dla kernels\n num_min_timing_iters (int): Number of minimization timing iterations used to select kernels\n num_avg_timing_iters (int): Number of averaging timing iterations used to select kernels\n workspace_size (int): Maximum size of workspace given to TensorRT\n truncate_long_and_double (bool): Truncate weights provided in int64 or double (float64) to int32 and float32\n calibrator (Union(torch_tensorrt._C.IInt8Calibrator, tensorrt.IInt8Calibrator)): Calibrator object which will provide data to the PTQ system for INT8 Calibration\n\n Returns:\n bytes: Serialized TensorRT engine, can either be saved to a file or deserialized via TensorRT APIs\n \"\"\"\n if isinstance(module, torch.jit.ScriptFunction):\n raise TypeError(\n \"torch.jit.ScriptFunctions currently are not directly supported, wrap the function in a module to compile\")\n\n compile_spec = {\n \"inputs\": inputs,\n \"device\": device,\n \"disable_tf32\":\n disable_tf32, # Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas\n \"sparse_weights\": sparse_weights, #Enable sparsity for convolution and fully connected layers.\n \"enabled_precisions\": enabled_precisions, # Enabling FP16 kernels\n \"refit\": refit, # enable refit\n \"debug\": debug, # enable debuggable engine\n \"capability\": capability, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": num_min_timing_iters, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": num_avg_timing_iters, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": workspace_size, # Maximum size of workspace given to TensorRT\n \"calibrator\": calibrator,\n \"truncate_long_and_double\": truncate_long_and_double\n }\n\n return _C.convert_graph_to_trt_engine(module._c, method_name, _parse_compile_spec(compile_spec))\n\n\ndef embed_engine_in_new_module(serialized_engine: bytes, device=Device._current_device()) -> torch.jit.ScriptModule:\n \"\"\"Takes a pre-built serialized TensorRT engine and embeds it within a TorchScript module\n\n Takes a pre-built serialied TensorRT engine (as bytes) and embeds it within a TorchScript module.\n Registers the forward method to execute the TensorRT engine 
with the function signature:\n\n forward(Tensor[]) -> Tensor[]\n\n Module can be save with engine embedded with torch.jit.save and moved / loaded according to torch_tensorrt portability rules\n\n Arguments:\n serialized_engine (bytes): Serialized TensorRT engine from either torch_tensorrt or TensorRT APIs\n\n Keyword Arguments:\n device (Union(torch_tensorrt.Device, torch.device, dict)): Target device to run engine on. Must be compatible with engine provided. Default: Current active device\n\n Returns:\n torch.jit.ScriptModule: New TorchScript module with engine embedded\n \"\"\"\n cpp_mod = _C.embed_engine_in_new_module(serialized_engine, _parse_device(device))\n return torch.jit._recursive.wrap_cpp_module(cpp_mod)\n\n\ndef check_method_op_support(module: torch.jit.ScriptModule, method_name: str) -> bool:\n \"\"\"Checks to see if a method is fully supported by torch_tensorrt\n\n Checks if a method of a TorchScript module can be compiled by torch_tensorrt, if not, a list of operators\n that are not supported are printed out and the function returns false, else true.\n\n Arguments:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n method_name (str): Name of method to check\n\n Returns:\n bool: True if supported Method\n \"\"\"\n return _C.check_method_op_support(module._c, method_name)\n", "path": "py/torch_tensorrt/ts/_compiler.py"}], "after_files": [{"content": "from typing import List, Dict, Any\nimport torch\nfrom torch import nn\n\nimport torch_tensorrt._C.ts as _C\nfrom torch_tensorrt import _enums\nfrom torch_tensorrt.ts._compile_spec import _parse_compile_spec, _parse_device\nfrom torch_tensorrt._Device import Device\nfrom types import FunctionType\n\n\ndef compile(module: torch.jit.ScriptModule,\n inputs=[],\n device=Device._current_device(),\n disable_tf32=False,\n sparse_weights=False,\n enabled_precisions=set(),\n refit=False,\n debug=False,\n capability=_enums.EngineCapability.default,\n num_min_timing_iters=2,\n num_avg_timing_iters=1,\n workspace_size=0,\n calibrator=None,\n truncate_long_and_double=False,\n require_full_compilation=False,\n min_block_size=3,\n torch_executed_ops=[],\n torch_executed_modules=[]) -> torch.jit.ScriptModule:\n \"\"\"Compile a TorchScript module for NVIDIA GPUs using TensorRT\n\n Takes a existing TorchScript module and a set of settings to configure the compiler\n and will convert methods to JIT Graphs which call equivalent TensorRT engines\n\n Converts specifically the forward method of a TorchScript Module\n\n Arguments:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n\n Keyword Arguments:\n inputs (List[Union(torch_tensorrt.Input, torch.Tensor)]): **Required** List of specifications of input shape, dtype and memory layout for inputs to the module. This argument is required. Input Sizes can be specified as torch sizes, tuples or lists. dtypes can be specified using\n torch datatypes or torch_tensorrt datatypes and you can use either torch devices or the torch_tensorrt device type enum\n to select device type. 
::\n\n input=[\n torch_tensorrt.Input((1, 3, 224, 224)), # Static NCHW input shape for input #1\n torch_tensorrt.Input(\n min_shape=(1, 224, 224, 3),\n opt_shape=(1, 512, 512, 3),\n max_shape=(1, 1024, 1024, 3),\n dtype=torch.int32\n format=torch.channel_last\n ), # Dynamic input shape for input #2\n torch.randn((1, 3, 224, 244)) # Use an example tensor and let torch_tensorrt infer settings\n ]\n\n device (Union(torch_tensorrt.Device, torch.device, dict)): Target device for TensorRT engines to run on ::\n\n device=torch_tensorrt.Device(\"dla:1\", allow_gpu_fallback=True)\n\n disable_tf32 (bool): Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas\n sparse_weights (bool): Enable sparsity for convolution and fully connected layers.\n enabled_precision (Set(Union(torch.dtype, torch_tensorrt.dtype))): The set of datatypes that TensorRT can use when selecting kernels\n refit (bool): Enable refitting\n debug (bool): Enable debuggable engine\n capability (torch_tensorrt.EngineCapability): Restrict kernel selection to safe gpu kernels or safe dla kernels\n num_min_timing_iters (int): Number of minimization timing iterations used to select kernels\n num_avg_timing_iters (int): Number of averaging timing iterations used to select kernels\n workspace_size (int): Maximum size of workspace given to TensorRT\n truncate_long_and_double (bool): Truncate weights provided in int64 or double (float64) to int32 and float32\n calibrator (Union(torch_tensorrt._C.IInt8Calibrator, tensorrt.IInt8Calibrator)): Calibrator object which will provide data to the PTQ system for INT8 Calibration\n require_full_compilation (bool): Require modules to be compiled end to end or return an error as opposed to returning a hybrid graph where operations that cannot be run in TensorRT are run in PyTorch\n min_block_size (int): The minimum number of contiguous TensorRT convertable operations in order to run a set of operations in TensorRT\n torch_executed_ops (List[str]): List of aten operators that must be run in PyTorch. An error will be thrown if this list is not empty but ``require_full_compilation`` is True\n torch_executed_modules (List[str]): List of modules that must be run in PyTorch. An error will be thrown if this list is not empty but ``require_full_compilation`` is True\n\n Returns:\n torch.jit.ScriptModule: Compiled TorchScript Module, when run it will execute via TensorRT\n \"\"\"\n\n if isinstance(module, torch.jit.ScriptFunction):\n raise TypeError(\n \"torch.jit.ScriptFunction currently is not directly supported, wrap the function in a module to compile\")\n\n if require_full_compilation and (len(torch_executed_modules) > 0 or len(torch_executed_ops) > 0):\n raise ValueError(\n \"require_full_compilation is enabled however the list of modules and ops to run in torch is not empty. 
Found: torch_executed_ops: \"\n + torch_executed_ops + \", torch_executed_modules: \" + torch_executed_modules)\n\n spec = {\n \"inputs\": inputs,\n \"device\": device,\n \"disable_tf32\": disable_tf32, # Force FP32 layers to use traditional as FP32 format\n \"sparse_weights\": sparse_weights, #Enable sparsity for convolution and fully connected layers.\n \"enabled_precisions\": enabled_precisions, # Enabling FP16 kernels\n \"refit\": refit, # enable refit\n \"debug\": debug, # enable debuggable engine\n \"capability\": capability, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": num_min_timing_iters, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": num_avg_timing_iters, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": workspace_size, # Maximum size of workspace given to TensorRT\n \"calibrator\": calibrator,\n \"truncate_long_and_double\": truncate_long_and_double,\n \"torch_fallback\": {\n \"enabled\": not require_full_compilation,\n \"forced_fallback_ops\": torch_executed_ops,\n \"forced_fallback_modules\": torch_executed_modules,\n \"min_block_size\": min_block_size\n }\n }\n\n compiled_cpp_mod = _C.compile_graph(module._c, _parse_compile_spec(spec))\n compiled_module = torch.jit._recursive.wrap_cpp_module(compiled_cpp_mod)\n return compiled_module\n\n\ndef convert_method_to_trt_engine(module: torch.jit.ScriptModule,\n method_name: str,\n inputs=[],\n device=Device._current_device(),\n disable_tf32=False,\n sparse_weights=False,\n enabled_precisions=set(),\n refit=False,\n debug=False,\n capability=_enums.EngineCapability.default,\n num_min_timing_iters=2,\n num_avg_timing_iters=1,\n workspace_size=0,\n truncate_long_and_double=False,\n calibrator=None) -> str:\n \"\"\"Convert a TorchScript module method to a serialized TensorRT engine\n\n Converts a specified method of a module to a serialized TensorRT engine given a dictionary of conversion settings\n\n Arguments:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n method_name (str): Name of method to convert\n\n Keyword Args:\n inputs (List[Union(torch_tensorrt.Input, torch.Tensor)]): **Required** List of specifications of input shape, dtype and memory layout for inputs to the module. This argument is required. Input Sizes can be specified as torch sizes, tuples or lists. dtypes can be specified using\n torch datatypes or torch_tensorrt datatypes and you can use either torch devices or the torch_tensorrt device type enum\n to select device type. 
::\n\n input=[\n torch_tensorrt.Input((1, 3, 224, 224)), # Static NCHW input shape for input #1\n torch_tensorrt.Input(\n min_shape=(1, 224, 224, 3),\n opt_shape=(1, 512, 512, 3),\n max_shape=(1, 1024, 1024, 3),\n dtype=torch.int32\n format=torch.channel_last\n ), # Dynamic input shape for input #2\n torch.randn((1, 3, 224, 244)) # Use an example tensor and let torch_tensorrt infer settings\n ]\n\n device (Union(torch_tensorrt.Device, torch.device, dict)): Target device for TensorRT engines to run on ::\n\n device=torch_tensorrt.Device(\"dla:1\", allow_gpu_fallback=True)\n\n disable_tf32 (bool): Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas\n sparse_weights (bool): Enable sparsity for convolution and fully connected layers.\n enabled_precision (Set(Union(torch.dtype, torch_tensorrt.dtype))): The set of datatypes that TensorRT can use when selecting kernels\n refit (bool): Enable refitting\n debug (bool): Enable debuggable engine\n capability (torch_tensorrt.EngineCapability): Restrict kernel selection to safe gpu kernels or safe dla kernels\n num_min_timing_iters (int): Number of minimization timing iterations used to select kernels\n num_avg_timing_iters (int): Number of averaging timing iterations used to select kernels\n workspace_size (int): Maximum size of workspace given to TensorRT\n truncate_long_and_double (bool): Truncate weights provided in int64 or double (float64) to int32 and float32\n calibrator (Union(torch_tensorrt._C.IInt8Calibrator, tensorrt.IInt8Calibrator)): Calibrator object which will provide data to the PTQ system for INT8 Calibration\n\n Returns:\n bytes: Serialized TensorRT engine, can either be saved to a file or deserialized via TensorRT APIs\n \"\"\"\n if isinstance(module, torch.jit.ScriptFunction):\n raise TypeError(\n \"torch.jit.ScriptFunctions currently are not directly supported, wrap the function in a module to compile\")\n\n compile_spec = {\n \"inputs\": inputs,\n \"device\": device,\n \"disable_tf32\":\n disable_tf32, # Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas\n \"sparse_weights\": sparse_weights, #Enable sparsity for convolution and fully connected layers.\n \"enabled_precisions\": enabled_precisions, # Enabling FP16 kernels\n \"refit\": refit, # enable refit\n \"debug\": debug, # enable debuggable engine\n \"capability\": capability, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": num_min_timing_iters, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": num_avg_timing_iters, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": workspace_size, # Maximum size of workspace given to TensorRT\n \"calibrator\": calibrator,\n \"truncate_long_and_double\": truncate_long_and_double\n }\n\n return _C.convert_graph_to_trt_engine(module._c, method_name, _parse_compile_spec(compile_spec))\n\n\ndef embed_engine_in_new_module(serialized_engine: bytes, device=Device._current_device()) -> torch.jit.ScriptModule:\n \"\"\"Takes a pre-built serialized TensorRT engine and embeds it within a TorchScript module\n\n Takes a pre-built serialied TensorRT engine (as bytes) and embeds it within a TorchScript module.\n Registers the forward method to execute the TensorRT engine 
with the function signature:\n\n forward(Tensor[]) -> Tensor[]\n\n TensorRT bindings must have names with the following format:\n - [symbol].[index in input / output array]\n ex.\n - [x.0, x.1, x.2] -> [y.0]\n\n Module can be save with engine embedded with torch.jit.save and moved / loaded according to torch_tensorrt portability rules\n\n Arguments:\n serialized_engine (bytes): Serialized TensorRT engine from either torch_tensorrt or TensorRT APIs\n\n Keyword Arguments:\n device (Union(torch_tensorrt.Device, torch.device, dict)): Target device to run engine on. Must be compatible with engine provided. Default: Current active device\n\n Returns:\n torch.jit.ScriptModule: New TorchScript module with engine embedded\n \"\"\"\n cpp_mod = _C.embed_engine_in_new_module(serialized_engine, _parse_device(device))\n return torch.jit._recursive.wrap_cpp_module(cpp_mod)\n\n\ndef check_method_op_support(module: torch.jit.ScriptModule, method_name: str) -> bool:\n \"\"\"Checks to see if a method is fully supported by torch_tensorrt\n\n Checks if a method of a TorchScript module can be compiled by torch_tensorrt, if not, a list of operators\n that are not supported are printed out and the function returns false, else true.\n\n Arguments:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n method_name (str): Name of method to check\n\n Returns:\n bool: True if supported Method\n \"\"\"\n return _C.check_method_op_support(module._c, method_name)\n", "path": "py/torch_tensorrt/ts/_compiler.py"}]} | 4,087 | 155 |
gh_patches_debug_17483 | rasdani/github-patches | git_diff | networkx__networkx-3110 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
eigenvector_centrality() docstring needs rewording
The docstring for eigenvector_centrality() refers to the eigenvector centrality of a node i and then proceeds to give an equation for the vector of centralities of all nodes. Suggest rewording similar to the katz_centrality() method.
--- END ISSUE ---
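
For context (an editorial sketch, not part of the issue or of the proposed rewording): the quantity the docstring describes is the dominant eigenvector $x$ of the adjacency matrix $A$, i.e. the solution of $Ax = \lambda x$ for the largest eigenvalue $\lambda$. A minimal NumPy check of that definition against the numbers quoted in the docstring's example might look like the following; the normalization (unit Euclidean length, non-negative entries) is assumed to match the one used in the library code shown below.

```python
# Minimal sketch of the definition under discussion; assumes NumPy and networkx are available.
import numpy as np
import networkx as nx

G = nx.path_graph(4)
A = nx.to_numpy_array(G)                      # adjacency matrix of the path graph
eigvals, eigvecs = np.linalg.eig(A)
x = eigvecs[:, np.argmax(eigvals.real)].real  # eigenvector for the largest eigenvalue
x = np.abs(x) / np.linalg.norm(x)             # fix the sign, normalize to unit length
print({n: round(v, 2) for n, v in zip(G, x)})
# -> {0: 0.37, 1: 0.6, 2: 0.6, 3: 0.37}, matching the docstring example
```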
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `networkx/algorithms/centrality/eigenvector.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (C) 2004-2018 by
3 # Aric Hagberg <[email protected]>
4 # Dan Schult <[email protected]>
5 # Pieter Swart <[email protected]>
6 # All rights reserved.
7 # BSD license.
8 #
9 # Authors:
10 # Aric Hagberg <[email protected]>
11 # Pieter Swart <[email protected]>
12 # Sasha Gutfraind <[email protected]>
13 """Functions for computing eigenvector centrality."""
14 from __future__ import division
15
16 from math import sqrt
17
18 import networkx as nx
19 from networkx.utils import not_implemented_for
20
21 __all__ = ['eigenvector_centrality', 'eigenvector_centrality_numpy']
22
23
24 @not_implemented_for('multigraph')
25 def eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None,
26 weight=None):
27 r"""Compute the eigenvector centrality for the graph `G`.
28
29 Eigenvector centrality computes the centrality for a node based on the
30 centrality of its neighbors. The eigenvector centrality for node $i$ is
31
32 .. math::
33
34 Ax = \lambda x
35
36 where $A$ is the adjacency matrix of the graph `G` with eigenvalue
37 $\lambda$. By virtue of the Perron–Frobenius theorem, there is
38 a unique and positive solution if $\lambda$ is the largest
39 eigenvalue associated with the eigenvector of the adjacency matrix
40 $A$ ([2]_).
41
42 Parameters
43 ----------
44 G : graph
45 A networkx graph
46
47 max_iter : integer, optional (default=100)
48 Maximum number of iterations in power method.
49
50 tol : float, optional (default=1.0e-6)
51 Error tolerance used to check convergence in power method iteration.
52
53 nstart : dictionary, optional (default=None)
54 Starting value of eigenvector iteration for each node.
55
56 weight : None or string, optional (default=None)
57 If None, all edge weights are considered equal.
58 Otherwise holds the name of the edge attribute used as weight.
59
60 Returns
61 -------
62 nodes : dictionary
63 Dictionary of nodes with eigenvector centrality as the value.
64
65 Examples
66 --------
67 >>> G = nx.path_graph(4)
68 >>> centrality = nx.eigenvector_centrality(G)
69 >>> sorted((v, '{:0.2f}'.format(c)) for v, c in centrality.items())
70 [(0, '0.37'), (1, '0.60'), (2, '0.60'), (3, '0.37')]
71
72 Raises
73 ------
74 NetworkXPointlessConcept
75 If the graph `G` is the null graph.
76
77 NetworkXError
78 If each value in `nstart` is zero.
79
80 PowerIterationFailedConvergence
81 If the algorithm fails to converge to the specified tolerance
82 within the specified number of iterations of the power iteration
83 method.
84
85 See Also
86 --------
87 eigenvector_centrality_numpy
88 pagerank
89 hits
90
91 Notes
92 -----
93 The measure was introduced by [1]_ and is discussed in [2]_.
94
95 The power iteration method is used to compute the eigenvector and
96 convergence is **not** guaranteed. Our method stops after ``max_iter``
97 iterations or when the change in the computed vector between two
98 iterations is smaller than an error tolerance of
99 ``G.number_of_nodes() * tol``. This implementation uses ($A + I$)
100 rather than the adjacency matrix $A$ because it shifts the spectrum
101 to enable discerning the correct eigenvector even for networks with
102 multiple dominant eigenvalues.
103
104 For directed graphs this is "left" eigenvector centrality which corresponds
105 to the in-edges in the graph. For out-edges eigenvector centrality
106 first reverse the graph with ``G.reverse()``.
107
108 References
109 ----------
110 .. [1] Phillip Bonacich.
111 "Power and Centrality: A Family of Measures."
112 *American Journal of Sociology* 92(5):1170–1182, 1986
113 <http://www.leonidzhukov.net/hse/2014/socialnetworks/papers/Bonacich-Centrality.pdf>
114 .. [2] Mark E. J. Newman.
115 *Networks: An Introduction.*
116 Oxford University Press, USA, 2010, pp. 169.
117
118 """
119 if len(G) == 0:
120 raise nx.NetworkXPointlessConcept('cannot compute centrality for the'
121 ' null graph')
122 # If no initial vector is provided, start with the all-ones vector.
123 if nstart is None:
124 nstart = {v: 1 for v in G}
125 if all(v == 0 for v in nstart.values()):
126 raise nx.NetworkXError('initial vector cannot have all zero values')
127 # Normalize the initial vector so that each entry is in [0, 1]. This is
128 # guaranteed to never have a divide-by-zero error by the previous line.
129 x = {k: v / sum(nstart.values()) for k, v in nstart.items()}
130 nnodes = G.number_of_nodes()
131 # make up to max_iter iterations
132 for i in range(max_iter):
133 xlast = x
134 x = xlast.copy() # Start with xlast times I to iterate with (A+I)
135 # do the multiplication y^T = x^T A (left eigenvector)
136 for n in x:
137 for nbr in G[n]:
138 x[nbr] += xlast[n] * G[n][nbr].get(weight, 1)
139 # Normalize the vector. The normalization denominator `norm`
140 # should never be zero by the Perron--Frobenius
141 # theorem. However, in case it is due to numerical error, we
142 # assume the norm to be one instead.
143 norm = sqrt(sum(z ** 2 for z in x.values())) or 1
144 x = {k: v / norm for k, v in x.items()}
145 # Check for convergence (in the L_1 norm).
146 if sum(abs(x[n] - xlast[n]) for n in x) < nnodes * tol:
147 return x
148 raise nx.PowerIterationFailedConvergence(max_iter)
149
150
151 def eigenvector_centrality_numpy(G, weight=None, max_iter=50, tol=0):
152 r"""Compute the eigenvector centrality for the graph G.
153
154 Eigenvector centrality computes the centrality for a node based on the
155 centrality of its neighbors. The eigenvector centrality for node $i$ is
156
157 .. math::
158
159 Ax = \lambda x
160
161 where $A$ is the adjacency matrix of the graph G with eigenvalue $\lambda$.
162 By virtue of the Perron–Frobenius theorem, there is a unique and positive
163 solution if $\lambda$ is the largest eigenvalue associated with the
164 eigenvector of the adjacency matrix $A$ ([2]_).
165
166 Parameters
167 ----------
168 G : graph
169 A networkx graph
170
171 weight : None or string, optional (default=None)
172 The name of the edge attribute used as weight.
173 If None, all edge weights are considered equal.
174
175 max_iter : integer, optional (default=100)
176 Maximum number of iterations in power method.
177
178 tol : float, optional (default=1.0e-6)
179 Relative accuracy for eigenvalues (stopping criterion).
180 The default value of 0 implies machine precision.
181
182 Returns
183 -------
184 nodes : dictionary
185 Dictionary of nodes with eigenvector centrality as the value.
186
187 Examples
188 --------
189 >>> G = nx.path_graph(4)
190 >>> centrality = nx.eigenvector_centrality_numpy(G)
191 >>> print(['{} {:0.2f}'.format(node, centrality[node]) for node in centrality])
192 ['0 0.37', '1 0.60', '2 0.60', '3 0.37']
193
194 See Also
195 --------
196 eigenvector_centrality
197 pagerank
198 hits
199
200 Notes
201 -----
202 The measure was introduced by [1]_.
203
204 This algorithm uses the SciPy sparse eigenvalue solver (ARPACK) to
205 find the largest eigenvalue/eigenvector pair.
206
207 For directed graphs this is "left" eigenvector centrality which corresponds
208 to the in-edges in the graph. For out-edges eigenvector centrality
209 first reverse the graph with ``G.reverse()``.
210
211 Raises
212 ------
213 NetworkXPointlessConcept
214 If the graph ``G`` is the null graph.
215
216 References
217 ----------
218 .. [1] Phillip Bonacich:
219 Power and Centrality: A Family of Measures.
220 American Journal of Sociology 92(5):1170–1182, 1986
221 http://www.leonidzhukov.net/hse/2014/socialnetworks/papers/Bonacich-Centrality.pdf
222 .. [2] Mark E. J. Newman:
223 Networks: An Introduction.
224 Oxford University Press, USA, 2010, pp. 169.
225 """
226 import scipy as sp
227 from scipy.sparse import linalg
228 if len(G) == 0:
229 raise nx.NetworkXPointlessConcept('cannot compute centrality for the'
230 ' null graph')
231 M = nx.to_scipy_sparse_matrix(G, nodelist=list(G), weight=weight,
232 dtype=float)
233 eigenvalue, eigenvector = linalg.eigs(M.T, k=1, which='LR',
234 maxiter=max_iter, tol=tol)
235 largest = eigenvector.flatten().real
236 norm = sp.sign(largest.sum()) * sp.linalg.norm(largest)
237 return dict(zip(G, largest / norm))
238
239
240 # fixture for nose tests
241 def setup_module(module):
242 from nose import SkipTest
243 try:
244 import scipy
245 except:
246 raise SkipTest("SciPy not available")
247
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/networkx/algorithms/centrality/eigenvector.py b/networkx/algorithms/centrality/eigenvector.py
--- a/networkx/algorithms/centrality/eigenvector.py
+++ b/networkx/algorithms/centrality/eigenvector.py
@@ -28,16 +28,16 @@
Eigenvector centrality computes the centrality for a node based on the
centrality of its neighbors. The eigenvector centrality for node $i$ is
+ the $i$-th element of the vector $x$ defined by the equation
.. math::
Ax = \lambda x
where $A$ is the adjacency matrix of the graph `G` with eigenvalue
- $\lambda$. By virtue of the Perron–Frobenius theorem, there is
- a unique and positive solution if $\lambda$ is the largest
- eigenvalue associated with the eigenvector of the adjacency matrix
- $A$ ([2]_).
+ $\lambda$. By virtue of the Perron–Frobenius theorem, there is a unique
+ solution $x$, all of whose entries are positive, if $\lambda$ is the
+ largest eigenvalue of the adjacency matrix $A$ ([2]_).
Parameters
----------
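For reference, the rewording in the hunk above matches the standard statement of the result: the centrality vector $x$ solves

```latex
A x = \lambda x, \qquad \lambda = \lambda_{\max}(A),
```

and by the Perron–Frobenius theorem (for the non-negative, irreducible adjacency matrix of a connected graph) the eigenvector belonging to $\lambda_{\max}$ can be chosen with all entries strictly positive, which is the uniqueness-and-positivity claim the corrected docstring makes. The connectivity qualifier is an assumption added here for precision; the docstring itself does not state it.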
| {"golden_diff": "diff --git a/networkx/algorithms/centrality/eigenvector.py b/networkx/algorithms/centrality/eigenvector.py\n--- a/networkx/algorithms/centrality/eigenvector.py\n+++ b/networkx/algorithms/centrality/eigenvector.py\n@@ -28,16 +28,16 @@\n \n Eigenvector centrality computes the centrality for a node based on the\n centrality of its neighbors. The eigenvector centrality for node $i$ is\n+ the $i$-th element of the vector $x$ defined by the equation\n \n .. math::\n \n Ax = \\lambda x\n \n where $A$ is the adjacency matrix of the graph `G` with eigenvalue\n- $\\lambda$. By virtue of the Perron\u2013Frobenius theorem, there is\n- a unique and positive solution if $\\lambda$ is the largest\n- eigenvalue associated with the eigenvector of the adjacency matrix\n- $A$ ([2]_).\n+ $\\lambda$. By virtue of the Perron\u2013Frobenius theorem, there is a unique\n+ solution $x$, all of whose entries are positive, if $\\lambda$ is the\n+ largest eigenvalue of the adjacency matrix $A$ ([2]_).\n \n Parameters\n ----------\n", "issue": "eigenvector_centrality() docstring needs rewording\nThe docstring for eigenvector_centrality() refers to the cluster centrality of a node i and then proceeds to give an equation for the vector of cluster centrality for all nodes. Suggest rewording similar to the katz_similarity() method.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (C) 2004-2018 by\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\n#\n# Authors:\n# Aric Hagberg <[email protected]>\n# Pieter Swart <[email protected]>\n# Sasha Gutfraind <[email protected]>\n\"\"\"Functions for computing eigenvector centrality.\"\"\"\nfrom __future__ import division\n\nfrom math import sqrt\n\nimport networkx as nx\nfrom networkx.utils import not_implemented_for\n\n__all__ = ['eigenvector_centrality', 'eigenvector_centrality_numpy']\n\n\n@not_implemented_for('multigraph')\ndef eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None,\n weight=None):\n r\"\"\"Compute the eigenvector centrality for the graph `G`.\n\n Eigenvector centrality computes the centrality for a node based on the\n centrality of its neighbors. The eigenvector centrality for node $i$ is\n\n .. math::\n\n Ax = \\lambda x\n\n where $A$ is the adjacency matrix of the graph `G` with eigenvalue\n $\\lambda$. 
By virtue of the Perron\u2013Frobenius theorem, there is\n a unique and positive solution if $\\lambda$ is the largest\n eigenvalue associated with the eigenvector of the adjacency matrix\n $A$ ([2]_).\n\n Parameters\n ----------\n G : graph\n A networkx graph\n\n max_iter : integer, optional (default=100)\n Maximum number of iterations in power method.\n\n tol : float, optional (default=1.0e-6)\n Error tolerance used to check convergence in power method iteration.\n\n nstart : dictionary, optional (default=None)\n Starting value of eigenvector iteration for each node.\n\n weight : None or string, optional (default=None)\n If None, all edge weights are considered equal.\n Otherwise holds the name of the edge attribute used as weight.\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with eigenvector centrality as the value.\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n >>> centrality = nx.eigenvector_centrality(G)\n >>> sorted((v, '{:0.2f}'.format(c)) for v, c in centrality.items())\n [(0, '0.37'), (1, '0.60'), (2, '0.60'), (3, '0.37')]\n\n Raises\n ------\n NetworkXPointlessConcept\n If the graph `G` is the null graph.\n\n NetworkXError\n If each value in `nstart` is zero.\n\n PowerIterationFailedConvergence\n If the algorithm fails to converge to the specified tolerance\n within the specified number of iterations of the power iteration\n method.\n\n See Also\n --------\n eigenvector_centrality_numpy\n pagerank\n hits\n\n Notes\n -----\n The measure was introduced by [1]_ and is discussed in [2]_.\n\n The power iteration method is used to compute the eigenvector and\n convergence is **not** guaranteed. Our method stops after ``max_iter``\n iterations or when the change in the computed vector between two\n iterations is smaller than an error tolerance of\n ``G.number_of_nodes() * tol``. This implementation uses ($A + I$)\n rather than the adjacency matrix $A$ because it shifts the spectrum\n to enable discerning the correct eigenvector even for networks with\n multiple dominant eigenvalues.\n\n For directed graphs this is \"left\" eigenvector centrality which corresponds\n to the in-edges in the graph. For out-edges eigenvector centrality\n first reverse the graph with ``G.reverse()``.\n\n References\n ----------\n .. [1] Phillip Bonacich.\n \"Power and Centrality: A Family of Measures.\"\n *American Journal of Sociology* 92(5):1170\u20131182, 1986\n <http://www.leonidzhukov.net/hse/2014/socialnetworks/papers/Bonacich-Centrality.pdf>\n .. [2] Mark E. J. Newman.\n *Networks: An Introduction.*\n Oxford University Press, USA, 2010, pp. 169.\n\n \"\"\"\n if len(G) == 0:\n raise nx.NetworkXPointlessConcept('cannot compute centrality for the'\n ' null graph')\n # If no initial vector is provided, start with the all-ones vector.\n if nstart is None:\n nstart = {v: 1 for v in G}\n if all(v == 0 for v in nstart.values()):\n raise nx.NetworkXError('initial vector cannot have all zero values')\n # Normalize the initial vector so that each entry is in [0, 1]. This is\n # guaranteed to never have a divide-by-zero error by the previous line.\n x = {k: v / sum(nstart.values()) for k, v in nstart.items()}\n nnodes = G.number_of_nodes()\n # make up to max_iter iterations\n for i in range(max_iter):\n xlast = x\n x = xlast.copy() # Start with xlast times I to iterate with (A+I)\n # do the multiplication y^T = x^T A (left eigenvector)\n for n in x:\n for nbr in G[n]:\n x[nbr] += xlast[n] * G[n][nbr].get(weight, 1)\n # Normalize the vector. 
The normalization denominator `norm`\n # should never be zero by the Perron--Frobenius\n # theorem. However, in case it is due to numerical error, we\n # assume the norm to be one instead.\n norm = sqrt(sum(z ** 2 for z in x.values())) or 1\n x = {k: v / norm for k, v in x.items()}\n # Check for convergence (in the L_1 norm).\n if sum(abs(x[n] - xlast[n]) for n in x) < nnodes * tol:\n return x\n raise nx.PowerIterationFailedConvergence(max_iter)\n\n\ndef eigenvector_centrality_numpy(G, weight=None, max_iter=50, tol=0):\n r\"\"\"Compute the eigenvector centrality for the graph G.\n\n Eigenvector centrality computes the centrality for a node based on the\n centrality of its neighbors. The eigenvector centrality for node $i$ is\n\n .. math::\n\n Ax = \\lambda x\n\n where $A$ is the adjacency matrix of the graph G with eigenvalue $\\lambda$.\n By virtue of the Perron\u2013Frobenius theorem, there is a unique and positive\n solution if $\\lambda$ is the largest eigenvalue associated with the\n eigenvector of the adjacency matrix $A$ ([2]_).\n\n Parameters\n ----------\n G : graph\n A networkx graph\n\n weight : None or string, optional (default=None)\n The name of the edge attribute used as weight.\n If None, all edge weights are considered equal.\n\n max_iter : integer, optional (default=100)\n Maximum number of iterations in power method.\n\n tol : float, optional (default=1.0e-6)\n Relative accuracy for eigenvalues (stopping criterion).\n The default value of 0 implies machine precision.\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with eigenvector centrality as the value.\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n >>> centrality = nx.eigenvector_centrality_numpy(G)\n >>> print(['{} {:0.2f}'.format(node, centrality[node]) for node in centrality])\n ['0 0.37', '1 0.60', '2 0.60', '3 0.37']\n\n See Also\n --------\n eigenvector_centrality\n pagerank\n hits\n\n Notes\n -----\n The measure was introduced by [1]_.\n\n This algorithm uses the SciPy sparse eigenvalue solver (ARPACK) to\n find the largest eigenvalue/eigenvector pair.\n\n For directed graphs this is \"left\" eigenvector centrality which corresponds\n to the in-edges in the graph. For out-edges eigenvector centrality\n first reverse the graph with ``G.reverse()``.\n\n Raises\n ------\n NetworkXPointlessConcept\n If the graph ``G`` is the null graph.\n\n References\n ----------\n .. [1] Phillip Bonacich:\n Power and Centrality: A Family of Measures.\n American Journal of Sociology 92(5):1170\u20131182, 1986\n http://www.leonidzhukov.net/hse/2014/socialnetworks/papers/Bonacich-Centrality.pdf\n .. [2] Mark E. J. Newman:\n Networks: An Introduction.\n Oxford University Press, USA, 2010, pp. 
169.\n \"\"\"\n import scipy as sp\n from scipy.sparse import linalg\n if len(G) == 0:\n raise nx.NetworkXPointlessConcept('cannot compute centrality for the'\n ' null graph')\n M = nx.to_scipy_sparse_matrix(G, nodelist=list(G), weight=weight,\n dtype=float)\n eigenvalue, eigenvector = linalg.eigs(M.T, k=1, which='LR',\n maxiter=max_iter, tol=tol)\n largest = eigenvector.flatten().real\n norm = sp.sign(largest.sum()) * sp.linalg.norm(largest)\n return dict(zip(G, largest / norm))\n\n\n# fixture for nose tests\ndef setup_module(module):\n from nose import SkipTest\n try:\n import scipy\n except:\n raise SkipTest(\"SciPy not available\")\n", "path": "networkx/algorithms/centrality/eigenvector.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (C) 2004-2018 by\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\n#\n# Authors:\n# Aric Hagberg <[email protected]>\n# Pieter Swart <[email protected]>\n# Sasha Gutfraind <[email protected]>\n\"\"\"Functions for computing eigenvector centrality.\"\"\"\nfrom __future__ import division\n\nfrom math import sqrt\n\nimport networkx as nx\nfrom networkx.utils import not_implemented_for\n\n__all__ = ['eigenvector_centrality', 'eigenvector_centrality_numpy']\n\n\n@not_implemented_for('multigraph')\ndef eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None,\n weight=None):\n r\"\"\"Compute the eigenvector centrality for the graph `G`.\n\n Eigenvector centrality computes the centrality for a node based on the\n centrality of its neighbors. The eigenvector centrality for node $i$ is\n the $i$-th element of the vector $x$ defined by the equation\n\n .. math::\n\n Ax = \\lambda x\n\n where $A$ is the adjacency matrix of the graph `G` with eigenvalue\n $\\lambda$. By virtue of the Perron\u2013Frobenius theorem, there is a unique\n solution $x$, all of whose entries are positive, if $\\lambda$ is the\n largest eigenvalue of the adjacency matrix $A$ ([2]_).\n\n Parameters\n ----------\n G : graph\n A networkx graph\n\n max_iter : integer, optional (default=100)\n Maximum number of iterations in power method.\n\n tol : float, optional (default=1.0e-6)\n Error tolerance used to check convergence in power method iteration.\n\n nstart : dictionary, optional (default=None)\n Starting value of eigenvector iteration for each node.\n\n weight : None or string, optional (default=None)\n If None, all edge weights are considered equal.\n Otherwise holds the name of the edge attribute used as weight.\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with eigenvector centrality as the value.\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n >>> centrality = nx.eigenvector_centrality(G)\n >>> sorted((v, '{:0.2f}'.format(c)) for v, c in centrality.items())\n [(0, '0.37'), (1, '0.60'), (2, '0.60'), (3, '0.37')]\n\n Raises\n ------\n NetworkXPointlessConcept\n If the graph `G` is the null graph.\n\n NetworkXError\n If each value in `nstart` is zero.\n\n PowerIterationFailedConvergence\n If the algorithm fails to converge to the specified tolerance\n within the specified number of iterations of the power iteration\n method.\n\n See Also\n --------\n eigenvector_centrality_numpy\n pagerank\n hits\n\n Notes\n -----\n The measure was introduced by [1]_ and is discussed in [2]_.\n\n The power iteration method is used to compute the eigenvector and\n convergence is **not** guaranteed. 
Our method stops after ``max_iter``\n iterations or when the change in the computed vector between two\n iterations is smaller than an error tolerance of\n ``G.number_of_nodes() * tol``. This implementation uses ($A + I$)\n rather than the adjacency matrix $A$ because it shifts the spectrum\n to enable discerning the correct eigenvector even for networks with\n multiple dominant eigenvalues.\n\n For directed graphs this is \"left\" eigenvector centrality which corresponds\n to the in-edges in the graph. For out-edges eigenvector centrality\n first reverse the graph with ``G.reverse()``.\n\n References\n ----------\n .. [1] Phillip Bonacich.\n \"Power and Centrality: A Family of Measures.\"\n *American Journal of Sociology* 92(5):1170\u20131182, 1986\n <http://www.leonidzhukov.net/hse/2014/socialnetworks/papers/Bonacich-Centrality.pdf>\n .. [2] Mark E. J. Newman.\n *Networks: An Introduction.*\n Oxford University Press, USA, 2010, pp. 169.\n\n \"\"\"\n if len(G) == 0:\n raise nx.NetworkXPointlessConcept('cannot compute centrality for the'\n ' null graph')\n # If no initial vector is provided, start with the all-ones vector.\n if nstart is None:\n nstart = {v: 1 for v in G}\n if all(v == 0 for v in nstart.values()):\n raise nx.NetworkXError('initial vector cannot have all zero values')\n # Normalize the initial vector so that each entry is in [0, 1]. This is\n # guaranteed to never have a divide-by-zero error by the previous line.\n x = {k: v / sum(nstart.values()) for k, v in nstart.items()}\n nnodes = G.number_of_nodes()\n # make up to max_iter iterations\n for i in range(max_iter):\n xlast = x\n x = xlast.copy() # Start with xlast times I to iterate with (A+I)\n # do the multiplication y^T = x^T A (left eigenvector)\n for n in x:\n for nbr in G[n]:\n x[nbr] += xlast[n] * G[n][nbr].get(weight, 1)\n # Normalize the vector. The normalization denominator `norm`\n # should never be zero by the Perron--Frobenius\n # theorem. However, in case it is due to numerical error, we\n # assume the norm to be one instead.\n norm = sqrt(sum(z ** 2 for z in x.values())) or 1\n x = {k: v / norm for k, v in x.items()}\n # Check for convergence (in the L_1 norm).\n if sum(abs(x[n] - xlast[n]) for n in x) < nnodes * tol:\n return x\n raise nx.PowerIterationFailedConvergence(max_iter)\n\n\ndef eigenvector_centrality_numpy(G, weight=None, max_iter=50, tol=0):\n r\"\"\"Compute the eigenvector centrality for the graph G.\n\n Eigenvector centrality computes the centrality for a node based on the\n centrality of its neighbors. The eigenvector centrality for node $i$ is\n\n .. 
math::\n\n Ax = \\lambda x\n\n where $A$ is the adjacency matrix of the graph G with eigenvalue $\\lambda$.\n By virtue of the Perron\u2013Frobenius theorem, there is a unique and positive\n solution if $\\lambda$ is the largest eigenvalue associated with the\n eigenvector of the adjacency matrix $A$ ([2]_).\n\n Parameters\n ----------\n G : graph\n A networkx graph\n\n weight : None or string, optional (default=None)\n The name of the edge attribute used as weight.\n If None, all edge weights are considered equal.\n\n max_iter : integer, optional (default=100)\n Maximum number of iterations in power method.\n\n tol : float, optional (default=1.0e-6)\n Relative accuracy for eigenvalues (stopping criterion).\n The default value of 0 implies machine precision.\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with eigenvector centrality as the value.\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n >>> centrality = nx.eigenvector_centrality_numpy(G)\n >>> print(['{} {:0.2f}'.format(node, centrality[node]) for node in centrality])\n ['0 0.37', '1 0.60', '2 0.60', '3 0.37']\n\n See Also\n --------\n eigenvector_centrality\n pagerank\n hits\n\n Notes\n -----\n The measure was introduced by [1]_.\n\n This algorithm uses the SciPy sparse eigenvalue solver (ARPACK) to\n find the largest eigenvalue/eigenvector pair.\n\n For directed graphs this is \"left\" eigenvector centrality which corresponds\n to the in-edges in the graph. For out-edges eigenvector centrality\n first reverse the graph with ``G.reverse()``.\n\n Raises\n ------\n NetworkXPointlessConcept\n If the graph ``G`` is the null graph.\n\n References\n ----------\n .. [1] Phillip Bonacich:\n Power and Centrality: A Family of Measures.\n American Journal of Sociology 92(5):1170\u20131182, 1986\n http://www.leonidzhukov.net/hse/2014/socialnetworks/papers/Bonacich-Centrality.pdf\n .. [2] Mark E. J. Newman:\n Networks: An Introduction.\n Oxford University Press, USA, 2010, pp. 169.\n \"\"\"\n import scipy as sp\n from scipy.sparse import linalg\n if len(G) == 0:\n raise nx.NetworkXPointlessConcept('cannot compute centrality for the'\n ' null graph')\n M = nx.to_scipy_sparse_matrix(G, nodelist=list(G), weight=weight,\n dtype=float)\n eigenvalue, eigenvector = linalg.eigs(M.T, k=1, which='LR',\n maxiter=max_iter, tol=tol)\n largest = eigenvector.flatten().real\n norm = sp.sign(largest.sum()) * sp.linalg.norm(largest)\n return dict(zip(G, largest / norm))\n\n\n# fixture for nose tests\ndef setup_module(module):\n from nose import SkipTest\n try:\n import scipy\n except:\n raise SkipTest(\"SciPy not available\")\n", "path": "networkx/algorithms/centrality/eigenvector.py"}]} | 3,270 | 294 |
gh_patches_debug_19539 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-3661 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Conditionally do git submodule operations
We shouldn't be doing submodule operations on repositories that don't have submodules. We can check for this by determining if a repository has a `.gitmodules` file at the top level, and if not, we can skip these commands.
Raised in #3520 as we're storing all these as extraneous commands now.
--- END ISSUE ---
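A minimal sketch of the check the issue proposes, i.e. skip the submodule commands unless a top-level `.gitmodules` file exists, could take the form of two extra methods on the `Backend` class shown below. The method names are illustrative, `self.working_dir` and `self.run` are attributes the backend already has, and this is not necessarily the change that was ultimately merged (the patch at the end of this entry probes `git submodule status` instead).

```python
import os


def has_submodules(self):
    # Assumed helper: key off the presence of a top-level .gitmodules file,
    # as suggested in the issue.
    return os.path.exists(os.path.join(self.working_dir, '.gitmodules'))


def update_submodules(self):
    # Only sync/update submodules when the checkout actually declares them.
    if self.has_submodules():
        self.run('git', 'submodule', 'sync')
        self.run('git', 'submodule', 'update',
                 '--init', '--recursive', '--force')
```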
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/vcs_support/backends/git.py`
Content:
```
1 """Git-related utilities."""
2
3 from __future__ import absolute_import
4
5 import csv
6 import logging
7 import os
8 import re
9
10 from builtins import str
11 from six import StringIO
12
13 from readthedocs.projects.exceptions import RepositoryError
14 from readthedocs.vcs_support.base import BaseVCS, VCSVersion
15
16
17 log = logging.getLogger(__name__)
18
19
20 class Backend(BaseVCS):
21
22 """Git VCS backend."""
23
24 supports_tags = True
25 supports_branches = True
26 fallback_branch = 'master' # default branch
27
28 def __init__(self, *args, **kwargs):
29 super(Backend, self).__init__(*args, **kwargs)
30 self.token = kwargs.get('token', None)
31 self.repo_url = self._get_clone_url()
32
33 def _get_clone_url(self):
34 if '://' in self.repo_url:
35 hacked_url = self.repo_url.split('://')[1]
36 hacked_url = re.sub('.git$', '', hacked_url)
37 clone_url = 'https://%s' % hacked_url
38 if self.token:
39 clone_url = 'https://%s@%s' % (self.token, hacked_url)
40 return clone_url
41 # Don't edit URL because all hosts aren't the same
42
43 # else:
44 # clone_url = 'git://%s' % (hacked_url)
45 return self.repo_url
46
47 def set_remote_url(self, url):
48 return self.run('git', 'remote', 'set-url', 'origin', url)
49
50 def update(self):
51 # Use checkout() to update repo
52 self.checkout()
53
54 def repo_exists(self):
55 code, _, _ = self.run('git', 'status', record=False)
56 return code == 0
57
58 def fetch(self):
59 code, _, _ = self.run('git', 'fetch', '--tags', '--prune')
60 if code != 0:
61 raise RepositoryError
62
63 def checkout_revision(self, revision=None):
64 if not revision:
65 branch = self.default_branch or self.fallback_branch
66 revision = 'origin/%s' % branch
67
68 code, out, err = self.run(
69 'git', 'checkout', '--force', revision)
70 if code != 0:
71 log.warning("Failed to checkout revision '%s': %s",
72 revision, code)
73 return [code, out, err]
74
75 def clone(self):
76 code, _, _ = self.run(
77 'git', 'clone', '--recursive', self.repo_url, '.')
78 if code != 0:
79 raise RepositoryError
80
81 @property
82 def tags(self):
83 retcode, stdout, _ = self.run('git', 'show-ref', '--tags', record_as_success=True)
84 # error (or no tags found)
85 if retcode != 0:
86 return []
87 return self.parse_tags(stdout)
88
89 def parse_tags(self, data):
90 """
91 Parses output of show-ref --tags, eg:
92
93 3b32886c8d3cb815df3793b3937b2e91d0fb00f1 refs/tags/2.0.0
94 bd533a768ff661991a689d3758fcfe72f455435d refs/tags/2.0.1
95 c0288a17899b2c6818f74e3a90b77e2a1779f96a refs/tags/2.0.2
96 a63a2de628a3ce89034b7d1a5ca5e8159534eef0 refs/tags/2.1.0.beta2
97 c7fc3d16ed9dc0b19f0d27583ca661a64562d21e refs/tags/2.1.0.rc1
98 edc0a2d02a0cc8eae8b67a3a275f65cd126c05b1 refs/tags/2.1.0.rc2
99
100 Into VCSTag objects with the tag name as verbose_name and the commit
101 hash as identifier.
102 """
103 # parse the lines into a list of tuples (commit-hash, tag ref name)
104 # StringIO below is expecting Unicode data, so ensure that it gets it.
105 if not isinstance(data, str):
106 data = str(data)
107 raw_tags = csv.reader(StringIO(data), delimiter=' ')
108 vcs_tags = []
109 for row in raw_tags:
110 row = [f for f in row if f != '']
111 if row == []:
112 continue
113 commit_hash, name = row
114 clean_name = name.replace('refs/tags/', '')
115 vcs_tags.append(VCSVersion(self, commit_hash, clean_name))
116 return vcs_tags
117
118 @property
119 def branches(self):
120 # Only show remote branches
121 retcode, stdout, _ = self.run('git', 'branch', '-r')
122 # error (or no tags found)
123 if retcode != 0:
124 return []
125 return self.parse_branches(stdout)
126
127 def parse_branches(self, data):
128 """
129 Parse output of git branch -r
130
131 e.g.:
132
133 origin/2.0.X
134 origin/HEAD -> origin/master
135 origin/develop
136 origin/master
137 origin/release/2.0.0
138 origin/release/2.1.0
139 """
140 clean_branches = []
141 # StringIO below is expecting Unicode data, so ensure that it gets it.
142 if not isinstance(data, str):
143 data = str(data)
144 raw_branches = csv.reader(StringIO(data), delimiter=' ')
145 for branch in raw_branches:
146 branch = [f for f in branch if f != '' and f != '*']
147 # Handle empty branches
148 if branch:
149 branch = branch[0]
150 if branch.startswith('origin/'):
151 verbose_name = branch.replace('origin/', '')
152 if verbose_name in ['HEAD']:
153 continue
154 clean_branches.append(VCSVersion(self, branch, verbose_name))
155 else:
156 clean_branches.append(VCSVersion(self, branch, branch))
157 return clean_branches
158
159 @property
160 def commit(self):
161 _, stdout, _ = self.run('git', 'rev-parse', 'HEAD')
162 return stdout.strip()
163
164 def checkout(self, identifier=None):
165 self.check_working_dir()
166
167 # Clone or update repository
168 if self.repo_exists():
169 self.set_remote_url(self.repo_url)
170 self.fetch()
171 else:
172 self.make_clean_working_dir()
173 self.clone()
174
175 # Find proper identifier
176 if not identifier:
177 identifier = self.default_branch or self.fallback_branch
178
179 identifier = self.find_ref(identifier)
180
181 # Checkout the correct identifier for this branch.
182 code, out, err = self.checkout_revision(identifier)
183 if code != 0:
184 return code, out, err
185
186 # Clean any remains of previous checkouts
187 self.run('git', 'clean', '-d', '-f', '-f')
188
189 # Update submodules
190 self.run('git', 'submodule', 'sync')
191 self.run('git', 'submodule', 'update',
192 '--init', '--recursive', '--force')
193
194 return code, out, err
195
196 def find_ref(self, ref):
197 # Check if ref starts with 'origin/'
198 if ref.startswith('origin/'):
199 return ref
200
201 # Check if ref is a branch of the origin remote
202 if self.ref_exists('remotes/origin/' + ref):
203 return 'origin/' + ref
204
205 return ref
206
207 def ref_exists(self, ref):
208 code, _, _ = self.run('git', 'show-ref', ref, record_as_success=True)
209 return code == 0
210
211 @property
212 def env(self):
213 env = super(Backend, self).env
214 env['GIT_DIR'] = os.path.join(self.working_dir, '.git')
215 # Don't prompt for username, this requires Git 2.3+
216 env['GIT_TERMINAL_PROMPT'] = '0'
217 return env
218
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/vcs_support/backends/git.py b/readthedocs/vcs_support/backends/git.py
--- a/readthedocs/vcs_support/backends/git.py
+++ b/readthedocs/vcs_support/backends/git.py
@@ -55,6 +55,10 @@
code, _, _ = self.run('git', 'status', record=False)
return code == 0
+ def submodules_exists(self):
+ code, out, _ = self.run('git', 'submodule', 'status', record=False)
+ return code == 0 and bool(out)
+
def fetch(self):
code, _, _ = self.run('git', 'fetch', '--tags', '--prune')
if code != 0:
@@ -187,9 +191,10 @@
self.run('git', 'clean', '-d', '-f', '-f')
# Update submodules
- self.run('git', 'submodule', 'sync')
- self.run('git', 'submodule', 'update',
- '--init', '--recursive', '--force')
+ if self.submodules_exists():
+ self.run('git', 'submodule', 'sync')
+ self.run('git', 'submodule', 'update',
+ '--init', '--recursive', '--force')
return code, out, err
| {"golden_diff": "diff --git a/readthedocs/vcs_support/backends/git.py b/readthedocs/vcs_support/backends/git.py\n--- a/readthedocs/vcs_support/backends/git.py\n+++ b/readthedocs/vcs_support/backends/git.py\n@@ -55,6 +55,10 @@\n code, _, _ = self.run('git', 'status', record=False)\n return code == 0\n \n+ def submodules_exists(self):\n+ code, out, _ = self.run('git', 'submodule', 'status', record=False)\n+ return code == 0 and bool(out)\n+\n def fetch(self):\n code, _, _ = self.run('git', 'fetch', '--tags', '--prune')\n if code != 0:\n@@ -187,9 +191,10 @@\n self.run('git', 'clean', '-d', '-f', '-f')\n \n # Update submodules\n- self.run('git', 'submodule', 'sync')\n- self.run('git', 'submodule', 'update',\n- '--init', '--recursive', '--force')\n+ if self.submodules_exists():\n+ self.run('git', 'submodule', 'sync')\n+ self.run('git', 'submodule', 'update',\n+ '--init', '--recursive', '--force')\n \n return code, out, err\n", "issue": "Conditionally do git submodule operations\nWe shouldn't be doing submodule operations on repositories that don't have submodules. We can check for this by determining if a repository has a `.gitmodules` file at the top level, and if not, we can skip these commands.\r\n\r\nRaised in #3520 as we're storing all these as extraneous commands now.\n", "before_files": [{"content": "\"\"\"Git-related utilities.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport csv\nimport logging\nimport os\nimport re\n\nfrom builtins import str\nfrom six import StringIO\n\nfrom readthedocs.projects.exceptions import RepositoryError\nfrom readthedocs.vcs_support.base import BaseVCS, VCSVersion\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Backend(BaseVCS):\n\n \"\"\"Git VCS backend.\"\"\"\n\n supports_tags = True\n supports_branches = True\n fallback_branch = 'master' # default branch\n\n def __init__(self, *args, **kwargs):\n super(Backend, self).__init__(*args, **kwargs)\n self.token = kwargs.get('token', None)\n self.repo_url = self._get_clone_url()\n\n def _get_clone_url(self):\n if '://' in self.repo_url:\n hacked_url = self.repo_url.split('://')[1]\n hacked_url = re.sub('.git$', '', hacked_url)\n clone_url = 'https://%s' % hacked_url\n if self.token:\n clone_url = 'https://%s@%s' % (self.token, hacked_url)\n return clone_url\n # Don't edit URL because all hosts aren't the same\n\n # else:\n # clone_url = 'git://%s' % (hacked_url)\n return self.repo_url\n\n def set_remote_url(self, url):\n return self.run('git', 'remote', 'set-url', 'origin', url)\n\n def update(self):\n # Use checkout() to update repo\n self.checkout()\n\n def repo_exists(self):\n code, _, _ = self.run('git', 'status', record=False)\n return code == 0\n\n def fetch(self):\n code, _, _ = self.run('git', 'fetch', '--tags', '--prune')\n if code != 0:\n raise RepositoryError\n\n def checkout_revision(self, revision=None):\n if not revision:\n branch = self.default_branch or self.fallback_branch\n revision = 'origin/%s' % branch\n\n code, out, err = self.run(\n 'git', 'checkout', '--force', revision)\n if code != 0:\n log.warning(\"Failed to checkout revision '%s': %s\",\n revision, code)\n return [code, out, err]\n\n def clone(self):\n code, _, _ = self.run(\n 'git', 'clone', '--recursive', self.repo_url, '.')\n if code != 0:\n raise RepositoryError\n\n @property\n def tags(self):\n retcode, stdout, _ = self.run('git', 'show-ref', '--tags', record_as_success=True)\n # error (or no tags found)\n if retcode != 0:\n return []\n return self.parse_tags(stdout)\n\n def parse_tags(self, data):\n \"\"\"\n Parses 
output of show-ref --tags, eg:\n\n 3b32886c8d3cb815df3793b3937b2e91d0fb00f1 refs/tags/2.0.0\n bd533a768ff661991a689d3758fcfe72f455435d refs/tags/2.0.1\n c0288a17899b2c6818f74e3a90b77e2a1779f96a refs/tags/2.0.2\n a63a2de628a3ce89034b7d1a5ca5e8159534eef0 refs/tags/2.1.0.beta2\n c7fc3d16ed9dc0b19f0d27583ca661a64562d21e refs/tags/2.1.0.rc1\n edc0a2d02a0cc8eae8b67a3a275f65cd126c05b1 refs/tags/2.1.0.rc2\n\n Into VCSTag objects with the tag name as verbose_name and the commit\n hash as identifier.\n \"\"\"\n # parse the lines into a list of tuples (commit-hash, tag ref name)\n # StringIO below is expecting Unicode data, so ensure that it gets it.\n if not isinstance(data, str):\n data = str(data)\n raw_tags = csv.reader(StringIO(data), delimiter=' ')\n vcs_tags = []\n for row in raw_tags:\n row = [f for f in row if f != '']\n if row == []:\n continue\n commit_hash, name = row\n clean_name = name.replace('refs/tags/', '')\n vcs_tags.append(VCSVersion(self, commit_hash, clean_name))\n return vcs_tags\n\n @property\n def branches(self):\n # Only show remote branches\n retcode, stdout, _ = self.run('git', 'branch', '-r')\n # error (or no tags found)\n if retcode != 0:\n return []\n return self.parse_branches(stdout)\n\n def parse_branches(self, data):\n \"\"\"\n Parse output of git branch -r\n\n e.g.:\n\n origin/2.0.X\n origin/HEAD -> origin/master\n origin/develop\n origin/master\n origin/release/2.0.0\n origin/release/2.1.0\n \"\"\"\n clean_branches = []\n # StringIO below is expecting Unicode data, so ensure that it gets it.\n if not isinstance(data, str):\n data = str(data)\n raw_branches = csv.reader(StringIO(data), delimiter=' ')\n for branch in raw_branches:\n branch = [f for f in branch if f != '' and f != '*']\n # Handle empty branches\n if branch:\n branch = branch[0]\n if branch.startswith('origin/'):\n verbose_name = branch.replace('origin/', '')\n if verbose_name in ['HEAD']:\n continue\n clean_branches.append(VCSVersion(self, branch, verbose_name))\n else:\n clean_branches.append(VCSVersion(self, branch, branch))\n return clean_branches\n\n @property\n def commit(self):\n _, stdout, _ = self.run('git', 'rev-parse', 'HEAD')\n return stdout.strip()\n\n def checkout(self, identifier=None):\n self.check_working_dir()\n\n # Clone or update repository\n if self.repo_exists():\n self.set_remote_url(self.repo_url)\n self.fetch()\n else:\n self.make_clean_working_dir()\n self.clone()\n\n # Find proper identifier\n if not identifier:\n identifier = self.default_branch or self.fallback_branch\n\n identifier = self.find_ref(identifier)\n\n # Checkout the correct identifier for this branch.\n code, out, err = self.checkout_revision(identifier)\n if code != 0:\n return code, out, err\n\n # Clean any remains of previous checkouts\n self.run('git', 'clean', '-d', '-f', '-f')\n\n # Update submodules\n self.run('git', 'submodule', 'sync')\n self.run('git', 'submodule', 'update',\n '--init', '--recursive', '--force')\n\n return code, out, err\n\n def find_ref(self, ref):\n # Check if ref starts with 'origin/'\n if ref.startswith('origin/'):\n return ref\n\n # Check if ref is a branch of the origin remote\n if self.ref_exists('remotes/origin/' + ref):\n return 'origin/' + ref\n\n return ref\n\n def ref_exists(self, ref):\n code, _, _ = self.run('git', 'show-ref', ref, record_as_success=True)\n return code == 0\n\n @property\n def env(self):\n env = super(Backend, self).env\n env['GIT_DIR'] = os.path.join(self.working_dir, '.git')\n # Don't prompt for username, this requires Git 2.3+\n 
env['GIT_TERMINAL_PROMPT'] = '0'\n return env\n", "path": "readthedocs/vcs_support/backends/git.py"}], "after_files": [{"content": "\"\"\"Git-related utilities.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport csv\nimport logging\nimport os\nimport re\n\nfrom builtins import str\nfrom six import StringIO\n\nfrom readthedocs.projects.exceptions import RepositoryError\nfrom readthedocs.vcs_support.base import BaseVCS, VCSVersion\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Backend(BaseVCS):\n\n \"\"\"Git VCS backend.\"\"\"\n\n supports_tags = True\n supports_branches = True\n fallback_branch = 'master' # default branch\n\n def __init__(self, *args, **kwargs):\n super(Backend, self).__init__(*args, **kwargs)\n self.token = kwargs.get('token', None)\n self.repo_url = self._get_clone_url()\n\n def _get_clone_url(self):\n if '://' in self.repo_url:\n hacked_url = self.repo_url.split('://')[1]\n hacked_url = re.sub('.git$', '', hacked_url)\n clone_url = 'https://%s' % hacked_url\n if self.token:\n clone_url = 'https://%s@%s' % (self.token, hacked_url)\n return clone_url\n # Don't edit URL because all hosts aren't the same\n\n # else:\n # clone_url = 'git://%s' % (hacked_url)\n return self.repo_url\n\n def set_remote_url(self, url):\n return self.run('git', 'remote', 'set-url', 'origin', url)\n\n def update(self):\n # Use checkout() to update repo\n self.checkout()\n\n def repo_exists(self):\n code, _, _ = self.run('git', 'status', record=False)\n return code == 0\n\n def submodules_exists(self):\n code, out, _ = self.run('git', 'submodule', 'status', record=False)\n return code == 0 and bool(out)\n\n def fetch(self):\n code, _, _ = self.run('git', 'fetch', '--tags', '--prune')\n if code != 0:\n raise RepositoryError\n\n def checkout_revision(self, revision=None):\n if not revision:\n branch = self.default_branch or self.fallback_branch\n revision = 'origin/%s' % branch\n\n code, out, err = self.run(\n 'git', 'checkout', '--force', revision)\n if code != 0:\n log.warning(\"Failed to checkout revision '%s': %s\",\n revision, code)\n return [code, out, err]\n\n def clone(self):\n code, _, _ = self.run(\n 'git', 'clone', '--recursive', self.repo_url, '.')\n if code != 0:\n raise RepositoryError\n\n @property\n def tags(self):\n retcode, stdout, _ = self.run('git', 'show-ref', '--tags', record_as_success=True)\n # error (or no tags found)\n if retcode != 0:\n return []\n return self.parse_tags(stdout)\n\n def parse_tags(self, data):\n \"\"\"\n Parses output of show-ref --tags, eg:\n\n 3b32886c8d3cb815df3793b3937b2e91d0fb00f1 refs/tags/2.0.0\n bd533a768ff661991a689d3758fcfe72f455435d refs/tags/2.0.1\n c0288a17899b2c6818f74e3a90b77e2a1779f96a refs/tags/2.0.2\n a63a2de628a3ce89034b7d1a5ca5e8159534eef0 refs/tags/2.1.0.beta2\n c7fc3d16ed9dc0b19f0d27583ca661a64562d21e refs/tags/2.1.0.rc1\n edc0a2d02a0cc8eae8b67a3a275f65cd126c05b1 refs/tags/2.1.0.rc2\n\n Into VCSTag objects with the tag name as verbose_name and the commit\n hash as identifier.\n \"\"\"\n # parse the lines into a list of tuples (commit-hash, tag ref name)\n # StringIO below is expecting Unicode data, so ensure that it gets it.\n if not isinstance(data, str):\n data = str(data)\n raw_tags = csv.reader(StringIO(data), delimiter=' ')\n vcs_tags = []\n for row in raw_tags:\n row = [f for f in row if f != '']\n if row == []:\n continue\n commit_hash, name = row\n clean_name = name.replace('refs/tags/', '')\n vcs_tags.append(VCSVersion(self, commit_hash, clean_name))\n return vcs_tags\n\n @property\n def branches(self):\n # Only show 
remote branches\n retcode, stdout, _ = self.run('git', 'branch', '-r')\n # error (or no tags found)\n if retcode != 0:\n return []\n return self.parse_branches(stdout)\n\n def parse_branches(self, data):\n \"\"\"\n Parse output of git branch -r\n\n e.g.:\n\n origin/2.0.X\n origin/HEAD -> origin/master\n origin/develop\n origin/master\n origin/release/2.0.0\n origin/release/2.1.0\n \"\"\"\n clean_branches = []\n # StringIO below is expecting Unicode data, so ensure that it gets it.\n if not isinstance(data, str):\n data = str(data)\n raw_branches = csv.reader(StringIO(data), delimiter=' ')\n for branch in raw_branches:\n branch = [f for f in branch if f != '' and f != '*']\n # Handle empty branches\n if branch:\n branch = branch[0]\n if branch.startswith('origin/'):\n verbose_name = branch.replace('origin/', '')\n if verbose_name in ['HEAD']:\n continue\n clean_branches.append(VCSVersion(self, branch, verbose_name))\n else:\n clean_branches.append(VCSVersion(self, branch, branch))\n return clean_branches\n\n @property\n def commit(self):\n _, stdout, _ = self.run('git', 'rev-parse', 'HEAD')\n return stdout.strip()\n\n def checkout(self, identifier=None):\n self.check_working_dir()\n\n # Clone or update repository\n if self.repo_exists():\n self.set_remote_url(self.repo_url)\n self.fetch()\n else:\n self.make_clean_working_dir()\n self.clone()\n\n # Find proper identifier\n if not identifier:\n identifier = self.default_branch or self.fallback_branch\n\n identifier = self.find_ref(identifier)\n\n # Checkout the correct identifier for this branch.\n code, out, err = self.checkout_revision(identifier)\n if code != 0:\n return code, out, err\n\n # Clean any remains of previous checkouts\n self.run('git', 'clean', '-d', '-f', '-f')\n\n # Update submodules\n if self.submodules_exists():\n self.run('git', 'submodule', 'sync')\n self.run('git', 'submodule', 'update',\n '--init', '--recursive', '--force')\n\n return code, out, err\n\n def find_ref(self, ref):\n # Check if ref starts with 'origin/'\n if ref.startswith('origin/'):\n return ref\n\n # Check if ref is a branch of the origin remote\n if self.ref_exists('remotes/origin/' + ref):\n return 'origin/' + ref\n\n return ref\n\n def ref_exists(self, ref):\n code, _, _ = self.run('git', 'show-ref', ref, record_as_success=True)\n return code == 0\n\n @property\n def env(self):\n env = super(Backend, self).env\n env['GIT_DIR'] = os.path.join(self.working_dir, '.git')\n # Don't prompt for username, this requires Git 2.3+\n env['GIT_TERMINAL_PROMPT'] = '0'\n return env\n", "path": "readthedocs/vcs_support/backends/git.py"}]} | 2,696 | 300 |
gh_patches_debug_41595 | rasdani/github-patches | git_diff | aws__aws-cli-1565 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ec2 run-instances ignores count in its json input
When preparing a reservation with `aws ec2 run-instances --cli-input-json JSONBLOB` the `MinCount` and `MaxCount` parameters are ignored and overwritten by the value 1. The equivalent reservation using command line options and esp. the `--count` option functions as expected.
How to reproduce:
``` shell
jsonblob()
{
cat <<'EOF'
{
"DryRun": false,
"ImageId": "ami-XXXXXXXX",
"MinCount": 3,
"MaxCount": 3,
"SubnetId": "subnet-XXXXXXXX",
"UserData": "IyEvYmluL3NoCnNodXRkb3duIC1QICsyCg==",
"InstanceInitiatedShutdownBehavior": "terminate",
"InstanceType": "t2.small"
}
EOF
}
aws --debug ec2 run-instances --cli-input-json "$(jsonblob)"
```
(The user data is a script terminating the instance after a few minutes.)
The output contains the line
```
2015-09-23 16:49:09,762 - MainThread - botocore.endpoint - DEBUG - Making request for <botocore.model.OperationModel object at 0xXXXXXXXXXX> (verify_ssl=True) with params: {'body': {u'UserData': u'SXlFdlltbHVMM05vQ25Ob2RYUmtiM2R1SUMxUUlDc3lDZz09', u'DryRun': 'false', u'InstanceInitiatedShutdownBehavior': u'terminate', u'ImageId': u'ami-XXXXXXXX', u'SubnetId': u'subnet-XXXXXXXX', 'Version': u'2015-04-15', 'MaxCount': 1, 'Action': u'RunInstances', 'MinCount': 1, u'InstanceType': u't2.small'}, 'url': u'https://ec2.us-east-1.amazonaws.com/', 'headers': {'User-Agent': 'aws-cli/1.7.32 Python/2.7.10 Darwin/14.5.0'}, 'query_string': '', 'url_path': '/', 'method': u'POST'}
```
This demonstrates that the parameters `MaxCount` and `MinCount` in the JSON input are ignored.
---
``` console
% pip show awscli
---
Metadata-Version: 1.1
Name: awscli
Version: 1.7.32
Summary: Universal Command Line Environment for AWS.
Home-page: http://aws.amazon.com/cli/
Author: Mitch Garnaat
Author-email: [email protected]
License: Apache License 2.0
Location: /opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages
Requires: botocore, bcdoc, colorama, docutils, rsa
```
--- END ISSUE ---
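The behaviour appears to come from the `count` customization shown below: `add_to_parser` registers `--count` with `default='1'`, so `add_to_params` runs even when the flag is never typed and writes `MinCount`/`MaxCount` of 1 over whatever `--cli-input-json` provided. A minimal sketch of a defaulting approach that avoids this, filling in the counts only when they are still missing just before the request is built, is given here; the constant name and the exact event wiring are illustrative assumptions rather than the precise shipped fix.

```python
DEFAULT_COUNT = 1


def set_default_count(params, **kwargs):
    # Fill in the counts only when the caller (a CLI flag or --cli-input-json)
    # has not already supplied them.
    params.setdefault('MinCount', DEFAULT_COUNT)
    params.setdefault('MaxCount', DEFAULT_COUNT)


# Assumed wiring, registered alongside the other ec2 customizations:
# event_handlers.register('before-parameter-build.ec2.RunInstances',
#                         set_default_count)
```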
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/handlers.py`
Content:
```
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """Builtin CLI extensions.
14
15 This is a collection of built in CLI extensions that can be automatically
16 registered with the event system.
17
18 """
19 from awscli.argprocess import ParamShorthand
20 from awscli.argprocess import uri_param
21 from awscli.errorhandler import ErrorHandler
22 from awscli.customizations.streamingoutputarg import add_streaming_output_arg
23 from awscli.customizations.addexamples import add_examples
24 from awscli.customizations.removals import register_removals
25 from awscli.customizations.ec2addcount import ec2_add_count
26 from awscli.customizations.paginate import register_pagination
27 from awscli.customizations.ec2decryptpassword import ec2_add_priv_launch_key
28 from awscli.customizations.ec2secgroupsimplify import register_secgroup
29 from awscli.customizations.preview import register_preview_commands
30 from awscli.customizations.ec2bundleinstance import register_bundleinstance
31 from awscli.customizations.s3.s3 import s3_plugin_initialize
32 from awscli.customizations.ec2runinstances import register_runinstances
33 from awscli.customizations.rds import register_rds_modify_split
34 from awscli.customizations.putmetricdata import register_put_metric_data
35 from awscli.customizations.sessendemail import register_ses_send_email
36 from awscli.customizations.iamvirtmfa import IAMVMFAWrapper
37 from awscli.customizations.argrename import register_arg_renames
38 from awscli.customizations.configure import register_configure_cmd
39 from awscli.customizations.cloudtrail import initialize as cloudtrail_init
40 from awscli.customizations.toplevelbool import register_bool_params
41 from awscli.customizations.ec2protocolarg import register_protocol_args
42 from awscli.customizations import datapipeline
43 from awscli.customizations.globalargs import register_parse_global_args
44 from awscli.customizations.cloudsearch import initialize as cloudsearch_init
45 from awscli.customizations.emr.emr import emr_initialize
46 from awscli.customizations.cloudsearchdomain import register_cloudsearchdomain
47 from awscli.customizations.s3endpoint import register_s3_endpoint
48 from awscli.customizations.s3errormsg import register_s3_error_msg
49 from awscli.customizations.cliinputjson import register_cli_input_json
50 from awscli.customizations.generatecliskeleton import \
51 register_generate_cli_skeleton
52 from awscli.customizations.assumerole import register_assume_role_provider
53 from awscli.customizations.waiters import register_add_waiters
54 from awscli.customizations.codedeploy.codedeploy import initialize as \
55 codedeploy_init
56 from awscli.customizations.configservice.subscribe import register_subscribe
57 from awscli.customizations.configservice.getstatus import register_get_status
58 from awscli.customizations.configservice.rename_cmd import \
59 register_rename_config
60 from awscli.customizations.configservice.putconfigurationrecorder import \
61 register_modify_put_configuration_recorder
62 from awscli.customizations.scalarparse import register_scalar_parser
63 from awscli.customizations.opsworks import initialize as opsworks_init
64 from awscli.customizations.awslambda import register_lambda_create_function
65 from awscli.customizations.kms import register_fix_kms_create_grant_docs
66 from awscli.customizations.route53 import register_create_hosted_zone_doc_fix
67 from awscli.customizations.codecommit import initialize as codecommit_init
68 from awscli.customizations.iot_data import register_custom_endpoint_note
69
70
71 def awscli_initialize(event_handlers):
72 event_handlers.register('load-cli-arg', uri_param)
73 param_shorthand = ParamShorthand()
74 event_handlers.register('process-cli-arg', param_shorthand)
75 # The s3 error mesage needs to registered before the
76 # generic error handler.
77 register_s3_error_msg(event_handlers)
78 error_handler = ErrorHandler()
79 event_handlers.register('after-call', error_handler,
80 unique_id='awscli-error-handler')
81 # # The following will get fired for every option we are
82 # # documenting. It will attempt to add an example_fn on to
83 # # the parameter object if the parameter supports shorthand
84 # # syntax. The documentation event handlers will then use
85 # # the examplefn to generate the sample shorthand syntax
86 # # in the docs. Registering here should ensure that this
87 # # handler gets called first but it still feels a bit brittle.
88 # event_handlers.register('doc-option-example.*.*.*',
89 # param_shorthand.add_example_fn)
90 event_handlers.register('doc-examples.*.*',
91 add_examples)
92 register_cli_input_json(event_handlers)
93 event_handlers.register('building-argument-table.*',
94 add_streaming_output_arg)
95 event_handlers.register('building-argument-table.ec2.run-instances',
96 ec2_add_count)
97 event_handlers.register('building-argument-table.ec2.get-password-data',
98 ec2_add_priv_launch_key)
99 register_parse_global_args(event_handlers)
100 register_pagination(event_handlers)
101 register_secgroup(event_handlers)
102 register_bundleinstance(event_handlers)
103 s3_plugin_initialize(event_handlers)
104 register_runinstances(event_handlers)
105 register_removals(event_handlers)
106 register_preview_commands(event_handlers)
107 register_rds_modify_split(event_handlers)
108 register_put_metric_data(event_handlers)
109 register_ses_send_email(event_handlers)
110 IAMVMFAWrapper(event_handlers)
111 register_arg_renames(event_handlers)
112 register_configure_cmd(event_handlers)
113 cloudtrail_init(event_handlers)
114 register_bool_params(event_handlers)
115 register_protocol_args(event_handlers)
116 datapipeline.register_customizations(event_handlers)
117 cloudsearch_init(event_handlers)
118 emr_initialize(event_handlers)
119 register_cloudsearchdomain(event_handlers)
120 register_s3_endpoint(event_handlers)
121 register_generate_cli_skeleton(event_handlers)
122 register_assume_role_provider(event_handlers)
123 register_add_waiters(event_handlers)
124 codedeploy_init(event_handlers)
125 register_subscribe(event_handlers)
126 register_get_status(event_handlers)
127 register_rename_config(event_handlers)
128 register_scalar_parser(event_handlers)
129 opsworks_init(event_handlers)
130 register_lambda_create_function(event_handlers)
131 register_fix_kms_create_grant_docs(event_handlers)
132 register_create_hosted_zone_doc_fix(event_handlers)
133 register_modify_put_configuration_recorder(event_handlers)
134 codecommit_init(event_handlers)
135 register_custom_endpoint_note(event_handlers)
136
```
Path: `awscli/customizations/ec2addcount.py`
Content:
```
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import logging
14
15 from botocore import model
16
17 from awscli.arguments import BaseCLIArgument
18
19
20 logger = logging.getLogger(__name__)
21
22
23 HELP = """
24 <p>Number of instances to launch. If a single number is provided, it
25 is assumed to be the minimum to launch (defaults to 1). If a range is
26 provided in the form <code>min:max</code> then the first number is
27 interpreted as the minimum number of instances to launch and the second
28 is interpreted as the maximum number of instances to launch.</p>"""
29
30
31 def ec2_add_count(argument_table, **kwargs):
32 argument_table['count'] = CountArgument('count')
33 del argument_table['min-count']
34 del argument_table['max-count']
35
36
37 class CountArgument(BaseCLIArgument):
38
39 def __init__(self, name):
40 self.argument_model = model.Shape('CountArgument', {'type': 'string'})
41 self._name = name
42 self._required = False
43
44 @property
45 def cli_name(self):
46 return '--' + self._name
47
48 @property
49 def cli_type_name(self):
50 return 'string'
51
52 @property
53 def required(self):
54 return self._required
55
56 @required.setter
57 def required(self, value):
58 self._required = value
59
60 @property
61 def documentation(self):
62 return HELP
63
64 def add_to_parser(self, parser):
65 parser.add_argument(self.cli_name, metavar=self.py_name,
66 help='Number of instances to launch',
67 default='1')
68
69 def add_to_params(self, parameters, value):
70 try:
71 if ':' in value:
72 minstr, maxstr = value.split(':')
73 else:
74 minstr, maxstr = (value, value)
75 parameters['MinCount'] = int(minstr)
76 parameters['MaxCount'] = int(maxstr)
77 except:
78 msg = ('count parameter should be of '
79 'form min[:max] (e.g. 1 or 1:10)')
80 raise ValueError(msg)
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awscli/customizations/ec2addcount.py b/awscli/customizations/ec2addcount.py
--- a/awscli/customizations/ec2addcount.py
+++ b/awscli/customizations/ec2addcount.py
@@ -20,12 +20,20 @@
logger = logging.getLogger(__name__)
+DEFAULT = 1
HELP = """
<p>Number of instances to launch. If a single number is provided, it
-is assumed to be the minimum to launch (defaults to 1). If a range is
+is assumed to be the minimum to launch (defaults to %d). If a range is
provided in the form <code>min:max</code> then the first number is
interpreted as the minimum number of instances to launch and the second
-is interpreted as the maximum number of instances to launch.</p>"""
+is interpreted as the maximum number of instances to launch.</p>""" % DEFAULT
+
+
+def register_count_events(event_handler):
+ event_handler.register(
+ 'building-argument-table.ec2.run-instances', ec2_add_count)
+ event_handler.register(
+ 'before-parameter-build.ec2.RunInstances', set_default_count)
def ec2_add_count(argument_table, **kwargs):
@@ -34,6 +42,11 @@
del argument_table['max-count']
+def set_default_count(params, **kwargs):
+ params.setdefault('MaxCount', DEFAULT)
+ params.setdefault('MinCount', DEFAULT)
+
+
class CountArgument(BaseCLIArgument):
def __init__(self, name):
@@ -62,11 +75,14 @@
return HELP
def add_to_parser(self, parser):
+ # We do NOT set default value here. It will be set later by event hook.
parser.add_argument(self.cli_name, metavar=self.py_name,
- help='Number of instances to launch',
- default='1')
+ help='Number of instances to launch')
def add_to_params(self, parameters, value):
+ if value is None:
+ # NO-OP if value is not explicitly set by user
+ return
try:
if ':' in value:
minstr, maxstr = value.split(':')
diff --git a/awscli/handlers.py b/awscli/handlers.py
--- a/awscli/handlers.py
+++ b/awscli/handlers.py
@@ -22,7 +22,7 @@
from awscli.customizations.streamingoutputarg import add_streaming_output_arg
from awscli.customizations.addexamples import add_examples
from awscli.customizations.removals import register_removals
-from awscli.customizations.ec2addcount import ec2_add_count
+from awscli.customizations.ec2addcount import register_count_events
from awscli.customizations.paginate import register_pagination
from awscli.customizations.ec2decryptpassword import ec2_add_priv_launch_key
from awscli.customizations.ec2secgroupsimplify import register_secgroup
@@ -92,8 +92,7 @@
register_cli_input_json(event_handlers)
event_handlers.register('building-argument-table.*',
add_streaming_output_arg)
- event_handlers.register('building-argument-table.ec2.run-instances',
- ec2_add_count)
+ register_count_events(event_handlers)
event_handlers.register('building-argument-table.ec2.get-password-data',
ec2_add_priv_launch_key)
register_parse_global_args(event_handlers)
| {"golden_diff": "diff --git a/awscli/customizations/ec2addcount.py b/awscli/customizations/ec2addcount.py\n--- a/awscli/customizations/ec2addcount.py\n+++ b/awscli/customizations/ec2addcount.py\n@@ -20,12 +20,20 @@\n logger = logging.getLogger(__name__)\n \n \n+DEFAULT = 1\n HELP = \"\"\"\n <p>Number of instances to launch. If a single number is provided, it\n-is assumed to be the minimum to launch (defaults to 1). If a range is\n+is assumed to be the minimum to launch (defaults to %d). If a range is\n provided in the form <code>min:max</code> then the first number is\n interpreted as the minimum number of instances to launch and the second\n-is interpreted as the maximum number of instances to launch.</p>\"\"\"\n+is interpreted as the maximum number of instances to launch.</p>\"\"\" % DEFAULT\n+\n+\n+def register_count_events(event_handler):\n+ event_handler.register(\n+ 'building-argument-table.ec2.run-instances', ec2_add_count)\n+ event_handler.register(\n+ 'before-parameter-build.ec2.RunInstances', set_default_count)\n \n \n def ec2_add_count(argument_table, **kwargs):\n@@ -34,6 +42,11 @@\n del argument_table['max-count']\n \n \n+def set_default_count(params, **kwargs):\n+ params.setdefault('MaxCount', DEFAULT)\n+ params.setdefault('MinCount', DEFAULT)\n+\n+\n class CountArgument(BaseCLIArgument):\n \n def __init__(self, name):\n@@ -62,11 +75,14 @@\n return HELP\n \n def add_to_parser(self, parser):\n+ # We do NOT set default value here. It will be set later by event hook.\n parser.add_argument(self.cli_name, metavar=self.py_name,\n- help='Number of instances to launch',\n- default='1')\n+ help='Number of instances to launch')\n \n def add_to_params(self, parameters, value):\n+ if value is None:\n+ # NO-OP if value is not explicitly set by user\n+ return\n try:\n if ':' in value:\n minstr, maxstr = value.split(':')\ndiff --git a/awscli/handlers.py b/awscli/handlers.py\n--- a/awscli/handlers.py\n+++ b/awscli/handlers.py\n@@ -22,7 +22,7 @@\n from awscli.customizations.streamingoutputarg import add_streaming_output_arg\n from awscli.customizations.addexamples import add_examples\n from awscli.customizations.removals import register_removals\n-from awscli.customizations.ec2addcount import ec2_add_count\n+from awscli.customizations.ec2addcount import register_count_events\n from awscli.customizations.paginate import register_pagination\n from awscli.customizations.ec2decryptpassword import ec2_add_priv_launch_key\n from awscli.customizations.ec2secgroupsimplify import register_secgroup\n@@ -92,8 +92,7 @@\n register_cli_input_json(event_handlers)\n event_handlers.register('building-argument-table.*',\n add_streaming_output_arg)\n- event_handlers.register('building-argument-table.ec2.run-instances',\n- ec2_add_count)\n+ register_count_events(event_handlers)\n event_handlers.register('building-argument-table.ec2.get-password-data',\n ec2_add_priv_launch_key)\n register_parse_global_args(event_handlers)\n", "issue": "ec2 run-instances ignores count in its json input\nWhen preparing a reservation with `aws ec2 run-instances --cli-input-json JSONBLOB` the `MinCount` and `MaxCount` parameters are ignored and overwritten by the value 1. The equivalent reservation using command line options and esp. 
the `--count` option functions as expected.\n\nHow to reproduce:\n\n``` shell\njsonblob()\n{\n cat <<'EOF'\n{\n \"DryRun\": false,\n \"ImageId\": \"ami-XXXXXXXX\",\n \"MinCount\": 3,\n \"MaxCount\": 3,\n \"SubnetId\": \"subnet-XXXXXXXX\",\n \"UserData\": \"IyEvYmluL3NoCnNodXRkb3duIC1QICsyCg==\",\n \"InstanceInitiatedShutdownBehavior\": \"terminate\",\n \"InstanceType\": \"t2.small\"\n}\nEOF\n}\naws --debug ec2 run-instances --cli-input-json \"$(jsonblob)\"\n```\n\n(The user data is a script terminating the instance after a few minutes.)\n\nThe output contains the line\n\n```\n2015-09-23 16:49:09,762 - MainThread - botocore.endpoint - DEBUG - Making request for <botocore.model.OperationModel object at 0xXXXXXXXXXX> (verify_ssl=True) with params: {'body': {u'UserData': u'SXlFdlltbHVMM05vQ25Ob2RYUmtiM2R1SUMxUUlDc3lDZz09', u'DryRun': 'false', u'InstanceInitiatedShutdownBehavior': u'terminate', u'ImageId': u'ami-XXXXXXXX', u'SubnetId': u'subnet-XXXXXXXX', 'Version': u'2015-04-15', 'MaxCount': 1, 'Action': u'RunInstances', 'MinCount': 1, u'InstanceType': u't2.small'}, 'url': u'https://ec2.us-east-1.amazonaws.com/', 'headers': {'User-Agent': 'aws-cli/1.7.32 Python/2.7.10 Darwin/14.5.0'}, 'query_string': '', 'url_path': '/', 'method': u'POST'}\n```\n\nThis demonstrates that the parameters `MaxCount` and `MinCount` in the JSON input are ignored.\n\n---\n\n``` console\n% pip show awscli\n\n---\nMetadata-Version: 1.1\nName: awscli\nVersion: 1.7.32\nSummary: Universal Command Line Environment for AWS.\nHome-page: http://aws.amazon.com/cli/\nAuthor: Mitch Garnaat\nAuthor-email: [email protected]\nLicense: Apache License 2.0\nLocation: /opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages\nRequires: botocore, bcdoc, colorama, docutils, rsa\n```\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"Builtin CLI extensions.\n\nThis is a collection of built in CLI extensions that can be automatically\nregistered with the event system.\n\n\"\"\"\nfrom awscli.argprocess import ParamShorthand\nfrom awscli.argprocess import uri_param\nfrom awscli.errorhandler import ErrorHandler\nfrom awscli.customizations.streamingoutputarg import add_streaming_output_arg\nfrom awscli.customizations.addexamples import add_examples\nfrom awscli.customizations.removals import register_removals\nfrom awscli.customizations.ec2addcount import ec2_add_count\nfrom awscli.customizations.paginate import register_pagination\nfrom awscli.customizations.ec2decryptpassword import ec2_add_priv_launch_key\nfrom awscli.customizations.ec2secgroupsimplify import register_secgroup\nfrom awscli.customizations.preview import register_preview_commands\nfrom awscli.customizations.ec2bundleinstance import register_bundleinstance\nfrom awscli.customizations.s3.s3 import s3_plugin_initialize\nfrom awscli.customizations.ec2runinstances import register_runinstances\nfrom awscli.customizations.rds import register_rds_modify_split\nfrom awscli.customizations.putmetricdata import register_put_metric_data\nfrom awscli.customizations.sessendemail import register_ses_send_email\nfrom awscli.customizations.iamvirtmfa import IAMVMFAWrapper\nfrom awscli.customizations.argrename import register_arg_renames\nfrom awscli.customizations.configure import register_configure_cmd\nfrom awscli.customizations.cloudtrail import initialize as cloudtrail_init\nfrom awscli.customizations.toplevelbool import register_bool_params\nfrom awscli.customizations.ec2protocolarg import register_protocol_args\nfrom awscli.customizations import datapipeline\nfrom awscli.customizations.globalargs import register_parse_global_args\nfrom awscli.customizations.cloudsearch import initialize as cloudsearch_init\nfrom awscli.customizations.emr.emr import emr_initialize\nfrom awscli.customizations.cloudsearchdomain import register_cloudsearchdomain\nfrom awscli.customizations.s3endpoint import register_s3_endpoint\nfrom awscli.customizations.s3errormsg import register_s3_error_msg\nfrom awscli.customizations.cliinputjson import register_cli_input_json\nfrom awscli.customizations.generatecliskeleton import \\\n register_generate_cli_skeleton\nfrom awscli.customizations.assumerole import register_assume_role_provider\nfrom awscli.customizations.waiters import register_add_waiters\nfrom awscli.customizations.codedeploy.codedeploy import initialize as \\\n codedeploy_init\nfrom awscli.customizations.configservice.subscribe import register_subscribe\nfrom awscli.customizations.configservice.getstatus import register_get_status\nfrom awscli.customizations.configservice.rename_cmd import \\\n register_rename_config\nfrom awscli.customizations.configservice.putconfigurationrecorder import \\\n register_modify_put_configuration_recorder\nfrom awscli.customizations.scalarparse import register_scalar_parser\nfrom awscli.customizations.opsworks import initialize as opsworks_init\nfrom awscli.customizations.awslambda import register_lambda_create_function\nfrom awscli.customizations.kms import register_fix_kms_create_grant_docs\nfrom awscli.customizations.route53 import register_create_hosted_zone_doc_fix\nfrom awscli.customizations.codecommit import initialize as codecommit_init\nfrom awscli.customizations.iot_data import register_custom_endpoint_note\n\n\ndef 
awscli_initialize(event_handlers):\n event_handlers.register('load-cli-arg', uri_param)\n param_shorthand = ParamShorthand()\n event_handlers.register('process-cli-arg', param_shorthand)\n # The s3 error mesage needs to registered before the\n # generic error handler.\n register_s3_error_msg(event_handlers)\n error_handler = ErrorHandler()\n event_handlers.register('after-call', error_handler,\n unique_id='awscli-error-handler')\n# # The following will get fired for every option we are\n# # documenting. It will attempt to add an example_fn on to\n# # the parameter object if the parameter supports shorthand\n# # syntax. The documentation event handlers will then use\n# # the examplefn to generate the sample shorthand syntax\n# # in the docs. Registering here should ensure that this\n# # handler gets called first but it still feels a bit brittle.\n# event_handlers.register('doc-option-example.*.*.*',\n# param_shorthand.add_example_fn)\n event_handlers.register('doc-examples.*.*',\n add_examples)\n register_cli_input_json(event_handlers)\n event_handlers.register('building-argument-table.*',\n add_streaming_output_arg)\n event_handlers.register('building-argument-table.ec2.run-instances',\n ec2_add_count)\n event_handlers.register('building-argument-table.ec2.get-password-data',\n ec2_add_priv_launch_key)\n register_parse_global_args(event_handlers)\n register_pagination(event_handlers)\n register_secgroup(event_handlers)\n register_bundleinstance(event_handlers)\n s3_plugin_initialize(event_handlers)\n register_runinstances(event_handlers)\n register_removals(event_handlers)\n register_preview_commands(event_handlers)\n register_rds_modify_split(event_handlers)\n register_put_metric_data(event_handlers)\n register_ses_send_email(event_handlers)\n IAMVMFAWrapper(event_handlers)\n register_arg_renames(event_handlers)\n register_configure_cmd(event_handlers)\n cloudtrail_init(event_handlers)\n register_bool_params(event_handlers)\n register_protocol_args(event_handlers)\n datapipeline.register_customizations(event_handlers)\n cloudsearch_init(event_handlers)\n emr_initialize(event_handlers)\n register_cloudsearchdomain(event_handlers)\n register_s3_endpoint(event_handlers)\n register_generate_cli_skeleton(event_handlers)\n register_assume_role_provider(event_handlers)\n register_add_waiters(event_handlers)\n codedeploy_init(event_handlers)\n register_subscribe(event_handlers)\n register_get_status(event_handlers)\n register_rename_config(event_handlers)\n register_scalar_parser(event_handlers)\n opsworks_init(event_handlers)\n register_lambda_create_function(event_handlers)\n register_fix_kms_create_grant_docs(event_handlers)\n register_create_hosted_zone_doc_fix(event_handlers)\n register_modify_put_configuration_recorder(event_handlers)\n codecommit_init(event_handlers)\n register_custom_endpoint_note(event_handlers)\n", "path": "awscli/handlers.py"}, {"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nimport logging\n\nfrom botocore import model\n\nfrom awscli.arguments import BaseCLIArgument\n\n\nlogger = logging.getLogger(__name__)\n\n\nHELP = \"\"\"\n<p>Number of instances to launch. If a single number is provided, it\nis assumed to be the minimum to launch (defaults to 1). If a range is\nprovided in the form <code>min:max</code> then the first number is\ninterpreted as the minimum number of instances to launch and the second\nis interpreted as the maximum number of instances to launch.</p>\"\"\"\n\n\ndef ec2_add_count(argument_table, **kwargs):\n argument_table['count'] = CountArgument('count')\n del argument_table['min-count']\n del argument_table['max-count']\n\n\nclass CountArgument(BaseCLIArgument):\n\n def __init__(self, name):\n self.argument_model = model.Shape('CountArgument', {'type': 'string'})\n self._name = name\n self._required = False\n\n @property\n def cli_name(self):\n return '--' + self._name\n\n @property\n def cli_type_name(self):\n return 'string'\n\n @property\n def required(self):\n return self._required\n\n @required.setter\n def required(self, value):\n self._required = value\n\n @property\n def documentation(self):\n return HELP\n\n def add_to_parser(self, parser):\n parser.add_argument(self.cli_name, metavar=self.py_name,\n help='Number of instances to launch',\n default='1')\n\n def add_to_params(self, parameters, value):\n try:\n if ':' in value:\n minstr, maxstr = value.split(':')\n else:\n minstr, maxstr = (value, value)\n parameters['MinCount'] = int(minstr)\n parameters['MaxCount'] = int(maxstr)\n except:\n msg = ('count parameter should be of '\n 'form min[:max] (e.g. 1 or 1:10)')\n raise ValueError(msg)\n", "path": "awscli/customizations/ec2addcount.py"}], "after_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"Builtin CLI extensions.\n\nThis is a collection of built in CLI extensions that can be automatically\nregistered with the event system.\n\n\"\"\"\nfrom awscli.argprocess import ParamShorthand\nfrom awscli.argprocess import uri_param\nfrom awscli.errorhandler import ErrorHandler\nfrom awscli.customizations.streamingoutputarg import add_streaming_output_arg\nfrom awscli.customizations.addexamples import add_examples\nfrom awscli.customizations.removals import register_removals\nfrom awscli.customizations.ec2addcount import register_count_events\nfrom awscli.customizations.paginate import register_pagination\nfrom awscli.customizations.ec2decryptpassword import ec2_add_priv_launch_key\nfrom awscli.customizations.ec2secgroupsimplify import register_secgroup\nfrom awscli.customizations.preview import register_preview_commands\nfrom awscli.customizations.ec2bundleinstance import register_bundleinstance\nfrom awscli.customizations.s3.s3 import s3_plugin_initialize\nfrom awscli.customizations.ec2runinstances import register_runinstances\nfrom awscli.customizations.rds import register_rds_modify_split\nfrom awscli.customizations.putmetricdata import register_put_metric_data\nfrom awscli.customizations.sessendemail import register_ses_send_email\nfrom awscli.customizations.iamvirtmfa import IAMVMFAWrapper\nfrom awscli.customizations.argrename import register_arg_renames\nfrom awscli.customizations.configure import register_configure_cmd\nfrom awscli.customizations.cloudtrail import initialize as cloudtrail_init\nfrom awscli.customizations.toplevelbool import register_bool_params\nfrom awscli.customizations.ec2protocolarg import register_protocol_args\nfrom awscli.customizations import datapipeline\nfrom awscli.customizations.globalargs import register_parse_global_args\nfrom awscli.customizations.cloudsearch import initialize as cloudsearch_init\nfrom awscli.customizations.emr.emr import emr_initialize\nfrom awscli.customizations.cloudsearchdomain import register_cloudsearchdomain\nfrom awscli.customizations.s3endpoint import register_s3_endpoint\nfrom awscli.customizations.s3errormsg import register_s3_error_msg\nfrom awscli.customizations.cliinputjson import register_cli_input_json\nfrom awscli.customizations.generatecliskeleton import \\\n register_generate_cli_skeleton\nfrom awscli.customizations.assumerole import register_assume_role_provider\nfrom awscli.customizations.waiters import register_add_waiters\nfrom awscli.customizations.codedeploy.codedeploy import initialize as \\\n codedeploy_init\nfrom awscli.customizations.configservice.subscribe import register_subscribe\nfrom awscli.customizations.configservice.getstatus import register_get_status\nfrom awscli.customizations.configservice.rename_cmd import \\\n register_rename_config\nfrom awscli.customizations.configservice.putconfigurationrecorder import \\\n register_modify_put_configuration_recorder\nfrom awscli.customizations.scalarparse import register_scalar_parser\nfrom awscli.customizations.opsworks import initialize as opsworks_init\nfrom awscli.customizations.awslambda import register_lambda_create_function\nfrom awscli.customizations.kms import register_fix_kms_create_grant_docs\nfrom awscli.customizations.route53 import register_create_hosted_zone_doc_fix\nfrom awscli.customizations.codecommit import initialize as codecommit_init\nfrom awscli.customizations.iot_data import register_custom_endpoint_note\n\n\ndef 
awscli_initialize(event_handlers):\n event_handlers.register('load-cli-arg', uri_param)\n param_shorthand = ParamShorthand()\n event_handlers.register('process-cli-arg', param_shorthand)\n # The s3 error mesage needs to registered before the\n # generic error handler.\n register_s3_error_msg(event_handlers)\n error_handler = ErrorHandler()\n event_handlers.register('after-call', error_handler,\n unique_id='awscli-error-handler')\n# # The following will get fired for every option we are\n# # documenting. It will attempt to add an example_fn on to\n# # the parameter object if the parameter supports shorthand\n# # syntax. The documentation event handlers will then use\n# # the examplefn to generate the sample shorthand syntax\n# # in the docs. Registering here should ensure that this\n# # handler gets called first but it still feels a bit brittle.\n# event_handlers.register('doc-option-example.*.*.*',\n# param_shorthand.add_example_fn)\n event_handlers.register('doc-examples.*.*',\n add_examples)\n register_cli_input_json(event_handlers)\n event_handlers.register('building-argument-table.*',\n add_streaming_output_arg)\n register_count_events(event_handlers)\n event_handlers.register('building-argument-table.ec2.get-password-data',\n ec2_add_priv_launch_key)\n register_parse_global_args(event_handlers)\n register_pagination(event_handlers)\n register_secgroup(event_handlers)\n register_bundleinstance(event_handlers)\n s3_plugin_initialize(event_handlers)\n register_runinstances(event_handlers)\n register_removals(event_handlers)\n register_preview_commands(event_handlers)\n register_rds_modify_split(event_handlers)\n register_put_metric_data(event_handlers)\n register_ses_send_email(event_handlers)\n IAMVMFAWrapper(event_handlers)\n register_arg_renames(event_handlers)\n register_configure_cmd(event_handlers)\n cloudtrail_init(event_handlers)\n register_bool_params(event_handlers)\n register_protocol_args(event_handlers)\n datapipeline.register_customizations(event_handlers)\n cloudsearch_init(event_handlers)\n emr_initialize(event_handlers)\n register_cloudsearchdomain(event_handlers)\n register_s3_endpoint(event_handlers)\n register_generate_cli_skeleton(event_handlers)\n register_assume_role_provider(event_handlers)\n register_add_waiters(event_handlers)\n codedeploy_init(event_handlers)\n register_subscribe(event_handlers)\n register_get_status(event_handlers)\n register_rename_config(event_handlers)\n register_scalar_parser(event_handlers)\n opsworks_init(event_handlers)\n register_lambda_create_function(event_handlers)\n register_fix_kms_create_grant_docs(event_handlers)\n register_create_hosted_zone_doc_fix(event_handlers)\n register_modify_put_configuration_recorder(event_handlers)\n codecommit_init(event_handlers)\n register_custom_endpoint_note(event_handlers)\n", "path": "awscli/handlers.py"}, {"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nimport logging\n\nfrom botocore import model\n\nfrom awscli.arguments import BaseCLIArgument\n\n\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT = 1\nHELP = \"\"\"\n<p>Number of instances to launch. If a single number is provided, it\nis assumed to be the minimum to launch (defaults to %d). If a range is\nprovided in the form <code>min:max</code> then the first number is\ninterpreted as the minimum number of instances to launch and the second\nis interpreted as the maximum number of instances to launch.</p>\"\"\" % DEFAULT\n\n\ndef register_count_events(event_handler):\n event_handler.register(\n 'building-argument-table.ec2.run-instances', ec2_add_count)\n event_handler.register(\n 'before-parameter-build.ec2.RunInstances', set_default_count)\n\n\ndef ec2_add_count(argument_table, **kwargs):\n argument_table['count'] = CountArgument('count')\n del argument_table['min-count']\n del argument_table['max-count']\n\n\ndef set_default_count(params, **kwargs):\n params.setdefault('MaxCount', DEFAULT)\n params.setdefault('MinCount', DEFAULT)\n\n\nclass CountArgument(BaseCLIArgument):\n\n def __init__(self, name):\n self.argument_model = model.Shape('CountArgument', {'type': 'string'})\n self._name = name\n self._required = False\n\n @property\n def cli_name(self):\n return '--' + self._name\n\n @property\n def cli_type_name(self):\n return 'string'\n\n @property\n def required(self):\n return self._required\n\n @required.setter\n def required(self, value):\n self._required = value\n\n @property\n def documentation(self):\n return HELP\n\n def add_to_parser(self, parser):\n # We do NOT set default value here. It will be set later by event hook.\n parser.add_argument(self.cli_name, metavar=self.py_name,\n help='Number of instances to launch')\n\n def add_to_params(self, parameters, value):\n if value is None:\n # NO-OP if value is not explicitly set by user\n return\n try:\n if ':' in value:\n minstr, maxstr = value.split(':')\n else:\n minstr, maxstr = (value, value)\n parameters['MinCount'] = int(minstr)\n parameters['MaxCount'] = int(maxstr)\n except:\n msg = ('count parameter should be of '\n 'form min[:max] (e.g. 1 or 1:10)')\n raise ValueError(msg)\n", "path": "awscli/customizations/ec2addcount.py"}]} | 3,361 | 744 |
gh_patches_debug_32917 | rasdani/github-patches | git_diff | plotly__plotly.py-1981 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] zmin and zmax not working in imshow for 2d data
See for example
```
import plotly.express as px
import numpy as np
img = np.random.random((10, 10))
px.imshow(img, zmin=0.3, zmax=0.7)
```
I think this is because the Heatmap trace instead uses the `range_color` argument, which was introduced quite late in the process. It's easy to fix; we just have to decide first whether the API is OK as it is.
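In the meantime, a minimal sketch of the workaround implied above (added here for illustration; it relies on the `range_color` parameter of `px.imshow` shown in the `_imshow.py` listing below, which is forwarded to the shared coloraxis as `cmin`/`cmax`):
```
import plotly.express as px
import numpy as np

img = np.random.random((10, 10))
# range_color reaches the coloraxis, so this behaves like zmin=0.3, zmax=0.7
px.imshow(img, range_color=[0.3, 0.7])
```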
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packages/python/plotly/plotly/express/_imshow.py`
Content:
```
1 import plotly.graph_objs as go
2 from _plotly_utils.basevalidators import ColorscaleValidator
3 from ._core import apply_default_cascade
4 import numpy as np # is it fine to depend on np here?
5
6 _float_types = []
7
8 # Adapted from skimage.util.dtype
9 _integer_types = (
10 np.byte,
11 np.ubyte, # 8 bits
12 np.short,
13 np.ushort, # 16 bits
14 np.intc,
15 np.uintc, # 16 or 32 or 64 bits
16 np.int_,
17 np.uint, # 32 or 64 bits
18 np.longlong,
19 np.ulonglong,
20 ) # 64 bits
21 _integer_ranges = {t: (np.iinfo(t).min, np.iinfo(t).max) for t in _integer_types}
22
23
24 def _vectorize_zvalue(z):
25 if z is None:
26 return z
27 elif np.isscalar(z):
28 return [z] * 3 + [1]
29 elif len(z) == 1:
30 return list(z) * 3 + [1]
31 elif len(z) == 3:
32 return list(z) + [1]
33 elif len(z) == 4:
34 return z
35 else:
36 raise ValueError(
37 "zmax can be a scalar, or an iterable of length 1, 3 or 4. "
38 "A value of %s was passed for zmax." % str(z)
39 )
40
41
42 def _infer_zmax_from_type(img):
43 dt = img.dtype.type
44 rtol = 1.05
45 if dt in _integer_types:
46 return _integer_ranges[dt][1]
47 else:
48 im_max = img[np.isfinite(img)].max()
49 if im_max <= 1 * rtol:
50 return 1
51 elif im_max <= 255 * rtol:
52 return 255
53 elif im_max <= 65535 * rtol:
54 return 65535
55 else:
56 return 2 ** 32
57
58
59 def imshow(
60 img,
61 zmin=None,
62 zmax=None,
63 origin=None,
64 color_continuous_scale=None,
65 color_continuous_midpoint=None,
66 range_color=None,
67 title=None,
68 template=None,
69 width=None,
70 height=None,
71 ):
72 """
73 Display an image, i.e. data on a 2D regular raster.
74
75 Parameters
76 ----------
77
78 img: array-like image
79 The image data. Supported array shapes are
80
81 - (M, N): an image with scalar data. The data is visualized
82 using a colormap.
83 - (M, N, 3): an image with RGB values.
84 - (M, N, 4): an image with RGBA values, i.e. including transparency.
85
86 zmin, zmax : scalar or iterable, optional
87 zmin and zmax define the scalar range that the colormap covers. By default,
88 zmin and zmax correspond to the min and max values of the datatype for integer
89 datatypes (ie [0-255] for uint8 images, [0, 65535] for uint16 images, etc.). For
90 a multichannel image of floats, the max of the image is computed and zmax is the
91 smallest power of 256 (1, 255, 65535) greater than this max value,
92 with a 5% tolerance. For a single-channel image, the max of the image is used.
93
94 origin : str, 'upper' or 'lower' (default 'upper')
95 position of the [0, 0] pixel of the image array, in the upper left or lower left
96 corner. The convention 'upper' is typically used for matrices and images.
97
98 color_continuous_scale : str or list of str
99 colormap used to map scalar data to colors (for a 2D image). This parameter is
100 not used for RGB or RGBA images. If a string is provided, it should be the name
101 of a known color scale, and if a list is provided, it should be a list of CSS-
102 compatible colors.
103
104 color_continuous_midpoint : number
105 If set, computes the bounds of the continuous color scale to have the desired
106 midpoint.
107
108 range_color : list of two numbers
109 If provided, overrides auto-scaling on the continuous color scale, including
110 overriding `color_continuous_midpoint`.
111
112 title : str
113 The figure title.
114
115 template : str or dict or plotly.graph_objects.layout.Template instance
116 The figure template name or definition.
117
118 width : number
119 The figure width in pixels.
120
121 height: number
122 The figure height in pixels, defaults to 600.
123
124 Returns
125 -------
126 fig : graph_objects.Figure containing the displayed image
127
128 See also
129 --------
130
131 plotly.graph_objects.Image : image trace
132 plotly.graph_objects.Heatmap : heatmap trace
133
134 Notes
135 -----
136
137 In order to update and customize the returned figure, use
138 `go.Figure.update_traces` or `go.Figure.update_layout`.
139 """
140 args = locals()
141 apply_default_cascade(args)
142
143 img = np.asanyarray(img)
144 # Cast bools to uint8 (also one byte)
145 if img.dtype == np.bool:
146 img = 255 * img.astype(np.uint8)
147
148 # For 2d data, use Heatmap trace
149 if img.ndim == 2:
150 trace = go.Heatmap(z=img, zmin=zmin, zmax=zmax, coloraxis="coloraxis1")
151 autorange = True if origin == "lower" else "reversed"
152 layout = dict(
153 xaxis=dict(scaleanchor="y", constrain="domain"),
154 yaxis=dict(autorange=autorange, constrain="domain"),
155 )
156 colorscale_validator = ColorscaleValidator("colorscale", "imshow")
157 range_color = range_color or [None, None]
158 layout["coloraxis1"] = dict(
159 colorscale=colorscale_validator.validate_coerce(
160 args["color_continuous_scale"]
161 ),
162 cmid=color_continuous_midpoint,
163 cmin=range_color[0],
164 cmax=range_color[1],
165 )
166
167 # For 2D+RGB data, use Image trace
168 elif img.ndim == 3 and img.shape[-1] in [3, 4]:
169 if zmax is None and img.dtype is not np.uint8:
170 zmax = _infer_zmax_from_type(img)
171 zmin, zmax = _vectorize_zvalue(zmin), _vectorize_zvalue(zmax)
172 trace = go.Image(z=img, zmin=zmin, zmax=zmax)
173 layout = {}
174 if origin == "lower":
175 layout["yaxis"] = dict(autorange=True)
176 else:
177 raise ValueError(
178 "px.imshow only accepts 2D single-channel, RGB or RGBA images. "
179 "An image of shape %s was provided" % str(img.shape)
180 )
181
182 layout_patch = dict()
183 for v in ["title", "height", "width"]:
184 if args[v]:
185 layout_patch[v] = args[v]
186 if "title" not in layout_patch and args["template"].layout.margin.t is None:
187 layout_patch["margin"] = {"t": 60}
188 fig = go.Figure(data=trace, layout=layout)
189 fig.update_layout(layout_patch)
190 fig.update_layout(template=args["template"], overwrite=True)
191 return fig
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packages/python/plotly/plotly/express/_imshow.py b/packages/python/plotly/plotly/express/_imshow.py
--- a/packages/python/plotly/plotly/express/_imshow.py
+++ b/packages/python/plotly/plotly/express/_imshow.py
@@ -1,7 +1,7 @@
import plotly.graph_objs as go
from _plotly_utils.basevalidators import ColorscaleValidator
from ._core import apply_default_cascade
-import numpy as np # is it fine to depend on np here?
+import numpy as np
_float_types = []
@@ -107,7 +107,8 @@
range_color : list of two numbers
If provided, overrides auto-scaling on the continuous color scale, including
- overriding `color_continuous_midpoint`.
+ overriding `color_continuous_midpoint`. Also overrides zmin and zmax. Used only
+ for single-channel images.
title : str
The figure title.
@@ -147,14 +148,18 @@
# For 2d data, use Heatmap trace
if img.ndim == 2:
- trace = go.Heatmap(z=img, zmin=zmin, zmax=zmax, coloraxis="coloraxis1")
+ trace = go.Heatmap(z=img, coloraxis="coloraxis1")
autorange = True if origin == "lower" else "reversed"
layout = dict(
xaxis=dict(scaleanchor="y", constrain="domain"),
yaxis=dict(autorange=autorange, constrain="domain"),
)
colorscale_validator = ColorscaleValidator("colorscale", "imshow")
- range_color = range_color or [None, None]
+ if zmin is not None and zmax is None:
+ zmax = img.max()
+ if zmax is not None and zmin is None:
+ zmin = img.min()
+ range_color = range_color or [zmin, zmax]
layout["coloraxis1"] = dict(
colorscale=colorscale_validator.validate_coerce(
args["color_continuous_scale"]
| {"golden_diff": "diff --git a/packages/python/plotly/plotly/express/_imshow.py b/packages/python/plotly/plotly/express/_imshow.py\n--- a/packages/python/plotly/plotly/express/_imshow.py\n+++ b/packages/python/plotly/plotly/express/_imshow.py\n@@ -1,7 +1,7 @@\n import plotly.graph_objs as go\n from _plotly_utils.basevalidators import ColorscaleValidator\n from ._core import apply_default_cascade\n-import numpy as np # is it fine to depend on np here?\n+import numpy as np\n \n _float_types = []\n \n@@ -107,7 +107,8 @@\n \n range_color : list of two numbers\n If provided, overrides auto-scaling on the continuous color scale, including\n- overriding `color_continuous_midpoint`.\n+ overriding `color_continuous_midpoint`. Also overrides zmin and zmax. Used only\n+ for single-channel images.\n \n title : str\n The figure title.\n@@ -147,14 +148,18 @@\n \n # For 2d data, use Heatmap trace\n if img.ndim == 2:\n- trace = go.Heatmap(z=img, zmin=zmin, zmax=zmax, coloraxis=\"coloraxis1\")\n+ trace = go.Heatmap(z=img, coloraxis=\"coloraxis1\")\n autorange = True if origin == \"lower\" else \"reversed\"\n layout = dict(\n xaxis=dict(scaleanchor=\"y\", constrain=\"domain\"),\n yaxis=dict(autorange=autorange, constrain=\"domain\"),\n )\n colorscale_validator = ColorscaleValidator(\"colorscale\", \"imshow\")\n- range_color = range_color or [None, None]\n+ if zmin is not None and zmax is None:\n+ zmax = img.max()\n+ if zmax is not None and zmin is None:\n+ zmin = img.min()\n+ range_color = range_color or [zmin, zmax]\n layout[\"coloraxis1\"] = dict(\n colorscale=colorscale_validator.validate_coerce(\n args[\"color_continuous_scale\"]\n", "issue": "[BUG] zmin and zmax not working in imshow for 2d data\nSee for example\r\n```\r\nimport plotly.express as px\r\nimport numpy as np\r\nimg = np.random.random((10, 10))\r\npx.imshow(img, zmin=0.3, zmax=0.7)\r\n```\r\nI think this is because the Heatmap trace uses instead the `range_color` argument which was quite introduced quite late in the process. It's easy to fix, we just have to decide first if the API is ok as it is. \n", "before_files": [{"content": "import plotly.graph_objs as go\nfrom _plotly_utils.basevalidators import ColorscaleValidator\nfrom ._core import apply_default_cascade\nimport numpy as np # is it fine to depend on np here?\n\n_float_types = []\n\n# Adapted from skimage.util.dtype\n_integer_types = (\n np.byte,\n np.ubyte, # 8 bits\n np.short,\n np.ushort, # 16 bits\n np.intc,\n np.uintc, # 16 or 32 or 64 bits\n np.int_,\n np.uint, # 32 or 64 bits\n np.longlong,\n np.ulonglong,\n) # 64 bits\n_integer_ranges = {t: (np.iinfo(t).min, np.iinfo(t).max) for t in _integer_types}\n\n\ndef _vectorize_zvalue(z):\n if z is None:\n return z\n elif np.isscalar(z):\n return [z] * 3 + [1]\n elif len(z) == 1:\n return list(z) * 3 + [1]\n elif len(z) == 3:\n return list(z) + [1]\n elif len(z) == 4:\n return z\n else:\n raise ValueError(\n \"zmax can be a scalar, or an iterable of length 1, 3 or 4. 
\"\n \"A value of %s was passed for zmax.\" % str(z)\n )\n\n\ndef _infer_zmax_from_type(img):\n dt = img.dtype.type\n rtol = 1.05\n if dt in _integer_types:\n return _integer_ranges[dt][1]\n else:\n im_max = img[np.isfinite(img)].max()\n if im_max <= 1 * rtol:\n return 1\n elif im_max <= 255 * rtol:\n return 255\n elif im_max <= 65535 * rtol:\n return 65535\n else:\n return 2 ** 32\n\n\ndef imshow(\n img,\n zmin=None,\n zmax=None,\n origin=None,\n color_continuous_scale=None,\n color_continuous_midpoint=None,\n range_color=None,\n title=None,\n template=None,\n width=None,\n height=None,\n):\n \"\"\"\n Display an image, i.e. data on a 2D regular raster.\n\n Parameters\n ----------\n\n img: array-like image\n The image data. Supported array shapes are\n\n - (M, N): an image with scalar data. The data is visualized\n using a colormap.\n - (M, N, 3): an image with RGB values.\n - (M, N, 4): an image with RGBA values, i.e. including transparency.\n\n zmin, zmax : scalar or iterable, optional\n zmin and zmax define the scalar range that the colormap covers. By default,\n zmin and zmax correspond to the min and max values of the datatype for integer\n datatypes (ie [0-255] for uint8 images, [0, 65535] for uint16 images, etc.). For\n a multichannel image of floats, the max of the image is computed and zmax is the\n smallest power of 256 (1, 255, 65535) greater than this max value,\n with a 5% tolerance. For a single-channel image, the max of the image is used.\n\n origin : str, 'upper' or 'lower' (default 'upper')\n position of the [0, 0] pixel of the image array, in the upper left or lower left\n corner. The convention 'upper' is typically used for matrices and images.\n\n color_continuous_scale : str or list of str\n colormap used to map scalar data to colors (for a 2D image). This parameter is\n not used for RGB or RGBA images. 
If a string is provided, it should be the name\n of a known color scale, and if a list is provided, it should be a list of CSS-\n compatible colors.\n\n color_continuous_midpoint : number\n If set, computes the bounds of the continuous color scale to have the desired\n midpoint.\n\n range_color : list of two numbers\n If provided, overrides auto-scaling on the continuous color scale, including\n overriding `color_continuous_midpoint`.\n\n title : str\n The figure title.\n\n template : str or dict or plotly.graph_objects.layout.Template instance\n The figure template name or definition.\n\n width : number\n The figure width in pixels.\n\n height: number\n The figure height in pixels, defaults to 600.\n\n Returns\n -------\n fig : graph_objects.Figure containing the displayed image\n\n See also\n --------\n\n plotly.graph_objects.Image : image trace\n plotly.graph_objects.Heatmap : heatmap trace\n\n Notes\n -----\n\n In order to update and customize the returned figure, use\n `go.Figure.update_traces` or `go.Figure.update_layout`.\n \"\"\"\n args = locals()\n apply_default_cascade(args)\n\n img = np.asanyarray(img)\n # Cast bools to uint8 (also one byte)\n if img.dtype == np.bool:\n img = 255 * img.astype(np.uint8)\n\n # For 2d data, use Heatmap trace\n if img.ndim == 2:\n trace = go.Heatmap(z=img, zmin=zmin, zmax=zmax, coloraxis=\"coloraxis1\")\n autorange = True if origin == \"lower\" else \"reversed\"\n layout = dict(\n xaxis=dict(scaleanchor=\"y\", constrain=\"domain\"),\n yaxis=dict(autorange=autorange, constrain=\"domain\"),\n )\n colorscale_validator = ColorscaleValidator(\"colorscale\", \"imshow\")\n range_color = range_color or [None, None]\n layout[\"coloraxis1\"] = dict(\n colorscale=colorscale_validator.validate_coerce(\n args[\"color_continuous_scale\"]\n ),\n cmid=color_continuous_midpoint,\n cmin=range_color[0],\n cmax=range_color[1],\n )\n\n # For 2D+RGB data, use Image trace\n elif img.ndim == 3 and img.shape[-1] in [3, 4]:\n if zmax is None and img.dtype is not np.uint8:\n zmax = _infer_zmax_from_type(img)\n zmin, zmax = _vectorize_zvalue(zmin), _vectorize_zvalue(zmax)\n trace = go.Image(z=img, zmin=zmin, zmax=zmax)\n layout = {}\n if origin == \"lower\":\n layout[\"yaxis\"] = dict(autorange=True)\n else:\n raise ValueError(\n \"px.imshow only accepts 2D single-channel, RGB or RGBA images. 
\"\n \"An image of shape %s was provided\" % str(img.shape)\n )\n\n layout_patch = dict()\n for v in [\"title\", \"height\", \"width\"]:\n if args[v]:\n layout_patch[v] = args[v]\n if \"title\" not in layout_patch and args[\"template\"].layout.margin.t is None:\n layout_patch[\"margin\"] = {\"t\": 60}\n fig = go.Figure(data=trace, layout=layout)\n fig.update_layout(layout_patch)\n fig.update_layout(template=args[\"template\"], overwrite=True)\n return fig\n", "path": "packages/python/plotly/plotly/express/_imshow.py"}], "after_files": [{"content": "import plotly.graph_objs as go\nfrom _plotly_utils.basevalidators import ColorscaleValidator\nfrom ._core import apply_default_cascade\nimport numpy as np\n\n_float_types = []\n\n# Adapted from skimage.util.dtype\n_integer_types = (\n np.byte,\n np.ubyte, # 8 bits\n np.short,\n np.ushort, # 16 bits\n np.intc,\n np.uintc, # 16 or 32 or 64 bits\n np.int_,\n np.uint, # 32 or 64 bits\n np.longlong,\n np.ulonglong,\n) # 64 bits\n_integer_ranges = {t: (np.iinfo(t).min, np.iinfo(t).max) for t in _integer_types}\n\n\ndef _vectorize_zvalue(z):\n if z is None:\n return z\n elif np.isscalar(z):\n return [z] * 3 + [1]\n elif len(z) == 1:\n return list(z) * 3 + [1]\n elif len(z) == 3:\n return list(z) + [1]\n elif len(z) == 4:\n return z\n else:\n raise ValueError(\n \"zmax can be a scalar, or an iterable of length 1, 3 or 4. \"\n \"A value of %s was passed for zmax.\" % str(z)\n )\n\n\ndef _infer_zmax_from_type(img):\n dt = img.dtype.type\n rtol = 1.05\n if dt in _integer_types:\n return _integer_ranges[dt][1]\n else:\n im_max = img[np.isfinite(img)].max()\n if im_max <= 1 * rtol:\n return 1\n elif im_max <= 255 * rtol:\n return 255\n elif im_max <= 65535 * rtol:\n return 65535\n else:\n return 2 ** 32\n\n\ndef imshow(\n img,\n zmin=None,\n zmax=None,\n origin=None,\n color_continuous_scale=None,\n color_continuous_midpoint=None,\n range_color=None,\n title=None,\n template=None,\n width=None,\n height=None,\n):\n \"\"\"\n Display an image, i.e. data on a 2D regular raster.\n\n Parameters\n ----------\n\n img: array-like image\n The image data. Supported array shapes are\n\n - (M, N): an image with scalar data. The data is visualized\n using a colormap.\n - (M, N, 3): an image with RGB values.\n - (M, N, 4): an image with RGBA values, i.e. including transparency.\n\n zmin, zmax : scalar or iterable, optional\n zmin and zmax define the scalar range that the colormap covers. By default,\n zmin and zmax correspond to the min and max values of the datatype for integer\n datatypes (ie [0-255] for uint8 images, [0, 65535] for uint16 images, etc.). For\n a multichannel image of floats, the max of the image is computed and zmax is the\n smallest power of 256 (1, 255, 65535) greater than this max value,\n with a 5% tolerance. For a single-channel image, the max of the image is used.\n\n origin : str, 'upper' or 'lower' (default 'upper')\n position of the [0, 0] pixel of the image array, in the upper left or lower left\n corner. The convention 'upper' is typically used for matrices and images.\n\n color_continuous_scale : str or list of str\n colormap used to map scalar data to colors (for a 2D image). This parameter is\n not used for RGB or RGBA images. 
If a string is provided, it should be the name\n of a known color scale, and if a list is provided, it should be a list of CSS-\n compatible colors.\n\n color_continuous_midpoint : number\n If set, computes the bounds of the continuous color scale to have the desired\n midpoint.\n\n range_color : list of two numbers\n If provided, overrides auto-scaling on the continuous color scale, including\n overriding `color_continuous_midpoint`. Also overrides zmin and zmax. Used only\n for single-channel images.\n\n title : str\n The figure title.\n\n template : str or dict or plotly.graph_objects.layout.Template instance\n The figure template name or definition.\n\n width : number\n The figure width in pixels.\n\n height: number\n The figure height in pixels, defaults to 600.\n\n Returns\n -------\n fig : graph_objects.Figure containing the displayed image\n\n See also\n --------\n\n plotly.graph_objects.Image : image trace\n plotly.graph_objects.Heatmap : heatmap trace\n\n Notes\n -----\n\n In order to update and customize the returned figure, use\n `go.Figure.update_traces` or `go.Figure.update_layout`.\n \"\"\"\n args = locals()\n apply_default_cascade(args)\n\n img = np.asanyarray(img)\n # Cast bools to uint8 (also one byte)\n if img.dtype == np.bool:\n img = 255 * img.astype(np.uint8)\n\n # For 2d data, use Heatmap trace\n if img.ndim == 2:\n trace = go.Heatmap(z=img, coloraxis=\"coloraxis1\")\n autorange = True if origin == \"lower\" else \"reversed\"\n layout = dict(\n xaxis=dict(scaleanchor=\"y\", constrain=\"domain\"),\n yaxis=dict(autorange=autorange, constrain=\"domain\"),\n )\n colorscale_validator = ColorscaleValidator(\"colorscale\", \"imshow\")\n if zmin is not None and zmax is None:\n zmax = img.max()\n if zmax is not None and zmin is None:\n zmin = img.min()\n range_color = range_color or [zmin, zmax]\n layout[\"coloraxis1\"] = dict(\n colorscale=colorscale_validator.validate_coerce(\n args[\"color_continuous_scale\"]\n ),\n cmid=color_continuous_midpoint,\n cmin=range_color[0],\n cmax=range_color[1],\n )\n\n # For 2D+RGB data, use Image trace\n elif img.ndim == 3 and img.shape[-1] in [3, 4]:\n if zmax is None and img.dtype is not np.uint8:\n zmax = _infer_zmax_from_type(img)\n zmin, zmax = _vectorize_zvalue(zmin), _vectorize_zvalue(zmax)\n trace = go.Image(z=img, zmin=zmin, zmax=zmax)\n layout = {}\n if origin == \"lower\":\n layout[\"yaxis\"] = dict(autorange=True)\n else:\n raise ValueError(\n \"px.imshow only accepts 2D single-channel, RGB or RGBA images. \"\n \"An image of shape %s was provided\" % str(img.shape)\n )\n\n layout_patch = dict()\n for v in [\"title\", \"height\", \"width\"]:\n if args[v]:\n layout_patch[v] = args[v]\n if \"title\" not in layout_patch and args[\"template\"].layout.margin.t is None:\n layout_patch[\"margin\"] = {\"t\": 60}\n fig = go.Figure(data=trace, layout=layout)\n fig.update_layout(layout_patch)\n fig.update_layout(template=args[\"template\"], overwrite=True)\n return fig\n", "path": "packages/python/plotly/plotly/express/_imshow.py"}]} | 2,512 | 466 |
gh_patches_debug_43324 | rasdani/github-patches | git_diff | gratipay__gratipay.com-2531 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update receiving amounts when updating last_bill_result
Should fix #2495.
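A rough sketch of the intended behaviour, added for illustration only (it assumes the `update_receiving_amounts` helper and the SQL pattern from `gittip/billing/__init__.py` shown below; the `set_bill_result` wrapper is hypothetical and not part of the codebase):
```
from gittip.billing import update_receiving_amounts  # helper shown in the file below

def set_bill_result(db, username, error):
    # Hypothetical wrapper: writing last_bill_result and refreshing the
    # tippees' receiving totals should happen as a single step.
    db.run("UPDATE participants SET last_bill_result=%s WHERE username=%s",
           (error, username))
    update_receiving_amounts(db, username, error)
```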
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gittip/billing/__init__.py`
Content:
```
1 """This module encapsulates billing logic and db access.
2
3 There are three pieces of information for each participant related to billing:
4
5 balanced_customer_href
6 * NULL - This participant has never been billed.
7 * 'deadbeef' - This participant has had a Balanced account created for
8 them, either by adding a credit card or a bank account.
9 last_bill_result
10 * NULL - This participant has not had their credit card charged yet.
11 * '' - This participant has a working card.
12 * <message> - An error message.
13 last_ach_result
14 * NULL - This participant has not wired up a bank account yet.
15 * '' - This participant has a working bank account.
16 * <message> - An error message.
17
18 """
19 from __future__ import unicode_literals
20
21 import balanced
22 from aspen.utils import typecheck
23
24
25 def update_receiving_amounts(db, tipper, error):
26 """Update receiving amounts of participants tipped by given tipper.
27 """
28 from gittip.models.participant import Participant
29 tipper = Participant.from_username(tipper)
30 if tipper.is_suspicious or tipper.last_bill_result == error:
31 return
32 if error == '':
33 op = '+'
34 else:
35 op = '-'
36 db.run("""
37 UPDATE participants
38 SET receiving = (receiving {0} amount)
39 FROM ( SELECT DISTINCT ON (tippee) tippee, amount
40 FROM tips
41 WHERE tipper=%(tipper)s
42 ORDER BY tippee, mtime DESC
43 ) foo
44 WHERE tippee = username;
45 """.format(op), dict(tipper=tipper))
46
47
48 def get_balanced_account(db, username, balanced_customer_href):
49 """Find or create a balanced.Account.
50 """
51 typecheck( username, unicode
52 , balanced_customer_href, (unicode, None)
53 )
54
55 if balanced_customer_href is None:
56 customer = balanced.Customer(meta={
57 'username': username,
58 }).save()
59 BALANCED_ACCOUNT = """\
60
61 UPDATE participants
62 SET balanced_customer_href=%s
63 WHERE username=%s
64
65 """
66 db.run(BALANCED_ACCOUNT, (customer.href, username))
67 else:
68 customer = balanced.Customer.fetch(balanced_customer_href)
69 return customer
70
71
72 def associate(db, thing, username, balanced_customer_href, balanced_thing_uri):
73 """Given four unicodes, return a unicode.
74
75 This function attempts to associate the credit card or bank account details
76 referenced by balanced_thing_uri with a Balanced Account. If it fails we
77 log and return a unicode describing the failure. Even for failure we keep
78 balanced_customer_href; we don't reset it to None/NULL. It's useful for
79 loading the previous (bad) info from Balanced in order to prepopulate the
80 form.
81
82 """
83 typecheck( username, unicode
84 , balanced_customer_href, (unicode, None, balanced.Customer)
85 , balanced_thing_uri, unicode
86 , thing, unicode
87 )
88
89 if isinstance(balanced_customer_href, balanced.Customer):
90 balanced_account = balanced_customer_href
91 else:
92 balanced_account = get_balanced_account( db
93 , username
94 , balanced_customer_href
95 )
96 invalidate_on_balanced(thing, balanced_account.href)
97 SQL = "UPDATE participants SET last_%s_result=%%s WHERE username=%%s"
98 try:
99 if thing == "credit card":
100 SQL %= "bill"
101 obj = balanced.Card.fetch(balanced_thing_uri)
102 #add = balanced_account.add_card
103
104 else:
105 assert thing == "bank account", thing # sanity check
106 SQL %= "ach"
107 obj = balanced.BankAccount.fetch(balanced_thing_uri)
108 #add = balanced_account.add_bank_account
109
110 obj.associate_to_customer(balanced_account)
111 except balanced.exc.HTTPError as err:
112 error = err.message.message.decode('UTF-8') # XXX UTF-8?
113 else:
114 error = ''
115 typecheck(error, unicode)
116
117 db.run(SQL, (error, username))
118 if thing == "credit card":
119 update_receiving_amounts(db, username, error)
120 return error
121
122
123 def invalidate_on_balanced(thing, balanced_customer_href):
124 """XXX Things in balanced cannot be deleted at the moment.
125
126 Instead we mark all valid cards as invalid which will restrict against
127 anyone being able to issue charges against them in the future.
128
129 See: https://github.com/balanced/balanced-api/issues/22
130
131 """
132 assert thing in ("credit card", "bank account")
133 typecheck(balanced_customer_href, (str, unicode))
134
135 customer = balanced.Customer.fetch(balanced_customer_href)
136 things = customer.cards if thing == "credit card" else customer.bank_accounts
137
138 for _thing in things:
139 _thing.unstore()
140
141
142 def clear(db, thing, username, balanced_customer_href):
143 typecheck( thing, unicode
144 , username, unicode
145 , balanced_customer_href, (unicode, str)
146 )
147 assert thing in ("credit card", "bank account"), thing
148 invalidate_on_balanced(thing, balanced_customer_href)
149 CLEAR = """\
150
151 UPDATE participants
152 SET last_%s_result=NULL
153 WHERE username=%%s
154
155 """ % ("bill" if thing == "credit card" else "ach")
156 db.run(CLEAR, (username,))
157 if thing == "credit card":
158 update_receiving_amounts(db, username, None)
159
160
161 def store_error(db, thing, username, msg):
162 typecheck(thing, unicode, username, unicode, msg, unicode)
163 assert thing in ("credit card", "bank account"), thing
164 ERROR = """\
165
166 UPDATE participants
167 SET last_%s_result=%%s
168 WHERE username=%%s
169
170 """ % ("bill" if thing == "credit card" else "ach")
171 db.run(ERROR, (msg, username))
172 if thing == "credit card":
173 update_receiving_amounts(db, username, msg)
174
175
176 class BalancedThing(object):
177 """Represent either a credit card or a bank account.
178 """
179
180 thing_type = None # either 'card' or 'bank_account'
181 keys_to_attr_paths = None # set to a mapping in subclasses
182
183 _customer = None # underlying balanced.Customer object
184 _thing = None # underlying balanced.{BankAccount,Card} object
185
186 def __getitem__(self, key):
187 """Given a name, return a unicode.
188
189 Allow subclasses to provide a flat set of keys, which, under the hood,
190 might be nested attributes and/or keys. The traversal path is relative
191 to _thing (not self!).
192
193 """
194 attr_path = self.keys_to_attr_paths.get(key, key)
195
196 out = None
197 if self._customer is not None and self._thing is not None:
198 out = self._thing
199 for val in attr_path.split('.'):
200 if type(out) is dict:
201 # this lets us reach into the meta dict
202 out = out.get(val)
203 else:
204 try:
205 out = getattr(out, val)
206 except AttributeError:
207 raise KeyError("{} not found".format(val))
208 if out is None:
209 break
210
211 if out is None:
212 # Default to ''; see https://github.com/gittip/www.gittip.com/issues/2161.
213 out = ''
214
215 return out
216
217 def __init__(self, balanced_customer_href):
218 """Given a Balanced account_uri, load data from Balanced.
219 """
220 if balanced_customer_href is None:
221 return
222
223 # XXX Indexing is borken. See:
224 # https://github.com/balanced/balanced-python/issues/10
225
226 self._customer = balanced.Customer.fetch(balanced_customer_href)
227
228 things = getattr(self._customer, self.thing_type+'s')\
229 .filter(is_valid=True).all()
230 nvalid = len(things)
231
232 if nvalid == 0:
233 self._thing = None
234 elif nvalid == 1:
235 self._thing = things[0]
236 else:
237 msg = "%s has %d valid %ss"
238 msg %= (balanced_customer_href, len(things), self.thing_type)
239 raise RuntimeError(msg)
240
241 @property
242 def is_setup(self):
243 return self._thing is not None
244
245
246 class BalancedCard(BalancedThing):
247 """This is a dict-like wrapper around a Balanced credit card.
248 """
249
250 thing_type = 'card'
251
252 keys_to_attr_paths = {
253 'id': 'customer.href',
254 'address_1': 'address.line1',
255 'address_2': 'meta.address_2',
256 'country': 'meta.country',
257 'city_town': 'meta.city_town',
258 'zip': 'address.postal_code',
259 # gittip is saving the state in the meta field
260 # for compatibility with legacy customers
261 'state': 'meta.region',
262 'last4': 'number',
263 'last_four': 'number',
264 'expiration_month': 'expiration_month',
265 'expiration_year': 'expiration_year',
266 }
267
268
269 class BalancedBankAccount(BalancedThing):
270 """This is a dict-like wrapper around a Balanced bank account.
271 """
272
273 thing_type = 'bank_account'
274
275 keys_to_attr_paths = {
276 'customer_href': 'customer.href',
277 }
278
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gittip/billing/__init__.py b/gittip/billing/__init__.py
--- a/gittip/billing/__init__.py
+++ b/gittip/billing/__init__.py
@@ -22,14 +22,35 @@
from aspen.utils import typecheck
-def update_receiving_amounts(db, tipper, error):
- """Update receiving amounts of participants tipped by given tipper.
+def store_result(db, thing, username, new_result):
+ """Update the participant's last_{ach,bill}_result in the DB.
+
+ Also update receiving amounts of the participant's tippees.
"""
- from gittip.models.participant import Participant
- tipper = Participant.from_username(tipper)
- if tipper.is_suspicious or tipper.last_bill_result == error:
+ assert thing in ("credit card", "bank account"), thing
+ x = "bill" if thing == "credit card" else "ach"
+
+ # Update last_thing_result in the DB
+ SQL = """
+
+ UPDATE participants p
+ SET last_{0}_result=%s
+ WHERE username=%s
+ RETURNING is_suspicious
+ , ( SELECT last_{0}_result
+ FROM participants p2
+ WHERE p2.id = p.id
+ ) AS old_result
+
+ """.format(x)
+ p = db.one(SQL, (new_result, username))
+
+ # Update the receiving amounts of tippees if necessary
+ if thing != "credit card":
+ return
+ if p.is_suspicious or new_result == p.old_result:
return
- if error == '':
+ if new_result == '':
op = '+'
else:
op = '-'
@@ -42,7 +63,7 @@
ORDER BY tippee, mtime DESC
) foo
WHERE tippee = username;
- """.format(op), dict(tipper=tipper))
+ """.format(op), dict(tipper=username))
def get_balanced_account(db, username, balanced_customer_href):
@@ -94,19 +115,12 @@
, balanced_customer_href
)
invalidate_on_balanced(thing, balanced_account.href)
- SQL = "UPDATE participants SET last_%s_result=%%s WHERE username=%%s"
try:
if thing == "credit card":
- SQL %= "bill"
obj = balanced.Card.fetch(balanced_thing_uri)
- #add = balanced_account.add_card
-
else:
assert thing == "bank account", thing # sanity check
- SQL %= "ach"
obj = balanced.BankAccount.fetch(balanced_thing_uri)
- #add = balanced_account.add_bank_account
-
obj.associate_to_customer(balanced_account)
except balanced.exc.HTTPError as err:
error = err.message.message.decode('UTF-8') # XXX UTF-8?
@@ -114,9 +128,7 @@
error = ''
typecheck(error, unicode)
- db.run(SQL, (error, username))
- if thing == "credit card":
- update_receiving_amounts(db, username, error)
+ store_result(db, thing, username, error)
return error
@@ -144,33 +156,8 @@
, username, unicode
, balanced_customer_href, (unicode, str)
)
- assert thing in ("credit card", "bank account"), thing
invalidate_on_balanced(thing, balanced_customer_href)
- CLEAR = """\
-
- UPDATE participants
- SET last_%s_result=NULL
- WHERE username=%%s
-
- """ % ("bill" if thing == "credit card" else "ach")
- db.run(CLEAR, (username,))
- if thing == "credit card":
- update_receiving_amounts(db, username, None)
-
-
-def store_error(db, thing, username, msg):
- typecheck(thing, unicode, username, unicode, msg, unicode)
- assert thing in ("credit card", "bank account"), thing
- ERROR = """\
-
- UPDATE participants
- SET last_%s_result=%%s
- WHERE username=%%s
-
- """ % ("bill" if thing == "credit card" else "ach")
- db.run(ERROR, (msg, username))
- if thing == "credit card":
- update_receiving_amounts(db, username, msg)
+ store_result(db, thing, username, None)
class BalancedThing(object):
| {"golden_diff": "diff --git a/gittip/billing/__init__.py b/gittip/billing/__init__.py\n--- a/gittip/billing/__init__.py\n+++ b/gittip/billing/__init__.py\n@@ -22,14 +22,35 @@\n from aspen.utils import typecheck\n \n \n-def update_receiving_amounts(db, tipper, error):\n- \"\"\"Update receiving amounts of participants tipped by given tipper.\n+def store_result(db, thing, username, new_result):\n+ \"\"\"Update the participant's last_{ach,bill}_result in the DB.\n+\n+ Also update receiving amounts of the participant's tippees.\n \"\"\"\n- from gittip.models.participant import Participant\n- tipper = Participant.from_username(tipper)\n- if tipper.is_suspicious or tipper.last_bill_result == error:\n+ assert thing in (\"credit card\", \"bank account\"), thing\n+ x = \"bill\" if thing == \"credit card\" else \"ach\"\n+\n+ # Update last_thing_result in the DB\n+ SQL = \"\"\"\n+\n+ UPDATE participants p\n+ SET last_{0}_result=%s\n+ WHERE username=%s\n+ RETURNING is_suspicious\n+ , ( SELECT last_{0}_result\n+ FROM participants p2\n+ WHERE p2.id = p.id\n+ ) AS old_result\n+\n+ \"\"\".format(x)\n+ p = db.one(SQL, (new_result, username))\n+\n+ # Update the receiving amounts of tippees if necessary\n+ if thing != \"credit card\":\n+ return\n+ if p.is_suspicious or new_result == p.old_result:\n return\n- if error == '':\n+ if new_result == '':\n op = '+'\n else:\n op = '-'\n@@ -42,7 +63,7 @@\n ORDER BY tippee, mtime DESC\n ) foo\n WHERE tippee = username;\n- \"\"\".format(op), dict(tipper=tipper))\n+ \"\"\".format(op), dict(tipper=username))\n \n \n def get_balanced_account(db, username, balanced_customer_href):\n@@ -94,19 +115,12 @@\n , balanced_customer_href\n )\n invalidate_on_balanced(thing, balanced_account.href)\n- SQL = \"UPDATE participants SET last_%s_result=%%s WHERE username=%%s\"\n try:\n if thing == \"credit card\":\n- SQL %= \"bill\"\n obj = balanced.Card.fetch(balanced_thing_uri)\n- #add = balanced_account.add_card\n-\n else:\n assert thing == \"bank account\", thing # sanity check\n- SQL %= \"ach\"\n obj = balanced.BankAccount.fetch(balanced_thing_uri)\n- #add = balanced_account.add_bank_account\n-\n obj.associate_to_customer(balanced_account)\n except balanced.exc.HTTPError as err:\n error = err.message.message.decode('UTF-8') # XXX UTF-8?\n@@ -114,9 +128,7 @@\n error = ''\n typecheck(error, unicode)\n \n- db.run(SQL, (error, username))\n- if thing == \"credit card\":\n- update_receiving_amounts(db, username, error)\n+ store_result(db, thing, username, error)\n return error\n \n \n@@ -144,33 +156,8 @@\n , username, unicode\n , balanced_customer_href, (unicode, str)\n )\n- assert thing in (\"credit card\", \"bank account\"), thing\n invalidate_on_balanced(thing, balanced_customer_href)\n- CLEAR = \"\"\"\\\n-\n- UPDATE participants\n- SET last_%s_result=NULL\n- WHERE username=%%s\n-\n- \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n- db.run(CLEAR, (username,))\n- if thing == \"credit card\":\n- update_receiving_amounts(db, username, None)\n-\n-\n-def store_error(db, thing, username, msg):\n- typecheck(thing, unicode, username, unicode, msg, unicode)\n- assert thing in (\"credit card\", \"bank account\"), thing\n- ERROR = \"\"\"\\\n-\n- UPDATE participants\n- SET last_%s_result=%%s\n- WHERE username=%%s\n-\n- \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n- db.run(ERROR, (msg, username))\n- if thing == \"credit card\":\n- update_receiving_amounts(db, username, msg)\n+ store_result(db, thing, username, None)\n \n \n class BalancedThing(object):\n", "issue": 
"Update receiving amounts when updating last_bill_result\nShould fix #2495.\n\n", "before_files": [{"content": "\"\"\"This module encapsulates billing logic and db access.\n\nThere are three pieces of information for each participant related to billing:\n\n balanced_customer_href\n * NULL - This participant has never been billed.\n * 'deadbeef' - This participant has had a Balanced account created for\n them, either by adding a credit card or a bank account.\n last_bill_result\n * NULL - This participant has not had their credit card charged yet.\n * '' - This participant has a working card.\n * <message> - An error message.\n last_ach_result\n * NULL - This participant has not wired up a bank account yet.\n * '' - This participant has a working bank account.\n * <message> - An error message.\n\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport balanced\nfrom aspen.utils import typecheck\n\n\ndef update_receiving_amounts(db, tipper, error):\n \"\"\"Update receiving amounts of participants tipped by given tipper.\n \"\"\"\n from gittip.models.participant import Participant\n tipper = Participant.from_username(tipper)\n if tipper.is_suspicious or tipper.last_bill_result == error:\n return\n if error == '':\n op = '+'\n else:\n op = '-'\n db.run(\"\"\"\n UPDATE participants\n SET receiving = (receiving {0} amount)\n FROM ( SELECT DISTINCT ON (tippee) tippee, amount\n FROM tips\n WHERE tipper=%(tipper)s\n ORDER BY tippee, mtime DESC\n ) foo\n WHERE tippee = username;\n \"\"\".format(op), dict(tipper=tipper))\n\n\ndef get_balanced_account(db, username, balanced_customer_href):\n \"\"\"Find or create a balanced.Account.\n \"\"\"\n typecheck( username, unicode\n , balanced_customer_href, (unicode, None)\n )\n\n if balanced_customer_href is None:\n customer = balanced.Customer(meta={\n 'username': username,\n }).save()\n BALANCED_ACCOUNT = \"\"\"\\\n\n UPDATE participants\n SET balanced_customer_href=%s\n WHERE username=%s\n\n \"\"\"\n db.run(BALANCED_ACCOUNT, (customer.href, username))\n else:\n customer = balanced.Customer.fetch(balanced_customer_href)\n return customer\n\n\ndef associate(db, thing, username, balanced_customer_href, balanced_thing_uri):\n \"\"\"Given four unicodes, return a unicode.\n\n This function attempts to associate the credit card or bank account details\n referenced by balanced_thing_uri with a Balanced Account. If it fails we\n log and return a unicode describing the failure. Even for failure we keep\n balanced_customer_href; we don't reset it to None/NULL. 
It's useful for\n loading the previous (bad) info from Balanced in order to prepopulate the\n form.\n\n \"\"\"\n typecheck( username, unicode\n , balanced_customer_href, (unicode, None, balanced.Customer)\n , balanced_thing_uri, unicode\n , thing, unicode\n )\n\n if isinstance(balanced_customer_href, balanced.Customer):\n balanced_account = balanced_customer_href\n else:\n balanced_account = get_balanced_account( db\n , username\n , balanced_customer_href\n )\n invalidate_on_balanced(thing, balanced_account.href)\n SQL = \"UPDATE participants SET last_%s_result=%%s WHERE username=%%s\"\n try:\n if thing == \"credit card\":\n SQL %= \"bill\"\n obj = balanced.Card.fetch(balanced_thing_uri)\n #add = balanced_account.add_card\n\n else:\n assert thing == \"bank account\", thing # sanity check\n SQL %= \"ach\"\n obj = balanced.BankAccount.fetch(balanced_thing_uri)\n #add = balanced_account.add_bank_account\n\n obj.associate_to_customer(balanced_account)\n except balanced.exc.HTTPError as err:\n error = err.message.message.decode('UTF-8') # XXX UTF-8?\n else:\n error = ''\n typecheck(error, unicode)\n\n db.run(SQL, (error, username))\n if thing == \"credit card\":\n update_receiving_amounts(db, username, error)\n return error\n\n\ndef invalidate_on_balanced(thing, balanced_customer_href):\n \"\"\"XXX Things in balanced cannot be deleted at the moment.\n\n Instead we mark all valid cards as invalid which will restrict against\n anyone being able to issue charges against them in the future.\n\n See: https://github.com/balanced/balanced-api/issues/22\n\n \"\"\"\n assert thing in (\"credit card\", \"bank account\")\n typecheck(balanced_customer_href, (str, unicode))\n\n customer = balanced.Customer.fetch(balanced_customer_href)\n things = customer.cards if thing == \"credit card\" else customer.bank_accounts\n\n for _thing in things:\n _thing.unstore()\n\n\ndef clear(db, thing, username, balanced_customer_href):\n typecheck( thing, unicode\n , username, unicode\n , balanced_customer_href, (unicode, str)\n )\n assert thing in (\"credit card\", \"bank account\"), thing\n invalidate_on_balanced(thing, balanced_customer_href)\n CLEAR = \"\"\"\\\n\n UPDATE participants\n SET last_%s_result=NULL\n WHERE username=%%s\n\n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(CLEAR, (username,))\n if thing == \"credit card\":\n update_receiving_amounts(db, username, None)\n\n\ndef store_error(db, thing, username, msg):\n typecheck(thing, unicode, username, unicode, msg, unicode)\n assert thing in (\"credit card\", \"bank account\"), thing\n ERROR = \"\"\"\\\n\n UPDATE participants\n SET last_%s_result=%%s\n WHERE username=%%s\n\n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(ERROR, (msg, username))\n if thing == \"credit card\":\n update_receiving_amounts(db, username, msg)\n\n\nclass BalancedThing(object):\n \"\"\"Represent either a credit card or a bank account.\n \"\"\"\n\n thing_type = None # either 'card' or 'bank_account'\n keys_to_attr_paths = None # set to a mapping in subclasses\n\n _customer = None # underlying balanced.Customer object\n _thing = None # underlying balanced.{BankAccount,Card} object\n\n def __getitem__(self, key):\n \"\"\"Given a name, return a unicode.\n\n Allow subclasses to provide a flat set of keys, which, under the hood,\n might be nested attributes and/or keys. 
The traversal path is relative\n to _thing (not self!).\n\n \"\"\"\n attr_path = self.keys_to_attr_paths.get(key, key)\n\n out = None\n if self._customer is not None and self._thing is not None:\n out = self._thing\n for val in attr_path.split('.'):\n if type(out) is dict:\n # this lets us reach into the meta dict\n out = out.get(val)\n else:\n try:\n out = getattr(out, val)\n except AttributeError:\n raise KeyError(\"{} not found\".format(val))\n if out is None:\n break\n\n if out is None:\n # Default to ''; see https://github.com/gittip/www.gittip.com/issues/2161.\n out = ''\n\n return out\n\n def __init__(self, balanced_customer_href):\n \"\"\"Given a Balanced account_uri, load data from Balanced.\n \"\"\"\n if balanced_customer_href is None:\n return\n\n # XXX Indexing is borken. See:\n # https://github.com/balanced/balanced-python/issues/10\n\n self._customer = balanced.Customer.fetch(balanced_customer_href)\n\n things = getattr(self._customer, self.thing_type+'s')\\\n .filter(is_valid=True).all()\n nvalid = len(things)\n\n if nvalid == 0:\n self._thing = None\n elif nvalid == 1:\n self._thing = things[0]\n else:\n msg = \"%s has %d valid %ss\"\n msg %= (balanced_customer_href, len(things), self.thing_type)\n raise RuntimeError(msg)\n\n @property\n def is_setup(self):\n return self._thing is not None\n\n\nclass BalancedCard(BalancedThing):\n \"\"\"This is a dict-like wrapper around a Balanced credit card.\n \"\"\"\n\n thing_type = 'card'\n\n keys_to_attr_paths = {\n 'id': 'customer.href',\n 'address_1': 'address.line1',\n 'address_2': 'meta.address_2',\n 'country': 'meta.country',\n 'city_town': 'meta.city_town',\n 'zip': 'address.postal_code',\n # gittip is saving the state in the meta field\n # for compatibility with legacy customers\n 'state': 'meta.region',\n 'last4': 'number',\n 'last_four': 'number',\n 'expiration_month': 'expiration_month',\n 'expiration_year': 'expiration_year',\n }\n\n\nclass BalancedBankAccount(BalancedThing):\n \"\"\"This is a dict-like wrapper around a Balanced bank account.\n \"\"\"\n\n thing_type = 'bank_account'\n\n keys_to_attr_paths = {\n 'customer_href': 'customer.href',\n }\n", "path": "gittip/billing/__init__.py"}], "after_files": [{"content": "\"\"\"This module encapsulates billing logic and db access.\n\nThere are three pieces of information for each participant related to billing:\n\n balanced_customer_href\n * NULL - This participant has never been billed.\n * 'deadbeef' - This participant has had a Balanced account created for\n them, either by adding a credit card or a bank account.\n last_bill_result\n * NULL - This participant has not had their credit card charged yet.\n * '' - This participant has a working card.\n * <message> - An error message.\n last_ach_result\n * NULL - This participant has not wired up a bank account yet.\n * '' - This participant has a working bank account.\n * <message> - An error message.\n\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport balanced\nfrom aspen.utils import typecheck\n\n\ndef store_result(db, thing, username, new_result):\n \"\"\"Update the participant's last_{ach,bill}_result in the DB.\n\n Also update receiving amounts of the participant's tippees.\n \"\"\"\n assert thing in (\"credit card\", \"bank account\"), thing\n x = \"bill\" if thing == \"credit card\" else \"ach\"\n\n # Update last_thing_result in the DB\n SQL = \"\"\"\n\n UPDATE participants p\n SET last_{0}_result=%s\n WHERE username=%s\n RETURNING is_suspicious\n , ( SELECT last_{0}_result\n FROM participants p2\n WHERE p2.id 
= p.id\n ) AS old_result\n\n \"\"\".format(x)\n p = db.one(SQL, (new_result, username))\n\n # Update the receiving amounts of tippees if necessary\n if thing != \"credit card\":\n return\n if p.is_suspicious or new_result == p.old_result:\n return\n if new_result == '':\n op = '+'\n else:\n op = '-'\n db.run(\"\"\"\n UPDATE participants\n SET receiving = (receiving {0} amount)\n FROM ( SELECT DISTINCT ON (tippee) tippee, amount\n FROM tips\n WHERE tipper=%(tipper)s\n ORDER BY tippee, mtime DESC\n ) foo\n WHERE tippee = username;\n \"\"\".format(op), dict(tipper=username))\n\n\ndef get_balanced_account(db, username, balanced_customer_href):\n \"\"\"Find or create a balanced.Account.\n \"\"\"\n typecheck( username, unicode\n , balanced_customer_href, (unicode, None)\n )\n\n if balanced_customer_href is None:\n customer = balanced.Customer(meta={\n 'username': username,\n }).save()\n BALANCED_ACCOUNT = \"\"\"\\\n\n UPDATE participants\n SET balanced_customer_href=%s\n WHERE username=%s\n\n \"\"\"\n db.run(BALANCED_ACCOUNT, (customer.href, username))\n else:\n customer = balanced.Customer.fetch(balanced_customer_href)\n return customer\n\n\ndef associate(db, thing, username, balanced_customer_href, balanced_thing_uri):\n \"\"\"Given four unicodes, return a unicode.\n\n This function attempts to associate the credit card or bank account details\n referenced by balanced_thing_uri with a Balanced Account. If it fails we\n log and return a unicode describing the failure. Even for failure we keep\n balanced_customer_href; we don't reset it to None/NULL. It's useful for\n loading the previous (bad) info from Balanced in order to prepopulate the\n form.\n\n \"\"\"\n typecheck( username, unicode\n , balanced_customer_href, (unicode, None, balanced.Customer)\n , balanced_thing_uri, unicode\n , thing, unicode\n )\n\n if isinstance(balanced_customer_href, balanced.Customer):\n balanced_account = balanced_customer_href\n else:\n balanced_account = get_balanced_account( db\n , username\n , balanced_customer_href\n )\n invalidate_on_balanced(thing, balanced_account.href)\n try:\n if thing == \"credit card\":\n obj = balanced.Card.fetch(balanced_thing_uri)\n else:\n assert thing == \"bank account\", thing # sanity check\n obj = balanced.BankAccount.fetch(balanced_thing_uri)\n obj.associate_to_customer(balanced_account)\n except balanced.exc.HTTPError as err:\n error = err.message.message.decode('UTF-8') # XXX UTF-8?\n else:\n error = ''\n typecheck(error, unicode)\n\n store_result(db, thing, username, error)\n return error\n\n\ndef invalidate_on_balanced(thing, balanced_customer_href):\n \"\"\"XXX Things in balanced cannot be deleted at the moment.\n\n Instead we mark all valid cards as invalid which will restrict against\n anyone being able to issue charges against them in the future.\n\n See: https://github.com/balanced/balanced-api/issues/22\n\n \"\"\"\n assert thing in (\"credit card\", \"bank account\")\n typecheck(balanced_customer_href, (str, unicode))\n\n customer = balanced.Customer.fetch(balanced_customer_href)\n things = customer.cards if thing == \"credit card\" else customer.bank_accounts\n\n for _thing in things:\n _thing.unstore()\n\n\ndef clear(db, thing, username, balanced_customer_href):\n typecheck( thing, unicode\n , username, unicode\n , balanced_customer_href, (unicode, str)\n )\n invalidate_on_balanced(thing, balanced_customer_href)\n store_result(db, thing, username, None)\n\n\nclass BalancedThing(object):\n \"\"\"Represent either a credit card or a bank account.\n \"\"\"\n\n 
thing_type = None # either 'card' or 'bank_account'\n keys_to_attr_paths = None # set to a mapping in subclasses\n\n _customer = None # underlying balanced.Customer object\n _thing = None # underlying balanced.{BankAccount,Card} object\n\n def __getitem__(self, key):\n \"\"\"Given a name, return a unicode.\n\n Allow subclasses to provide a flat set of keys, which, under the hood,\n might be nested attributes and/or keys. The traversal path is relative\n to _thing (not self!).\n\n \"\"\"\n attr_path = self.keys_to_attr_paths.get(key, key)\n\n out = None\n if self._customer is not None and self._thing is not None:\n out = self._thing\n for val in attr_path.split('.'):\n if type(out) is dict:\n # this lets us reach into the meta dict\n out = out.get(val)\n else:\n try:\n out = getattr(out, val)\n except AttributeError:\n raise KeyError(\"{} not found\".format(val))\n if out is None:\n break\n\n if out is None:\n # Default to ''; see https://github.com/gittip/www.gittip.com/issues/2161.\n out = ''\n\n return out\n\n def __init__(self, balanced_customer_href):\n \"\"\"Given a Balanced account_uri, load data from Balanced.\n \"\"\"\n if balanced_customer_href is None:\n return\n\n # XXX Indexing is borken. See:\n # https://github.com/balanced/balanced-python/issues/10\n\n self._customer = balanced.Customer.fetch(balanced_customer_href)\n\n things = getattr(self._customer, self.thing_type+'s')\\\n .filter(is_valid=True).all()\n nvalid = len(things)\n\n if nvalid == 0:\n self._thing = None\n elif nvalid == 1:\n self._thing = things[0]\n else:\n msg = \"%s has %d valid %ss\"\n msg %= (balanced_customer_href, len(things), self.thing_type)\n raise RuntimeError(msg)\n\n @property\n def is_setup(self):\n return self._thing is not None\n\n\nclass BalancedCard(BalancedThing):\n \"\"\"This is a dict-like wrapper around a Balanced credit card.\n \"\"\"\n\n thing_type = 'card'\n\n keys_to_attr_paths = {\n 'id': 'customer.href',\n 'address_1': 'address.line1',\n 'address_2': 'meta.address_2',\n 'country': 'meta.country',\n 'city_town': 'meta.city_town',\n 'zip': 'address.postal_code',\n # gittip is saving the state in the meta field\n # for compatibility with legacy customers\n 'state': 'meta.region',\n 'last4': 'number',\n 'last_four': 'number',\n 'expiration_month': 'expiration_month',\n 'expiration_year': 'expiration_year',\n }\n\n\nclass BalancedBankAccount(BalancedThing):\n \"\"\"This is a dict-like wrapper around a Balanced bank account.\n \"\"\"\n\n thing_type = 'bank_account'\n\n keys_to_attr_paths = {\n 'customer_href': 'customer.href',\n }\n", "path": "gittip/billing/__init__.py"}]} | 3,047 | 1,020 |
gh_patches_debug_37719 | rasdani/github-patches | git_diff | pfnet__pytorch-pfn-extras-382 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OutputComparer: add API to limit `max_pool` size
It can be convenient when comparing against engines with limitation (e.g., memory size)
--- END ISSUE ---
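One way to read this request is as a cap on how many engines are allowed to execute concurrently. The sketch below is purely illustrative and assumes a semaphore-based cap; the `concurrency` parameter name and the `run_all` helper are assumptions for this example, not the comparer's actual API.

```python
import threading
import concurrent.futures


def run_all(tasks, concurrency=None):
    # Cap how many task bodies run at the same time; with concurrency=None the
    # semaphore is sized to len(tasks), i.e. no extra limit is imposed.
    semaphore = threading.Semaphore(len(tasks) if concurrency is None else concurrency)

    def run_one(task):
        with semaphore:  # at most `concurrency` bodies execute concurrently
            return task()

    with concurrent.futures.ThreadPoolExecutor(max_workers=len(tasks)) as executor:
        futures = [executor.submit(run_one, t) for t in tasks]
        return [f.result() for f in futures]


# e.g. run_all([lambda: "gpu result", lambda: "cpu result"], concurrency=1)
```

One caveat for the comparer specifically: its workers also synchronize at a per-iteration barrier, so a cap like this has to be released around the barrier wait, otherwise a limit smaller than the number of engines would keep them from all reaching the barrier and the run would deadlock.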
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_pfn_extras/utils/comparer.py`
Content:
```
1 # mypy: ignore-errors
2
3 import threading
4 import concurrent.futures
5
6 import torch.testing
7
8 from pytorch_pfn_extras import handler as _handler_module
9 from pytorch_pfn_extras.training import _trainer
10 from pytorch_pfn_extras.training import _evaluator
11
12
13 class _ComparableHandler(_handler_module.BaseHandler):
14 def __init__(self, handler, name, save_outs_cb):
15 self._handler = handler
16 self._save_outs_cb = save_outs_cb
17 self.name = name
18 self.iteration = 0
19
20 def convert_batch(self, args):
21 return self._handler.convert_batch(args)
22
23 def train_setup(self, trainer, loader):
24 return self._handler.train_setup(trainer, loader)
25
26 def train_epoch_begin(self, trainer, loader):
27 return self._handler.train_epoch_begin(trainer, loader)
28
29 def train_epoch_end(self, trainer):
30 return self._handler.train_epoch_end(trainer)
31
32 def train_validation_begin(self, evaluator):
33 return self._handler.train_validation_begin(evaluator)
34
35 def train_validation_end(self, trainer, evaluator):
36 return self._handler.train_validation_end(trainer, evaluator)
37
38 def train_step(self, trainer, batch_idx, batch, complete_fn):
39 return self._handler.train_step(trainer, batch_idx, batch, complete_fn)
40
41 def train_post_step(self, trainer, batch_idx, batch, outputs):
42 self._handler.train_post_step(trainer, batch_idx, batch, outputs)
43 self.iteration += 1
44 return self._save_outs_cb(self, batch_idx, outputs)
45
46 def eval_loop_begin(self, evaluator):
47 return self._handler.eval_loop_begin(evaluator)
48
49 def eval_step(self, evaluator, batch_idx, batch, complete_fn):
50 return self._handler.eval_step(
51 evaluator, batch_idx, batch, complete_fn)
52
53 def eval_loop_end(self, evaluator):
54 return self._handler.eval_loop_end(evaluator)
55
56 def eval_post_step(self, evaluator, batch_idx, batch, outputs):
57 self._handler.eval_post_step(evaluator, batch_idx, batch, outputs)
58 self.iteration += 1
59 return self._save_outs_cb(self, batch_idx, outputs)
60
61
62 def get_default_comparer(rtol=1e-07, atol=0, equal_nan=True, msg=None):
63 """Creates default comparer function.
64
65 The created function will compare the outputs by using
66 `torch.testing.assert_allclose` with specified options.
67
68 Args:
69 rtol (float): Relative tolerance.
70 atol (float): Absolute tolerance.
71 equal_nan (bool): If ``True``, NaNs will be ignored.
72 msg (str): Error message to be printed in case of failure.
73 """
74 def compare_fn(backend1, backend2, out_name, out1, out2):
75 err_msg = msg or f" comparing {backend1} and {backend2} in {out_name}"
76 torch.testing.assert_allclose(
77 # TODO select the device where
78 # the tensors will be compared?
79 out1.cpu().detach(),
80 out2.cpu().detach(),
81 rtol=rtol,
82 atol=atol,
83 equal_nan=equal_nan,
84 msg=err_msg,
85 )
86 return compare_fn
87
88
89 _default_comparer = get_default_comparer()
90
91
92 class OutputsComparer:
93 def __init__(
94 self, engines, to_compare_keys=None, *,
95 compare_fn=_default_comparer,
96 ):
97 """A class for comparison of iteration outputs.
98
99 This class is mainly used to compare results between different devices.
100
101 Args:
102 engines (dict of Engines):
103 Trainers or Evaluators to compare outputs.
104 to_compare_keys (tuple of str, optional):
105 A set of keys of output dict to compare.
106 compare_fn (function):
107 Comparison function. Default is ``get_default_comparer()``.
108
109 Examples:
110 >>> trainer_cpu = ppe.engine.create_trainer(
111 model, optimizer, 1, device='cpu')
112 >>> trainer_gpu = ppe.engine.create_trainer(
113 model, optimizer, 1, device='cuda:0')
114 >>> comp = ppe.utils.comparer.OutputsComparer(
115 {"cpu": trainer_cpu, "gpu": trainer_gpu})
116 >>> comp.compare({"cpu": loader, "gpu": loader}])
117 """
118 e_type = type(next(iter(engines.values())))
119 if e_type not in (
120 _trainer._Trainer,
121 _evaluator._Evaluator,
122 ):
123 raise ValueError(f"Engine type {e_type} is not supported")
124 if not all((isinstance(e, e_type) for e in engines.values())):
125 raise ValueError("All the engines must be of the same type")
126 # engines must be a dict
127 for name, engine in engines.items():
128 engine.handler = _ComparableHandler(
129 engine.handler, name, self.report_output
130 )
131
132 self.engines = engines # Need to wrap the handle with ours
133 # If to_compare_key is None, then we compare all
134 self.outputs = {}
135 self.to_compare_keys = to_compare_keys
136 self.barrier = threading.Barrier(len(engines))
137 self.report_lock = threading.Lock()
138 self.compare_fn = compare_fn
139 self._finalized = False
140
141 def _assert_incompatible_trigger(self, condition):
142 if not condition:
143 raise ValueError('Engines have different triggers.')
144
145 def report_output(self, handle, batch_idx, outputs):
146 if (self.n_iters is None) or (handle.iteration % self.n_iters == 0):
147 keys = (
148 self.to_compare_keys
149 if self.to_compare_keys is not None
150 else outputs.keys()
151 )
152
153 # Save the outputs of this iteration
154 with self.report_lock:
155 self.outputs[handle.name] = {key: outputs[key] for key in keys}
156 if len(self.outputs.keys()) == len(self.engines.keys()):
157 # all outputs have been filled, lets compare and reset
158 self._compare_outs()
159 self.outputs = {}
160 self._assert_incompatible_trigger(not self._finalized)
161
162 # Excplicitly synchronize
163 self.barrier.wait()
164
165 def _compare_outs(self):
166 names = list(self.outputs.keys())
167 for i, name in enumerate(names):
168 for out in self.outputs[name]:
169 for j in range(i + 1, len(names)):
170 to_compare = names[j]
171 out_1 = self.outputs[name][out]
172 out_2 = self.outputs[to_compare][out]
173 self.compare_fn(name, to_compare, out, out_1, out_2)
174
175 def run_engine(self, engine, loaders):
176 try:
177 if isinstance(loaders, tuple):
178 engine.run(*loaders)
179 elif isinstance(loaders, dict):
180 engine.run(**loaders)
181 else:
182 engine.run(loaders)
183 with self.report_lock:
184 self._finalized = True
185 self._assert_incompatible_trigger(len(self.outputs) == 0)
186 except Exception:
187 self.barrier.abort()
188 raise
189
190 def compare(self, loaders, n_iters=None):
191 """Compares outputs.
192
193 Args:
194 loaders (dict of loaders):
195 Data loaders used as input for each engine.
196 """
197 # n_iters is the number of iterations that we wait for
198 # compare
199 self.n_iters = n_iters
200 # We need to use a thread pool because is not easy at all to sync
201 # the run method of different engines to compare every n iterations
202 for name in self.engines.keys():
203 if name not in loaders:
204 raise KeyError(f"'{name}' is not in `loaders`")
205 with concurrent.futures.ThreadPoolExecutor(
206 max_workers=len(self.engines)
207 ) as executor:
208 futures = []
209 for name, engine in self.engines.items():
210 futures.append(executor.submit(
211 self.run_engine, engine, loaders[name]))
212 for future in concurrent.futures.as_completed(futures):
213 future.result()
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytorch_pfn_extras/utils/comparer.py b/pytorch_pfn_extras/utils/comparer.py
--- a/pytorch_pfn_extras/utils/comparer.py
+++ b/pytorch_pfn_extras/utils/comparer.py
@@ -93,6 +93,7 @@
def __init__(
self, engines, to_compare_keys=None, *,
compare_fn=_default_comparer,
+ concurrency=None,
):
"""A class for comparison of iteration outputs.
@@ -105,6 +106,9 @@
A set of keys of output dict to compare.
compare_fn (function):
Comparison function. Default is ``get_default_comparer()``.
+ concurrency (int, optional):
+ The upper bound limit on the number of workers that run concurrently.
+ If ``None``, inferred from the size of ``engines``.
Examples:
>>> trainer_cpu = ppe.engine.create_trainer(
@@ -137,6 +141,8 @@
self.report_lock = threading.Lock()
self.compare_fn = compare_fn
self._finalized = False
+ self._semaphore = threading.Semaphore(
+ len(engines) if concurrency is None else concurrency)
def _assert_incompatible_trigger(self, condition):
if not condition:
@@ -160,7 +166,9 @@
self._assert_incompatible_trigger(not self._finalized)
# Excplicitly synchronize
+ self._semaphore.release()
self.barrier.wait()
+ self._semaphore.acquire()
def _compare_outs(self):
names = list(self.outputs.keys())
@@ -174,6 +182,7 @@
def run_engine(self, engine, loaders):
try:
+ self._semaphore.acquire()
if isinstance(loaders, tuple):
engine.run(*loaders)
elif isinstance(loaders, dict):
@@ -186,6 +195,8 @@
except Exception:
self.barrier.abort()
raise
+ finally:
+ self._semaphore.release()
def compare(self, loaders, n_iters=None):
"""Compares outputs.
| {"golden_diff": "diff --git a/pytorch_pfn_extras/utils/comparer.py b/pytorch_pfn_extras/utils/comparer.py\n--- a/pytorch_pfn_extras/utils/comparer.py\n+++ b/pytorch_pfn_extras/utils/comparer.py\n@@ -93,6 +93,7 @@\n def __init__(\n self, engines, to_compare_keys=None, *,\n compare_fn=_default_comparer,\n+ concurrency=None,\n ):\n \"\"\"A class for comparison of iteration outputs.\n \n@@ -105,6 +106,9 @@\n A set of keys of output dict to compare.\n compare_fn (function):\n Comparison function. Default is ``get_default_comparer()``.\n+ concurrency (int, optional):\n+ The upper bound limit on the number of workers that run concurrently.\n+ If ``None``, inferred from the size of ``engines``.\n \n Examples:\n >>> trainer_cpu = ppe.engine.create_trainer(\n@@ -137,6 +141,8 @@\n self.report_lock = threading.Lock()\n self.compare_fn = compare_fn\n self._finalized = False\n+ self._semaphore = threading.Semaphore(\n+ len(engines) if concurrency is None else concurrency)\n \n def _assert_incompatible_trigger(self, condition):\n if not condition:\n@@ -160,7 +166,9 @@\n self._assert_incompatible_trigger(not self._finalized)\n \n # Excplicitly synchronize\n+ self._semaphore.release()\n self.barrier.wait()\n+ self._semaphore.acquire()\n \n def _compare_outs(self):\n names = list(self.outputs.keys())\n@@ -174,6 +182,7 @@\n \n def run_engine(self, engine, loaders):\n try:\n+ self._semaphore.acquire()\n if isinstance(loaders, tuple):\n engine.run(*loaders)\n elif isinstance(loaders, dict):\n@@ -186,6 +195,8 @@\n except Exception:\n self.barrier.abort()\n raise\n+ finally:\n+ self._semaphore.release()\n \n def compare(self, loaders, n_iters=None):\n \"\"\"Compares outputs.\n", "issue": "OutputComparer: add API to limit `max_pool` size\nIt can be convenient when comparing against engines with limitation (e.g., memory size)\n", "before_files": [{"content": "# mypy: ignore-errors\n\nimport threading\nimport concurrent.futures\n\nimport torch.testing\n\nfrom pytorch_pfn_extras import handler as _handler_module\nfrom pytorch_pfn_extras.training import _trainer\nfrom pytorch_pfn_extras.training import _evaluator\n\n\nclass _ComparableHandler(_handler_module.BaseHandler):\n def __init__(self, handler, name, save_outs_cb):\n self._handler = handler\n self._save_outs_cb = save_outs_cb\n self.name = name\n self.iteration = 0\n\n def convert_batch(self, args):\n return self._handler.convert_batch(args)\n\n def train_setup(self, trainer, loader):\n return self._handler.train_setup(trainer, loader)\n\n def train_epoch_begin(self, trainer, loader):\n return self._handler.train_epoch_begin(trainer, loader)\n\n def train_epoch_end(self, trainer):\n return self._handler.train_epoch_end(trainer)\n\n def train_validation_begin(self, evaluator):\n return self._handler.train_validation_begin(evaluator)\n\n def train_validation_end(self, trainer, evaluator):\n return self._handler.train_validation_end(trainer, evaluator)\n\n def train_step(self, trainer, batch_idx, batch, complete_fn):\n return self._handler.train_step(trainer, batch_idx, batch, complete_fn)\n\n def train_post_step(self, trainer, batch_idx, batch, outputs):\n self._handler.train_post_step(trainer, batch_idx, batch, outputs)\n self.iteration += 1\n return self._save_outs_cb(self, batch_idx, outputs)\n\n def eval_loop_begin(self, evaluator):\n return self._handler.eval_loop_begin(evaluator)\n\n def eval_step(self, evaluator, batch_idx, batch, complete_fn):\n return self._handler.eval_step(\n evaluator, batch_idx, batch, complete_fn)\n\n def eval_loop_end(self, 
evaluator):\n return self._handler.eval_loop_end(evaluator)\n\n def eval_post_step(self, evaluator, batch_idx, batch, outputs):\n self._handler.eval_post_step(evaluator, batch_idx, batch, outputs)\n self.iteration += 1\n return self._save_outs_cb(self, batch_idx, outputs)\n\n\ndef get_default_comparer(rtol=1e-07, atol=0, equal_nan=True, msg=None):\n \"\"\"Creates default comparer function.\n\n The created function will compare the outputs by using\n `torch.testing.assert_allclose` with specified options.\n\n Args:\n rtol (float): Relative tolerance.\n atol (float): Absolute tolerance.\n equal_nan (bool): If ``True``, NaNs will be ignored.\n msg (str): Error message to be printed in case of failure.\n \"\"\"\n def compare_fn(backend1, backend2, out_name, out1, out2):\n err_msg = msg or f\" comparing {backend1} and {backend2} in {out_name}\"\n torch.testing.assert_allclose(\n # TODO select the device where\n # the tensors will be compared?\n out1.cpu().detach(),\n out2.cpu().detach(),\n rtol=rtol,\n atol=atol,\n equal_nan=equal_nan,\n msg=err_msg,\n )\n return compare_fn\n\n\n_default_comparer = get_default_comparer()\n\n\nclass OutputsComparer:\n def __init__(\n self, engines, to_compare_keys=None, *,\n compare_fn=_default_comparer,\n ):\n \"\"\"A class for comparison of iteration outputs.\n\n This class is mainly used to compare results between different devices.\n\n Args:\n engines (dict of Engines):\n Trainers or Evaluators to compare outputs.\n to_compare_keys (tuple of str, optional):\n A set of keys of output dict to compare.\n compare_fn (function):\n Comparison function. Default is ``get_default_comparer()``.\n\n Examples:\n >>> trainer_cpu = ppe.engine.create_trainer(\n model, optimizer, 1, device='cpu')\n >>> trainer_gpu = ppe.engine.create_trainer(\n model, optimizer, 1, device='cuda:0')\n >>> comp = ppe.utils.comparer.OutputsComparer(\n {\"cpu\": trainer_cpu, \"gpu\": trainer_gpu})\n >>> comp.compare({\"cpu\": loader, \"gpu\": loader}])\n \"\"\"\n e_type = type(next(iter(engines.values())))\n if e_type not in (\n _trainer._Trainer,\n _evaluator._Evaluator,\n ):\n raise ValueError(f\"Engine type {e_type} is not supported\")\n if not all((isinstance(e, e_type) for e in engines.values())):\n raise ValueError(\"All the engines must be of the same type\")\n # engines must be a dict\n for name, engine in engines.items():\n engine.handler = _ComparableHandler(\n engine.handler, name, self.report_output\n )\n\n self.engines = engines # Need to wrap the handle with ours\n # If to_compare_key is None, then we compare all\n self.outputs = {}\n self.to_compare_keys = to_compare_keys\n self.barrier = threading.Barrier(len(engines))\n self.report_lock = threading.Lock()\n self.compare_fn = compare_fn\n self._finalized = False\n\n def _assert_incompatible_trigger(self, condition):\n if not condition:\n raise ValueError('Engines have different triggers.')\n\n def report_output(self, handle, batch_idx, outputs):\n if (self.n_iters is None) or (handle.iteration % self.n_iters == 0):\n keys = (\n self.to_compare_keys\n if self.to_compare_keys is not None\n else outputs.keys()\n )\n\n # Save the outputs of this iteration\n with self.report_lock:\n self.outputs[handle.name] = {key: outputs[key] for key in keys}\n if len(self.outputs.keys()) == len(self.engines.keys()):\n # all outputs have been filled, lets compare and reset\n self._compare_outs()\n self.outputs = {}\n self._assert_incompatible_trigger(not self._finalized)\n\n # Excplicitly synchronize\n self.barrier.wait()\n\n def 
_compare_outs(self):\n names = list(self.outputs.keys())\n for i, name in enumerate(names):\n for out in self.outputs[name]:\n for j in range(i + 1, len(names)):\n to_compare = names[j]\n out_1 = self.outputs[name][out]\n out_2 = self.outputs[to_compare][out]\n self.compare_fn(name, to_compare, out, out_1, out_2)\n\n def run_engine(self, engine, loaders):\n try:\n if isinstance(loaders, tuple):\n engine.run(*loaders)\n elif isinstance(loaders, dict):\n engine.run(**loaders)\n else:\n engine.run(loaders)\n with self.report_lock:\n self._finalized = True\n self._assert_incompatible_trigger(len(self.outputs) == 0)\n except Exception:\n self.barrier.abort()\n raise\n\n def compare(self, loaders, n_iters=None):\n \"\"\"Compares outputs.\n\n Args:\n loaders (dict of loaders):\n Data loaders used as input for each engine.\n \"\"\"\n # n_iters is the number of iterations that we wait for\n # compare\n self.n_iters = n_iters\n # We need to use a thread pool because is not easy at all to sync\n # the run method of different engines to compare every n iterations\n for name in self.engines.keys():\n if name not in loaders:\n raise KeyError(f\"'{name}' is not in `loaders`\")\n with concurrent.futures.ThreadPoolExecutor(\n max_workers=len(self.engines)\n ) as executor:\n futures = []\n for name, engine in self.engines.items():\n futures.append(executor.submit(\n self.run_engine, engine, loaders[name]))\n for future in concurrent.futures.as_completed(futures):\n future.result()\n", "path": "pytorch_pfn_extras/utils/comparer.py"}], "after_files": [{"content": "# mypy: ignore-errors\n\nimport threading\nimport concurrent.futures\n\nimport torch.testing\n\nfrom pytorch_pfn_extras import handler as _handler_module\nfrom pytorch_pfn_extras.training import _trainer\nfrom pytorch_pfn_extras.training import _evaluator\n\n\nclass _ComparableHandler(_handler_module.BaseHandler):\n def __init__(self, handler, name, save_outs_cb):\n self._handler = handler\n self._save_outs_cb = save_outs_cb\n self.name = name\n self.iteration = 0\n\n def convert_batch(self, args):\n return self._handler.convert_batch(args)\n\n def train_setup(self, trainer, loader):\n return self._handler.train_setup(trainer, loader)\n\n def train_epoch_begin(self, trainer, loader):\n return self._handler.train_epoch_begin(trainer, loader)\n\n def train_epoch_end(self, trainer):\n return self._handler.train_epoch_end(trainer)\n\n def train_validation_begin(self, evaluator):\n return self._handler.train_validation_begin(evaluator)\n\n def train_validation_end(self, trainer, evaluator):\n return self._handler.train_validation_end(trainer, evaluator)\n\n def train_step(self, trainer, batch_idx, batch, complete_fn):\n return self._handler.train_step(trainer, batch_idx, batch, complete_fn)\n\n def train_post_step(self, trainer, batch_idx, batch, outputs):\n self._handler.train_post_step(trainer, batch_idx, batch, outputs)\n self.iteration += 1\n return self._save_outs_cb(self, batch_idx, outputs)\n\n def eval_loop_begin(self, evaluator):\n return self._handler.eval_loop_begin(evaluator)\n\n def eval_step(self, evaluator, batch_idx, batch, complete_fn):\n return self._handler.eval_step(\n evaluator, batch_idx, batch, complete_fn)\n\n def eval_loop_end(self, evaluator):\n return self._handler.eval_loop_end(evaluator)\n\n def eval_post_step(self, evaluator, batch_idx, batch, outputs):\n self._handler.eval_post_step(evaluator, batch_idx, batch, outputs)\n self.iteration += 1\n return self._save_outs_cb(self, batch_idx, outputs)\n\n\ndef 
get_default_comparer(rtol=1e-07, atol=0, equal_nan=True, msg=None):\n \"\"\"Creates default comparer function.\n\n The created function will compare the outputs by using\n `torch.testing.assert_allclose` with specified options.\n\n Args:\n rtol (float): Relative tolerance.\n atol (float): Absolute tolerance.\n equal_nan (bool): If ``True``, NaNs will be ignored.\n msg (str): Error message to be printed in case of failure.\n \"\"\"\n def compare_fn(backend1, backend2, out_name, out1, out2):\n err_msg = msg or f\" comparing {backend1} and {backend2} in {out_name}\"\n torch.testing.assert_allclose(\n # TODO select the device where\n # the tensors will be compared?\n out1.cpu().detach(),\n out2.cpu().detach(),\n rtol=rtol,\n atol=atol,\n equal_nan=equal_nan,\n msg=err_msg,\n )\n return compare_fn\n\n\n_default_comparer = get_default_comparer()\n\n\nclass OutputsComparer:\n def __init__(\n self, engines, to_compare_keys=None, *,\n compare_fn=_default_comparer,\n concurrency=None,\n ):\n \"\"\"A class for comparison of iteration outputs.\n\n This class is mainly used to compare results between different devices.\n\n Args:\n engines (dict of Engines):\n Trainers or Evaluators to compare outputs.\n to_compare_keys (tuple of str, optional):\n A set of keys of output dict to compare.\n compare_fn (function):\n Comparison function. Default is ``get_default_comparer()``.\n concurrency (int, optional):\n The upper bound limit on the number of workers that run concurrently.\n If ``None``, inferred from the size of ``engines``.\n\n Examples:\n >>> trainer_cpu = ppe.engine.create_trainer(\n model, optimizer, 1, device='cpu')\n >>> trainer_gpu = ppe.engine.create_trainer(\n model, optimizer, 1, device='cuda:0')\n >>> comp = ppe.utils.comparer.OutputsComparer(\n {\"cpu\": trainer_cpu, \"gpu\": trainer_gpu})\n >>> comp.compare({\"cpu\": loader, \"gpu\": loader}])\n \"\"\"\n e_type = type(next(iter(engines.values())))\n if e_type not in (\n _trainer._Trainer,\n _evaluator._Evaluator,\n ):\n raise ValueError(f\"Engine type {e_type} is not supported\")\n if not all((isinstance(e, e_type) for e in engines.values())):\n raise ValueError(\"All the engines must be of the same type\")\n # engines must be a dict\n for name, engine in engines.items():\n engine.handler = _ComparableHandler(\n engine.handler, name, self.report_output\n )\n\n self.engines = engines # Need to wrap the handle with ours\n # If to_compare_key is None, then we compare all\n self.outputs = {}\n self.to_compare_keys = to_compare_keys\n self.barrier = threading.Barrier(len(engines))\n self.report_lock = threading.Lock()\n self.compare_fn = compare_fn\n self._finalized = False\n self._semaphore = threading.Semaphore(\n len(engines) if concurrency is None else concurrency)\n\n def _assert_incompatible_trigger(self, condition):\n if not condition:\n raise ValueError('Engines have different triggers.')\n\n def report_output(self, handle, batch_idx, outputs):\n if (self.n_iters is None) or (handle.iteration % self.n_iters == 0):\n keys = (\n self.to_compare_keys\n if self.to_compare_keys is not None\n else outputs.keys()\n )\n\n # Save the outputs of this iteration\n with self.report_lock:\n self.outputs[handle.name] = {key: outputs[key] for key in keys}\n if len(self.outputs.keys()) == len(self.engines.keys()):\n # all outputs have been filled, lets compare and reset\n self._compare_outs()\n self.outputs = {}\n self._assert_incompatible_trigger(not self._finalized)\n\n # Excplicitly synchronize\n self._semaphore.release()\n self.barrier.wait()\n 
self._semaphore.acquire()\n\n def _compare_outs(self):\n names = list(self.outputs.keys())\n for i, name in enumerate(names):\n for out in self.outputs[name]:\n for j in range(i + 1, len(names)):\n to_compare = names[j]\n out_1 = self.outputs[name][out]\n out_2 = self.outputs[to_compare][out]\n self.compare_fn(name, to_compare, out, out_1, out_2)\n\n def run_engine(self, engine, loaders):\n try:\n self._semaphore.acquire()\n if isinstance(loaders, tuple):\n engine.run(*loaders)\n elif isinstance(loaders, dict):\n engine.run(**loaders)\n else:\n engine.run(loaders)\n with self.report_lock:\n self._finalized = True\n self._assert_incompatible_trigger(len(self.outputs) == 0)\n except Exception:\n self.barrier.abort()\n raise\n finally:\n self._semaphore.release()\n\n def compare(self, loaders, n_iters=None):\n \"\"\"Compares outputs.\n\n Args:\n loaders (dict of loaders):\n Data loaders used as input for each engine.\n \"\"\"\n # n_iters is the number of iterations that we wait for\n # compare\n self.n_iters = n_iters\n # We need to use a thread pool because is not easy at all to sync\n # the run method of different engines to compare every n iterations\n for name in self.engines.keys():\n if name not in loaders:\n raise KeyError(f\"'{name}' is not in `loaders`\")\n with concurrent.futures.ThreadPoolExecutor(\n max_workers=len(self.engines)\n ) as executor:\n futures = []\n for name, engine in self.engines.items():\n futures.append(executor.submit(\n self.run_engine, engine, loaders[name]))\n for future in concurrent.futures.as_completed(futures):\n future.result()\n", "path": "pytorch_pfn_extras/utils/comparer.py"}]} | 2,551 | 478 |
gh_patches_debug_13046 | rasdani/github-patches | git_diff | getsentry__sentry-python-309 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Send events without proxy when http_proxy is configured as env variable
Hi,
I have problem with configuration of sentry_sdk. My code is behind corporate proxy and `http_proxy` is configured on machine as env variable (required when I want to install something via pip for example). Also I have env variable `no_proxy` where I added ip address for host where sentry is working (this ip is inside corporate network). When I try send event to sentry I got return code 403 (proxy refuse request). Probably sentry_sdk automacally fetch proxy from http_proxy env but ignore no_proxy. When I remove http_proxy from env everything is working. How can I configure python_sdk to ignore my http_proxy env variable? My code:
```
import logging
import sentry_sdk
from sentry_sdk.integrations.logging import LoggingIntegration
sentry_logging = LoggingIntegration(
level=logging.INFO,
event_level=logging.ERROR
)
sentry_sdk.init(
dsn="<dsn>",
integrations=[sentry_logging],
debug=True,
)
```
--- END ISSUE ---
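For the question at the end of the report (how to make the SDK ignore the machine-wide `http_proxy`), `sentry_sdk.init` already accepts explicit `http_proxy` and `https_proxy` options. A hedged sketch of the intended escape hatch is below; note that with the unpatched transport shown in the files further down, an empty string is falsy and the environment proxy still wins, which is the behavior this issue is about.

```python
import sentry_sdk

sentry_sdk.init(
    dsn="<dsn>",      # placeholder, as in the report above
    http_proxy="",    # explicit "no proxy"; only effective once "" suppresses the
    https_proxy="",   # fallback to the http_proxy/https_proxy environment variables
)
```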
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/transport.py`
Content:
```
1 from __future__ import print_function
2
3 import json
4 import io
5 import urllib3 # type: ignore
6 import certifi
7 import gzip
8
9 from datetime import datetime, timedelta
10
11 from sentry_sdk.consts import VERSION
12 from sentry_sdk.utils import Dsn, logger, capture_internal_exceptions
13 from sentry_sdk.worker import BackgroundWorker
14
15 if False:
16 from sentry_sdk.consts import ClientOptions
17 from typing import Type
18 from typing import Any
19 from typing import Optional
20 from typing import Dict
21 from typing import Union
22 from typing import Callable
23 from urllib3.poolmanager import PoolManager # type: ignore
24 from urllib3.poolmanager import ProxyManager # type: ignore
25
26 try:
27 from urllib.request import getproxies
28 except ImportError:
29 from urllib import getproxies # type: ignore
30
31
32 class Transport(object):
33 """Baseclass for all transports.
34
35 A transport is used to send an event to sentry.
36 """
37
38 parsed_dsn = None # type: Dsn
39
40 def __init__(self, options=None):
41 # type: (Optional[ClientOptions]) -> None
42 self.options = options
43 if options and options["dsn"]:
44 self.parsed_dsn = Dsn(options["dsn"])
45 else:
46 self.parsed_dsn = None # type: ignore
47
48 def capture_event(self, event):
49 """This gets invoked with the event dictionary when an event should
50 be sent to sentry.
51 """
52 raise NotImplementedError()
53
54 def flush(self, timeout, callback=None):
55 """Wait `timeout` seconds for the current events to be sent out."""
56 pass
57
58 def kill(self):
59 # type: () -> None
60 """Forcefully kills the transport."""
61 pass
62
63 def __del__(self):
64 # type: () -> None
65 try:
66 self.kill()
67 except Exception:
68 pass
69
70
71 class HttpTransport(Transport):
72 """The default HTTP transport."""
73
74 def __init__(self, options):
75 # type: (ClientOptions) -> None
76 Transport.__init__(self, options)
77 self._worker = BackgroundWorker()
78 self._auth = self.parsed_dsn.to_auth("sentry.python/%s" % VERSION)
79 self._disabled_until = None # type: Optional[datetime]
80 self._retry = urllib3.util.Retry()
81 self.options = options
82
83 self._pool = self._make_pool(
84 self.parsed_dsn,
85 http_proxy=options["http_proxy"],
86 https_proxy=options["https_proxy"],
87 ca_certs=options["ca_certs"],
88 )
89
90 from sentry_sdk import Hub
91
92 self.hub_cls = Hub
93
94 def _send_event(self, event):
95 # type: (Dict[str, Any]) -> None
96 if self._disabled_until is not None:
97 if datetime.utcnow() < self._disabled_until:
98 return
99 self._disabled_until = None
100
101 body = io.BytesIO()
102 with gzip.GzipFile(fileobj=body, mode="w") as f:
103 f.write(json.dumps(event, allow_nan=False).encode("utf-8"))
104
105 logger.debug(
106 "Sending %s event [%s] to %s project:%s"
107 % (
108 event.get("level") or "error",
109 event["event_id"],
110 self.parsed_dsn.host,
111 self.parsed_dsn.project_id,
112 )
113 )
114 response = self._pool.request(
115 "POST",
116 str(self._auth.store_api_url),
117 body=body.getvalue(),
118 headers={
119 "X-Sentry-Auth": str(self._auth.to_header()),
120 "Content-Type": "application/json",
121 "Content-Encoding": "gzip",
122 },
123 )
124
125 try:
126 if response.status == 429:
127 self._disabled_until = datetime.utcnow() + timedelta(
128 seconds=self._retry.get_retry_after(response) or 60
129 )
130 return
131
132 elif response.status >= 300 or response.status < 200:
133 logger.error(
134 "Unexpected status code: %s (body: %s)",
135 response.status,
136 response.data,
137 )
138 finally:
139 response.close()
140
141 self._disabled_until = None
142
143 def _get_pool_options(self, ca_certs):
144 # type: (Optional[Any]) -> Dict[str, Any]
145 return {
146 "num_pools": 2,
147 "cert_reqs": "CERT_REQUIRED",
148 "ca_certs": ca_certs or certifi.where(),
149 }
150
151 def _make_pool(
152 self,
153 parsed_dsn, # type: Dsn
154 http_proxy, # type: Optional[str]
155 https_proxy, # type: Optional[str]
156 ca_certs, # type: Optional[Any]
157 ):
158 # type: (...) -> Union[PoolManager, ProxyManager]
159 # Use http_proxy if scheme is https and https_proxy is not set
160 proxy = parsed_dsn.scheme == "https" and https_proxy or http_proxy
161 if not proxy:
162 proxy = getproxies().get(parsed_dsn.scheme)
163
164 opts = self._get_pool_options(ca_certs)
165
166 if proxy:
167 return urllib3.ProxyManager(proxy, **opts)
168 else:
169 return urllib3.PoolManager(**opts)
170
171 def capture_event(self, event):
172 # type: (Dict[str, Any]) -> None
173 hub = self.hub_cls.current
174
175 def send_event_wrapper():
176 # type: () -> None
177 with hub:
178 with capture_internal_exceptions():
179 self._send_event(event)
180
181 self._worker.submit(send_event_wrapper)
182
183 def flush(self, timeout, callback=None):
184 # type: (float, Optional[Any]) -> None
185 logger.debug("Flushing HTTP transport")
186 if timeout > 0:
187 self._worker.flush(timeout, callback)
188
189 def kill(self):
190 # type: () -> None
191 logger.debug("Killing HTTP transport")
192 self._worker.kill()
193
194
195 class _FunctionTransport(Transport):
196 def __init__(self, func):
197 # type: (Callable[[Dict[str, Any]], None]) -> None
198 Transport.__init__(self)
199 self._func = func
200
201 def capture_event(self, event):
202 # type: (Dict[str, Any]) -> None
203 self._func(event)
204 return None
205
206
207 def make_transport(options):
208 # type: (ClientOptions) -> Optional[Transport]
209 ref_transport = options["transport"]
210
211 # If no transport is given, we use the http transport class
212 if ref_transport is None:
213 transport_cls = HttpTransport # type: Type[Transport]
214 else:
215 try:
216 issubclass(ref_transport, type) # type: ignore
217 except TypeError:
218 # if we are not a class but we are a callable, assume a
219 # function that acts as capture_event
220 if callable(ref_transport):
221 return _FunctionTransport(ref_transport)
222 # otherwise assume an object fulfilling the transport contract
223 return ref_transport
224 transport_cls = ref_transport # type: ignore
225
226 # if a transport class is given only instanciate it if the dsn is not
227 # empty or None
228 if options["dsn"]:
229 return transport_cls(options)
230
231 return None
232
```
--- END FILES ---
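To see why the environment proxy leaks in even though `no_proxy` lists the Sentry host: `_make_pool` above falls back to `urllib`'s `getproxies()` whenever no explicit proxy option is given, and that lookup returns the `http_proxy`/`https_proxy` values without consulting `no_proxy` for the DSN host. A small standalone trace of that fallback (the proxy URL and hostname are hypothetical values, used only for illustration):

```python
import os
from urllib.request import getproxies

os.environ["http_proxy"] = "http://corporate-proxy:3128"  # hypothetical proxy
os.environ["no_proxy"] = "sentry.internal.example"        # hypothetical Sentry host

explicit = None                          # options["http_proxy"] when not passed
proxy = explicit or getproxies().get("http")
print(proxy)  # http://corporate-proxy:3128 ; no_proxy is never consulted here
```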
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py
--- a/sentry_sdk/transport.py
+++ b/sentry_sdk/transport.py
@@ -156,10 +156,15 @@
ca_certs, # type: Optional[Any]
):
# type: (...) -> Union[PoolManager, ProxyManager]
- # Use http_proxy if scheme is https and https_proxy is not set
- proxy = parsed_dsn.scheme == "https" and https_proxy or http_proxy
- if not proxy:
- proxy = getproxies().get(parsed_dsn.scheme)
+ proxy = None
+
+ # try HTTPS first
+ if parsed_dsn.scheme == "https" and (https_proxy != ""):
+ proxy = https_proxy or getproxies().get("https")
+
+ # maybe fallback to HTTP proxy
+ if not proxy and (http_proxy != ""):
+ proxy = http_proxy or getproxies().get("http")
opts = self._get_pool_options(ca_certs)
| {"golden_diff": "diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py\n--- a/sentry_sdk/transport.py\n+++ b/sentry_sdk/transport.py\n@@ -156,10 +156,15 @@\n ca_certs, # type: Optional[Any]\n ):\n # type: (...) -> Union[PoolManager, ProxyManager]\n- # Use http_proxy if scheme is https and https_proxy is not set\n- proxy = parsed_dsn.scheme == \"https\" and https_proxy or http_proxy\n- if not proxy:\n- proxy = getproxies().get(parsed_dsn.scheme)\n+ proxy = None\n+\n+ # try HTTPS first\n+ if parsed_dsn.scheme == \"https\" and (https_proxy != \"\"):\n+ proxy = https_proxy or getproxies().get(\"https\")\n+\n+ # maybe fallback to HTTP proxy\n+ if not proxy and (http_proxy != \"\"):\n+ proxy = http_proxy or getproxies().get(\"http\")\n \n opts = self._get_pool_options(ca_certs)\n", "issue": "Send events without proxy when http_proxy is configured as env variable\nHi,\r\nI have problem with configuration of sentry_sdk. My code is behind corporate proxy and `http_proxy` is configured on machine as env variable (required when I want to install something via pip for example). Also I have env variable `no_proxy` where I added ip address for host where sentry is working (this ip is inside corporate network). When I try send event to sentry I got return code 403 (proxy refuse request). Probably sentry_sdk automacally fetch proxy from http_proxy env but ignore no_proxy. When I remove http_proxy from env everything is working. How can I configure python_sdk to ignore my http_proxy env variable? My code: \r\n\r\n```\r\nimport logging\r\nimport sentry_sdk\r\nfrom sentry_sdk.integrations.logging import LoggingIntegration\r\n\r\n\r\nsentry_logging = LoggingIntegration(\r\n level=logging.INFO, \r\n event_level=logging.ERROR \r\n)\r\nsentry_sdk.init(\r\n dsn=\"<dsn>\",\r\n integrations=[sentry_logging],\r\n debug=True,\r\n)\r\n```\n", "before_files": [{"content": "from __future__ import print_function\n\nimport json\nimport io\nimport urllib3 # type: ignore\nimport certifi\nimport gzip\n\nfrom datetime import datetime, timedelta\n\nfrom sentry_sdk.consts import VERSION\nfrom sentry_sdk.utils import Dsn, logger, capture_internal_exceptions\nfrom sentry_sdk.worker import BackgroundWorker\n\nif False:\n from sentry_sdk.consts import ClientOptions\n from typing import Type\n from typing import Any\n from typing import Optional\n from typing import Dict\n from typing import Union\n from typing import Callable\n from urllib3.poolmanager import PoolManager # type: ignore\n from urllib3.poolmanager import ProxyManager # type: ignore\n\ntry:\n from urllib.request import getproxies\nexcept ImportError:\n from urllib import getproxies # type: ignore\n\n\nclass Transport(object):\n \"\"\"Baseclass for all transports.\n\n A transport is used to send an event to sentry.\n \"\"\"\n\n parsed_dsn = None # type: Dsn\n\n def __init__(self, options=None):\n # type: (Optional[ClientOptions]) -> None\n self.options = options\n if options and options[\"dsn\"]:\n self.parsed_dsn = Dsn(options[\"dsn\"])\n else:\n self.parsed_dsn = None # type: ignore\n\n def capture_event(self, event):\n \"\"\"This gets invoked with the event dictionary when an event should\n be sent to sentry.\n \"\"\"\n raise NotImplementedError()\n\n def flush(self, timeout, callback=None):\n \"\"\"Wait `timeout` seconds for the current events to be sent out.\"\"\"\n pass\n\n def kill(self):\n # type: () -> None\n \"\"\"Forcefully kills the transport.\"\"\"\n pass\n\n def __del__(self):\n # type: () -> None\n try:\n self.kill()\n except Exception:\n 
pass\n\n\nclass HttpTransport(Transport):\n \"\"\"The default HTTP transport.\"\"\"\n\n def __init__(self, options):\n # type: (ClientOptions) -> None\n Transport.__init__(self, options)\n self._worker = BackgroundWorker()\n self._auth = self.parsed_dsn.to_auth(\"sentry.python/%s\" % VERSION)\n self._disabled_until = None # type: Optional[datetime]\n self._retry = urllib3.util.Retry()\n self.options = options\n\n self._pool = self._make_pool(\n self.parsed_dsn,\n http_proxy=options[\"http_proxy\"],\n https_proxy=options[\"https_proxy\"],\n ca_certs=options[\"ca_certs\"],\n )\n\n from sentry_sdk import Hub\n\n self.hub_cls = Hub\n\n def _send_event(self, event):\n # type: (Dict[str, Any]) -> None\n if self._disabled_until is not None:\n if datetime.utcnow() < self._disabled_until:\n return\n self._disabled_until = None\n\n body = io.BytesIO()\n with gzip.GzipFile(fileobj=body, mode=\"w\") as f:\n f.write(json.dumps(event, allow_nan=False).encode(\"utf-8\"))\n\n logger.debug(\n \"Sending %s event [%s] to %s project:%s\"\n % (\n event.get(\"level\") or \"error\",\n event[\"event_id\"],\n self.parsed_dsn.host,\n self.parsed_dsn.project_id,\n )\n )\n response = self._pool.request(\n \"POST\",\n str(self._auth.store_api_url),\n body=body.getvalue(),\n headers={\n \"X-Sentry-Auth\": str(self._auth.to_header()),\n \"Content-Type\": \"application/json\",\n \"Content-Encoding\": \"gzip\",\n },\n )\n\n try:\n if response.status == 429:\n self._disabled_until = datetime.utcnow() + timedelta(\n seconds=self._retry.get_retry_after(response) or 60\n )\n return\n\n elif response.status >= 300 or response.status < 200:\n logger.error(\n \"Unexpected status code: %s (body: %s)\",\n response.status,\n response.data,\n )\n finally:\n response.close()\n\n self._disabled_until = None\n\n def _get_pool_options(self, ca_certs):\n # type: (Optional[Any]) -> Dict[str, Any]\n return {\n \"num_pools\": 2,\n \"cert_reqs\": \"CERT_REQUIRED\",\n \"ca_certs\": ca_certs or certifi.where(),\n }\n\n def _make_pool(\n self,\n parsed_dsn, # type: Dsn\n http_proxy, # type: Optional[str]\n https_proxy, # type: Optional[str]\n ca_certs, # type: Optional[Any]\n ):\n # type: (...) 
-> Union[PoolManager, ProxyManager]\n # Use http_proxy if scheme is https and https_proxy is not set\n proxy = parsed_dsn.scheme == \"https\" and https_proxy or http_proxy\n if not proxy:\n proxy = getproxies().get(parsed_dsn.scheme)\n\n opts = self._get_pool_options(ca_certs)\n\n if proxy:\n return urllib3.ProxyManager(proxy, **opts)\n else:\n return urllib3.PoolManager(**opts)\n\n def capture_event(self, event):\n # type: (Dict[str, Any]) -> None\n hub = self.hub_cls.current\n\n def send_event_wrapper():\n # type: () -> None\n with hub:\n with capture_internal_exceptions():\n self._send_event(event)\n\n self._worker.submit(send_event_wrapper)\n\n def flush(self, timeout, callback=None):\n # type: (float, Optional[Any]) -> None\n logger.debug(\"Flushing HTTP transport\")\n if timeout > 0:\n self._worker.flush(timeout, callback)\n\n def kill(self):\n # type: () -> None\n logger.debug(\"Killing HTTP transport\")\n self._worker.kill()\n\n\nclass _FunctionTransport(Transport):\n def __init__(self, func):\n # type: (Callable[[Dict[str, Any]], None]) -> None\n Transport.__init__(self)\n self._func = func\n\n def capture_event(self, event):\n # type: (Dict[str, Any]) -> None\n self._func(event)\n return None\n\n\ndef make_transport(options):\n # type: (ClientOptions) -> Optional[Transport]\n ref_transport = options[\"transport\"]\n\n # If no transport is given, we use the http transport class\n if ref_transport is None:\n transport_cls = HttpTransport # type: Type[Transport]\n else:\n try:\n issubclass(ref_transport, type) # type: ignore\n except TypeError:\n # if we are not a class but we are a callable, assume a\n # function that acts as capture_event\n if callable(ref_transport):\n return _FunctionTransport(ref_transport)\n # otherwise assume an object fulfilling the transport contract\n return ref_transport\n transport_cls = ref_transport # type: ignore\n\n # if a transport class is given only instanciate it if the dsn is not\n # empty or None\n if options[\"dsn\"]:\n return transport_cls(options)\n\n return None\n", "path": "sentry_sdk/transport.py"}], "after_files": [{"content": "from __future__ import print_function\n\nimport json\nimport io\nimport urllib3 # type: ignore\nimport certifi\nimport gzip\n\nfrom datetime import datetime, timedelta\n\nfrom sentry_sdk.consts import VERSION\nfrom sentry_sdk.utils import Dsn, logger, capture_internal_exceptions\nfrom sentry_sdk.worker import BackgroundWorker\n\nif False:\n from sentry_sdk.consts import ClientOptions\n from typing import Type\n from typing import Any\n from typing import Optional\n from typing import Dict\n from typing import Union\n from typing import Callable\n from urllib3.poolmanager import PoolManager # type: ignore\n from urllib3.poolmanager import ProxyManager # type: ignore\n\ntry:\n from urllib.request import getproxies\nexcept ImportError:\n from urllib import getproxies # type: ignore\n\n\nclass Transport(object):\n \"\"\"Baseclass for all transports.\n\n A transport is used to send an event to sentry.\n \"\"\"\n\n parsed_dsn = None # type: Dsn\n\n def __init__(self, options=None):\n # type: (Optional[ClientOptions]) -> None\n self.options = options\n if options and options[\"dsn\"]:\n self.parsed_dsn = Dsn(options[\"dsn\"])\n else:\n self.parsed_dsn = None # type: ignore\n\n def capture_event(self, event):\n \"\"\"This gets invoked with the event dictionary when an event should\n be sent to sentry.\n \"\"\"\n raise NotImplementedError()\n\n def flush(self, timeout, callback=None):\n \"\"\"Wait `timeout` seconds for the 
current events to be sent out.\"\"\"\n pass\n\n def kill(self):\n # type: () -> None\n \"\"\"Forcefully kills the transport.\"\"\"\n pass\n\n def __del__(self):\n # type: () -> None\n try:\n self.kill()\n except Exception:\n pass\n\n\nclass HttpTransport(Transport):\n \"\"\"The default HTTP transport.\"\"\"\n\n def __init__(self, options):\n # type: (ClientOptions) -> None\n Transport.__init__(self, options)\n self._worker = BackgroundWorker()\n self._auth = self.parsed_dsn.to_auth(\"sentry.python/%s\" % VERSION)\n self._disabled_until = None # type: Optional[datetime]\n self._retry = urllib3.util.Retry()\n self.options = options\n\n self._pool = self._make_pool(\n self.parsed_dsn,\n http_proxy=options[\"http_proxy\"],\n https_proxy=options[\"https_proxy\"],\n ca_certs=options[\"ca_certs\"],\n )\n\n from sentry_sdk import Hub\n\n self.hub_cls = Hub\n\n def _send_event(self, event):\n # type: (Dict[str, Any]) -> None\n if self._disabled_until is not None:\n if datetime.utcnow() < self._disabled_until:\n return\n self._disabled_until = None\n\n body = io.BytesIO()\n with gzip.GzipFile(fileobj=body, mode=\"w\") as f:\n f.write(json.dumps(event, allow_nan=False).encode(\"utf-8\"))\n\n logger.debug(\n \"Sending %s event [%s] to %s project:%s\"\n % (\n event.get(\"level\") or \"error\",\n event[\"event_id\"],\n self.parsed_dsn.host,\n self.parsed_dsn.project_id,\n )\n )\n response = self._pool.request(\n \"POST\",\n str(self._auth.store_api_url),\n body=body.getvalue(),\n headers={\n \"X-Sentry-Auth\": str(self._auth.to_header()),\n \"Content-Type\": \"application/json\",\n \"Content-Encoding\": \"gzip\",\n },\n )\n\n try:\n if response.status == 429:\n self._disabled_until = datetime.utcnow() + timedelta(\n seconds=self._retry.get_retry_after(response) or 60\n )\n return\n\n elif response.status >= 300 or response.status < 200:\n logger.error(\n \"Unexpected status code: %s (body: %s)\",\n response.status,\n response.data,\n )\n finally:\n response.close()\n\n self._disabled_until = None\n\n def _get_pool_options(self, ca_certs):\n # type: (Optional[Any]) -> Dict[str, Any]\n return {\n \"num_pools\": 2,\n \"cert_reqs\": \"CERT_REQUIRED\",\n \"ca_certs\": ca_certs or certifi.where(),\n }\n\n def _make_pool(\n self,\n parsed_dsn, # type: Dsn\n http_proxy, # type: Optional[str]\n https_proxy, # type: Optional[str]\n ca_certs, # type: Optional[Any]\n ):\n # type: (...) 
-> Union[PoolManager, ProxyManager]\n proxy = None\n\n # try HTTPS first\n if parsed_dsn.scheme == \"https\" and (https_proxy != \"\"):\n proxy = https_proxy or getproxies().get(\"https\")\n\n # maybe fallback to HTTP proxy\n if not proxy and (http_proxy != \"\"):\n proxy = http_proxy or getproxies().get(\"http\")\n\n opts = self._get_pool_options(ca_certs)\n\n if proxy:\n return urllib3.ProxyManager(proxy, **opts)\n else:\n return urllib3.PoolManager(**opts)\n\n def capture_event(self, event):\n # type: (Dict[str, Any]) -> None\n hub = self.hub_cls.current\n\n def send_event_wrapper():\n # type: () -> None\n with hub:\n with capture_internal_exceptions():\n self._send_event(event)\n\n self._worker.submit(send_event_wrapper)\n\n def flush(self, timeout, callback=None):\n # type: (float, Optional[Any]) -> None\n logger.debug(\"Flushing HTTP transport\")\n if timeout > 0:\n self._worker.flush(timeout, callback)\n\n def kill(self):\n # type: () -> None\n logger.debug(\"Killing HTTP transport\")\n self._worker.kill()\n\n\nclass _FunctionTransport(Transport):\n def __init__(self, func):\n # type: (Callable[[Dict[str, Any]], None]) -> None\n Transport.__init__(self)\n self._func = func\n\n def capture_event(self, event):\n # type: (Dict[str, Any]) -> None\n self._func(event)\n return None\n\n\ndef make_transport(options):\n # type: (ClientOptions) -> Optional[Transport]\n ref_transport = options[\"transport\"]\n\n # If no transport is given, we use the http transport class\n if ref_transport is None:\n transport_cls = HttpTransport # type: Type[Transport]\n else:\n try:\n issubclass(ref_transport, type) # type: ignore\n except TypeError:\n # if we are not a class but we are a callable, assume a\n # function that acts as capture_event\n if callable(ref_transport):\n return _FunctionTransport(ref_transport)\n # otherwise assume an object fulfilling the transport contract\n return ref_transport\n transport_cls = ref_transport # type: ignore\n\n # if a transport class is given only instanciate it if the dsn is not\n # empty or None\n if options[\"dsn\"]:\n return transport_cls(options)\n\n return None\n", "path": "sentry_sdk/transport.py"}]} | 2,654 | 233 |
gh_patches_debug_352 | rasdani/github-patches | git_diff | edgedb__edgedb-1057 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bad pretty printing of datetime
Here is what I get:
```
edgedb> SELECT <datetime>'2020-01-08T17:03:06.026178+00:00';
{<local_date>'2020-01-08T17:03:06.026178+00:00'}
```
Well the `datetime` in python is a subclass of `date` but `singledispatch` is supposed to handle that well. Do we have a patched singledispatch now?
--- END ISSUE ---
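A quick illustration of the observation above (a minimal sketch, not edgedb code): `functools.singledispatch` does dispatch a subclass to the most specific handler that was actually registered, so the symptom in the issue is exactly what happens when the `datetime` renderer is never registered and `datetime` values fall through to the handler registered for `date`.

```python
# Minimal sketch (not edgedb code): singledispatch picks the most specific
# handler that was actually registered for the argument's type.
import datetime
import functools


@functools.singledispatch
def walk(o):
    return "default"


@walk.register
def _date(o: datetime.date):
    return "<local_date>"


def _datetime(o: datetime.datetime):  # defined but never registered
    return "<datetime>"


# datetime is a subclass of date; since only the date handler is registered,
# datetime values fall through to it, matching the output in the issue:
print(walk(datetime.datetime.now()))  # <local_date>


@walk.register
def _datetime_registered(o: datetime.datetime):
    return "<datetime>"


print(walk(datetime.datetime.now()))  # <datetime>, once it is registered
```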
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `edb/repl/render_binary.py`
Content:
```
1 #
2 # This source file is part of the EdgeDB open source project.
3 #
4 # Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 #
18
19
20 from __future__ import annotations
21 from typing import * # NoQA
22
23 import datetime
24 import decimal
25 import functools
26 import uuid
27
28 import edgedb
29 from edgedb import introspect
30
31 from edb.common.markup.renderers import terminal
32 from edb.common.markup.renderers import styles
33
34 from . import context
35
36
37 style = styles.Dark256
38
39
40 @functools.singledispatch
41 def walk(
42 o: Any,
43 repl_ctx: context.ReplContext,
44 buf: terminal.Buffer
45 ) -> None:
46 # The default renderer. Shouldn't be ever called,
47 # but if for some reason we haven't defined a renderer
48 # for some edgedb type it's better to render something
49 # than crash.
50 buf.write(str(o))
51
52
53 def _object_guts(
54 o: edgedb.Object,
55 repl_ctx: context.ReplContext,
56 buf: terminal.Buffer,
57 *,
58 include_id_when_empty: bool
59 ) -> bool:
60 pointers = introspect.introspect_object(o).pointers
61 if not repl_ctx.show_implicit_fields:
62 pointers = tuple(ptr for ptr in pointers if not ptr.implicit)
63 pointers_len = len(pointers)
64
65 pointers_rendered = 0
66 for ptr in pointers:
67 buf.write(ptr.name, style.key)
68 buf.write(': ')
69
70 if ptr.kind is introspect.PointerKind.LINK:
71 link = o[ptr.name]
72 walk(link, repl_ctx, buf)
73 else:
74 val = getattr(o, ptr.name)
75 walk(val, repl_ctx, buf)
76
77 pointers_rendered += 1
78 if pointers_rendered < pointers_len:
79 buf.write(',')
80 buf.mark_line_break()
81
82 if pointers_rendered == 0 and include_id_when_empty:
83 buf.write('id', style.key)
84 buf.write(': ')
85 walk(o.id, repl_ctx, buf)
86 pointers_rendered = 1
87
88 return pointers_rendered > 0
89
90
91 def _object_name(o: edgedb.Object, repl_ctx: context.ReplContext) -> str:
92 if not repl_ctx.introspect_types:
93 return 'Object'
94 assert repl_ctx.typenames
95 return repl_ctx.typenames.get(o.__tid__, 'Object')
96
97
98 @walk.register
99 def _link(
100 o: edgedb.Link,
101 repl_ctx: context.ReplContext,
102 buf: terminal.Buffer,
103 ) -> None:
104 with buf.foldable_lines():
105 buf.write(_object_name(o.target, repl_ctx), style.tree_node)
106 buf.write(' {', style.tree_node)
107 buf.folded_space()
108 with buf.indent():
109 pointers = o.__dir__()
110 pointers = tuple(ptr for ptr in pointers
111 if ptr not in {'source', 'target'})
112 pointers_len = len(pointers)
113
114 non_empty = _object_guts(
115 o.target, repl_ctx, buf,
116 include_id_when_empty=pointers_len == 0)
117
118 if pointers_len > 0:
119 if non_empty:
120 buf.write(',')
121 buf.mark_line_break()
122
123 i = 0
124 for name in pointers:
125 val = getattr(o, name)
126
127 buf.write(f'@{name}', style.code_tag)
128 buf.write(': ')
129 walk(val, repl_ctx, buf)
130 non_empty = True
131
132 i += 1
133 if i < pointers_len:
134 buf.write(',')
135 buf.mark_line_break()
136
137 if non_empty:
138 buf.folded_space()
139 buf.write('}', style.tree_node)
140
141
142 @walk.register
143 def _object(
144 o: edgedb.Object,
145 repl_ctx: context.ReplContext,
146 buf: terminal.Buffer
147 ) -> None:
148 with buf.foldable_lines():
149 buf.write(_object_name(o, repl_ctx), style.tree_node)
150 buf.write(' {', style.tree_node)
151 buf.folded_space()
152 with buf.indent():
153 non_empty = _object_guts(
154 o, repl_ctx, buf, include_id_when_empty=True)
155 if non_empty:
156 buf.folded_space()
157 buf.write('}', style.tree_node)
158
159
160 @walk.register
161 def _namedtuple(
162 o: edgedb.NamedTuple,
163 repl_ctx: context.ReplContext,
164 buf: terminal.Buffer
165 ) -> None:
166 with buf.foldable_lines():
167 buf.write('(', style.bracket)
168 with buf.indent():
169 # Call __dir__ directly as dir() scrambles the order.
170 for idx, name in enumerate(o.__dir__()):
171 val = getattr(o, name)
172
173 buf.write(name)
174 buf.write(' := ')
175 walk(val, repl_ctx, buf)
176
177 if idx < (len(o) - 1):
178 buf.write(',')
179 buf.mark_line_break()
180 buf.write(')', style.bracket)
181
182
183 @walk.register(edgedb.Array)
184 @walk.register(edgedb.Tuple)
185 @walk.register(edgedb.Set)
186 @walk.register(edgedb.LinkSet)
187 def _set(
188 o: Union[edgedb.Array, edgedb.Tuple, edgedb.Set, edgedb.LinkSet],
189 repl_ctx: context.ReplContext,
190 buf: terminal.Buffer
191 ) -> None:
192 if isinstance(o, edgedb.Array):
193 begin, end = '[', ']'
194 elif isinstance(o, edgedb.Tuple):
195 begin, end = '(', ')'
196 else:
197 begin, end = '{', '}'
198
199 with buf.foldable_lines():
200 buf.write(begin, style.bracket)
201 with buf.indent():
202 for idx, el in enumerate(o):
203 walk(el, repl_ctx, buf)
204 if idx < (len(o) - 1):
205 buf.write(',')
206 buf.mark_line_break()
207 buf.write(end, style.bracket)
208
209
210 @walk.register
211 def _uuid(
212 o: uuid.UUID,
213 repl_ctx: context.ReplContext,
214 buf: terminal.Buffer
215 ) -> None:
216 buf.write(f'<uuid>{repr(str(o))}', style.code_comment)
217
218
219 @walk.register(int)
220 @walk.register(float)
221 def _numeric(
222 o: Union[int, float],
223 repl_ctx: context.ReplContext,
224 buf: terminal.Buffer
225 ) -> None:
226 buf.write(str(o), style.code_number)
227
228
229 @walk.register
230 def _str(
231 o: str,
232 repl_ctx: context.ReplContext,
233 buf: terminal.Buffer
234 ) -> None:
235 if "'" in o:
236 rs = '"' + o.replace('"', r'\"') + '"'
237 else:
238 rs = "'" + o.replace("'", r"\'") + "'"
239 buf.write(rs, style.code_string)
240
241
242 @walk.register
243 def _bytes(
244 o: bytes,
245 repl_ctx: context.ReplContext,
246 buf: terminal.Buffer
247 ) -> None:
248 buf.write(repr(o), style.code_string)
249
250
251 @walk.register
252 def _bool(
253 o: bool,
254 repl_ctx: context.ReplContext,
255 buf: terminal.Buffer
256 ) -> None:
257 buf.write(str(o).lower(), style.code_constant)
258
259
260 @walk.register
261 def _decimal(
262 o: decimal.Decimal,
263 repl_ctx: context.ReplContext,
264 buf: terminal.Buffer
265 ) -> None:
266 buf.write(f'{o}n', style.code_number)
267
268
269 @walk.register
270 def _empty(
271 o: None,
272 repl_ctx: context.ReplContext,
273 buf: terminal.Buffer
274 ) -> None:
275 buf.write('{}', style.bracket)
276
277
278 def _datetime(
279 o: datetime.datetime,
280 repl_ctx: context.ReplContext,
281 buf: terminal.Buffer
282 ) -> None:
283 if o.tzinfo:
284 buf.write("<datetime>", style.code_comment)
285 else:
286 buf.write("<local_datetime>", style.code_comment)
287
288 buf.write(repr(o.isoformat()), style.code_string)
289
290
291 @walk.register
292 def _date(
293 o: datetime.date,
294 repl_ctx: context.ReplContext,
295 buf: terminal.Buffer
296 ) -> None:
297 buf.write("<local_date>", style.code_comment)
298 buf.write(repr(o.isoformat()), style.code_string)
299
300
301 @walk.register
302 def _time(
303 o: datetime.time,
304 repl_ctx: context.ReplContext,
305 buf: terminal.Buffer
306 ) -> None:
307 buf.write("<local_time>", style.code_comment)
308 buf.write(repr(o.isoformat()), style.code_string)
309
310
311 @walk.register
312 def _duration(
313 o: datetime.timedelta,
314 repl_ctx: context.ReplContext,
315 buf: terminal.Buffer
316 ) -> None:
317 buf.write("<duration>", style.code_comment)
318 buf.write(repr(str(o)), style.code_string)
319
320
321 @walk.register
322 def _enum(
323 o: edgedb.EnumValue,
324 repl_ctx: context.ReplContext,
325 buf: terminal.Buffer
326 ) -> None:
327 if not repl_ctx.introspect_types:
328 typename = 'enum'
329 else:
330 assert repl_ctx.typenames
331 typename = repl_ctx.typenames.get(o.__tid__, 'enum')
332
333 buf.write(f"<{typename}>", style.code_comment)
334 buf.write(f"'{o}'", style.code_string)
335
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/edb/repl/render_binary.py b/edb/repl/render_binary.py
--- a/edb/repl/render_binary.py
+++ b/edb/repl/render_binary.py
@@ -275,6 +275,7 @@
buf.write('{}', style.bracket)
[email protected]
def _datetime(
o: datetime.datetime,
repl_ctx: context.ReplContext,
| {"golden_diff": "diff --git a/edb/repl/render_binary.py b/edb/repl/render_binary.py\n--- a/edb/repl/render_binary.py\n+++ b/edb/repl/render_binary.py\n@@ -275,6 +275,7 @@\n buf.write('{}', style.bracket)\n \n \[email protected]\n def _datetime(\n o: datetime.datetime,\n repl_ctx: context.ReplContext,\n", "issue": "Bad pretty printing of datetime\nHere is what I get:\r\n```\r\nedgedb> SELECT <datetime>'2020-01-08T17:03:06.026178+00:00';\r\n{<local_date>'2020-01-08T17:03:06.026178+00:00'}\r\n```\r\nWell the `datetime` in python is a subclass of `date` but `singledispatch` is supposed to handle that well. Do we have a patched singledispatch now?\n", "before_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\nfrom typing import * # NoQA\n\nimport datetime\nimport decimal\nimport functools\nimport uuid\n\nimport edgedb\nfrom edgedb import introspect\n\nfrom edb.common.markup.renderers import terminal\nfrom edb.common.markup.renderers import styles\n\nfrom . import context\n\n\nstyle = styles.Dark256\n\n\[email protected]\ndef walk(\n o: Any,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n # The default renderer. 
Shouldn't be ever called,\n # but if for some reason we haven't defined a renderer\n # for some edgedb type it's better to render something\n # than crash.\n buf.write(str(o))\n\n\ndef _object_guts(\n o: edgedb.Object,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer,\n *,\n include_id_when_empty: bool\n) -> bool:\n pointers = introspect.introspect_object(o).pointers\n if not repl_ctx.show_implicit_fields:\n pointers = tuple(ptr for ptr in pointers if not ptr.implicit)\n pointers_len = len(pointers)\n\n pointers_rendered = 0\n for ptr in pointers:\n buf.write(ptr.name, style.key)\n buf.write(': ')\n\n if ptr.kind is introspect.PointerKind.LINK:\n link = o[ptr.name]\n walk(link, repl_ctx, buf)\n else:\n val = getattr(o, ptr.name)\n walk(val, repl_ctx, buf)\n\n pointers_rendered += 1\n if pointers_rendered < pointers_len:\n buf.write(',')\n buf.mark_line_break()\n\n if pointers_rendered == 0 and include_id_when_empty:\n buf.write('id', style.key)\n buf.write(': ')\n walk(o.id, repl_ctx, buf)\n pointers_rendered = 1\n\n return pointers_rendered > 0\n\n\ndef _object_name(o: edgedb.Object, repl_ctx: context.ReplContext) -> str:\n if not repl_ctx.introspect_types:\n return 'Object'\n assert repl_ctx.typenames\n return repl_ctx.typenames.get(o.__tid__, 'Object')\n\n\[email protected]\ndef _link(\n o: edgedb.Link,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer,\n) -> None:\n with buf.foldable_lines():\n buf.write(_object_name(o.target, repl_ctx), style.tree_node)\n buf.write(' {', style.tree_node)\n buf.folded_space()\n with buf.indent():\n pointers = o.__dir__()\n pointers = tuple(ptr for ptr in pointers\n if ptr not in {'source', 'target'})\n pointers_len = len(pointers)\n\n non_empty = _object_guts(\n o.target, repl_ctx, buf,\n include_id_when_empty=pointers_len == 0)\n\n if pointers_len > 0:\n if non_empty:\n buf.write(',')\n buf.mark_line_break()\n\n i = 0\n for name in pointers:\n val = getattr(o, name)\n\n buf.write(f'@{name}', style.code_tag)\n buf.write(': ')\n walk(val, repl_ctx, buf)\n non_empty = True\n\n i += 1\n if i < pointers_len:\n buf.write(',')\n buf.mark_line_break()\n\n if non_empty:\n buf.folded_space()\n buf.write('}', style.tree_node)\n\n\[email protected]\ndef _object(\n o: edgedb.Object,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n with buf.foldable_lines():\n buf.write(_object_name(o, repl_ctx), style.tree_node)\n buf.write(' {', style.tree_node)\n buf.folded_space()\n with buf.indent():\n non_empty = _object_guts(\n o, repl_ctx, buf, include_id_when_empty=True)\n if non_empty:\n buf.folded_space()\n buf.write('}', style.tree_node)\n\n\[email protected]\ndef _namedtuple(\n o: edgedb.NamedTuple,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n with buf.foldable_lines():\n buf.write('(', style.bracket)\n with buf.indent():\n # Call __dir__ directly as dir() scrambles the order.\n for idx, name in enumerate(o.__dir__()):\n val = getattr(o, name)\n\n buf.write(name)\n buf.write(' := ')\n walk(val, repl_ctx, buf)\n\n if idx < (len(o) - 1):\n buf.write(',')\n buf.mark_line_break()\n buf.write(')', style.bracket)\n\n\[email protected](edgedb.Array)\[email protected](edgedb.Tuple)\[email protected](edgedb.Set)\[email protected](edgedb.LinkSet)\ndef _set(\n o: Union[edgedb.Array, edgedb.Tuple, edgedb.Set, edgedb.LinkSet],\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n if isinstance(o, edgedb.Array):\n begin, end = '[', ']'\n elif isinstance(o, edgedb.Tuple):\n begin, end = '(', ')'\n else:\n 
begin, end = '{', '}'\n\n with buf.foldable_lines():\n buf.write(begin, style.bracket)\n with buf.indent():\n for idx, el in enumerate(o):\n walk(el, repl_ctx, buf)\n if idx < (len(o) - 1):\n buf.write(',')\n buf.mark_line_break()\n buf.write(end, style.bracket)\n\n\[email protected]\ndef _uuid(\n o: uuid.UUID,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(f'<uuid>{repr(str(o))}', style.code_comment)\n\n\[email protected](int)\[email protected](float)\ndef _numeric(\n o: Union[int, float],\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(str(o), style.code_number)\n\n\[email protected]\ndef _str(\n o: str,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n if \"'\" in o:\n rs = '\"' + o.replace('\"', r'\\\"') + '\"'\n else:\n rs = \"'\" + o.replace(\"'\", r\"\\'\") + \"'\"\n buf.write(rs, style.code_string)\n\n\[email protected]\ndef _bytes(\n o: bytes,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(repr(o), style.code_string)\n\n\[email protected]\ndef _bool(\n o: bool,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(str(o).lower(), style.code_constant)\n\n\[email protected]\ndef _decimal(\n o: decimal.Decimal,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(f'{o}n', style.code_number)\n\n\[email protected]\ndef _empty(\n o: None,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write('{}', style.bracket)\n\n\ndef _datetime(\n o: datetime.datetime,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n if o.tzinfo:\n buf.write(\"<datetime>\", style.code_comment)\n else:\n buf.write(\"<local_datetime>\", style.code_comment)\n\n buf.write(repr(o.isoformat()), style.code_string)\n\n\[email protected]\ndef _date(\n o: datetime.date,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(\"<local_date>\", style.code_comment)\n buf.write(repr(o.isoformat()), style.code_string)\n\n\[email protected]\ndef _time(\n o: datetime.time,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(\"<local_time>\", style.code_comment)\n buf.write(repr(o.isoformat()), style.code_string)\n\n\[email protected]\ndef _duration(\n o: datetime.timedelta,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(\"<duration>\", style.code_comment)\n buf.write(repr(str(o)), style.code_string)\n\n\[email protected]\ndef _enum(\n o: edgedb.EnumValue,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n if not repl_ctx.introspect_types:\n typename = 'enum'\n else:\n assert repl_ctx.typenames\n typename = repl_ctx.typenames.get(o.__tid__, 'enum')\n\n buf.write(f\"<{typename}>\", style.code_comment)\n buf.write(f\"'{o}'\", style.code_string)\n", "path": "edb/repl/render_binary.py"}], "after_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2019-present MagicStack Inc. 
and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\nfrom typing import * # NoQA\n\nimport datetime\nimport decimal\nimport functools\nimport uuid\n\nimport edgedb\nfrom edgedb import introspect\n\nfrom edb.common.markup.renderers import terminal\nfrom edb.common.markup.renderers import styles\n\nfrom . import context\n\n\nstyle = styles.Dark256\n\n\[email protected]\ndef walk(\n o: Any,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n # The default renderer. Shouldn't be ever called,\n # but if for some reason we haven't defined a renderer\n # for some edgedb type it's better to render something\n # than crash.\n buf.write(str(o))\n\n\ndef _object_guts(\n o: edgedb.Object,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer,\n *,\n include_id_when_empty: bool\n) -> bool:\n pointers = introspect.introspect_object(o).pointers\n if not repl_ctx.show_implicit_fields:\n pointers = tuple(ptr for ptr in pointers if not ptr.implicit)\n pointers_len = len(pointers)\n\n pointers_rendered = 0\n for ptr in pointers:\n buf.write(ptr.name, style.key)\n buf.write(': ')\n\n if ptr.kind is introspect.PointerKind.LINK:\n link = o[ptr.name]\n walk(link, repl_ctx, buf)\n else:\n val = getattr(o, ptr.name)\n walk(val, repl_ctx, buf)\n\n pointers_rendered += 1\n if pointers_rendered < pointers_len:\n buf.write(',')\n buf.mark_line_break()\n\n if pointers_rendered == 0 and include_id_when_empty:\n buf.write('id', style.key)\n buf.write(': ')\n walk(o.id, repl_ctx, buf)\n pointers_rendered = 1\n\n return pointers_rendered > 0\n\n\ndef _object_name(o: edgedb.Object, repl_ctx: context.ReplContext) -> str:\n if not repl_ctx.introspect_types:\n return 'Object'\n assert repl_ctx.typenames\n return repl_ctx.typenames.get(o.__tid__, 'Object')\n\n\[email protected]\ndef _link(\n o: edgedb.Link,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer,\n) -> None:\n with buf.foldable_lines():\n buf.write(_object_name(o.target, repl_ctx), style.tree_node)\n buf.write(' {', style.tree_node)\n buf.folded_space()\n with buf.indent():\n pointers = o.__dir__()\n pointers = tuple(ptr for ptr in pointers\n if ptr not in {'source', 'target'})\n pointers_len = len(pointers)\n\n non_empty = _object_guts(\n o.target, repl_ctx, buf,\n include_id_when_empty=pointers_len == 0)\n\n if pointers_len > 0:\n if non_empty:\n buf.write(',')\n buf.mark_line_break()\n\n i = 0\n for name in pointers:\n val = getattr(o, name)\n\n buf.write(f'@{name}', style.code_tag)\n buf.write(': ')\n walk(val, repl_ctx, buf)\n non_empty = True\n\n i += 1\n if i < pointers_len:\n buf.write(',')\n buf.mark_line_break()\n\n if non_empty:\n buf.folded_space()\n buf.write('}', style.tree_node)\n\n\[email protected]\ndef _object(\n o: edgedb.Object,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n with buf.foldable_lines():\n buf.write(_object_name(o, repl_ctx), style.tree_node)\n buf.write(' {', style.tree_node)\n buf.folded_space()\n with buf.indent():\n 
non_empty = _object_guts(\n o, repl_ctx, buf, include_id_when_empty=True)\n if non_empty:\n buf.folded_space()\n buf.write('}', style.tree_node)\n\n\[email protected]\ndef _namedtuple(\n o: edgedb.NamedTuple,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n with buf.foldable_lines():\n buf.write('(', style.bracket)\n with buf.indent():\n # Call __dir__ directly as dir() scrambles the order.\n for idx, name in enumerate(o.__dir__()):\n val = getattr(o, name)\n\n buf.write(name)\n buf.write(' := ')\n walk(val, repl_ctx, buf)\n\n if idx < (len(o) - 1):\n buf.write(',')\n buf.mark_line_break()\n buf.write(')', style.bracket)\n\n\[email protected](edgedb.Array)\[email protected](edgedb.Tuple)\[email protected](edgedb.Set)\[email protected](edgedb.LinkSet)\ndef _set(\n o: Union[edgedb.Array, edgedb.Tuple, edgedb.Set, edgedb.LinkSet],\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n if isinstance(o, edgedb.Array):\n begin, end = '[', ']'\n elif isinstance(o, edgedb.Tuple):\n begin, end = '(', ')'\n else:\n begin, end = '{', '}'\n\n with buf.foldable_lines():\n buf.write(begin, style.bracket)\n with buf.indent():\n for idx, el in enumerate(o):\n walk(el, repl_ctx, buf)\n if idx < (len(o) - 1):\n buf.write(',')\n buf.mark_line_break()\n buf.write(end, style.bracket)\n\n\[email protected]\ndef _uuid(\n o: uuid.UUID,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(f'<uuid>{repr(str(o))}', style.code_comment)\n\n\[email protected](int)\[email protected](float)\ndef _numeric(\n o: Union[int, float],\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(str(o), style.code_number)\n\n\[email protected]\ndef _str(\n o: str,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n if \"'\" in o:\n rs = '\"' + o.replace('\"', r'\\\"') + '\"'\n else:\n rs = \"'\" + o.replace(\"'\", r\"\\'\") + \"'\"\n buf.write(rs, style.code_string)\n\n\[email protected]\ndef _bytes(\n o: bytes,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(repr(o), style.code_string)\n\n\[email protected]\ndef _bool(\n o: bool,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(str(o).lower(), style.code_constant)\n\n\[email protected]\ndef _decimal(\n o: decimal.Decimal,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(f'{o}n', style.code_number)\n\n\[email protected]\ndef _empty(\n o: None,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write('{}', style.bracket)\n\n\[email protected]\ndef _datetime(\n o: datetime.datetime,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n if o.tzinfo:\n buf.write(\"<datetime>\", style.code_comment)\n else:\n buf.write(\"<local_datetime>\", style.code_comment)\n\n buf.write(repr(o.isoformat()), style.code_string)\n\n\[email protected]\ndef _date(\n o: datetime.date,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(\"<local_date>\", style.code_comment)\n buf.write(repr(o.isoformat()), style.code_string)\n\n\[email protected]\ndef _time(\n o: datetime.time,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(\"<local_time>\", style.code_comment)\n buf.write(repr(o.isoformat()), style.code_string)\n\n\[email protected]\ndef _duration(\n o: datetime.timedelta,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(\"<duration>\", style.code_comment)\n buf.write(repr(str(o)), 
style.code_string)\n\n\[email protected]\ndef _enum(\n o: edgedb.EnumValue,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n if not repl_ctx.introspect_types:\n typename = 'enum'\n else:\n assert repl_ctx.typenames\n typename = repl_ctx.typenames.get(o.__tid__, 'enum')\n\n buf.write(f\"<{typename}>\", style.code_comment)\n buf.write(f\"'{o}'\", style.code_string)\n", "path": "edb/repl/render_binary.py"}]} | 3,403 | 88 |
gh_patches_debug_57605 | rasdani/github-patches | git_diff | mdn__kuma-7036 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
T - Add opt-in checkbox for marketing email to account creation page.
**Acceptance Criteria**
- [ ] Add opt-in checkbox for marketing email to account creation page.
- [ ] Upon acct creation opt-in/out value is stored in our database for the given user
**Design comp**
- see comp [https://app.zenhub.com/workspaces/mdn-dev-5d6ea9fb3fe11605a63cea86/issues/mdn/sprints/313$](https://app.zenhub.com/workspaces/mdn-dev-5d6ea9fb3fe11605a63cea86/issues/mdn/sprints/3134)
T - Add opt-in checkbox for marketing email to the Edit Profile Page.
**Acceptance Criteria**
- [ ] Add opt-in checkbox for marketing email to the Edit Profile Page
- [ ] Saves user's selection (opt-in/out) to the database
- [ ] Stub out functionality: add user to correct email group in sendinblue (*feedback needed from Kadir)
- [ ] Update 'Publish' button to read 'Save', see comps
- [ ] Update 'Delete Account' to be a button, see comps
- [ ] This functionality should be behind a waffle flag allowing us to turn on once sendinblue is official
**Comps**
[https://app.zenhub.com/workspaces/mdn-dev-5d6ea9fb3fe11605a63cea86/issues/mdn/sprints/3136](https://app.zenhub.com/workspaces/mdn-dev-5d6ea9fb3fe11605a63cea86/issues/mdn/sprints/3136)
--- END ISSUE ---
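A minimal sketch of what the checklist above implies on the Django side (illustrative only; the field name and the standalone settings bootstrap are assumptions for the example, not the actual kuma wiring):

```python
# Illustrative sketch only; field name and the standalone settings bootstrap
# are assumptions for the example, not the actual kuma code.
import django
from django.conf import settings

settings.configure()  # minimal config so the form can run outside a project
django.setup()

from django import forms


class NewsletterOptInForm(forms.Form):
    # required=False keeps the checkbox optional: an unchecked box simply
    # means "opted out" instead of raising a validation error.
    is_newsletter_subscribed = forms.BooleanField(required=False)


form = NewsletterOptInForm(data={})  # user left the checkbox unchecked
assert form.is_valid()
print(form.cleaned_data)  # {'is_newsletter_subscribed': False}
# The cleaned value would then be written to a boolean column on the user
# record so the opt-in/out choice is persisted per account.
```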
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/users/forms.py`
Content:
```
1 from django import forms
2 from django.conf import settings
3 from django.utils.translation import gettext_lazy as _
4 from sundial.forms import TimezoneChoiceField
5 from sundial.zones import COMMON_GROUPED_CHOICES
6
7
8 from .constants import USERNAME_CHARACTERS, USERNAME_LEGACY_REGEX, USERNAME_REGEX
9 from .models import User
10 from .tasks import send_recovery_email
11
12
13 class UserBanForm(forms.Form):
14 """
15 The form used in the view that enables admins to ban users.
16 """
17
18 reason = forms.CharField(widget=forms.Textarea)
19
20
21 class UserEditForm(forms.ModelForm):
22 """
23 The main form to edit user profile data.
24
25 It dynamically adds a bunch of fields for maintaining information
26 about a user's websites
27 """
28
29 timezone = TimezoneChoiceField(
30 label=_("Timezone"),
31 initial=settings.TIME_ZONE,
32 choices=COMMON_GROUPED_CHOICES,
33 required=False,
34 )
35 beta = forms.BooleanField(label=_("Beta tester"), required=False,)
36 username = forms.RegexField(
37 label=_("Username"),
38 regex=USERNAME_REGEX,
39 max_length=30,
40 required=False,
41 error_messages={"invalid": USERNAME_CHARACTERS},
42 )
43 twitter_url = forms.CharField(
44 label=_("Twitter"),
45 required=False,
46 validators=[User.WEBSITE_VALIDATORS["twitter"]],
47 widget=forms.TextInput(
48 attrs={
49 "placeholder": "https://twitter.com/",
50 "data-fa-icon": "icon-twitter",
51 }
52 ),
53 )
54 github_url = forms.CharField(
55 label=_("GitHub"),
56 required=False,
57 validators=[User.WEBSITE_VALIDATORS["github"]],
58 widget=forms.TextInput(
59 attrs={"placeholder": "https://github.com/", "data-fa-icon": "icon-github"}
60 ),
61 )
62 is_github_url_public = forms.BooleanField(
63 label=_("Display a link to GitHub on my profile"), required=False
64 )
65 stackoverflow_url = forms.CharField(
66 label=_("Stack Overflow"),
67 required=False,
68 validators=[User.WEBSITE_VALIDATORS["stackoverflow"]],
69 widget=forms.TextInput(
70 attrs={
71 "placeholder": "https://stackoverflow.com/users/",
72 "data-fa-icon": "icon-stackexchange",
73 }
74 ),
75 )
76 linkedin_url = forms.CharField(
77 label=_("LinkedIn"),
78 required=False,
79 validators=[User.WEBSITE_VALIDATORS["linkedin"]],
80 widget=forms.TextInput(
81 attrs={
82 "placeholder": "https://www.linkedin.com/",
83 "data-fa-icon": "icon-linkedin",
84 }
85 ),
86 )
87 mozillians_url = forms.CharField(
88 label=_("Mozillians"),
89 required=False,
90 validators=[User.WEBSITE_VALIDATORS["mozillians"]],
91 widget=forms.TextInput(
92 attrs={
93 "placeholder": "https://mozillians.org/u/",
94 "data-fa-icon": "icon-group",
95 }
96 ),
97 )
98 facebook_url = forms.CharField(
99 label=_("Facebook"),
100 required=False,
101 validators=[User.WEBSITE_VALIDATORS["facebook"]],
102 widget=forms.TextInput(
103 attrs={
104 "placeholder": "https://www.facebook.com/",
105 "data-fa-icon": "icon-facebook",
106 }
107 ),
108 )
109 discourse_url = forms.CharField(
110 label=_("Discourse"),
111 required=False,
112 validators=[User.WEBSITE_VALIDATORS["discourse"]],
113 widget=forms.TextInput(
114 attrs={
115 "placeholder": "https://discourse.mozilla.org/u/",
116 "data-fa-icon": "icon-discourse",
117 }
118 ),
119 )
120
121 class Meta:
122 model = User
123 fields = (
124 "fullname",
125 "title",
126 "organization",
127 "location",
128 "locale",
129 "timezone",
130 "irc_nickname",
131 "twitter_url",
132 "github_url",
133 "is_github_url_public",
134 "stackoverflow_url",
135 "linkedin_url",
136 "mozillians_url",
137 "facebook_url",
138 "discourse_url",
139 "username",
140 )
141
142 def __init__(self, *args, **kwargs):
143 super(UserEditForm, self).__init__(*args, **kwargs)
144 # in case the username is not changed and the user has a legacy
145 # username we want to disarm the username regex
146 if (
147 "username" not in self.changed_data
148 and self.instance
149 and self.instance.has_legacy_username
150 ):
151 self.fields["username"].regex = USERNAME_LEGACY_REGEX
152
153 def clean_username(self):
154 new_username = self.cleaned_data["username"]
155
156 if not new_username:
157 raise forms.ValidationError(_("This field cannot be blank."))
158
159 if (
160 self.instance is not None
161 and User.objects.exclude(pk=self.instance.pk)
162 .filter(username=new_username)
163 .exists()
164 ):
165 raise forms.ValidationError(_("Username already in use."))
166 return new_username
167
168
169 class UserRecoveryEmailForm(forms.Form):
170 """
171 Send email(s) with an account recovery link.
172
173 Modeled after django.contrib.auth.forms.PasswordResetForm
174 """
175
176 email = forms.EmailField(label=_("Email"), max_length=254)
177
178 def save(self, request):
179 """
180 Send email(s) with an account recovery link.
181 """
182 email = self.cleaned_data["email"]
183
184 # Gather matching active users
185 active_users = User.objects.filter(is_active=True)
186 # Users using email as the primary contact email
187 primary_users = active_users.filter(email__iexact=email)
188 # Users with a matching Persona account
189 personas = active_users.filter(
190 socialaccount__uid__iexact=email, socialaccount__provider="persona"
191 )
192 # Users with that confirmed email
193 confirmed = active_users.filter(emailaddress__email__iexact=email)
194
195 # Send one account recovery email to each matching user
196 user_pks = set()
197 user_pks.update(primary_users.values_list("pk", flat=True))
198 user_pks.update(personas.values_list("pk", flat=True))
199 user_pks.update(confirmed.values_list("pk", flat=True))
200 for user_pk in sorted(user_pks):
201 # TODO figure out why this isn't a .delay() call.
202 # https://bugzilla.mozilla.org/show_bug.cgi?id=1544925
203 send_recovery_email(user_pk, email, request.LANGUAGE_CODE)
204
205
206 class UserDeleteForm(forms.Form):
207 def __init__(self, *args, username, **kwargs):
208 super().__init__(*args, **kwargs)
209
210 choices = {
211 "keep": (
212 _("Keep my attribution for my page changes (%(username)s)")
213 % {"username": username}
214 ),
215 "donate": _('Switch my attribution for my page changes to "Anonymous"'),
216 }
217
218 self.fields["attributions"].choices = choices.items()
219
220 attributions = forms.ChoiceField(required=True, widget=forms.widgets.RadioSelect())
221
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/users/forms.py b/kuma/users/forms.py
--- a/kuma/users/forms.py
+++ b/kuma/users/forms.py
@@ -117,6 +117,7 @@
}
),
)
+ is_newsletter_subscribed = forms.BooleanField(required=False)
class Meta:
model = User
@@ -137,6 +138,7 @@
"facebook_url",
"discourse_url",
"username",
+ "is_newsletter_subscribed",
)
def __init__(self, *args, **kwargs):
| {"golden_diff": "diff --git a/kuma/users/forms.py b/kuma/users/forms.py\n--- a/kuma/users/forms.py\n+++ b/kuma/users/forms.py\n@@ -117,6 +117,7 @@\n }\n ),\n )\n+ is_newsletter_subscribed = forms.BooleanField(required=False)\n \n class Meta:\n model = User\n@@ -137,6 +138,7 @@\n \"facebook_url\",\n \"discourse_url\",\n \"username\",\n+ \"is_newsletter_subscribed\",\n )\n \n def __init__(self, *args, **kwargs):\n", "issue": "T - Add opt-in checkbox for marketing email to account creation page. \n**Acceptance Criteria**\r\n\r\n- [ ] Add opt-in checkbox for marketing email to account creation page. \r\n- [ ] Upon acct creation opt-in/out value is stored in our database for the given user\r\n\r\n**Design comp**\r\n- see comp [https://app.zenhub.com/workspaces/mdn-dev-5d6ea9fb3fe11605a63cea86/issues/mdn/sprints/313$](https://app.zenhub.com/workspaces/mdn-dev-5d6ea9fb3fe11605a63cea86/issues/mdn/sprints/3134)\nT - Add opt-in checkbox for marketing email to the Edit Profile Page. \n**Acceptance Criteria**\r\n\r\n- [ ] Add opt-in checkbox for marketing email to the Edit Profile Page\r\n- [ ] Saves user's selection (opt-in/out) to the database\r\n- [ ] Stub out functionality: add user to correct email group in sendinblue (*feedback needed from Kadir)\r\n- [ ] Update 'Publish' button to read 'Save', see comps\r\n- [ ] Update 'Delete Account' to be a button, see comps\r\n- [ ] This functionality should be behind a waffle flag allowing us to turn on once sendinblue is official\r\n\r\n**Comps**\r\n[https://app.zenhub.com/workspaces/mdn-dev-5d6ea9fb3fe11605a63cea86/issues/mdn/sprints/3136](https://app.zenhub.com/workspaces/mdn-dev-5d6ea9fb3fe11605a63cea86/issues/mdn/sprints/3136)\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.utils.translation import gettext_lazy as _\nfrom sundial.forms import TimezoneChoiceField\nfrom sundial.zones import COMMON_GROUPED_CHOICES\n\n\nfrom .constants import USERNAME_CHARACTERS, USERNAME_LEGACY_REGEX, USERNAME_REGEX\nfrom .models import User\nfrom .tasks import send_recovery_email\n\n\nclass UserBanForm(forms.Form):\n \"\"\"\n The form used in the view that enables admins to ban users.\n \"\"\"\n\n reason = forms.CharField(widget=forms.Textarea)\n\n\nclass UserEditForm(forms.ModelForm):\n \"\"\"\n The main form to edit user profile data.\n\n It dynamically adds a bunch of fields for maintaining information\n about a user's websites\n \"\"\"\n\n timezone = TimezoneChoiceField(\n label=_(\"Timezone\"),\n initial=settings.TIME_ZONE,\n choices=COMMON_GROUPED_CHOICES,\n required=False,\n )\n beta = forms.BooleanField(label=_(\"Beta tester\"), required=False,)\n username = forms.RegexField(\n label=_(\"Username\"),\n regex=USERNAME_REGEX,\n max_length=30,\n required=False,\n error_messages={\"invalid\": USERNAME_CHARACTERS},\n )\n twitter_url = forms.CharField(\n label=_(\"Twitter\"),\n required=False,\n validators=[User.WEBSITE_VALIDATORS[\"twitter\"]],\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"https://twitter.com/\",\n \"data-fa-icon\": \"icon-twitter\",\n }\n ),\n )\n github_url = forms.CharField(\n label=_(\"GitHub\"),\n required=False,\n validators=[User.WEBSITE_VALIDATORS[\"github\"]],\n widget=forms.TextInput(\n attrs={\"placeholder\": \"https://github.com/\", \"data-fa-icon\": \"icon-github\"}\n ),\n )\n is_github_url_public = forms.BooleanField(\n label=_(\"Display a link to GitHub on my profile\"), required=False\n )\n stackoverflow_url = forms.CharField(\n label=_(\"Stack Overflow\"),\n 
required=False,\n validators=[User.WEBSITE_VALIDATORS[\"stackoverflow\"]],\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"https://stackoverflow.com/users/\",\n \"data-fa-icon\": \"icon-stackexchange\",\n }\n ),\n )\n linkedin_url = forms.CharField(\n label=_(\"LinkedIn\"),\n required=False,\n validators=[User.WEBSITE_VALIDATORS[\"linkedin\"]],\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"https://www.linkedin.com/\",\n \"data-fa-icon\": \"icon-linkedin\",\n }\n ),\n )\n mozillians_url = forms.CharField(\n label=_(\"Mozillians\"),\n required=False,\n validators=[User.WEBSITE_VALIDATORS[\"mozillians\"]],\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"https://mozillians.org/u/\",\n \"data-fa-icon\": \"icon-group\",\n }\n ),\n )\n facebook_url = forms.CharField(\n label=_(\"Facebook\"),\n required=False,\n validators=[User.WEBSITE_VALIDATORS[\"facebook\"]],\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"https://www.facebook.com/\",\n \"data-fa-icon\": \"icon-facebook\",\n }\n ),\n )\n discourse_url = forms.CharField(\n label=_(\"Discourse\"),\n required=False,\n validators=[User.WEBSITE_VALIDATORS[\"discourse\"]],\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"https://discourse.mozilla.org/u/\",\n \"data-fa-icon\": \"icon-discourse\",\n }\n ),\n )\n\n class Meta:\n model = User\n fields = (\n \"fullname\",\n \"title\",\n \"organization\",\n \"location\",\n \"locale\",\n \"timezone\",\n \"irc_nickname\",\n \"twitter_url\",\n \"github_url\",\n \"is_github_url_public\",\n \"stackoverflow_url\",\n \"linkedin_url\",\n \"mozillians_url\",\n \"facebook_url\",\n \"discourse_url\",\n \"username\",\n )\n\n def __init__(self, *args, **kwargs):\n super(UserEditForm, self).__init__(*args, **kwargs)\n # in case the username is not changed and the user has a legacy\n # username we want to disarm the username regex\n if (\n \"username\" not in self.changed_data\n and self.instance\n and self.instance.has_legacy_username\n ):\n self.fields[\"username\"].regex = USERNAME_LEGACY_REGEX\n\n def clean_username(self):\n new_username = self.cleaned_data[\"username\"]\n\n if not new_username:\n raise forms.ValidationError(_(\"This field cannot be blank.\"))\n\n if (\n self.instance is not None\n and User.objects.exclude(pk=self.instance.pk)\n .filter(username=new_username)\n .exists()\n ):\n raise forms.ValidationError(_(\"Username already in use.\"))\n return new_username\n\n\nclass UserRecoveryEmailForm(forms.Form):\n \"\"\"\n Send email(s) with an account recovery link.\n\n Modeled after django.contrib.auth.forms.PasswordResetForm\n \"\"\"\n\n email = forms.EmailField(label=_(\"Email\"), max_length=254)\n\n def save(self, request):\n \"\"\"\n Send email(s) with an account recovery link.\n \"\"\"\n email = self.cleaned_data[\"email\"]\n\n # Gather matching active users\n active_users = User.objects.filter(is_active=True)\n # Users using email as the primary contact email\n primary_users = active_users.filter(email__iexact=email)\n # Users with a matching Persona account\n personas = active_users.filter(\n socialaccount__uid__iexact=email, socialaccount__provider=\"persona\"\n )\n # Users with that confirmed email\n confirmed = active_users.filter(emailaddress__email__iexact=email)\n\n # Send one account recovery email to each matching user\n user_pks = set()\n user_pks.update(primary_users.values_list(\"pk\", flat=True))\n user_pks.update(personas.values_list(\"pk\", flat=True))\n user_pks.update(confirmed.values_list(\"pk\", flat=True))\n for user_pk in 
sorted(user_pks):\n # TODO figure out why this isn't a .delay() call.\n # https://bugzilla.mozilla.org/show_bug.cgi?id=1544925\n send_recovery_email(user_pk, email, request.LANGUAGE_CODE)\n\n\nclass UserDeleteForm(forms.Form):\n def __init__(self, *args, username, **kwargs):\n super().__init__(*args, **kwargs)\n\n choices = {\n \"keep\": (\n _(\"Keep my attribution for my page changes (%(username)s)\")\n % {\"username\": username}\n ),\n \"donate\": _('Switch my attribution for my page changes to \"Anonymous\"'),\n }\n\n self.fields[\"attributions\"].choices = choices.items()\n\n attributions = forms.ChoiceField(required=True, widget=forms.widgets.RadioSelect())\n", "path": "kuma/users/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.utils.translation import gettext_lazy as _\nfrom sundial.forms import TimezoneChoiceField\nfrom sundial.zones import COMMON_GROUPED_CHOICES\n\n\nfrom .constants import USERNAME_CHARACTERS, USERNAME_LEGACY_REGEX, USERNAME_REGEX\nfrom .models import User\nfrom .tasks import send_recovery_email\n\n\nclass UserBanForm(forms.Form):\n \"\"\"\n The form used in the view that enables admins to ban users.\n \"\"\"\n\n reason = forms.CharField(widget=forms.Textarea)\n\n\nclass UserEditForm(forms.ModelForm):\n \"\"\"\n The main form to edit user profile data.\n\n It dynamically adds a bunch of fields for maintaining information\n about a user's websites\n \"\"\"\n\n timezone = TimezoneChoiceField(\n label=_(\"Timezone\"),\n initial=settings.TIME_ZONE,\n choices=COMMON_GROUPED_CHOICES,\n required=False,\n )\n beta = forms.BooleanField(label=_(\"Beta tester\"), required=False,)\n username = forms.RegexField(\n label=_(\"Username\"),\n regex=USERNAME_REGEX,\n max_length=30,\n required=False,\n error_messages={\"invalid\": USERNAME_CHARACTERS},\n )\n twitter_url = forms.CharField(\n label=_(\"Twitter\"),\n required=False,\n validators=[User.WEBSITE_VALIDATORS[\"twitter\"]],\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"https://twitter.com/\",\n \"data-fa-icon\": \"icon-twitter\",\n }\n ),\n )\n github_url = forms.CharField(\n label=_(\"GitHub\"),\n required=False,\n validators=[User.WEBSITE_VALIDATORS[\"github\"]],\n widget=forms.TextInput(\n attrs={\"placeholder\": \"https://github.com/\", \"data-fa-icon\": \"icon-github\"}\n ),\n )\n is_github_url_public = forms.BooleanField(\n label=_(\"Display a link to GitHub on my profile\"), required=False\n )\n stackoverflow_url = forms.CharField(\n label=_(\"Stack Overflow\"),\n required=False,\n validators=[User.WEBSITE_VALIDATORS[\"stackoverflow\"]],\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"https://stackoverflow.com/users/\",\n \"data-fa-icon\": \"icon-stackexchange\",\n }\n ),\n )\n linkedin_url = forms.CharField(\n label=_(\"LinkedIn\"),\n required=False,\n validators=[User.WEBSITE_VALIDATORS[\"linkedin\"]],\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"https://www.linkedin.com/\",\n \"data-fa-icon\": \"icon-linkedin\",\n }\n ),\n )\n mozillians_url = forms.CharField(\n label=_(\"Mozillians\"),\n required=False,\n validators=[User.WEBSITE_VALIDATORS[\"mozillians\"]],\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"https://mozillians.org/u/\",\n \"data-fa-icon\": \"icon-group\",\n }\n ),\n )\n facebook_url = forms.CharField(\n label=_(\"Facebook\"),\n required=False,\n validators=[User.WEBSITE_VALIDATORS[\"facebook\"]],\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"https://www.facebook.com/\",\n 
\"data-fa-icon\": \"icon-facebook\",\n }\n ),\n )\n discourse_url = forms.CharField(\n label=_(\"Discourse\"),\n required=False,\n validators=[User.WEBSITE_VALIDATORS[\"discourse\"]],\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"https://discourse.mozilla.org/u/\",\n \"data-fa-icon\": \"icon-discourse\",\n }\n ),\n )\n is_newsletter_subscribed = forms.BooleanField(required=False)\n\n class Meta:\n model = User\n fields = (\n \"fullname\",\n \"title\",\n \"organization\",\n \"location\",\n \"locale\",\n \"timezone\",\n \"irc_nickname\",\n \"twitter_url\",\n \"github_url\",\n \"is_github_url_public\",\n \"stackoverflow_url\",\n \"linkedin_url\",\n \"mozillians_url\",\n \"facebook_url\",\n \"discourse_url\",\n \"username\",\n \"is_newsletter_subscribed\",\n )\n\n def __init__(self, *args, **kwargs):\n super(UserEditForm, self).__init__(*args, **kwargs)\n # in case the username is not changed and the user has a legacy\n # username we want to disarm the username regex\n if (\n \"username\" not in self.changed_data\n and self.instance\n and self.instance.has_legacy_username\n ):\n self.fields[\"username\"].regex = USERNAME_LEGACY_REGEX\n\n def clean_username(self):\n new_username = self.cleaned_data[\"username\"]\n\n if not new_username:\n raise forms.ValidationError(_(\"This field cannot be blank.\"))\n\n if (\n self.instance is not None\n and User.objects.exclude(pk=self.instance.pk)\n .filter(username=new_username)\n .exists()\n ):\n raise forms.ValidationError(_(\"Username already in use.\"))\n return new_username\n\n\nclass UserRecoveryEmailForm(forms.Form):\n \"\"\"\n Send email(s) with an account recovery link.\n\n Modeled after django.contrib.auth.forms.PasswordResetForm\n \"\"\"\n\n email = forms.EmailField(label=_(\"Email\"), max_length=254)\n\n def save(self, request):\n \"\"\"\n Send email(s) with an account recovery link.\n \"\"\"\n email = self.cleaned_data[\"email\"]\n\n # Gather matching active users\n active_users = User.objects.filter(is_active=True)\n # Users using email as the primary contact email\n primary_users = active_users.filter(email__iexact=email)\n # Users with a matching Persona account\n personas = active_users.filter(\n socialaccount__uid__iexact=email, socialaccount__provider=\"persona\"\n )\n # Users with that confirmed email\n confirmed = active_users.filter(emailaddress__email__iexact=email)\n\n # Send one account recovery email to each matching user\n user_pks = set()\n user_pks.update(primary_users.values_list(\"pk\", flat=True))\n user_pks.update(personas.values_list(\"pk\", flat=True))\n user_pks.update(confirmed.values_list(\"pk\", flat=True))\n for user_pk in sorted(user_pks):\n # TODO figure out why this isn't a .delay() call.\n # https://bugzilla.mozilla.org/show_bug.cgi?id=1544925\n send_recovery_email(user_pk, email, request.LANGUAGE_CODE)\n\n\nclass UserDeleteForm(forms.Form):\n def __init__(self, *args, username, **kwargs):\n super().__init__(*args, **kwargs)\n\n choices = {\n \"keep\": (\n _(\"Keep my attribution for my page changes (%(username)s)\")\n % {\"username\": username}\n ),\n \"donate\": _('Switch my attribution for my page changes to \"Anonymous\"'),\n }\n\n self.fields[\"attributions\"].choices = choices.items()\n\n attributions = forms.ChoiceField(required=True, widget=forms.widgets.RadioSelect())\n", "path": "kuma/users/forms.py"}]} | 2,639 | 129 |
gh_patches_debug_32186 | rasdani/github-patches | git_diff | great-expectations__great_expectations-4511 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
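A small sketch of the two idioms the issue contrasts (assumed example, not project code): under Python 2 the `__future__` import is what turns `/` into true division, while the `1.*x/y` spelling only forces a float by multiplying first.

```python
# Sketch of the two idioms the issue contrasts; on Python 3 "/" is already
# true division and the __future__ import is a no-op.
from __future__ import division


def ratio_with_future(x, y):
    return x / y  # true division: ratio_with_future(1, 2) == 0.5 on Py2 and Py3


def ratio_with_coercion(x, y):
    return 1. * x / y  # legacy workaround the issue wants to retire


print(ratio_with_future(1, 2))    # 0.5
print(ratio_with_coercion(1, 2))  # 0.5, but noisier and easy to forget
```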
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/expectations/regex_based_column_map_expectation_template.py`
Content:
```
1 """
2 This is a template for creating custom RegexBasedColumnMapExpectations.
3 For detailed instructions on how to use it, please see:
4 https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_regex_based_column_map_expectations
5 """
6
7 from typing import Dict, Optional
8
9 from great_expectations.core.expectation_configuration import ExpectationConfiguration
10 from great_expectations.exceptions.exceptions import (
11 InvalidExpectationConfigurationError,
12 )
13 from great_expectations.expectations.regex_based_column_map_expectation import (
14 RegexBasedColumnMapExpectation,
15 RegexColumnMapMetricProvider,
16 )
17
18
19 # <snippet>
20 # This class defines the Expectation itself
21 class ExpectColumnValuesToMatchSomeRegex(RegexBasedColumnMapExpectation):
22 """TODO: Add a docstring here"""
23
24 # These values will be used to configure the metric created by your expectation
25 regex_snake_name = "regex_name"
26 regex_camel_name = "RegexName"
27 regex = "regex pattern"
28 semantic_type_name_plural = None
29
30 # These examples will be shown in the public gallery.
31 # They will also be executed as unit tests for your Expectation.
32 examples = []
33
34 # Here your regex is used to create a custom metric for this expectation
35 map_metric = RegexBasedColumnMapExpectation.register_metric(
36 regex_snake_name=regex_snake_name,
37 regex_camel_name=regex_camel_name,
38 regex_=regex,
39 )
40
41 # This object contains metadata for display in the public Gallery
42 library_metadata = {
43 "tags": [], # Tags for this Expectation in the Gallery
44 "contributors": [ # Github handles for all contributors to this Expectation.
45 "@your_name_here", # Don't forget to add your github handle here!
46 ],
47 }
48
49
50 # </snippet>
51 if __name__ == "__main__":
52 ExpectColumnValuesToMatchSomeRegex().print_diagnostic_checklist()
53
```
Path: `great_expectations/expectations/regex_based_column_map_expectation.py`
Content:
```
1 import json
2 import logging
3 from abc import ABC
4 from typing import Optional
5
6 from great_expectations.core.expectation_configuration import ExpectationConfiguration
7 from great_expectations.exceptions.exceptions import (
8 InvalidExpectationConfigurationError,
9 )
10 from great_expectations.execution_engine import (
11 PandasExecutionEngine,
12 SparkDFExecutionEngine,
13 SqlAlchemyExecutionEngine,
14 )
15 from great_expectations.expectations.expectation import (
16 ColumnMapExpectation,
17 ExpectationConfiguration,
18 )
19 from great_expectations.expectations.metrics.map_metric_provider import (
20 ColumnMapMetricProvider,
21 column_condition_partial,
22 )
23 from great_expectations.expectations.metrics.util import get_dialect_regex_expression
24 from great_expectations.expectations.util import render_evaluation_parameter_string
25 from great_expectations.render.renderer.renderer import renderer
26 from great_expectations.render.types import RenderedStringTemplateContent
27 from great_expectations.render.util import (
28 handle_strict_min_max,
29 parse_row_condition_string_pandas_engine,
30 substitute_none_for_missing,
31 )
32
33 logger = logging.getLogger(__name__)
34
35
36 class RegexColumnMapMetricProvider(ColumnMapMetricProvider):
37 condition_value_keys = ()
38
39 @column_condition_partial(engine=PandasExecutionEngine)
40 def _pandas(cls, column, **kwargs):
41 return column.astype(str).str.contains(cls.regex)
42
43 @column_condition_partial(engine=SqlAlchemyExecutionEngine)
44 def _sqlalchemy(cls, column, _dialect, **kwargs):
45 regex_expression = get_dialect_regex_expression(column, cls.regex, _dialect)
46
47 if regex_expression is None:
48 logger.warning(
49 "Regex is not supported for dialect %s" % str(_dialect.dialect.name)
50 )
51 raise NotImplementedError
52
53 return regex_expression
54
55 @column_condition_partial(engine=SparkDFExecutionEngine)
56 def _spark(cls, column, **kwargs):
57 return column.rlike(cls.regex)
58
59
60 class RegexBasedColumnMapExpectation(ColumnMapExpectation, ABC):
61 @staticmethod
62 def register_metric(
63 regex_snake_name: str,
64 regex_camel_name: str,
65 regex_: str,
66 ):
67 map_metric = "column_values.match_" + regex_snake_name + "_regex"
68
69 # Define the class using `type`. This allows us to name it dynamically.
70 new_column_regex_metric_provider = type(
71 f"(ColumnValuesMatch{regex_camel_name}Regex",
72 (RegexColumnMapMetricProvider,),
73 {
74 "condition_metric_name": map_metric,
75 "regex": regex_,
76 },
77 )
78
79 return map_metric
80
81 def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
82 super().validate_configuration(configuration)
83 try:
84 assert (
85 getattr(self, "regex", None) is not None
86 ), "regex is required for RegexBasedColumnMap Expectations"
87 assert (
88 "column" in configuration.kwargs
89 ), "'column' parameter is required for column map expectations"
90 if "mostly" in configuration.kwargs:
91 mostly = configuration.kwargs["mostly"]
92 assert isinstance(
93 mostly, (int, float)
94 ), "'mostly' parameter must be an integer or float"
95 assert 0 <= mostly <= 1, "'mostly' parameter must be between 0 and 1"
96 except AssertionError as e:
97 raise InvalidExpectationConfigurationError(str(e))
98 return True
99
100 # question, descriptive, prescriptive, diagnostic
101 @classmethod
102 @renderer(renderer_type="renderer.question")
103 def _question_renderer(
104 cls, configuration, result=None, language=None, runtime_configuration=None
105 ):
106 column = configuration.kwargs.get("column")
107 mostly = configuration.kwargs.get("mostly")
108 regex = getattr(cls, "regex")
109 semantic_type_name_plural = getattr(cls, "semantic_type_name_plural", None)
110
111 if mostly == 1 or mostly is None:
112 if semantic_type_name_plural is not None:
113 return f'Are all values in column "{column}" valid {semantic_type_name_plural}, as judged by matching the regular expression {regex}?'
114 else:
115 return f'Do all values in column "{column}" match the regular expression {regex}?'
116 else:
117 if semantic_type_name_plural is not None:
118 return f'Are at least {mostly * 100}% of values in column "{column}" valid {semantic_type_name_plural}, as judged by matching the regular expression {regex}?'
119 else:
120 return f'Do at least {mostly * 100}% of values in column "{column}" match the regular expression {regex}?'
121
122 @classmethod
123 @renderer(renderer_type="renderer.answer")
124 def _answer_renderer(
125 cls, configuration=None, result=None, language=None, runtime_configuration=None
126 ):
127 column = result.expectation_config.kwargs.get("column")
128 mostly = result.expectation_config.kwargs.get("mostly")
129 regex = result.expectation_config.kwargs.get("regex")
130 semantic_type_name_plural = configuration.kwargs.get(
131 "semantic_type_name_plural"
132 )
133
134 if result.success:
135 if mostly == 1 or mostly is None:
136 if semantic_type_name_plural is not None:
137 return f'All values in column "{column}" are valid {semantic_type_name_plural}, as judged by matching the regular expression {regex}.'
138 else:
139 return f'All values in column "{column}" match the regular expression {regex}.'
140 else:
141 if semantic_type_name_plural is not None:
142 return f'At least {mostly * 100}% of values in column "{column}" are valid {semantic_type_name_plural}, as judged by matching the regular expression {regex}.'
143 else:
144 return f'At least {mostly * 100}% of values in column "{column}" match the regular expression {regex}.'
145 else:
146 if semantic_type_name_plural is not None:
147 return f' Less than {mostly * 100}% of values in column "{column}" are valid {semantic_type_name_plural}, as judged by matching the regular expression {regex}.'
148 else:
149 return f'Less than {mostly * 100}% of values in column "{column}" match the regular expression {regex}.'
150
151 @classmethod
152 def _atomic_prescriptive_template(
153 cls,
154 configuration=None,
155 result=None,
156 language=None,
157 runtime_configuration=None,
158 **kwargs,
159 ):
160 runtime_configuration = runtime_configuration or {}
161 include_column_name = runtime_configuration.get("include_column_name", True)
162 include_column_name = (
163 include_column_name if include_column_name is not None else True
164 )
165 styling = runtime_configuration.get("styling")
166 params = substitute_none_for_missing(
167 configuration.kwargs,
168 ["column", "regex", "mostly", "row_condition", "condition_parser"],
169 )
170 params_with_json_schema = {
171 "column": {"schema": {"type": "string"}, "value": params.get("column")},
172 "mostly": {"schema": {"type": "number"}, "value": params.get("mostly")},
173 "mostly_pct": {
174 "schema": {"type": "number"},
175 "value": params.get("mostly_pct"),
176 },
177 "regex": {"schema": {"type": "string"}, "value": params.get("regex")},
178 "row_condition": {
179 "schema": {"type": "string"},
180 "value": params.get("row_condition"),
181 },
182 "condition_parser": {
183 "schema": {"type": "string"},
184 "value": params.get("condition_parser"),
185 },
186 }
187
188 if not params.get("regex"):
189 template_str = (
190 "values must match a regular expression but none was specified."
191 )
192 else:
193 template_str = "values must match this regular expression: $regex"
194 if params["mostly"] is not None:
195 params_with_json_schema["mostly_pct"]["value"] = num_to_str(
196 params["mostly"] * 100, precision=15, no_scientific=True
197 )
198 template_str += ", at least $mostly_pct % of the time."
199 else:
200 template_str += "."
201
202 if include_column_name:
203 template_str = "$column " + template_str
204
205 if params["row_condition"] is not None:
206 (
207 conditional_template_str,
208 conditional_params,
209 ) = parse_row_condition_string_pandas_engine(
210 params["row_condition"], with_schema=True
211 )
212 template_str = conditional_template_str + ", then " + template_str
213 params_with_json_schema.update(conditional_params)
214
215 return (template_str, params_with_json_schema, styling)
216
217 @classmethod
218 @renderer(renderer_type="renderer.prescriptive")
219 @render_evaluation_parameter_string
220 def _prescriptive_renderer(
221 cls,
222 configuration=None,
223 result=None,
224 language=None,
225 runtime_configuration=None,
226 **kwargs,
227 ):
228 runtime_configuration = runtime_configuration or {}
229 include_column_name = runtime_configuration.get("include_column_name", True)
230 include_column_name = (
231 include_column_name if include_column_name is not None else True
232 )
233 styling = runtime_configuration.get("styling")
234 params = substitute_none_for_missing(
235 configuration.kwargs,
236 ["column", "regex", "mostly", "row_condition", "condition_parser"],
237 )
238
239 if not params.get("regex"):
240 template_str = (
241 "values must match a regular expression but none was specified."
242 )
243 else:
244 template_str = "values must match this regular expression: $regex"
245 if params["mostly"] is not None:
246 params["mostly_pct"] = num_to_str(
247 params["mostly"] * 100, precision=15, no_scientific=True
248 )
249 # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
250 template_str += ", at least $mostly_pct % of the time."
251 else:
252 template_str += "."
253
254 if include_column_name:
255 template_str = "$column " + template_str
256
257 if params["row_condition"] is not None:
258 (
259 conditional_template_str,
260 conditional_params,
261 ) = parse_row_condition_string_pandas_engine(params["row_condition"])
262 template_str = conditional_template_str + ", then " + template_str
263 params.update(conditional_params)
264
265 params_with_json_schema = {
266 "column": {"schema": {"type": "string"}, "value": params.get("column")},
267 "mostly": {"schema": {"type": "number"}, "value": params.get("mostly")},
268 "mostly_pct": {
269 "schema": {"type": "number"},
270 "value": params.get("mostly_pct"),
271 },
272 "regex": {"schema": {"type": "string"}, "value": params.get("regex")},
273 "row_condition": {
274 "schema": {"type": "string"},
275 "value": params.get("row_condition"),
276 },
277 "condition_parser": {
278 "schema": {"type": "string"},
279 "value": params.get("condition_parser"),
280 },
281 }
282
283 return [
284 RenderedStringTemplateContent(
285 **{
286 "content_block_type": "string_template",
287 "string_template": {
288 "template": template_str,
289 "params": params,
290 "styling": styling,
291 },
292 }
293 )
294 ]
295
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/expectations/regex_based_column_map_expectation_template.py b/examples/expectations/regex_based_column_map_expectation_template.py
--- a/examples/expectations/regex_based_column_map_expectation_template.py
+++ b/examples/expectations/regex_based_column_map_expectation_template.py
@@ -22,7 +22,6 @@
"""TODO: Add a docstring here"""
# These values will be used to configure the metric created by your expectation
- regex_snake_name = "regex_name"
regex_camel_name = "RegexName"
regex = "regex pattern"
semantic_type_name_plural = None
@@ -33,7 +32,6 @@

# Here your regex is used to create a custom metric for this expectation
map_metric = RegexBasedColumnMapExpectation.register_metric(
- regex_snake_name=regex_snake_name,
regex_camel_name=regex_camel_name,
regex_=regex,
)
diff --git a/great_expectations/expectations/regex_based_column_map_expectation.py b/great_expectations/expectations/regex_based_column_map_expectation.py
--- a/great_expectations/expectations/regex_based_column_map_expectation.py
+++ b/great_expectations/expectations/regex_based_column_map_expectation.py
@@ -29,6 +29,7 @@
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
+from great_expectations.util import camel_to_snake

 logger = logging.getLogger(__name__)

@@ -60,10 +61,10 @@
class RegexBasedColumnMapExpectation(ColumnMapExpectation, ABC):
@staticmethod
def register_metric(
- regex_snake_name: str,
regex_camel_name: str,
regex_: str,
):
+ regex_snake_name = camel_to_snake(regex_camel_name)
map_metric = "column_values.match_" + regex_snake_name + "_regex"

 # Define the class using `type`. This allows us to name it dynamically.
| {"golden_diff": "diff --git a/examples/expectations/regex_based_column_map_expectation_template.py b/examples/expectations/regex_based_column_map_expectation_template.py\n--- a/examples/expectations/regex_based_column_map_expectation_template.py\n+++ b/examples/expectations/regex_based_column_map_expectation_template.py\n@@ -22,7 +22,6 @@\n \"\"\"TODO: Add a docstring here\"\"\"\n \n # These values will be used to configure the metric created by your expectation\n- regex_snake_name = \"regex_name\"\n regex_camel_name = \"RegexName\"\n regex = \"regex pattern\"\n semantic_type_name_plural = None\n@@ -33,7 +32,6 @@\n \n # Here your regex is used to create a custom metric for this expectation\n map_metric = RegexBasedColumnMapExpectation.register_metric(\n- regex_snake_name=regex_snake_name,\n regex_camel_name=regex_camel_name,\n regex_=regex,\n )\ndiff --git a/great_expectations/expectations/regex_based_column_map_expectation.py b/great_expectations/expectations/regex_based_column_map_expectation.py\n--- a/great_expectations/expectations/regex_based_column_map_expectation.py\n+++ b/great_expectations/expectations/regex_based_column_map_expectation.py\n@@ -29,6 +29,7 @@\n parse_row_condition_string_pandas_engine,\n substitute_none_for_missing,\n )\n+from great_expectations.util import camel_to_snake\n \n logger = logging.getLogger(__name__)\n \n@@ -60,10 +61,10 @@\n class RegexBasedColumnMapExpectation(ColumnMapExpectation, ABC):\n @staticmethod\n def register_metric(\n- regex_snake_name: str,\n regex_camel_name: str,\n regex_: str,\n ):\n+ regex_snake_name = camel_to_snake(regex_camel_name)\n map_metric = \"column_values.match_\" + regex_snake_name + \"_regex\"\n \n # Define the class using `type`. This allows us to name it dynamically.\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "\"\"\"\nThis is a template for creating custom RegexBasedColumnMapExpectations.\nFor detailed instructions on how to use it, please see:\n https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_regex_based_column_map_expectations\n\"\"\"\n\nfrom typing import Dict, Optional\n\nfrom great_expectations.core.expectation_configuration import ExpectationConfiguration\nfrom great_expectations.exceptions.exceptions import (\n InvalidExpectationConfigurationError,\n)\nfrom great_expectations.expectations.regex_based_column_map_expectation import (\n RegexBasedColumnMapExpectation,\n RegexColumnMapMetricProvider,\n)\n\n\n# <snippet>\n# This class defines the Expectation itself\nclass ExpectColumnValuesToMatchSomeRegex(RegexBasedColumnMapExpectation):\n \"\"\"TODO: Add a docstring here\"\"\"\n\n # These values will be used to configure the metric created by your expectation\n regex_snake_name = \"regex_name\"\n regex_camel_name = \"RegexName\"\n regex = \"regex pattern\"\n semantic_type_name_plural = None\n\n # These examples will be shown in the public gallery.\n # They will also be executed as unit tests for your Expectation.\n examples = []\n\n # Here your regex is used to create a custom metric for this expectation\n map_metric = RegexBasedColumnMapExpectation.register_metric(\n regex_snake_name=regex_snake_name,\n regex_camel_name=regex_camel_name,\n regex_=regex,\n )\n\n # This object contains metadata for display in the public Gallery\n library_metadata = {\n \"tags\": [], # Tags for this Expectation in the Gallery\n \"contributors\": [ # Github 
handles for all contributors to this Expectation.\n \"@your_name_here\", # Don't forget to add your github handle here!\n ],\n }\n\n\n# </snippet>\nif __name__ == \"__main__\":\n ExpectColumnValuesToMatchSomeRegex().print_diagnostic_checklist()\n", "path": "examples/expectations/regex_based_column_map_expectation_template.py"}, {"content": "import json\nimport logging\nfrom abc import ABC\nfrom typing import Optional\n\nfrom great_expectations.core.expectation_configuration import ExpectationConfiguration\nfrom great_expectations.exceptions.exceptions import (\n InvalidExpectationConfigurationError,\n)\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.expectation import (\n ColumnMapExpectation,\n ExpectationConfiguration,\n)\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n ColumnMapMetricProvider,\n column_condition_partial,\n)\nfrom great_expectations.expectations.metrics.util import get_dialect_regex_expression\nfrom great_expectations.expectations.util import render_evaluation_parameter_string\nfrom great_expectations.render.renderer.renderer import renderer\nfrom great_expectations.render.types import RenderedStringTemplateContent\nfrom great_expectations.render.util import (\n handle_strict_min_max,\n parse_row_condition_string_pandas_engine,\n substitute_none_for_missing,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass RegexColumnMapMetricProvider(ColumnMapMetricProvider):\n condition_value_keys = ()\n\n @column_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column, **kwargs):\n return column.astype(str).str.contains(cls.regex)\n\n @column_condition_partial(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(cls, column, _dialect, **kwargs):\n regex_expression = get_dialect_regex_expression(column, cls.regex, _dialect)\n\n if regex_expression is None:\n logger.warning(\n \"Regex is not supported for dialect %s\" % str(_dialect.dialect.name)\n )\n raise NotImplementedError\n\n return regex_expression\n\n @column_condition_partial(engine=SparkDFExecutionEngine)\n def _spark(cls, column, **kwargs):\n return column.rlike(cls.regex)\n\n\nclass RegexBasedColumnMapExpectation(ColumnMapExpectation, ABC):\n @staticmethod\n def register_metric(\n regex_snake_name: str,\n regex_camel_name: str,\n regex_: str,\n ):\n map_metric = \"column_values.match_\" + regex_snake_name + \"_regex\"\n\n # Define the class using `type`. 
This allows us to name it dynamically.\n new_column_regex_metric_provider = type(\n f\"(ColumnValuesMatch{regex_camel_name}Regex\",\n (RegexColumnMapMetricProvider,),\n {\n \"condition_metric_name\": map_metric,\n \"regex\": regex_,\n },\n )\n\n return map_metric\n\n def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):\n super().validate_configuration(configuration)\n try:\n assert (\n getattr(self, \"regex\", None) is not None\n ), \"regex is required for RegexBasedColumnMap Expectations\"\n assert (\n \"column\" in configuration.kwargs\n ), \"'column' parameter is required for column map expectations\"\n if \"mostly\" in configuration.kwargs:\n mostly = configuration.kwargs[\"mostly\"]\n assert isinstance(\n mostly, (int, float)\n ), \"'mostly' parameter must be an integer or float\"\n assert 0 <= mostly <= 1, \"'mostly' parameter must be between 0 and 1\"\n except AssertionError as e:\n raise InvalidExpectationConfigurationError(str(e))\n return True\n\n # question, descriptive, prescriptive, diagnostic\n @classmethod\n @renderer(renderer_type=\"renderer.question\")\n def _question_renderer(\n cls, configuration, result=None, language=None, runtime_configuration=None\n ):\n column = configuration.kwargs.get(\"column\")\n mostly = configuration.kwargs.get(\"mostly\")\n regex = getattr(cls, \"regex\")\n semantic_type_name_plural = getattr(cls, \"semantic_type_name_plural\", None)\n\n if mostly == 1 or mostly is None:\n if semantic_type_name_plural is not None:\n return f'Are all values in column \"{column}\" valid {semantic_type_name_plural}, as judged by matching the regular expression {regex}?'\n else:\n return f'Do all values in column \"{column}\" match the regular expression {regex}?'\n else:\n if semantic_type_name_plural is not None:\n return f'Are at least {mostly * 100}% of values in column \"{column}\" valid {semantic_type_name_plural}, as judged by matching the regular expression {regex}?'\n else:\n return f'Do at least {mostly * 100}% of values in column \"{column}\" match the regular expression {regex}?'\n\n @classmethod\n @renderer(renderer_type=\"renderer.answer\")\n def _answer_renderer(\n cls, configuration=None, result=None, language=None, runtime_configuration=None\n ):\n column = result.expectation_config.kwargs.get(\"column\")\n mostly = result.expectation_config.kwargs.get(\"mostly\")\n regex = result.expectation_config.kwargs.get(\"regex\")\n semantic_type_name_plural = configuration.kwargs.get(\n \"semantic_type_name_plural\"\n )\n\n if result.success:\n if mostly == 1 or mostly is None:\n if semantic_type_name_plural is not None:\n return f'All values in column \"{column}\" are valid {semantic_type_name_plural}, as judged by matching the regular expression {regex}.'\n else:\n return f'All values in column \"{column}\" match the regular expression {regex}.'\n else:\n if semantic_type_name_plural is not None:\n return f'At least {mostly * 100}% of values in column \"{column}\" are valid {semantic_type_name_plural}, as judged by matching the regular expression {regex}.'\n else:\n return f'At least {mostly * 100}% of values in column \"{column}\" match the regular expression {regex}.'\n else:\n if semantic_type_name_plural is not None:\n return f' Less than {mostly * 100}% of values in column \"{column}\" are valid {semantic_type_name_plural}, as judged by matching the regular expression {regex}.'\n else:\n return f'Less than {mostly * 100}% of values in column \"{column}\" match the regular expression {regex}.'\n\n @classmethod\n def 
_atomic_prescriptive_template(\n cls,\n configuration=None,\n result=None,\n language=None,\n runtime_configuration=None,\n **kwargs,\n ):\n runtime_configuration = runtime_configuration or {}\n include_column_name = runtime_configuration.get(\"include_column_name\", True)\n include_column_name = (\n include_column_name if include_column_name is not None else True\n )\n styling = runtime_configuration.get(\"styling\")\n params = substitute_none_for_missing(\n configuration.kwargs,\n [\"column\", \"regex\", \"mostly\", \"row_condition\", \"condition_parser\"],\n )\n params_with_json_schema = {\n \"column\": {\"schema\": {\"type\": \"string\"}, \"value\": params.get(\"column\")},\n \"mostly\": {\"schema\": {\"type\": \"number\"}, \"value\": params.get(\"mostly\")},\n \"mostly_pct\": {\n \"schema\": {\"type\": \"number\"},\n \"value\": params.get(\"mostly_pct\"),\n },\n \"regex\": {\"schema\": {\"type\": \"string\"}, \"value\": params.get(\"regex\")},\n \"row_condition\": {\n \"schema\": {\"type\": \"string\"},\n \"value\": params.get(\"row_condition\"),\n },\n \"condition_parser\": {\n \"schema\": {\"type\": \"string\"},\n \"value\": params.get(\"condition_parser\"),\n },\n }\n\n if not params.get(\"regex\"):\n template_str = (\n \"values must match a regular expression but none was specified.\"\n )\n else:\n template_str = \"values must match this regular expression: $regex\"\n if params[\"mostly\"] is not None:\n params_with_json_schema[\"mostly_pct\"][\"value\"] = num_to_str(\n params[\"mostly\"] * 100, precision=15, no_scientific=True\n )\n template_str += \", at least $mostly_pct % of the time.\"\n else:\n template_str += \".\"\n\n if include_column_name:\n template_str = \"$column \" + template_str\n\n if params[\"row_condition\"] is not None:\n (\n conditional_template_str,\n conditional_params,\n ) = parse_row_condition_string_pandas_engine(\n params[\"row_condition\"], with_schema=True\n )\n template_str = conditional_template_str + \", then \" + template_str\n params_with_json_schema.update(conditional_params)\n\n return (template_str, params_with_json_schema, styling)\n\n @classmethod\n @renderer(renderer_type=\"renderer.prescriptive\")\n @render_evaluation_parameter_string\n def _prescriptive_renderer(\n cls,\n configuration=None,\n result=None,\n language=None,\n runtime_configuration=None,\n **kwargs,\n ):\n runtime_configuration = runtime_configuration or {}\n include_column_name = runtime_configuration.get(\"include_column_name\", True)\n include_column_name = (\n include_column_name if include_column_name is not None else True\n )\n styling = runtime_configuration.get(\"styling\")\n params = substitute_none_for_missing(\n configuration.kwargs,\n [\"column\", \"regex\", \"mostly\", \"row_condition\", \"condition_parser\"],\n )\n\n if not params.get(\"regex\"):\n template_str = (\n \"values must match a regular expression but none was specified.\"\n )\n else:\n template_str = \"values must match this regular expression: $regex\"\n if params[\"mostly\"] is not None:\n params[\"mostly_pct\"] = num_to_str(\n params[\"mostly\"] * 100, precision=15, no_scientific=True\n )\n # params[\"mostly_pct\"] = \"{:.14f}\".format(params[\"mostly\"]*100).rstrip(\"0\").rstrip(\".\")\n template_str += \", at least $mostly_pct % of the time.\"\n else:\n template_str += \".\"\n\n if include_column_name:\n template_str = \"$column \" + template_str\n\n if params[\"row_condition\"] is not None:\n (\n conditional_template_str,\n conditional_params,\n ) = 
parse_row_condition_string_pandas_engine(params[\"row_condition\"])\n template_str = conditional_template_str + \", then \" + template_str\n params.update(conditional_params)\n\n params_with_json_schema = {\n \"column\": {\"schema\": {\"type\": \"string\"}, \"value\": params.get(\"column\")},\n \"mostly\": {\"schema\": {\"type\": \"number\"}, \"value\": params.get(\"mostly\")},\n \"mostly_pct\": {\n \"schema\": {\"type\": \"number\"},\n \"value\": params.get(\"mostly_pct\"),\n },\n \"regex\": {\"schema\": {\"type\": \"string\"}, \"value\": params.get(\"regex\")},\n \"row_condition\": {\n \"schema\": {\"type\": \"string\"},\n \"value\": params.get(\"row_condition\"),\n },\n \"condition_parser\": {\n \"schema\": {\"type\": \"string\"},\n \"value\": params.get(\"condition_parser\"),\n },\n }\n\n return [\n RenderedStringTemplateContent(\n **{\n \"content_block_type\": \"string_template\",\n \"string_template\": {\n \"template\": template_str,\n \"params\": params,\n \"styling\": styling,\n },\n }\n )\n ]\n", "path": "great_expectations/expectations/regex_based_column_map_expectation.py"}], "after_files": [{"content": "\"\"\"\nThis is a template for creating custom RegexBasedColumnMapExpectations.\nFor detailed instructions on how to use it, please see:\n https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_regex_based_column_map_expectations\n\"\"\"\n\nfrom typing import Dict, Optional\n\nfrom great_expectations.core.expectation_configuration import ExpectationConfiguration\nfrom great_expectations.exceptions.exceptions import (\n InvalidExpectationConfigurationError,\n)\nfrom great_expectations.expectations.regex_based_column_map_expectation import (\n RegexBasedColumnMapExpectation,\n RegexColumnMapMetricProvider,\n)\n\n\n# <snippet>\n# This class defines the Expectation itself\nclass ExpectColumnValuesToMatchSomeRegex(RegexBasedColumnMapExpectation):\n \"\"\"TODO: Add a docstring here\"\"\"\n\n # These values will be used to configure the metric created by your expectation\n regex_camel_name = \"RegexName\"\n regex = \"regex pattern\"\n semantic_type_name_plural = None\n\n # These examples will be shown in the public gallery.\n # They will also be executed as unit tests for your Expectation.\n examples = []\n\n # Here your regex is used to create a custom metric for this expectation\n map_metric = RegexBasedColumnMapExpectation.register_metric(\n regex_camel_name=regex_camel_name,\n regex_=regex,\n )\n\n # This object contains metadata for display in the public Gallery\n library_metadata = {\n \"tags\": [], # Tags for this Expectation in the Gallery\n \"contributors\": [ # Github handles for all contributors to this Expectation.\n \"@your_name_here\", # Don't forget to add your github handle here!\n ],\n }\n\n\n# </snippet>\nif __name__ == \"__main__\":\n ExpectColumnValuesToMatchSomeRegex().print_diagnostic_checklist()\n", "path": "examples/expectations/regex_based_column_map_expectation_template.py"}, {"content": "import json\nimport logging\nfrom abc import ABC\nfrom typing import Optional\n\nfrom great_expectations.core.expectation_configuration import ExpectationConfiguration\nfrom great_expectations.exceptions.exceptions import (\n InvalidExpectationConfigurationError,\n)\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.expectation import (\n ColumnMapExpectation,\n ExpectationConfiguration,\n)\nfrom 
great_expectations.expectations.metrics.map_metric_provider import (\n ColumnMapMetricProvider,\n column_condition_partial,\n)\nfrom great_expectations.expectations.metrics.util import get_dialect_regex_expression\nfrom great_expectations.expectations.util import render_evaluation_parameter_string\nfrom great_expectations.render.renderer.renderer import renderer\nfrom great_expectations.render.types import RenderedStringTemplateContent\nfrom great_expectations.render.util import (\n handle_strict_min_max,\n parse_row_condition_string_pandas_engine,\n substitute_none_for_missing,\n)\nfrom great_expectations.util import camel_to_snake\n\nlogger = logging.getLogger(__name__)\n\n\nclass RegexColumnMapMetricProvider(ColumnMapMetricProvider):\n condition_value_keys = ()\n\n @column_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column, **kwargs):\n return column.astype(str).str.contains(cls.regex)\n\n @column_condition_partial(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(cls, column, _dialect, **kwargs):\n regex_expression = get_dialect_regex_expression(column, cls.regex, _dialect)\n\n if regex_expression is None:\n logger.warning(\n \"Regex is not supported for dialect %s\" % str(_dialect.dialect.name)\n )\n raise NotImplementedError\n\n return regex_expression\n\n @column_condition_partial(engine=SparkDFExecutionEngine)\n def _spark(cls, column, **kwargs):\n return column.rlike(cls.regex)\n\n\nclass RegexBasedColumnMapExpectation(ColumnMapExpectation, ABC):\n @staticmethod\n def register_metric(\n regex_camel_name: str,\n regex_: str,\n ):\n regex_snake_name = camel_to_snake(regex_camel_name)\n map_metric = \"column_values.match_\" + regex_snake_name + \"_regex\"\n\n # Define the class using `type`. This allows us to name it dynamically.\n new_column_regex_metric_provider = type(\n f\"(ColumnValuesMatch{regex_camel_name}Regex\",\n (RegexColumnMapMetricProvider,),\n {\n \"condition_metric_name\": map_metric,\n \"regex\": regex_,\n },\n )\n\n return map_metric\n\n def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):\n super().validate_configuration(configuration)\n try:\n assert (\n getattr(self, \"regex\", None) is not None\n ), \"regex is required for RegexBasedColumnMap Expectations\"\n assert (\n \"column\" in configuration.kwargs\n ), \"'column' parameter is required for column map expectations\"\n if \"mostly\" in configuration.kwargs:\n mostly = configuration.kwargs[\"mostly\"]\n assert isinstance(\n mostly, (int, float)\n ), \"'mostly' parameter must be an integer or float\"\n assert 0 <= mostly <= 1, \"'mostly' parameter must be between 0 and 1\"\n except AssertionError as e:\n raise InvalidExpectationConfigurationError(str(e))\n return True\n\n # question, descriptive, prescriptive, diagnostic\n @classmethod\n @renderer(renderer_type=\"renderer.question\")\n def _question_renderer(\n cls, configuration, result=None, language=None, runtime_configuration=None\n ):\n column = configuration.kwargs.get(\"column\")\n mostly = configuration.kwargs.get(\"mostly\")\n regex = getattr(cls, \"regex\")\n semantic_type_name_plural = getattr(cls, \"semantic_type_name_plural\", None)\n\n if mostly == 1 or mostly is None:\n if semantic_type_name_plural is not None:\n return f'Are all values in column \"{column}\" valid {semantic_type_name_plural}, as judged by matching the regular expression {regex}?'\n else:\n return f'Do all values in column \"{column}\" match the regular expression {regex}?'\n else:\n if semantic_type_name_plural is not 
None:\n return f'Are at least {mostly * 100}% of values in column \"{column}\" valid {semantic_type_name_plural}, as judged by matching the regular expression {regex}?'\n else:\n return f'Do at least {mostly * 100}% of values in column \"{column}\" match the regular expression {regex}?'\n\n @classmethod\n @renderer(renderer_type=\"renderer.answer\")\n def _answer_renderer(\n cls, configuration=None, result=None, language=None, runtime_configuration=None\n ):\n column = result.expectation_config.kwargs.get(\"column\")\n mostly = result.expectation_config.kwargs.get(\"mostly\")\n regex = result.expectation_config.kwargs.get(\"regex\")\n semantic_type_name_plural = configuration.kwargs.get(\n \"semantic_type_name_plural\"\n )\n\n if result.success:\n if mostly == 1 or mostly is None:\n if semantic_type_name_plural is not None:\n return f'All values in column \"{column}\" are valid {semantic_type_name_plural}, as judged by matching the regular expression {regex}.'\n else:\n return f'All values in column \"{column}\" match the regular expression {regex}.'\n else:\n if semantic_type_name_plural is not None:\n return f'At least {mostly * 100}% of values in column \"{column}\" are valid {semantic_type_name_plural}, as judged by matching the regular expression {regex}.'\n else:\n return f'At least {mostly * 100}% of values in column \"{column}\" match the regular expression {regex}.'\n else:\n if semantic_type_name_plural is not None:\n return f' Less than {mostly * 100}% of values in column \"{column}\" are valid {semantic_type_name_plural}, as judged by matching the regular expression {regex}.'\n else:\n return f'Less than {mostly * 100}% of values in column \"{column}\" match the regular expression {regex}.'\n\n @classmethod\n def _atomic_prescriptive_template(\n cls,\n configuration=None,\n result=None,\n language=None,\n runtime_configuration=None,\n **kwargs,\n ):\n runtime_configuration = runtime_configuration or {}\n include_column_name = runtime_configuration.get(\"include_column_name\", True)\n include_column_name = (\n include_column_name if include_column_name is not None else True\n )\n styling = runtime_configuration.get(\"styling\")\n params = substitute_none_for_missing(\n configuration.kwargs,\n [\"column\", \"regex\", \"mostly\", \"row_condition\", \"condition_parser\"],\n )\n params_with_json_schema = {\n \"column\": {\"schema\": {\"type\": \"string\"}, \"value\": params.get(\"column\")},\n \"mostly\": {\"schema\": {\"type\": \"number\"}, \"value\": params.get(\"mostly\")},\n \"mostly_pct\": {\n \"schema\": {\"type\": \"number\"},\n \"value\": params.get(\"mostly_pct\"),\n },\n \"regex\": {\"schema\": {\"type\": \"string\"}, \"value\": params.get(\"regex\")},\n \"row_condition\": {\n \"schema\": {\"type\": \"string\"},\n \"value\": params.get(\"row_condition\"),\n },\n \"condition_parser\": {\n \"schema\": {\"type\": \"string\"},\n \"value\": params.get(\"condition_parser\"),\n },\n }\n\n if not params.get(\"regex\"):\n template_str = (\n \"values must match a regular expression but none was specified.\"\n )\n else:\n template_str = \"values must match this regular expression: $regex\"\n if params[\"mostly\"] is not None:\n params_with_json_schema[\"mostly_pct\"][\"value\"] = num_to_str(\n params[\"mostly\"] * 100, precision=15, no_scientific=True\n )\n template_str += \", at least $mostly_pct % of the time.\"\n else:\n template_str += \".\"\n\n if include_column_name:\n template_str = \"$column \" + template_str\n\n if params[\"row_condition\"] is not None:\n (\n 
conditional_template_str,\n conditional_params,\n ) = parse_row_condition_string_pandas_engine(\n params[\"row_condition\"], with_schema=True\n )\n template_str = conditional_template_str + \", then \" + template_str\n params_with_json_schema.update(conditional_params)\n\n return (template_str, params_with_json_schema, styling)\n\n @classmethod\n @renderer(renderer_type=\"renderer.prescriptive\")\n @render_evaluation_parameter_string\n def _prescriptive_renderer(\n cls,\n configuration=None,\n result=None,\n language=None,\n runtime_configuration=None,\n **kwargs,\n ):\n runtime_configuration = runtime_configuration or {}\n include_column_name = runtime_configuration.get(\"include_column_name\", True)\n include_column_name = (\n include_column_name if include_column_name is not None else True\n )\n styling = runtime_configuration.get(\"styling\")\n params = substitute_none_for_missing(\n configuration.kwargs,\n [\"column\", \"regex\", \"mostly\", \"row_condition\", \"condition_parser\"],\n )\n\n if not params.get(\"regex\"):\n template_str = (\n \"values must match a regular expression but none was specified.\"\n )\n else:\n template_str = \"values must match this regular expression: $regex\"\n if params[\"mostly\"] is not None:\n params[\"mostly_pct\"] = num_to_str(\n params[\"mostly\"] * 100, precision=15, no_scientific=True\n )\n # params[\"mostly_pct\"] = \"{:.14f}\".format(params[\"mostly\"]*100).rstrip(\"0\").rstrip(\".\")\n template_str += \", at least $mostly_pct % of the time.\"\n else:\n template_str += \".\"\n\n if include_column_name:\n template_str = \"$column \" + template_str\n\n if params[\"row_condition\"] is not None:\n (\n conditional_template_str,\n conditional_params,\n ) = parse_row_condition_string_pandas_engine(params[\"row_condition\"])\n template_str = conditional_template_str + \", then \" + template_str\n params.update(conditional_params)\n\n params_with_json_schema = {\n \"column\": {\"schema\": {\"type\": \"string\"}, \"value\": params.get(\"column\")},\n \"mostly\": {\"schema\": {\"type\": \"number\"}, \"value\": params.get(\"mostly\")},\n \"mostly_pct\": {\n \"schema\": {\"type\": \"number\"},\n \"value\": params.get(\"mostly_pct\"),\n },\n \"regex\": {\"schema\": {\"type\": \"string\"}, \"value\": params.get(\"regex\")},\n \"row_condition\": {\n \"schema\": {\"type\": \"string\"},\n \"value\": params.get(\"row_condition\"),\n },\n \"condition_parser\": {\n \"schema\": {\"type\": \"string\"},\n \"value\": params.get(\"condition_parser\"),\n },\n }\n\n return [\n RenderedStringTemplateContent(\n **{\n \"content_block_type\": \"string_template\",\n \"string_template\": {\n \"template\": template_str,\n \"params\": params,\n \"styling\": styling,\n },\n }\n )\n ]\n", "path": "great_expectations/expectations/regex_based_column_map_expectation.py"}]} | 3,987 | 444 |
gh_patches_debug_6233 | rasdani/github-patches | git_diff | WeblateOrg__weblate-6217 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Auth provider title and image override do not work for auth providers with '-' in it
**Describe the issue**
The new feature to override the name and image for an auth provider (introduced in 159ae591e91e7c2065ede9c39f14d743f5ab07e5) does not work for auth providers with '-' in them.
<!--
A clear and concise description of the problem you are facing.
-->
**I already tried**
Describe the steps you tried to solve the problem yourself.
- [x] I've read and searched [the docs](https://docs.weblate.org/) and did not find the answer there.
If you didn’t try already, try to search there what you wrote above.
**To Reproduce the issue**
Steps to reproduce the behavior:
1. Add an auth to your settings - e.g. `SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_KEY`
2. Set the title and image override (`SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TITLE` and `SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_IMAGE`)
3. Go to the login page
4. The title and image are not overriden
**Expected behavior**
The title and image should be overridden.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `weblate/accounts/templatetags/authnames.py`
Content:
```
1 #
2 # Copyright © 2012 - 2021 Michal Čihař <[email protected]>
3 #
4 # This file is part of Weblate <https://weblate.org/>
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see <https://www.gnu.org/licenses/>.
18 #
19 """Provide user friendly names for social authentication methods."""
20
21 from django import template
22 from django.conf import settings
23 from django.contrib.staticfiles.storage import staticfiles_storage
24 from django.utils.safestring import mark_safe
25 from django.utils.translation import gettext_lazy
26
27 register = template.Library()
28
29 SOCIALS = {
30 "auth0": {"name": "Auth0", "image": "auth0.svg"},
31 "saml": {"name": "SAML", "image": "saml.svg"},
32 "google": {"name": "Google", "image": "google.svg"},
33 "google-oauth2": {"name": "Google", "image": "google.svg"},
34 "google-plus": {"name": "Google+", "image": "google.svg"},
35 "password": {"name": gettext_lazy("Password"), "image": "password.svg"},
36 "email": {"name": gettext_lazy("E-mail"), "image": "email.svg"},
37 "ubuntu": {"name": "Ubuntu", "image": "ubuntu.svg"},
38 "opensuse": {"name": "openSUSE", "image": "opensuse.svg"},
39 "fedora": {"name": "Fedora", "image": "fedora.svg"},
40 "facebook": {"name": "Facebook", "image": "facebook.svg"},
41 "github": {"name": "GitHub", "image": "github.svg"},
42 "github-enterprise": {"name": "GitHub Enterprise", "image": "github.svg"},
43 "bitbucket": {"name": "Bitbucket", "image": "bitbucket.svg"},
44 "bitbucket-oauth2": {"name": "Bitbucket", "image": "bitbucket.svg"},
45 "azuread-oauth2": {"name": "Azure", "image": "azure.svg"},
46 "azuread-tenant-oauth2": {"name": "Azure", "image": "azure.svg"},
47 "gitlab": {"name": "GitLab", "image": "gitlab.svg"},
48 "amazon": {"name": "Amazon", "image": "amazon.svg"},
49 "twitter": {"name": "Twitter", "image": "twitter.svg"},
50 "stackoverflow": {"name": "Stack Overflow", "image": "stackoverflow.svg"},
51 }
52
53 IMAGE_SOCIAL_TEMPLATE = """
54 <img class="auth-image" src="{image}" />
55 """
56
57 SOCIAL_TEMPLATE = """
58 {icon}
59 {separator}
60 {name}
61 """
62
63
64 def get_auth_params(auth: str):
65 """Returns authentication parameters."""
66 # Fallback values
67 params = {"name": auth, "image": "password.svg"}
68
69 # Hardcoded names
70 if auth in SOCIALS:
71 params.update(SOCIALS[auth])
72
73 # Settings override
74 settings_params = {
75 "name": f"SOCIAL_AUTH_{auth.upper()}_TITLE",
76 "image": f"SOCIAL_AUTH_{auth.upper()}_IMAGE",
77 }
78 for target, source in settings_params.items():
79 value = getattr(settings, source, None)
80 if value:
81 params[target] = value
82
83 return params
84
85
86 @register.simple_tag
87 def auth_name(auth: str, separator: str = "<br />"):
88 """Create HTML markup for social authentication method."""
89 params = get_auth_params(auth)
90
91 if not params["image"].startswith("http"):
92 params["image"] = staticfiles_storage.url("auth/" + params["image"])
93 params["icon"] = IMAGE_SOCIAL_TEMPLATE.format(separator=separator, **params)
94
95 return mark_safe(SOCIAL_TEMPLATE.format(separator=separator, **params))
96
97
98 def get_auth_name(auth: str):
99 """Get nice name for authentication backend."""
100 return get_auth_params(auth)["name"]
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/weblate/accounts/templatetags/authnames.py b/weblate/accounts/templatetags/authnames.py
--- a/weblate/accounts/templatetags/authnames.py
+++ b/weblate/accounts/templatetags/authnames.py
@@ -72,8 +72,8 @@

# Settings override
settings_params = {
- "name": f"SOCIAL_AUTH_{auth.upper()}_TITLE",
- "image": f"SOCIAL_AUTH_{auth.upper()}_IMAGE",
+ "name": f"SOCIAL_AUTH_{auth.upper().replace('-','_')}_TITLE",
+ "image": f"SOCIAL_AUTH_{auth.upper().replace('-','_')}_IMAGE",
}
for target, source in settings_params.items():
value = getattr(settings, source, None)
| {"golden_diff": "diff --git a/weblate/accounts/templatetags/authnames.py b/weblate/accounts/templatetags/authnames.py\n--- a/weblate/accounts/templatetags/authnames.py\n+++ b/weblate/accounts/templatetags/authnames.py\n@@ -72,8 +72,8 @@\n \n # Settings override\n settings_params = {\n- \"name\": f\"SOCIAL_AUTH_{auth.upper()}_TITLE\",\n- \"image\": f\"SOCIAL_AUTH_{auth.upper()}_IMAGE\",\n+ \"name\": f\"SOCIAL_AUTH_{auth.upper().replace('-','_')}_TITLE\",\n+ \"image\": f\"SOCIAL_AUTH_{auth.upper().replace('-','_')}_IMAGE\",\n }\n for target, source in settings_params.items():\n value = getattr(settings, source, None)\n", "issue": "Auth provider title and image override do not work for auth providers with '-' in it\n**Describe the issue**\r\n\r\nThe new feature to override the name and image for an auth provider (introduced in 159ae591e91e7c2065ede9c39f14d743f5ab07e5) does not work for auth providers with '-' in them. \r\n\r\n<!--\r\nA clear and concise description of the problem you are facing.\r\n-->\r\n\r\n**I already tried**\r\n\r\nDescribe the steps you tried to solve the problem yourself.\r\n\r\n- [x] I've read and searched [the docs](https://docs.weblate.org/) and did not find the answer there.\r\n If you didn\u2019t try already, try to search there what you wrote above.\r\n\r\n**To Reproduce the issue**\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Add an auth to your settings - e.g. `SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_KEY`\r\n2. Set the title and image override (`SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TITLE` and `SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_IMAGE`)\r\n3. Go to the login page\r\n4. The title and image are not overriden\r\n\r\n**Expected behavior**\r\n\r\nThe title and image should be overridden. \r\n\n", "before_files": [{"content": "#\n# Copyright \u00a9 2012 - 2021 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <https://www.gnu.org/licenses/>.\n#\n\"\"\"Provide user friendly names for social authentication methods.\"\"\"\n\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy\n\nregister = template.Library()\n\nSOCIALS = {\n \"auth0\": {\"name\": \"Auth0\", \"image\": \"auth0.svg\"},\n \"saml\": {\"name\": \"SAML\", \"image\": \"saml.svg\"},\n \"google\": {\"name\": \"Google\", \"image\": \"google.svg\"},\n \"google-oauth2\": {\"name\": \"Google\", \"image\": \"google.svg\"},\n \"google-plus\": {\"name\": \"Google+\", \"image\": \"google.svg\"},\n \"password\": {\"name\": gettext_lazy(\"Password\"), \"image\": \"password.svg\"},\n \"email\": {\"name\": gettext_lazy(\"E-mail\"), \"image\": \"email.svg\"},\n \"ubuntu\": {\"name\": \"Ubuntu\", \"image\": \"ubuntu.svg\"},\n \"opensuse\": {\"name\": \"openSUSE\", \"image\": \"opensuse.svg\"},\n \"fedora\": {\"name\": \"Fedora\", \"image\": \"fedora.svg\"},\n \"facebook\": {\"name\": \"Facebook\", \"image\": \"facebook.svg\"},\n \"github\": {\"name\": \"GitHub\", \"image\": \"github.svg\"},\n \"github-enterprise\": {\"name\": \"GitHub Enterprise\", \"image\": \"github.svg\"},\n \"bitbucket\": {\"name\": \"Bitbucket\", \"image\": \"bitbucket.svg\"},\n \"bitbucket-oauth2\": {\"name\": \"Bitbucket\", \"image\": \"bitbucket.svg\"},\n \"azuread-oauth2\": {\"name\": \"Azure\", \"image\": \"azure.svg\"},\n \"azuread-tenant-oauth2\": {\"name\": \"Azure\", \"image\": \"azure.svg\"},\n \"gitlab\": {\"name\": \"GitLab\", \"image\": \"gitlab.svg\"},\n \"amazon\": {\"name\": \"Amazon\", \"image\": \"amazon.svg\"},\n \"twitter\": {\"name\": \"Twitter\", \"image\": \"twitter.svg\"},\n \"stackoverflow\": {\"name\": \"Stack Overflow\", \"image\": \"stackoverflow.svg\"},\n}\n\nIMAGE_SOCIAL_TEMPLATE = \"\"\"\n<img class=\"auth-image\" src=\"{image}\" />\n\"\"\"\n\nSOCIAL_TEMPLATE = \"\"\"\n{icon}\n{separator}\n{name}\n\"\"\"\n\n\ndef get_auth_params(auth: str):\n \"\"\"Returns authentication parameters.\"\"\"\n # Fallback values\n params = {\"name\": auth, \"image\": \"password.svg\"}\n\n # Hardcoded names\n if auth in SOCIALS:\n params.update(SOCIALS[auth])\n\n # Settings override\n settings_params = {\n \"name\": f\"SOCIAL_AUTH_{auth.upper()}_TITLE\",\n \"image\": f\"SOCIAL_AUTH_{auth.upper()}_IMAGE\",\n }\n for target, source in settings_params.items():\n value = getattr(settings, source, None)\n if value:\n params[target] = value\n\n return params\n\n\[email protected]_tag\ndef auth_name(auth: str, separator: str = \"<br />\"):\n \"\"\"Create HTML markup for social authentication method.\"\"\"\n params = get_auth_params(auth)\n\n if not params[\"image\"].startswith(\"http\"):\n params[\"image\"] = staticfiles_storage.url(\"auth/\" + params[\"image\"])\n params[\"icon\"] = IMAGE_SOCIAL_TEMPLATE.format(separator=separator, **params)\n\n return mark_safe(SOCIAL_TEMPLATE.format(separator=separator, **params))\n\n\ndef get_auth_name(auth: str):\n \"\"\"Get nice name for authentication backend.\"\"\"\n return get_auth_params(auth)[\"name\"]\n", "path": "weblate/accounts/templatetags/authnames.py"}], "after_files": [{"content": "#\n# Copyright \u00a9 2012 - 2021 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License 
as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n#\n\"\"\"Provide user friendly names for social authentication methods.\"\"\"\n\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy\n\nregister = template.Library()\n\nSOCIALS = {\n \"auth0\": {\"name\": \"Auth0\", \"image\": \"auth0.svg\"},\n \"saml\": {\"name\": \"SAML\", \"image\": \"saml.svg\"},\n \"google\": {\"name\": \"Google\", \"image\": \"google.svg\"},\n \"google-oauth2\": {\"name\": \"Google\", \"image\": \"google.svg\"},\n \"google-plus\": {\"name\": \"Google+\", \"image\": \"google.svg\"},\n \"password\": {\"name\": gettext_lazy(\"Password\"), \"image\": \"password.svg\"},\n \"email\": {\"name\": gettext_lazy(\"E-mail\"), \"image\": \"email.svg\"},\n \"ubuntu\": {\"name\": \"Ubuntu\", \"image\": \"ubuntu.svg\"},\n \"opensuse\": {\"name\": \"openSUSE\", \"image\": \"opensuse.svg\"},\n \"fedora\": {\"name\": \"Fedora\", \"image\": \"fedora.svg\"},\n \"facebook\": {\"name\": \"Facebook\", \"image\": \"facebook.svg\"},\n \"github\": {\"name\": \"GitHub\", \"image\": \"github.svg\"},\n \"github-enterprise\": {\"name\": \"GitHub Enterprise\", \"image\": \"github.svg\"},\n \"bitbucket\": {\"name\": \"Bitbucket\", \"image\": \"bitbucket.svg\"},\n \"bitbucket-oauth2\": {\"name\": \"Bitbucket\", \"image\": \"bitbucket.svg\"},\n \"azuread-oauth2\": {\"name\": \"Azure\", \"image\": \"azure.svg\"},\n \"azuread-tenant-oauth2\": {\"name\": \"Azure\", \"image\": \"azure.svg\"},\n \"gitlab\": {\"name\": \"GitLab\", \"image\": \"gitlab.svg\"},\n \"amazon\": {\"name\": \"Amazon\", \"image\": \"amazon.svg\"},\n \"twitter\": {\"name\": \"Twitter\", \"image\": \"twitter.svg\"},\n \"stackoverflow\": {\"name\": \"Stack Overflow\", \"image\": \"stackoverflow.svg\"},\n}\n\nIMAGE_SOCIAL_TEMPLATE = \"\"\"\n<img class=\"auth-image\" src=\"{image}\" />\n\"\"\"\n\nSOCIAL_TEMPLATE = \"\"\"\n{icon}\n{separator}\n{name}\n\"\"\"\n\n\ndef get_auth_params(auth: str):\n \"\"\"Returns authentication parameters.\"\"\"\n # Fallback values\n params = {\"name\": auth, \"image\": \"password.svg\"}\n\n # Hardcoded names\n if auth in SOCIALS:\n params.update(SOCIALS[auth])\n\n # Settings override\n settings_params = {\n \"name\": f\"SOCIAL_AUTH_{auth.upper().replace('-','_')}_TITLE\",\n \"image\": f\"SOCIAL_AUTH_{auth.upper().replace('-','_')}_IMAGE\",\n }\n for target, source in settings_params.items():\n value = getattr(settings, source, None)\n if value:\n params[target] = value\n\n return params\n\n\[email protected]_tag\ndef auth_name(auth: str, separator: str = \"<br />\"):\n \"\"\"Create HTML markup for social authentication method.\"\"\"\n params = get_auth_params(auth)\n\n if not params[\"image\"].startswith(\"http\"):\n params[\"image\"] = staticfiles_storage.url(\"auth/\" + params[\"image\"])\n params[\"icon\"] = IMAGE_SOCIAL_TEMPLATE.format(separator=separator, **params)\n\n return 
mark_safe(SOCIAL_TEMPLATE.format(separator=separator, **params))\n\n\ndef get_auth_name(auth: str):\n \"\"\"Get nice name for authentication backend.\"\"\"\n return get_auth_params(auth)[\"name\"]\n", "path": "weblate/accounts/templatetags/authnames.py"}]} | 1,719 | 183 |
gh_patches_debug_33535 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-479 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dataset edit: The country functionality should not be in optional and should work as it does on the Dataset Creation page
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext-hdx_theme/ckanext/hdx_theme/licenses.py`
Content:
```
1 '''
2 Created on May 12, 2014
3
4 @author: alexandru-m-g
5 '''
6
7 from ckan.common import _
8 from ckan.model.license import DefaultLicense
9
10
11 class LicenseCreativeCommonsIntergovernmentalOrgs(DefaultLicense):
12 # domain_content = True
13 # domain_data = True
14 id = "cc-by-igo"
15 is_okd_compliant = False
16 url = "http://creativecommons.org/licenses/by/3.0/igo/legalcode"
17
18 @property
19 def title(self):
20 return _("Creative Commons Attribution for Intergovernmental Organisations")
21
22 class LicenseCreativeCommonsNoDerives(DefaultLicense):
23 # domain_content = True
24 # domain_data = True
25 id = "cc-by-nd"
26 is_okd_compliant = False
27 url = "http://creativecommons.org/licenses/by-nd/3.0/legalcode"
28
29 @property
30 def title(self):
31 return _("Creative Commons Attribution-NoDerives")
32
33 class LicenseOtherPublicDomainNoRestrictions(DefaultLicense):
34 # domain_content = True
35 id = "other-pd-nr"
36 is_generic = True
37 is_okd_compliant = True
38
39 @property
40 def title(self):
41 return _("Public Domain / No Restrictions")
42
43 class LicenseHdxMultiple(DefaultLicense):
44 # domain_content = True
45 id = "hdx-multi"
46 # is_generic = True
47 # is_okd_compliant = True
48
49 @property
50 def title(self):
51 return _("Multiple Licenses")
52
53 class LicenseHdxOther(DefaultLicense):
54 # domain_content = True
55 id = "hdx-other"
56 # is_generic = True
57 # is_okd_compliant = True
58
59 @property
60 def title(self):
61 return _("Other")
62
63
```
Path: `ckanext-hdx_theme/ckanext/hdx_theme/plugin.py`
Content:
```
1 import ckanext.hdx_theme.licenses as hdx_licenses
2
3 import ckan.plugins as plugins
4 import ckan.plugins.toolkit as toolkit
5 import ckan.model.package as package
6 import ckan.model.license as license
7 import version
8
9 import ckanext.hdx_theme.caching as caching
10
11
12
13 def run_on_startup():
14 _generate_license_list()
15
16 caching.cached_get_group_package_stuff()
17
18
19 def _generate_license_list():
20 package.Package._license_register = license.LicenseRegister()
21 package.Package._license_register.licenses = [
22 license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),
23 license.License(license.LicenseCreativeCommonsAttribution()),
24 license.License(license.LicenseCreativeCommonsAttributionShareAlike()),
25 license.License(hdx_licenses.LicenseCreativeCommonsNoDerives()),
26 license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),
27 license.License(hdx_licenses.LicenseHdxMultiple()),
28 license.License(hdx_licenses.LicenseHdxOther())
29 ]
30
31 class HDXThemePlugin(plugins.SingletonPlugin):
32 plugins.implements(plugins.IConfigurer)
33 plugins.implements(plugins.IRoutes, inherit=True)
34 plugins.implements(plugins.ITemplateHelpers)
35 plugins.implements(plugins.IActions)
36 plugins.implements(plugins.IGroupController, inherit=True)
37 plugins.implements(plugins.IMiddleware, inherit=True)
38
39 def update_config(self, config):
40 toolkit.add_template_directory(config, 'templates')
41 toolkit.add_public_directory(config, 'public')
42 toolkit.add_resource('fanstatic', 'hdx_theme')
43
44
45 def before_map(self, map):
46 map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')
47 map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')
48 map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')
49 map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')
50 map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')
51 map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')
52
53 map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')
54
55 return map
56
57 def create(self, entity):
58 caching.invalidate_group_caches()
59
60 def edit(self, entity):
61 caching.invalidate_group_caches()
62
63 def get_helpers(self):
64 from ckanext.hdx_theme import helpers as hdx_helpers
65 return {
66 'is_downloadable': hdx_helpers.is_downloadable,
67 'get_facet_items_dict':hdx_helpers.get_facet_items_dict,
68 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,
69 'get_filtered_params_list':hdx_helpers.get_filtered_params_list,
70 'get_last_revision_package':hdx_helpers.get_last_revision_package,
71 'get_last_modifier_user':hdx_helpers.get_last_modifier_user,
72 'get_last_revision_group':hdx_helpers.get_last_revision_group,
73 'get_group_followers':hdx_helpers.get_group_followers,
74 'get_group_members':hdx_helpers.get_group_members,
75 'markdown_extract_strip':hdx_helpers.markdown_extract_strip,
76 'render_date_from_concat_str':hdx_helpers.render_date_from_concat_str,
77 'hdx_version':hdx_helpers.hdx_version,
78 'hdx_build_nav_icon_with_message':hdx_helpers.hdx_build_nav_icon_with_message,
79 'hdx_num_of_new_related_items':hdx_helpers.hdx_num_of_new_related_items
80 }
81
82 def get_actions(self):
83 from ckanext.hdx_theme import actions as hdx_actions
84 return {
85 'organization_list_for_user':hdx_actions.organization_list_for_user,
86 'cached_group_list': hdx_actions.cached_group_list
87
88 }
89
90 def make_middleware(self, app, config):
91 run_on_startup()
92 return app
93
94
95
96
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/licenses.py b/ckanext-hdx_theme/ckanext/hdx_theme/licenses.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/licenses.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/licenses.py
@@ -19,16 +19,16 @@
def title(self):
return _("Creative Commons Attribution for Intergovernmental Organisations")
-class LicenseCreativeCommonsNoDerives(DefaultLicense):
+#class LicenseCreativeCommonsNoDerives(DefaultLicense):
# domain_content = True
# domain_data = True
- id = "cc-by-nd"
- is_okd_compliant = False
- url = "http://creativecommons.org/licenses/by-nd/3.0/legalcode"
+# id = "cc-by-nd"
+# is_okd_compliant = False
+# url = "http://creativecommons.org/licenses/by-nd/3.0/legalcode"
- @property
- def title(self):
- return _("Creative Commons Attribution-NoDerives")
+# @property
+# def title(self):
+# return _("Creative Commons Attribution-NoDerives")
class LicenseOtherPublicDomainNoRestrictions(DefaultLicense):
# domain_content = True
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
@@ -21,7 +21,6 @@
license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),
license.License(license.LicenseCreativeCommonsAttribution()),
license.License(license.LicenseCreativeCommonsAttributionShareAlike()),
- license.License(hdx_licenses.LicenseCreativeCommonsNoDerives()),
license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),
license.License(hdx_licenses.LicenseHdxMultiple()),
license.License(hdx_licenses.LicenseHdxOther())
| {"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/licenses.py b/ckanext-hdx_theme/ckanext/hdx_theme/licenses.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/licenses.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/licenses.py\n@@ -19,16 +19,16 @@\n def title(self):\n return _(\"Creative Commons Attribution for Intergovernmental Organisations\")\n \n-class LicenseCreativeCommonsNoDerives(DefaultLicense):\n+#class LicenseCreativeCommonsNoDerives(DefaultLicense):\n # domain_content = True\n # domain_data = True\n- id = \"cc-by-nd\"\n- is_okd_compliant = False\n- url = \"http://creativecommons.org/licenses/by-nd/3.0/legalcode\"\n+# id = \"cc-by-nd\"\n+# is_okd_compliant = False\n+# url = \"http://creativecommons.org/licenses/by-nd/3.0/legalcode\"\n \n- @property\n- def title(self):\n- return _(\"Creative Commons Attribution-NoDerives\")\n+# @property\n+# def title(self):\n+# return _(\"Creative Commons Attribution-NoDerives\")\n \n class LicenseOtherPublicDomainNoRestrictions(DefaultLicense):\n # domain_content = True\ndiff --git a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n@@ -21,7 +21,6 @@\n license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),\n license.License(license.LicenseCreativeCommonsAttribution()),\n license.License(license.LicenseCreativeCommonsAttributionShareAlike()),\n- license.License(hdx_licenses.LicenseCreativeCommonsNoDerives()),\n license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),\n license.License(hdx_licenses.LicenseHdxMultiple()),\n license.License(hdx_licenses.LicenseHdxOther())\n", "issue": "Dataset edit: The country functionality should not be in optional and should work as it does on the Dataset Creation page\n\n", "before_files": [{"content": "'''\nCreated on May 12, 2014\n\n@author: alexandru-m-g\n'''\n\nfrom ckan.common import _\nfrom ckan.model.license import DefaultLicense\n\n\nclass LicenseCreativeCommonsIntergovernmentalOrgs(DefaultLicense):\n# domain_content = True\n# domain_data = True\n id = \"cc-by-igo\"\n is_okd_compliant = False\n url = \"http://creativecommons.org/licenses/by/3.0/igo/legalcode\"\n\n @property\n def title(self):\n return _(\"Creative Commons Attribution for Intergovernmental Organisations\")\n \nclass LicenseCreativeCommonsNoDerives(DefaultLicense):\n# domain_content = True\n# domain_data = True\n id = \"cc-by-nd\"\n is_okd_compliant = False\n url = \"http://creativecommons.org/licenses/by-nd/3.0/legalcode\"\n\n @property\n def title(self):\n return _(\"Creative Commons Attribution-NoDerives\")\n \nclass LicenseOtherPublicDomainNoRestrictions(DefaultLicense):\n# domain_content = True\n id = \"other-pd-nr\"\n is_generic = True\n is_okd_compliant = True\n\n @property\n def title(self):\n return _(\"Public Domain / No Restrictions\")\n\nclass LicenseHdxMultiple(DefaultLicense):\n# domain_content = True\n id = \"hdx-multi\"\n# is_generic = True\n# is_okd_compliant = True\n\n @property\n def title(self):\n return _(\"Multiple Licenses\")\n\nclass LicenseHdxOther(DefaultLicense):\n# domain_content = True\n id = \"hdx-other\"\n# is_generic = True\n# is_okd_compliant = True\n\n @property\n def title(self):\n return _(\"Other\")\n\n ", "path": "ckanext-hdx_theme/ckanext/hdx_theme/licenses.py"}, {"content": "import ckanext.hdx_theme.licenses as hdx_licenses\n\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as 
toolkit\nimport ckan.model.package as package\nimport ckan.model.license as license\nimport version\n\nimport ckanext.hdx_theme.caching as caching\n\n\n\ndef run_on_startup():\n _generate_license_list()\n \n caching.cached_get_group_package_stuff()\n \n\ndef _generate_license_list():\n package.Package._license_register = license.LicenseRegister() \n package.Package._license_register.licenses = [\n license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),\n license.License(license.LicenseCreativeCommonsAttribution()),\n license.License(license.LicenseCreativeCommonsAttributionShareAlike()),\n license.License(hdx_licenses.LicenseCreativeCommonsNoDerives()),\n license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),\n license.License(hdx_licenses.LicenseHdxMultiple()),\n license.License(hdx_licenses.LicenseHdxOther())\n ]\n\nclass HDXThemePlugin(plugins.SingletonPlugin):\n plugins.implements(plugins.IConfigurer)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.ITemplateHelpers)\n plugins.implements(plugins.IActions)\n plugins.implements(plugins.IGroupController, inherit=True)\n plugins.implements(plugins.IMiddleware, inherit=True)\n \n def update_config(self, config):\n toolkit.add_template_directory(config, 'templates')\n toolkit.add_public_directory(config, 'public')\n toolkit.add_resource('fanstatic', 'hdx_theme')\n \n\n def before_map(self, map):\n map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')\n map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')\n map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')\n map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')\n map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')\n map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')\n \n map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')\n \n return map\n \n def create(self, entity):\n caching.invalidate_group_caches()\n\n def edit(self, entity):\n caching.invalidate_group_caches()\n\n def get_helpers(self):\n from ckanext.hdx_theme import helpers as hdx_helpers\n return {\n 'is_downloadable': hdx_helpers.is_downloadable,\n 'get_facet_items_dict':hdx_helpers.get_facet_items_dict,\n 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,\n 'get_filtered_params_list':hdx_helpers.get_filtered_params_list,\n 'get_last_revision_package':hdx_helpers.get_last_revision_package,\n 'get_last_modifier_user':hdx_helpers.get_last_modifier_user,\n 'get_last_revision_group':hdx_helpers.get_last_revision_group,\n 'get_group_followers':hdx_helpers.get_group_followers,\n 'get_group_members':hdx_helpers.get_group_members,\n 'markdown_extract_strip':hdx_helpers.markdown_extract_strip,\n 'render_date_from_concat_str':hdx_helpers.render_date_from_concat_str,\n 'hdx_version':hdx_helpers.hdx_version,\n 'hdx_build_nav_icon_with_message':hdx_helpers.hdx_build_nav_icon_with_message,\n 'hdx_num_of_new_related_items':hdx_helpers.hdx_num_of_new_related_items\n }\n \n def get_actions(self):\n from ckanext.hdx_theme import actions as hdx_actions\n return {\n 'organization_list_for_user':hdx_actions.organization_list_for_user, \n 'cached_group_list': hdx_actions.cached_group_list\n \n }\n \n def make_middleware(self, app, 
config):\n run_on_startup()\n return app\n\n \n \n\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/plugin.py"}], "after_files": [{"content": "'''\nCreated on May 12, 2014\n\n@author: alexandru-m-g\n'''\n\nfrom ckan.common import _\nfrom ckan.model.license import DefaultLicense\n\n\nclass LicenseCreativeCommonsIntergovernmentalOrgs(DefaultLicense):\n# domain_content = True\n# domain_data = True\n id = \"cc-by-igo\"\n is_okd_compliant = False\n url = \"http://creativecommons.org/licenses/by/3.0/igo/legalcode\"\n\n @property\n def title(self):\n return _(\"Creative Commons Attribution for Intergovernmental Organisations\")\n \n#class LicenseCreativeCommonsNoDerives(DefaultLicense):\n# domain_content = True\n# domain_data = True\n# id = \"cc-by-nd\"\n# is_okd_compliant = False\n# url = \"http://creativecommons.org/licenses/by-nd/3.0/legalcode\"\n\n# @property\n# def title(self):\n# return _(\"Creative Commons Attribution-NoDerives\")\n \nclass LicenseOtherPublicDomainNoRestrictions(DefaultLicense):\n# domain_content = True\n id = \"other-pd-nr\"\n is_generic = True\n is_okd_compliant = True\n\n @property\n def title(self):\n return _(\"Public Domain / No Restrictions\")\n\nclass LicenseHdxMultiple(DefaultLicense):\n# domain_content = True\n id = \"hdx-multi\"\n# is_generic = True\n# is_okd_compliant = True\n\n @property\n def title(self):\n return _(\"Multiple Licenses\")\n\nclass LicenseHdxOther(DefaultLicense):\n# domain_content = True\n id = \"hdx-other\"\n# is_generic = True\n# is_okd_compliant = True\n\n @property\n def title(self):\n return _(\"Other\")\n\n ", "path": "ckanext-hdx_theme/ckanext/hdx_theme/licenses.py"}, {"content": "import ckanext.hdx_theme.licenses as hdx_licenses\nfrom beaker.cache import cache_regions\n\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as toolkit\nimport ckan.model.package as package\nimport ckan.model.license as license\nimport version;\n\ncache_regions.update({\n 'hdx_memory_cache':{\n 'expire': 172800, # 2 days\n 'type':'memory',\n 'key_length': 250\n }\n })\n\ndef _generate_license_list():\n package.Package._license_register = license.LicenseRegister() \n package.Package._license_register.licenses = [\n license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),\n license.License(license.LicenseCreativeCommonsAttribution()),\n license.License(license.LicenseCreativeCommonsAttributionShareAlike()),\n license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),\n license.License(hdx_licenses.LicenseHdxMultiple()),\n license.License(hdx_licenses.LicenseHdxOther())\n ]\n\nclass HDXThemePlugin(plugins.SingletonPlugin):\n plugins.implements(plugins.IConfigurer)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.ITemplateHelpers)\n plugins.implements(plugins.IActions)\n\n def update_config(self, config):\n toolkit.add_template_directory(config, 'templates')\n toolkit.add_public_directory(config, 'public')\n toolkit.add_resource('fanstatic', 'hdx_theme')\n\n def before_map(self, map):\n map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')\n map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')\n map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')\n map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')\n map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', 
action='logged_in')\n map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')\n \n map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')\n \n # this is actually a HACK to force the customization of the license list.\n # the license list should be changed to be based on a JSON rest service\n _generate_license_list()\n \n return map\n\n def get_helpers(self):\n from ckanext.hdx_theme import helpers as hdx_helpers\n return {\n 'is_downloadable': hdx_helpers.is_downloadable,\n 'get_facet_items_dict':hdx_helpers.get_facet_items_dict,\n 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,\n 'get_filtered_params_list':hdx_helpers.get_filtered_params_list,\n 'get_last_revision_package':hdx_helpers.get_last_revision_package,\n 'get_last_modifier_user':hdx_helpers.get_last_modifier_user,\n 'get_last_revision_group':hdx_helpers.get_last_revision_group,\n 'get_group_followers':hdx_helpers.get_group_followers,\n 'get_group_members':hdx_helpers.get_group_members,\n 'markdown_extract_strip':hdx_helpers.markdown_extract_strip,\n 'render_date_from_concat_str':hdx_helpers.render_date_from_concat_str,\n 'hdx_version':hdx_helpers.hdx_version,\n 'hdx_build_nav_icon_with_message':hdx_helpers.hdx_build_nav_icon_with_message,\n 'hdx_num_of_new_related_items':hdx_helpers.hdx_num_of_new_related_items\n }\n \n def get_actions(self):\n from ckanext.hdx_theme import actions as hdx_actions\n return {\n 'organization_list_for_user':hdx_actions.organization_list_for_user\n }\n \n \n\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/plugin.py"}]} | 1,920 | 493 |
gh_patches_debug_18558 | rasdani/github-patches | git_diff | litestar-org__litestar-2592 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: Caching route handlers with the same path but different methods leads to overwriting of cache
### Description
Caching route handlers with the same path (`/` in this example) but different methods (`GET` and `POST` in this example) leads to overwriting of cache. The comments above the assert statements illustrate the current vs expected behavior.
This is related to https://github.com/litestar-org/litestar/issues/2573 and is not restricted to just `OPTIONS` and `GET`
### URL to code causing the issue
_No response_
### MCVE
```python
from litestar import get, post
from litestar.testing import create_test_client
@get(cache=10)
async def something() -> str:
return "text"
@post(cache=10)
async def something_post() -> str:
return "text_post"
with create_test_client([something, something_post]) as client:
response = client.post("")
assert response.status_code == 201
assert response.text == "text_post"
# these shouldn't pass, but they do
response = client.get("")
assert response.status_code == 201
assert response.text == "text_post"
# these should pass, but they don't
response = client.get("")
assert response.status_code == 200
assert response.text == "text"
```
### Steps to reproduce
```bash
1. Run the code
2. The second set of asserts should fail, the third set of asserts should pass
```
### Screenshots
```bash
""
```
### Logs
_No response_
### Litestar Version
GitHub - main (fd06486e2ad4ed0a41636659fec4f093a09e3dd0) as of creating this issue
### Platform
- [ ] Linux
- [X] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh Litestar dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/2588">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/2588/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/2588/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
--- END ISSUE ---
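Editorial aside: the root cause is visible in `default_cache_key_builder` in the file below — the key is built only from the URL path and query string, so `GET /` and `POST /` collide on one cache entry. A minimal, hedged workaround is a custom key builder that folds in the HTTP method; the sketch below assumes the `ResponseCacheConfig(key_builder=...)` wiring documented in that file and is illustrative only, not the project's official fix.

```python
from typing import Any
from urllib.parse import urlencode

from litestar import Litestar, Request
from litestar.config.response_cache import ResponseCacheConfig


def method_aware_cache_key_builder(request: Request[Any, Any, Any]) -> str:
    # Same recipe as the default builder, but prefixed with the HTTP method so
    # GET / and POST / produce distinct cache keys.
    query_params = sorted(request.query_params.dict().items())
    return request.method + request.url.path + urlencode(query_params, doseq=True)


# Hypothetical wiring: pass the builder through the app's response cache config.
app = Litestar(
    route_handlers=[],
    response_cache_config=ResponseCacheConfig(key_builder=method_aware_cache_key_builder),
)
```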
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/config/response_cache.py`
Content:
```
1 from __future__ import annotations
2
3 from dataclasses import dataclass, field
4 from typing import TYPE_CHECKING, Any, Callable, final
5 from urllib.parse import urlencode
6
7 from litestar.status_codes import (
8 HTTP_200_OK,
9 HTTP_300_MULTIPLE_CHOICES,
10 HTTP_301_MOVED_PERMANENTLY,
11 HTTP_308_PERMANENT_REDIRECT,
12 )
13
14 if TYPE_CHECKING:
15 from litestar import Litestar
16 from litestar.connection import Request
17 from litestar.stores.base import Store
18 from litestar.types import CacheKeyBuilder, HTTPScope
19
20 __all__ = ("ResponseCacheConfig", "default_cache_key_builder", "CACHE_FOREVER")
21
22
23 @final
24 class CACHE_FOREVER: # noqa: N801
25 """Sentinel value indicating that a cached response should be stored without an expiration, explicitly skipping the
26 default expiration
27 """
28
29
30 def default_cache_key_builder(request: Request[Any, Any, Any]) -> str:
31 """Given a request object, returns a cache key by combining the path with the sorted query params.
32
33 Args:
34 request: request used to generate cache key.
35
36 Returns:
37 A combination of url path and query parameters
38 """
39 query_params: list[tuple[str, Any]] = list(request.query_params.dict().items())
40 query_params.sort(key=lambda x: x[0])
41 return request.url.path + urlencode(query_params, doseq=True)
42
43
44 def default_do_cache_predicate(_: HTTPScope, status_code: int) -> bool:
45 """Given a status code, returns a boolean indicating whether the response should be cached.
46
47 Args:
48 _: ASGI scope.
49 status_code: status code of the response.
50
51 Returns:
52 A boolean indicating whether the response should be cached.
53 """
54 return HTTP_200_OK <= status_code < HTTP_300_MULTIPLE_CHOICES or status_code in (
55 HTTP_301_MOVED_PERMANENTLY,
56 HTTP_308_PERMANENT_REDIRECT,
57 )
58
59
60 @dataclass
61 class ResponseCacheConfig:
62 """Configuration for response caching.
63
64 To enable response caching, pass an instance of this class to :class:`Litestar <.app.Litestar>` using the
65 ``response_cache_config`` key.
66 """
67
68 default_expiration: int | None = 60
69 """Default cache expiration in seconds used when a route handler is configured with ``cache=True``."""
70 key_builder: CacheKeyBuilder = field(default=default_cache_key_builder)
71 """:class:`CacheKeyBuilder <.types.CacheKeyBuilder>`. Defaults to :func:`default_cache_key_builder`."""
72 store: str = "response_cache"
73 """Name of the :class:`Store <.stores.base.Store>` to use."""
74 cache_response_filter: Callable[[HTTPScope, int], bool] = field(default=default_do_cache_predicate)
75 """A callable that receives connection scope and a status code, and returns a boolean indicating whether the
76 response should be cached."""
77
78 def get_store_from_app(self, app: Litestar) -> Store:
79 """Get the store defined in :attr:`store` from an :class:`Litestar <.app.Litestar>` instance."""
80 return app.stores.get(self.store)
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/litestar/config/response_cache.py b/litestar/config/response_cache.py
--- a/litestar/config/response_cache.py
+++ b/litestar/config/response_cache.py
@@ -28,7 +28,8 @@
def default_cache_key_builder(request: Request[Any, Any, Any]) -> str:
- """Given a request object, returns a cache key by combining the path with the sorted query params.
+ """Given a request object, returns a cache key by combining
+ the request method and path with the sorted query params.
Args:
request: request used to generate cache key.
@@ -38,7 +39,7 @@
"""
query_params: list[tuple[str, Any]] = list(request.query_params.dict().items())
query_params.sort(key=lambda x: x[0])
- return request.url.path + urlencode(query_params, doseq=True)
+ return request.method + request.url.path + urlencode(query_params, doseq=True)
def default_do_cache_predicate(_: HTTPScope, status_code: int) -> bool:
| {"golden_diff": "diff --git a/litestar/config/response_cache.py b/litestar/config/response_cache.py\n--- a/litestar/config/response_cache.py\n+++ b/litestar/config/response_cache.py\n@@ -28,7 +28,8 @@\n \n \n def default_cache_key_builder(request: Request[Any, Any, Any]) -> str:\n- \"\"\"Given a request object, returns a cache key by combining the path with the sorted query params.\n+ \"\"\"Given a request object, returns a cache key by combining\n+ the request method and path with the sorted query params.\n \n Args:\n request: request used to generate cache key.\n@@ -38,7 +39,7 @@\n \"\"\"\n query_params: list[tuple[str, Any]] = list(request.query_params.dict().items())\n query_params.sort(key=lambda x: x[0])\n- return request.url.path + urlencode(query_params, doseq=True)\n+ return request.method + request.url.path + urlencode(query_params, doseq=True)\n \n \n def default_do_cache_predicate(_: HTTPScope, status_code: int) -> bool:\n", "issue": "Bug: Caching route handlers with the same path but different methods leads to overwriting of cache\n### Description\r\n\r\nCaching route handlers with the same path (`/` in this example) but different methods (`GET` and `POST` in this example) leads to overwriting of cache. The comments above the assert statements illustrate the current vs expected behavior.\r\n\r\nThis is related to https://github.com/litestar-org/litestar/issues/2573 and is not restricted to just `OPTIONS` and `GET`\r\n\r\n### URL to code causing the issue\r\n\r\n_No response_\r\n\r\n### MCVE\r\n\r\n```python\r\nfrom litestar import get, post\r\nfrom litestar.testing import create_test_client\r\n\r\n\r\n@get(cache=10)\r\nasync def something() -> str:\r\n return \"text\"\r\n\r\n\r\n@post(cache=10)\r\nasync def something_post() -> str:\r\n return \"text_post\"\r\n\r\n\r\nwith create_test_client([something, something_post]) as client:\r\n response = client.post(\"\")\r\n assert response.status_code == 201\r\n assert response.text == \"text_post\"\r\n\r\n # these shouldn't pass, but they do\r\n response = client.get(\"\")\r\n assert response.status_code == 201\r\n assert response.text == \"text_post\"\r\n\r\n # these should pass, but they don't\r\n response = client.get(\"\")\r\n assert response.status_code == 200\r\n assert response.text == \"text\"\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\n```bash\r\n1. Run the code\r\n2. 
The second set of asserts should fail, the third set of asserts should pass\r\n```\r\n\r\n\r\n### Screenshots\r\n\r\n```bash\r\n\"\"\r\n```\r\n\r\n\r\n### Logs\r\n\r\n_No response_\r\n\r\n### Litestar Version\r\n\r\nGitHub - main (fd06486e2ad4ed0a41636659fec4f093a09e3dd0) as of creating this issue\r\n\r\n### Platform\r\n\r\n- [ ] Linux\r\n- [X] Mac\r\n- [ ] Windows\r\n- [ ] Other (Please specify in the description above)\r\n\r\n<!-- POLAR PLEDGE BADGE START -->\r\n---\r\n> [!NOTE] \r\n> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and \r\n> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.\r\n>\r\n> Check out all issues funded or available for funding [on our Polar.sh Litestar dashboard](https://polar.sh/litestar-org)\r\n> * If you would like to see an issue prioritized, make a pledge towards it!\r\n> * We receive the pledge once the issue is completed & verified\r\n> * This, along with engagement in the community, helps us know which features are a priority to our users.\r\n\r\n<a href=\"https://polar.sh/litestar-org/litestar/issues/2588\">\r\n<picture>\r\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/2588/pledge.svg?darkmode=1\">\r\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/2588/pledge.svg\">\r\n</picture>\r\n</a>\r\n<!-- POLAR PLEDGE BADGE END -->\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom dataclasses import dataclass, field\nfrom typing import TYPE_CHECKING, Any, Callable, final\nfrom urllib.parse import urlencode\n\nfrom litestar.status_codes import (\n HTTP_200_OK,\n HTTP_300_MULTIPLE_CHOICES,\n HTTP_301_MOVED_PERMANENTLY,\n HTTP_308_PERMANENT_REDIRECT,\n)\n\nif TYPE_CHECKING:\n from litestar import Litestar\n from litestar.connection import Request\n from litestar.stores.base import Store\n from litestar.types import CacheKeyBuilder, HTTPScope\n\n__all__ = (\"ResponseCacheConfig\", \"default_cache_key_builder\", \"CACHE_FOREVER\")\n\n\n@final\nclass CACHE_FOREVER: # noqa: N801\n \"\"\"Sentinel value indicating that a cached response should be stored without an expiration, explicitly skipping the\n default expiration\n \"\"\"\n\n\ndef default_cache_key_builder(request: Request[Any, Any, Any]) -> str:\n \"\"\"Given a request object, returns a cache key by combining the path with the sorted query params.\n\n Args:\n request: request used to generate cache key.\n\n Returns:\n A combination of url path and query parameters\n \"\"\"\n query_params: list[tuple[str, Any]] = list(request.query_params.dict().items())\n query_params.sort(key=lambda x: x[0])\n return request.url.path + urlencode(query_params, doseq=True)\n\n\ndef default_do_cache_predicate(_: HTTPScope, status_code: int) -> bool:\n \"\"\"Given a status code, returns a boolean indicating whether the response should be cached.\n\n Args:\n _: ASGI scope.\n status_code: status code of the response.\n\n Returns:\n A boolean indicating whether the response should be cached.\n \"\"\"\n return HTTP_200_OK <= status_code < HTTP_300_MULTIPLE_CHOICES or status_code in (\n HTTP_301_MOVED_PERMANENTLY,\n HTTP_308_PERMANENT_REDIRECT,\n )\n\n\n@dataclass\nclass ResponseCacheConfig:\n \"\"\"Configuration for response caching.\n\n To enable response caching, pass an instance of this class to :class:`Litestar <.app.Litestar>` using the\n 
``response_cache_config`` key.\n \"\"\"\n\n default_expiration: int | None = 60\n \"\"\"Default cache expiration in seconds used when a route handler is configured with ``cache=True``.\"\"\"\n key_builder: CacheKeyBuilder = field(default=default_cache_key_builder)\n \"\"\":class:`CacheKeyBuilder <.types.CacheKeyBuilder>`. Defaults to :func:`default_cache_key_builder`.\"\"\"\n store: str = \"response_cache\"\n \"\"\"Name of the :class:`Store <.stores.base.Store>` to use.\"\"\"\n cache_response_filter: Callable[[HTTPScope, int], bool] = field(default=default_do_cache_predicate)\n \"\"\"A callable that receives connection scope and a status code, and returns a boolean indicating whether the\n response should be cached.\"\"\"\n\n def get_store_from_app(self, app: Litestar) -> Store:\n \"\"\"Get the store defined in :attr:`store` from an :class:`Litestar <.app.Litestar>` instance.\"\"\"\n return app.stores.get(self.store)\n", "path": "litestar/config/response_cache.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom dataclasses import dataclass, field\nfrom typing import TYPE_CHECKING, Any, Callable, final\nfrom urllib.parse import urlencode\n\nfrom litestar.status_codes import (\n HTTP_200_OK,\n HTTP_300_MULTIPLE_CHOICES,\n HTTP_301_MOVED_PERMANENTLY,\n HTTP_308_PERMANENT_REDIRECT,\n)\n\nif TYPE_CHECKING:\n from litestar import Litestar\n from litestar.connection import Request\n from litestar.stores.base import Store\n from litestar.types import CacheKeyBuilder, HTTPScope\n\n__all__ = (\"ResponseCacheConfig\", \"default_cache_key_builder\", \"CACHE_FOREVER\")\n\n\n@final\nclass CACHE_FOREVER: # noqa: N801\n \"\"\"Sentinel value indicating that a cached response should be stored without an expiration, explicitly skipping the\n default expiration\n \"\"\"\n\n\ndef default_cache_key_builder(request: Request[Any, Any, Any]) -> str:\n \"\"\"Given a request object, returns a cache key by combining\n the request method and path with the sorted query params.\n\n Args:\n request: request used to generate cache key.\n\n Returns:\n A combination of url path and query parameters\n \"\"\"\n query_params: list[tuple[str, Any]] = list(request.query_params.dict().items())\n query_params.sort(key=lambda x: x[0])\n return request.method + request.url.path + urlencode(query_params, doseq=True)\n\n\ndef default_do_cache_predicate(_: HTTPScope, status_code: int) -> bool:\n \"\"\"Given a status code, returns a boolean indicating whether the response should be cached.\n\n Args:\n _: ASGI scope.\n status_code: status code of the response.\n\n Returns:\n A boolean indicating whether the response should be cached.\n \"\"\"\n return HTTP_200_OK <= status_code < HTTP_300_MULTIPLE_CHOICES or status_code in (\n HTTP_301_MOVED_PERMANENTLY,\n HTTP_308_PERMANENT_REDIRECT,\n )\n\n\n@dataclass\nclass ResponseCacheConfig:\n \"\"\"Configuration for response caching.\n\n To enable response caching, pass an instance of this class to :class:`Litestar <.app.Litestar>` using the\n ``response_cache_config`` key.\n \"\"\"\n\n default_expiration: int | None = 60\n \"\"\"Default cache expiration in seconds used when a route handler is configured with ``cache=True``.\"\"\"\n key_builder: CacheKeyBuilder = field(default=default_cache_key_builder)\n \"\"\":class:`CacheKeyBuilder <.types.CacheKeyBuilder>`. 
Defaults to :func:`default_cache_key_builder`.\"\"\"\n store: str = \"response_cache\"\n \"\"\"Name of the :class:`Store <.stores.base.Store>` to use.\"\"\"\n cache_response_filter: Callable[[HTTPScope, int], bool] = field(default=default_do_cache_predicate)\n \"\"\"A callable that receives connection scope and a status code, and returns a boolean indicating whether the\n response should be cached.\"\"\"\n\n def get_store_from_app(self, app: Litestar) -> Store:\n \"\"\"Get the store defined in :attr:`store` from an :class:`Litestar <.app.Litestar>` instance.\"\"\"\n return app.stores.get(self.store)\n", "path": "litestar/config/response_cache.py"}]} | 1,849 | 231 |
gh_patches_debug_34917 | rasdani/github-patches | git_diff | adap__flower-744 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update signature to Dict[floatScalar
https://github.com/adap/flower/blob/a8a7dd1fce957540b49c59e56221d5762a937f8d/src/py/flwr/server/strategy/fedavg.py#L106
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/py/flwr/server/strategy/fedavg.py`
Content:
```
1 # Copyright 2020 Adap GmbH. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Federated Averaging (FedAvg) [McMahan et al., 2016] strategy.
16
17 Paper: https://arxiv.org/abs/1602.05629
18 """
19
20
21 from logging import WARNING
22 from typing import Callable, Dict, List, Optional, Tuple
23
24 from flwr.common import (
25 EvaluateIns,
26 EvaluateRes,
27 FitIns,
28 FitRes,
29 Parameters,
30 Scalar,
31 Weights,
32 parameters_to_weights,
33 weights_to_parameters,
34 )
35 from flwr.common.logger import log
36 from flwr.server.client_manager import ClientManager
37 from flwr.server.client_proxy import ClientProxy
38
39 from .aggregate import aggregate, weighted_loss_avg
40 from .strategy import Strategy
41
42 DEPRECATION_WARNING = """
43 DEPRECATION WARNING: deprecated `eval_fn` return format
44
45 loss, accuracy
46
47 move to
48
49 loss, {"accuracy": accuracy}
50
51 instead. Note that compatibility with the deprecated return format will be
52 removed in a future release.
53 """
54
55 DEPRECATION_WARNING_INITIAL_PARAMETERS = """
56 DEPRECATION WARNING: deprecated initial parameter type
57
58 flwr.common.Weights (i.e., List[np.ndarray])
59
60 will be removed in a future update, move to
61
62 flwr.common.Parameters
63
64 instead. Use
65
66 parameters = flwr.common.weights_to_parameters(weights)
67
68 to easily transform `Weights` to `Parameters`.
69 """
70
71
72 class FedAvg(Strategy):
73 """Configurable FedAvg strategy implementation."""
74
75 # pylint: disable=too-many-arguments,too-many-instance-attributes
76 def __init__(
77 self,
78 fraction_fit: float = 0.1,
79 fraction_eval: float = 0.1,
80 min_fit_clients: int = 2,
81 min_eval_clients: int = 2,
82 min_available_clients: int = 2,
83 eval_fn: Optional[
84 Callable[[Weights], Optional[Tuple[float, Dict[str, Scalar]]]]
85 ] = None,
86 on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,
87 on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,
88 accept_failures: bool = True,
89 initial_parameters: Optional[Parameters] = None,
90 ) -> None:
91 """Federated Averaging strategy.
92
93 Implementation based on https://arxiv.org/abs/1602.05629
94
95 Args:
96 fraction_fit (float, optional): Fraction of clients used during
97 training. Defaults to 0.1.
98 fraction_eval (float, optional): Fraction of clients used during
99 validation. Defaults to 0.1.
100 min_fit_clients (int, optional): Minimum number of clients used
101 during training. Defaults to 2.
102 min_eval_clients (int, optional): Minimum number of clients used
103 during validation. Defaults to 2.
104 min_available_clients (int, optional): Minimum number of total
105 clients in the system. Defaults to 2.
106 eval_fn (Callable[[Weights], Optional[Tuple[float, float]]], optional):
107 Function used for validation. Defaults to None.
108 on_fit_config_fn (Callable[[int], Dict[str, Scalar]], optional):
109 Function used to configure training. Defaults to None.
110 on_evaluate_config_fn (Callable[[int], Dict[str, Scalar]], optional):
111 Function used to configure validation. Defaults to None.
112 accept_failures (bool, optional): Whether or not accept rounds
113 containing failures. Defaults to True.
114 initial_parameters (Parameters, optional): Initial global model parameters.
115 """
116 super().__init__()
117 self.min_fit_clients = min_fit_clients
118 self.min_eval_clients = min_eval_clients
119 self.fraction_fit = fraction_fit
120 self.fraction_eval = fraction_eval
121 self.min_available_clients = min_available_clients
122 self.eval_fn = eval_fn
123 self.on_fit_config_fn = on_fit_config_fn
124 self.on_evaluate_config_fn = on_evaluate_config_fn
125 self.accept_failures = accept_failures
126 self.initial_parameters = initial_parameters
127
128 def __repr__(self) -> str:
129 rep = f"FedAvg(accept_failures={self.accept_failures})"
130 return rep
131
132 def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]:
133 """Return the sample size and the required number of available
134 clients."""
135 num_clients = int(num_available_clients * self.fraction_fit)
136 return max(num_clients, self.min_fit_clients), self.min_available_clients
137
138 def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]:
139 """Use a fraction of available clients for evaluation."""
140 num_clients = int(num_available_clients * self.fraction_eval)
141 return max(num_clients, self.min_eval_clients), self.min_available_clients
142
143 def initialize_parameters(
144 self, client_manager: ClientManager
145 ) -> Optional[Parameters]:
146 """Initialize global model parameters."""
147 initial_parameters = self.initial_parameters
148 self.initial_parameters = None # Don't keep initial parameters in memory
149 if isinstance(initial_parameters, list):
150 log(WARNING, DEPRECATION_WARNING_INITIAL_PARAMETERS)
151 initial_parameters = weights_to_parameters(weights=initial_parameters)
152 return initial_parameters
153
154 def evaluate(
155 self, parameters: Parameters
156 ) -> Optional[Tuple[float, Dict[str, Scalar]]]:
157 """Evaluate model parameters using an evaluation function."""
158 if self.eval_fn is None:
159 # No evaluation function provided
160 return None
161 weights = parameters_to_weights(parameters)
162 eval_res = self.eval_fn(weights)
163 if eval_res is None:
164 return None
165 loss, other = eval_res
166 if isinstance(other, float):
167 print(DEPRECATION_WARNING)
168 metrics = {"accuracy": other}
169 else:
170 metrics = other
171 return loss, metrics
172
173 def configure_fit(
174 self, rnd: int, parameters: Parameters, client_manager: ClientManager
175 ) -> List[Tuple[ClientProxy, FitIns]]:
176 """Configure the next round of training."""
177 config = {}
178 if self.on_fit_config_fn is not None:
179 # Custom fit config function provided
180 config = self.on_fit_config_fn(rnd)
181 fit_ins = FitIns(parameters, config)
182
183 # Sample clients
184 sample_size, min_num_clients = self.num_fit_clients(
185 client_manager.num_available()
186 )
187 clients = client_manager.sample(
188 num_clients=sample_size, min_num_clients=min_num_clients
189 )
190
191 # Return client/config pairs
192 return [(client, fit_ins) for client in clients]
193
194 def configure_evaluate(
195 self, rnd: int, parameters: Parameters, client_manager: ClientManager
196 ) -> List[Tuple[ClientProxy, EvaluateIns]]:
197 """Configure the next round of evaluation."""
198 # Do not configure federated evaluation if a centralized evaluation
199 # function is provided
200 if self.eval_fn is not None:
201 return []
202
203 # Parameters and config
204 config = {}
205 if self.on_evaluate_config_fn is not None:
206 # Custom evaluation config function provided
207 config = self.on_evaluate_config_fn(rnd)
208 evaluate_ins = EvaluateIns(parameters, config)
209
210 # Sample clients
211 if rnd >= 0:
212 sample_size, min_num_clients = self.num_evaluation_clients(
213 client_manager.num_available()
214 )
215 clients = client_manager.sample(
216 num_clients=sample_size, min_num_clients=min_num_clients
217 )
218 else:
219 clients = list(client_manager.all().values())
220
221 # Return client/config pairs
222 return [(client, evaluate_ins) for client in clients]
223
224 def aggregate_fit(
225 self,
226 rnd: int,
227 results: List[Tuple[ClientProxy, FitRes]],
228 failures: List[BaseException],
229 ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
230 """Aggregate fit results using weighted average."""
231 if not results:
232 return None, {}
233 # Do not aggregate if there are failures and failures are not accepted
234 if not self.accept_failures and failures:
235 return None, {}
236 # Convert results
237 weights_results = [
238 (parameters_to_weights(fit_res.parameters), fit_res.num_examples)
239 for client, fit_res in results
240 ]
241 return weights_to_parameters(aggregate(weights_results)), {}
242
243 def aggregate_evaluate(
244 self,
245 rnd: int,
246 results: List[Tuple[ClientProxy, EvaluateRes]],
247 failures: List[BaseException],
248 ) -> Tuple[Optional[float], Dict[str, Scalar]]:
249 """Aggregate evaluation losses using weighted average."""
250 if not results:
251 return None, {}
252 # Do not aggregate if there are failures and failures are not accepted
253 if not self.accept_failures and failures:
254 return None, {}
255 loss_aggregated = weighted_loss_avg(
256 [
257 (
258 evaluate_res.num_examples,
259 evaluate_res.loss,
260 evaluate_res.accuracy,
261 )
262 for _, evaluate_res in results
263 ]
264 )
265 return loss_aggregated, {}
266
```
--- END FILES ---
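Editorial aside: the parameter whose docstring is at issue is the server-side `eval_fn`. A minimal sketch of a function matching the declared type `Callable[[Weights], Optional[Tuple[float, Dict[str, Scalar]]]]` is shown below; the Keras-style `model.set_weights`/`model.evaluate` calls and the validation arrays are placeholders, not part of the Flower API.

```python
from typing import Dict, Optional, Tuple

from flwr.common import Scalar, Weights
from flwr.server.strategy.fedavg import FedAvg


def get_eval_fn(model, x_val, y_val):
    """Build a centralized evaluation function for the FedAvg strategy."""

    def eval_fn(weights: Weights) -> Optional[Tuple[float, Dict[str, Scalar]]]:
        model.set_weights(weights)  # placeholder: assumes a Keras-like model
        loss, accuracy = model.evaluate(x_val, y_val, verbose=0)
        # Return a loss plus a metrics dict (the non-deprecated return format).
        return loss, {"accuracy": accuracy}

    return eval_fn


# Usage sketch, given `model`, `x_val`, `y_val` defined elsewhere:
# strategy = FedAvg(eval_fn=get_eval_fn(model, x_val, y_val))
```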
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/py/flwr/server/strategy/fedavg.py b/src/py/flwr/server/strategy/fedavg.py
--- a/src/py/flwr/server/strategy/fedavg.py
+++ b/src/py/flwr/server/strategy/fedavg.py
@@ -92,26 +92,28 @@
Implementation based on https://arxiv.org/abs/1602.05629
- Args:
- fraction_fit (float, optional): Fraction of clients used during
- training. Defaults to 0.1.
- fraction_eval (float, optional): Fraction of clients used during
- validation. Defaults to 0.1.
- min_fit_clients (int, optional): Minimum number of clients used
- during training. Defaults to 2.
- min_eval_clients (int, optional): Minimum number of clients used
- during validation. Defaults to 2.
- min_available_clients (int, optional): Minimum number of total
- clients in the system. Defaults to 2.
- eval_fn (Callable[[Weights], Optional[Tuple[float, float]]], optional):
- Function used for validation. Defaults to None.
- on_fit_config_fn (Callable[[int], Dict[str, Scalar]], optional):
- Function used to configure training. Defaults to None.
- on_evaluate_config_fn (Callable[[int], Dict[str, Scalar]], optional):
- Function used to configure validation. Defaults to None.
- accept_failures (bool, optional): Whether or not accept rounds
- containing failures. Defaults to True.
- initial_parameters (Parameters, optional): Initial global model parameters.
+ Parameters
+ ----------
+ fraction_fit : float, optional
+ Fraction of clients used during training. Defaults to 0.1.
+ fraction_eval : float, optional
+ Fraction of clients used during validation. Defaults to 0.1.
+ min_fit_clients : int, optional
+ Minimum number of clients used during training. Defaults to 2.
+ min_eval_clients : int, optional
+ Minimum number of clients used during validation. Defaults to 2.
+ min_available_clients : int, optional
+ Minimum number of total clients in the system. Defaults to 2.
+ eval_fn : Callable[[Weights], Optional[Tuple[float, Dict[str, Scalar]]]]
+ Optional function used for validation. Defaults to None.
+ on_fit_config_fn : Callable[[int], Dict[str, Scalar]], optional
+ Function used to configure training. Defaults to None.
+ on_evaluate_config_fn : Callable[[int], Dict[str, Scalar]], optional
+ Function used to configure validation. Defaults to None.
+ accept_failures : bool, optional
+ Whether or not accept rounds containing failures. Defaults to True.
+ initial_parameters : Parameters, optional
+ Initial global model parameters.
"""
super().__init__()
self.min_fit_clients = min_fit_clients
| {"golden_diff": "diff --git a/src/py/flwr/server/strategy/fedavg.py b/src/py/flwr/server/strategy/fedavg.py\n--- a/src/py/flwr/server/strategy/fedavg.py\n+++ b/src/py/flwr/server/strategy/fedavg.py\n@@ -92,26 +92,28 @@\n \n Implementation based on https://arxiv.org/abs/1602.05629\n \n- Args:\n- fraction_fit (float, optional): Fraction of clients used during\n- training. Defaults to 0.1.\n- fraction_eval (float, optional): Fraction of clients used during\n- validation. Defaults to 0.1.\n- min_fit_clients (int, optional): Minimum number of clients used\n- during training. Defaults to 2.\n- min_eval_clients (int, optional): Minimum number of clients used\n- during validation. Defaults to 2.\n- min_available_clients (int, optional): Minimum number of total\n- clients in the system. Defaults to 2.\n- eval_fn (Callable[[Weights], Optional[Tuple[float, float]]], optional):\n- Function used for validation. Defaults to None.\n- on_fit_config_fn (Callable[[int], Dict[str, Scalar]], optional):\n- Function used to configure training. Defaults to None.\n- on_evaluate_config_fn (Callable[[int], Dict[str, Scalar]], optional):\n- Function used to configure validation. Defaults to None.\n- accept_failures (bool, optional): Whether or not accept rounds\n- containing failures. Defaults to True.\n- initial_parameters (Parameters, optional): Initial global model parameters.\n+ Parameters\n+ ----------\n+ fraction_fit : float, optional\n+ Fraction of clients used during training. Defaults to 0.1.\n+ fraction_eval : float, optional\n+ Fraction of clients used during validation. Defaults to 0.1.\n+ min_fit_clients : int, optional\n+ Minimum number of clients used during training. Defaults to 2.\n+ min_eval_clients : int, optional\n+ Minimum number of clients used during validation. Defaults to 2.\n+ min_available_clients : int, optional\n+ Minimum number of total clients in the system. Defaults to 2.\n+ eval_fn : Callable[[Weights], Optional[Tuple[float, Dict[str, Scalar]]]]\n+ Optional function used for validation. Defaults to None.\n+ on_fit_config_fn : Callable[[int], Dict[str, Scalar]], optional\n+ Function used to configure training. Defaults to None.\n+ on_evaluate_config_fn : Callable[[int], Dict[str, Scalar]], optional\n+ Function used to configure validation. Defaults to None.\n+ accept_failures : bool, optional\n+ Whether or not accept rounds containing failures. Defaults to True.\n+ initial_parameters : Parameters, optional\n+ Initial global model parameters.\n \"\"\"\n super().__init__()\n self.min_fit_clients = min_fit_clients\n", "issue": "Update signature to Dict[floatScalar\nhttps://github.com/adap/flower/blob/a8a7dd1fce957540b49c59e56221d5762a937f8d/src/py/flwr/server/strategy/fedavg.py#L106\n", "before_files": [{"content": "# Copyright 2020 Adap GmbH. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Federated Averaging (FedAvg) [McMahan et al., 2016] strategy.\n\nPaper: https://arxiv.org/abs/1602.05629\n\"\"\"\n\n\nfrom logging import WARNING\nfrom typing import Callable, Dict, List, Optional, Tuple\n\nfrom flwr.common import (\n EvaluateIns,\n EvaluateRes,\n FitIns,\n FitRes,\n Parameters,\n Scalar,\n Weights,\n parameters_to_weights,\n weights_to_parameters,\n)\nfrom flwr.common.logger import log\nfrom flwr.server.client_manager import ClientManager\nfrom flwr.server.client_proxy import ClientProxy\n\nfrom .aggregate import aggregate, weighted_loss_avg\nfrom .strategy import Strategy\n\nDEPRECATION_WARNING = \"\"\"\nDEPRECATION WARNING: deprecated `eval_fn` return format\n\n loss, accuracy\n\nmove to\n\n loss, {\"accuracy\": accuracy}\n\ninstead. Note that compatibility with the deprecated return format will be\nremoved in a future release.\n\"\"\"\n\nDEPRECATION_WARNING_INITIAL_PARAMETERS = \"\"\"\nDEPRECATION WARNING: deprecated initial parameter type\n\n flwr.common.Weights (i.e., List[np.ndarray])\n\nwill be removed in a future update, move to\n\n flwr.common.Parameters\n\ninstead. Use\n\n parameters = flwr.common.weights_to_parameters(weights)\n\nto easily transform `Weights` to `Parameters`.\n\"\"\"\n\n\nclass FedAvg(Strategy):\n \"\"\"Configurable FedAvg strategy implementation.\"\"\"\n\n # pylint: disable=too-many-arguments,too-many-instance-attributes\n def __init__(\n self,\n fraction_fit: float = 0.1,\n fraction_eval: float = 0.1,\n min_fit_clients: int = 2,\n min_eval_clients: int = 2,\n min_available_clients: int = 2,\n eval_fn: Optional[\n Callable[[Weights], Optional[Tuple[float, Dict[str, Scalar]]]]\n ] = None,\n on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,\n on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,\n accept_failures: bool = True,\n initial_parameters: Optional[Parameters] = None,\n ) -> None:\n \"\"\"Federated Averaging strategy.\n\n Implementation based on https://arxiv.org/abs/1602.05629\n\n Args:\n fraction_fit (float, optional): Fraction of clients used during\n training. Defaults to 0.1.\n fraction_eval (float, optional): Fraction of clients used during\n validation. Defaults to 0.1.\n min_fit_clients (int, optional): Minimum number of clients used\n during training. Defaults to 2.\n min_eval_clients (int, optional): Minimum number of clients used\n during validation. Defaults to 2.\n min_available_clients (int, optional): Minimum number of total\n clients in the system. Defaults to 2.\n eval_fn (Callable[[Weights], Optional[Tuple[float, float]]], optional):\n Function used for validation. Defaults to None.\n on_fit_config_fn (Callable[[int], Dict[str, Scalar]], optional):\n Function used to configure training. Defaults to None.\n on_evaluate_config_fn (Callable[[int], Dict[str, Scalar]], optional):\n Function used to configure validation. 
Defaults to None.\n accept_failures (bool, optional): Whether or not accept rounds\n containing failures. Defaults to True.\n initial_parameters (Parameters, optional): Initial global model parameters.\n \"\"\"\n super().__init__()\n self.min_fit_clients = min_fit_clients\n self.min_eval_clients = min_eval_clients\n self.fraction_fit = fraction_fit\n self.fraction_eval = fraction_eval\n self.min_available_clients = min_available_clients\n self.eval_fn = eval_fn\n self.on_fit_config_fn = on_fit_config_fn\n self.on_evaluate_config_fn = on_evaluate_config_fn\n self.accept_failures = accept_failures\n self.initial_parameters = initial_parameters\n\n def __repr__(self) -> str:\n rep = f\"FedAvg(accept_failures={self.accept_failures})\"\n return rep\n\n def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]:\n \"\"\"Return the sample size and the required number of available\n clients.\"\"\"\n num_clients = int(num_available_clients * self.fraction_fit)\n return max(num_clients, self.min_fit_clients), self.min_available_clients\n\n def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]:\n \"\"\"Use a fraction of available clients for evaluation.\"\"\"\n num_clients = int(num_available_clients * self.fraction_eval)\n return max(num_clients, self.min_eval_clients), self.min_available_clients\n\n def initialize_parameters(\n self, client_manager: ClientManager\n ) -> Optional[Parameters]:\n \"\"\"Initialize global model parameters.\"\"\"\n initial_parameters = self.initial_parameters\n self.initial_parameters = None # Don't keep initial parameters in memory\n if isinstance(initial_parameters, list):\n log(WARNING, DEPRECATION_WARNING_INITIAL_PARAMETERS)\n initial_parameters = weights_to_parameters(weights=initial_parameters)\n return initial_parameters\n\n def evaluate(\n self, parameters: Parameters\n ) -> Optional[Tuple[float, Dict[str, Scalar]]]:\n \"\"\"Evaluate model parameters using an evaluation function.\"\"\"\n if self.eval_fn is None:\n # No evaluation function provided\n return None\n weights = parameters_to_weights(parameters)\n eval_res = self.eval_fn(weights)\n if eval_res is None:\n return None\n loss, other = eval_res\n if isinstance(other, float):\n print(DEPRECATION_WARNING)\n metrics = {\"accuracy\": other}\n else:\n metrics = other\n return loss, metrics\n\n def configure_fit(\n self, rnd: int, parameters: Parameters, client_manager: ClientManager\n ) -> List[Tuple[ClientProxy, FitIns]]:\n \"\"\"Configure the next round of training.\"\"\"\n config = {}\n if self.on_fit_config_fn is not None:\n # Custom fit config function provided\n config = self.on_fit_config_fn(rnd)\n fit_ins = FitIns(parameters, config)\n\n # Sample clients\n sample_size, min_num_clients = self.num_fit_clients(\n client_manager.num_available()\n )\n clients = client_manager.sample(\n num_clients=sample_size, min_num_clients=min_num_clients\n )\n\n # Return client/config pairs\n return [(client, fit_ins) for client in clients]\n\n def configure_evaluate(\n self, rnd: int, parameters: Parameters, client_manager: ClientManager\n ) -> List[Tuple[ClientProxy, EvaluateIns]]:\n \"\"\"Configure the next round of evaluation.\"\"\"\n # Do not configure federated evaluation if a centralized evaluation\n # function is provided\n if self.eval_fn is not None:\n return []\n\n # Parameters and config\n config = {}\n if self.on_evaluate_config_fn is not None:\n # Custom evaluation config function provided\n config = self.on_evaluate_config_fn(rnd)\n evaluate_ins = 
EvaluateIns(parameters, config)\n\n # Sample clients\n if rnd >= 0:\n sample_size, min_num_clients = self.num_evaluation_clients(\n client_manager.num_available()\n )\n clients = client_manager.sample(\n num_clients=sample_size, min_num_clients=min_num_clients\n )\n else:\n clients = list(client_manager.all().values())\n\n # Return client/config pairs\n return [(client, evaluate_ins) for client in clients]\n\n def aggregate_fit(\n self,\n rnd: int,\n results: List[Tuple[ClientProxy, FitRes]],\n failures: List[BaseException],\n ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:\n \"\"\"Aggregate fit results using weighted average.\"\"\"\n if not results:\n return None, {}\n # Do not aggregate if there are failures and failures are not accepted\n if not self.accept_failures and failures:\n return None, {}\n # Convert results\n weights_results = [\n (parameters_to_weights(fit_res.parameters), fit_res.num_examples)\n for client, fit_res in results\n ]\n return weights_to_parameters(aggregate(weights_results)), {}\n\n def aggregate_evaluate(\n self,\n rnd: int,\n results: List[Tuple[ClientProxy, EvaluateRes]],\n failures: List[BaseException],\n ) -> Tuple[Optional[float], Dict[str, Scalar]]:\n \"\"\"Aggregate evaluation losses using weighted average.\"\"\"\n if not results:\n return None, {}\n # Do not aggregate if there are failures and failures are not accepted\n if not self.accept_failures and failures:\n return None, {}\n loss_aggregated = weighted_loss_avg(\n [\n (\n evaluate_res.num_examples,\n evaluate_res.loss,\n evaluate_res.accuracy,\n )\n for _, evaluate_res in results\n ]\n )\n return loss_aggregated, {}\n", "path": "src/py/flwr/server/strategy/fedavg.py"}], "after_files": [{"content": "# Copyright 2020 Adap GmbH. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Federated Averaging (FedAvg) [McMahan et al., 2016] strategy.\n\nPaper: https://arxiv.org/abs/1602.05629\n\"\"\"\n\n\nfrom logging import WARNING\nfrom typing import Callable, Dict, List, Optional, Tuple\n\nfrom flwr.common import (\n EvaluateIns,\n EvaluateRes,\n FitIns,\n FitRes,\n Parameters,\n Scalar,\n Weights,\n parameters_to_weights,\n weights_to_parameters,\n)\nfrom flwr.common.logger import log\nfrom flwr.server.client_manager import ClientManager\nfrom flwr.server.client_proxy import ClientProxy\n\nfrom .aggregate import aggregate, weighted_loss_avg\nfrom .strategy import Strategy\n\nDEPRECATION_WARNING = \"\"\"\nDEPRECATION WARNING: deprecated `eval_fn` return format\n\n loss, accuracy\n\nmove to\n\n loss, {\"accuracy\": accuracy}\n\ninstead. Note that compatibility with the deprecated return format will be\nremoved in a future release.\n\"\"\"\n\nDEPRECATION_WARNING_INITIAL_PARAMETERS = \"\"\"\nDEPRECATION WARNING: deprecated initial parameter type\n\n flwr.common.Weights (i.e., List[np.ndarray])\n\nwill be removed in a future update, move to\n\n flwr.common.Parameters\n\ninstead. 
Use\n\n parameters = flwr.common.weights_to_parameters(weights)\n\nto easily transform `Weights` to `Parameters`.\n\"\"\"\n\n\nclass FedAvg(Strategy):\n \"\"\"Configurable FedAvg strategy implementation.\"\"\"\n\n # pylint: disable=too-many-arguments,too-many-instance-attributes\n def __init__(\n self,\n fraction_fit: float = 0.1,\n fraction_eval: float = 0.1,\n min_fit_clients: int = 2,\n min_eval_clients: int = 2,\n min_available_clients: int = 2,\n eval_fn: Optional[\n Callable[[Weights], Optional[Tuple[float, Dict[str, Scalar]]]]\n ] = None,\n on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,\n on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,\n accept_failures: bool = True,\n initial_parameters: Optional[Parameters] = None,\n ) -> None:\n \"\"\"Federated Averaging strategy.\n\n Implementation based on https://arxiv.org/abs/1602.05629\n\n Parameters\n ----------\n fraction_fit : float, optional\n Fraction of clients used during training. Defaults to 0.1.\n fraction_eval : float, optional\n Fraction of clients used during validation. Defaults to 0.1.\n min_fit_clients : int, optional\n Minimum number of clients used during training. Defaults to 2.\n min_eval_clients : int, optional\n Minimum number of clients used during validation. Defaults to 2.\n min_available_clients : int, optional\n Minimum number of total clients in the system. Defaults to 2.\n eval_fn : Callable[[Weights], Optional[Tuple[float, Dict[str, Scalar]]]]\n Optional function used for validation. Defaults to None.\n on_fit_config_fn : Callable[[int], Dict[str, Scalar]], optional\n Function used to configure training. Defaults to None.\n on_evaluate_config_fn : Callable[[int], Dict[str, Scalar]], optional\n Function used to configure validation. Defaults to None.\n accept_failures : bool, optional\n Whether or not accept rounds containing failures. 
Defaults to True.\n initial_parameters : Parameters, optional\n Initial global model parameters.\n \"\"\"\n super().__init__()\n self.min_fit_clients = min_fit_clients\n self.min_eval_clients = min_eval_clients\n self.fraction_fit = fraction_fit\n self.fraction_eval = fraction_eval\n self.min_available_clients = min_available_clients\n self.eval_fn = eval_fn\n self.on_fit_config_fn = on_fit_config_fn\n self.on_evaluate_config_fn = on_evaluate_config_fn\n self.accept_failures = accept_failures\n self.initial_parameters = initial_parameters\n\n def __repr__(self) -> str:\n rep = f\"FedAvg(accept_failures={self.accept_failures})\"\n return rep\n\n def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]:\n \"\"\"Return the sample size and the required number of available\n clients.\"\"\"\n num_clients = int(num_available_clients * self.fraction_fit)\n return max(num_clients, self.min_fit_clients), self.min_available_clients\n\n def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]:\n \"\"\"Use a fraction of available clients for evaluation.\"\"\"\n num_clients = int(num_available_clients * self.fraction_eval)\n return max(num_clients, self.min_eval_clients), self.min_available_clients\n\n def initialize_parameters(\n self, client_manager: ClientManager\n ) -> Optional[Parameters]:\n \"\"\"Initialize global model parameters.\"\"\"\n initial_parameters = self.initial_parameters\n self.initial_parameters = None # Don't keep initial parameters in memory\n if isinstance(initial_parameters, list):\n log(WARNING, DEPRECATION_WARNING_INITIAL_PARAMETERS)\n initial_parameters = weights_to_parameters(weights=initial_parameters)\n return initial_parameters\n\n def evaluate(\n self, parameters: Parameters\n ) -> Optional[Tuple[float, Dict[str, Scalar]]]:\n \"\"\"Evaluate model parameters using an evaluation function.\"\"\"\n if self.eval_fn is None:\n # No evaluation function provided\n return None\n weights = parameters_to_weights(parameters)\n eval_res = self.eval_fn(weights)\n if eval_res is None:\n return None\n loss, other = eval_res\n if isinstance(other, float):\n print(DEPRECATION_WARNING)\n metrics = {\"accuracy\": other}\n else:\n metrics = other\n return loss, metrics\n\n def configure_fit(\n self, rnd: int, parameters: Parameters, client_manager: ClientManager\n ) -> List[Tuple[ClientProxy, FitIns]]:\n \"\"\"Configure the next round of training.\"\"\"\n config = {}\n if self.on_fit_config_fn is not None:\n # Custom fit config function provided\n config = self.on_fit_config_fn(rnd)\n fit_ins = FitIns(parameters, config)\n\n # Sample clients\n sample_size, min_num_clients = self.num_fit_clients(\n client_manager.num_available()\n )\n clients = client_manager.sample(\n num_clients=sample_size, min_num_clients=min_num_clients\n )\n\n # Return client/config pairs\n return [(client, fit_ins) for client in clients]\n\n def configure_evaluate(\n self, rnd: int, parameters: Parameters, client_manager: ClientManager\n ) -> List[Tuple[ClientProxy, EvaluateIns]]:\n \"\"\"Configure the next round of evaluation.\"\"\"\n # Do not configure federated evaluation if a centralized evaluation\n # function is provided\n if self.eval_fn is not None:\n return []\n\n # Parameters and config\n config = {}\n if self.on_evaluate_config_fn is not None:\n # Custom evaluation config function provided\n config = self.on_evaluate_config_fn(rnd)\n evaluate_ins = EvaluateIns(parameters, config)\n\n # Sample clients\n if rnd >= 0:\n sample_size, min_num_clients = 
self.num_evaluation_clients(\n client_manager.num_available()\n )\n clients = client_manager.sample(\n num_clients=sample_size, min_num_clients=min_num_clients\n )\n else:\n clients = list(client_manager.all().values())\n\n # Return client/config pairs\n return [(client, evaluate_ins) for client in clients]\n\n def aggregate_fit(\n self,\n rnd: int,\n results: List[Tuple[ClientProxy, FitRes]],\n failures: List[BaseException],\n ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:\n \"\"\"Aggregate fit results using weighted average.\"\"\"\n if not results:\n return None, {}\n # Do not aggregate if there are failures and failures are not accepted\n if not self.accept_failures and failures:\n return None, {}\n # Convert results\n weights_results = [\n (parameters_to_weights(fit_res.parameters), fit_res.num_examples)\n for client, fit_res in results\n ]\n return weights_to_parameters(aggregate(weights_results)), {}\n\n def aggregate_evaluate(\n self,\n rnd: int,\n results: List[Tuple[ClientProxy, EvaluateRes]],\n failures: List[BaseException],\n ) -> Tuple[Optional[float], Dict[str, Scalar]]:\n \"\"\"Aggregate evaluation losses using weighted average.\"\"\"\n if not results:\n return None, {}\n # Do not aggregate if there are failures and failures are not accepted\n if not self.accept_failures and failures:\n return None, {}\n loss_aggregated = weighted_loss_avg(\n [\n (\n evaluate_res.num_examples,\n evaluate_res.loss,\n evaluate_res.accuracy,\n )\n for _, evaluate_res in results\n ]\n )\n return loss_aggregated, {}\n", "path": "src/py/flwr/server/strategy/fedavg.py"}]} | 3,090 | 642 |
gh_patches_debug_15787 | rasdani/github-patches | git_diff | pulp__pulpcore-63 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[PR #4609/4c4d6aa0 backport][3.28] Works around a sync-time performance regression on PG12
**This is a backport of PR #4609 as merged into main (4c4d6aa0745b4938e2aeee8ffc28a0b0392cc9ef).**
closes #4591
We still don't really "know":
* Why this only seems to happen on PG12
* Why the original query change (https://github.com/pulp/pulpcore/pull/4275/files) triggered this
* Why it only seems to happen on capsule syncs (this is most mysterious)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/serializers/repository.py`
Content:
```
1 from gettext import gettext as _
2
3 from rest_framework import serializers, fields
4 from rest_framework.validators import UniqueValidator
5 from rest_framework_nested.serializers import NestedHyperlinkedModelSerializer
6
7 from pulpcore.app import models
8 from pulpcore.app.serializers import (
9 DetailIdentityField,
10 IdentityField,
11 NestedIdentityField,
12 NestedRelatedField,
13 LatestVersionField,
14 MasterModelSerializer,
15 ModelSerializer,
16 )
17 from pulpcore.app.serializers import validate_unknown_fields
18
19
20 class RepositorySerializer(ModelSerializer):
21 _href = IdentityField(
22 view_name='repositories-detail'
23 )
24 _versions_href = IdentityField(
25 view_name='versions-list',
26 lookup_url_kwarg='repository_pk',
27 )
28 _latest_version_href = LatestVersionField()
29 name = serializers.CharField(
30 help_text=_('A unique name for this repository.'),
31 validators=[UniqueValidator(queryset=models.Repository.objects.all())]
32 )
33 description = serializers.CharField(
34 help_text=_('An optional description.'),
35 required=False,
36 allow_blank=True
37 )
38
39 class Meta:
40 model = models.Repository
41 fields = ModelSerializer.Meta.fields + ('_versions_href', '_latest_version_href', 'name',
42 'description')
43
44
45 class RemoteSerializer(MasterModelSerializer):
46 """
47 Every remote defined by a plugin should have a Remote serializer that inherits from this
48 class. Please import from `pulpcore.plugin.serializers` rather than from this module directly.
49 """
50 _href = DetailIdentityField()
51 name = serializers.CharField(
52 help_text=_('A unique name for this remote.'),
53 validators=[UniqueValidator(queryset=models.Remote.objects.all())],
54 )
55 url = serializers.CharField(
56 help_text='The URL of an external content source.',
57 )
58 validate = serializers.BooleanField(
59 help_text='If True, the plugin will validate imported artifacts.',
60 required=False,
61 )
62 ssl_ca_certificate = serializers.FileField(
63 help_text='A PEM encoded CA certificate used to validate the server '
64 'certificate presented by the remote server.',
65 write_only=True,
66 required=False,
67 )
68 ssl_client_certificate = serializers.FileField(
69 help_text='A PEM encoded client certificate used for authentication.',
70 write_only=True,
71 required=False,
72 )
73 ssl_client_key = serializers.FileField(
74 help_text='A PEM encoded private key used for authentication.',
75 write_only=True,
76 required=False,
77 )
78 ssl_validation = serializers.BooleanField(
79 help_text='If True, SSL peer validation must be performed.',
80 required=False,
81 )
82 proxy_url = serializers.CharField(
83 help_text='The proxy URL. Format: scheme://user:password@host:port',
84 required=False,
85 allow_blank=True,
86 )
87 username = serializers.CharField(
88 help_text='The username to be used for authentication when syncing.',
89 write_only=True,
90 required=False,
91 allow_blank=True,
92 )
93 password = serializers.CharField(
94 help_text='The password to be used for authentication when syncing.',
95 write_only=True,
96 required=False,
97 allow_blank=True,
98 )
99 _last_updated = serializers.DateTimeField(
100 help_text='Timestamp of the most recent update of the remote.',
101 read_only=True
102 )
103 download_concurrency = serializers.IntegerField(
104 help_text='Total number of simultaneous connections.',
105 required=False,
106 min_value=1
107 )
108 policy = serializers.ChoiceField(
109 help_text="The policy to use when downloading content. The possible values include: "
110 "'immediate', 'on_demand', and 'cache_only'. 'immediate' is the default.",
111 choices=models.Remote.POLICY_CHOICES,
112 default=models.Remote.IMMEDIATE
113 )
114
115 class Meta:
116 abstract = True
117 model = models.Remote
118 fields = MasterModelSerializer.Meta.fields + (
119 'name', 'url', 'validate', 'ssl_ca_certificate', 'ssl_client_certificate',
120 'ssl_client_key', 'ssl_validation', 'proxy_url', 'username', 'password',
121 '_last_updated', 'download_concurrency', 'policy')
122
123
124 class RepositorySyncURLSerializer(serializers.Serializer):
125 repository = serializers.HyperlinkedRelatedField(
126 required=True,
127 help_text=_('A URI of the repository to be synchronized.'),
128 queryset=models.Repository.objects.all(),
129 view_name='repositories-detail',
130 label=_('Repository'),
131 error_messages={
132 'required': _('The repository URI must be specified.')
133 }
134 )
135
136 mirror = fields.BooleanField(
137 required=False,
138 default=False,
139 help_text=_('The synchronization mode, True for "mirror" and False for "additive" mode.')
140 )
141
142
143 class PublisherSerializer(MasterModelSerializer):
144 """
145 Every publisher defined by a plugin should have an Publisher serializer that inherits from this
146 class. Please import from `pulpcore.plugin.serializers` rather than from this module directly.
147 """
148 _href = DetailIdentityField()
149 name = serializers.CharField(
150 help_text=_('A unique name for this publisher.'),
151 validators=[UniqueValidator(queryset=models.Publisher.objects.all())]
152 )
153 _last_updated = serializers.DateTimeField(
154 help_text=_('Timestamp of the most recent update of the publisher configuration.'),
155 read_only=True
156 )
157 _distributions = serializers.HyperlinkedRelatedField(
158 many=True,
159 read_only=True,
160 view_name='distributions-detail',
161 )
162
163 class Meta:
164 abstract = True
165 model = models.Publisher
166 fields = MasterModelSerializer.Meta.fields + (
167 'name', '_last_updated', '_distributions',
168 )
169
170
171 class RepositoryPublishURLSerializer(serializers.Serializer):
172
173 repository = serializers.HyperlinkedRelatedField(
174 help_text=_('A URI of the repository to be synchronized.'),
175 required=False,
176 label=_('Repository'),
177 queryset=models.Repository.objects.all(),
178 view_name='repositories-detail',
179 )
180
181 repository_version = NestedRelatedField(
182 help_text=_('A URI of the repository version to be published.'),
183 required=False,
184 label=_('Repository Version'),
185 queryset=models.RepositoryVersion.objects.all(),
186 view_name='versions-detail',
187 lookup_field='number',
188 parent_lookup_kwargs={'repository_pk': 'repository__pk'},
189 )
190
191 def validate(self, data):
192 if hasattr(self, 'initial_data'):
193 validate_unknown_fields(self.initial_data, self.fields)
194
195 repository = data.pop('repository', None)
196 repository_version = data.get('repository_version')
197 if not repository and not repository_version:
198 raise serializers.ValidationError(
199 _("Either the 'repository' or 'repository_version' need to be specified"))
200 elif not repository and repository_version:
201 return data
202 elif repository and not repository_version:
203 version = models.RepositoryVersion.latest(repository)
204 if version:
205 new_data = {'repository_version': version}
206 new_data.update(data)
207 return new_data
208 else:
209 raise serializers.ValidationError(
210 detail=_('Repository has no version available to publish'))
211 raise serializers.ValidationError(
212 _("Either the 'repository' or 'repository_version' need to be specified "
213 "but not both.")
214 )
215
216
217 class ExporterSerializer(MasterModelSerializer):
218 _href = DetailIdentityField()
219 name = serializers.CharField(
220 help_text=_('The exporter unique name.'),
221 validators=[UniqueValidator(queryset=models.Exporter.objects.all())]
222 )
223 _last_updated = serializers.DateTimeField(
224 help_text=_('Timestamp of the last update.'),
225 read_only=True
226 )
227 last_export = serializers.DateTimeField(
228 help_text=_('Timestamp of the last export.'),
229 read_only=True
230 )
231
232 class Meta:
233 abstract = True
234 model = models.Exporter
235 fields = MasterModelSerializer.Meta.fields + (
236 'name',
237 '_last_updated',
238 'last_export',
239 )
240
241
242 class RepositoryVersionSerializer(ModelSerializer, NestedHyperlinkedModelSerializer):
243 _href = NestedIdentityField(
244 view_name='versions-detail',
245 lookup_field='number', parent_lookup_kwargs={'repository_pk': 'repository__pk'},
246 )
247 number = serializers.IntegerField(
248 read_only=True
249 )
250 base_version = NestedRelatedField(
251 required=False,
252 help_text=_('A repository version whose content was used as the initial set of content '
253 'for this repository version'),
254 queryset=models.RepositoryVersion.objects.all(),
255 view_name='versions-detail',
256 lookup_field='number',
257 parent_lookup_kwargs={'repository_pk': 'repository__pk'},
258 )
259 content_summary = serializers.SerializerMethodField(
260 help_text=_('Various count summaries of the content in the version and the HREF to view '
261 'them.'),
262 read_only=True,
263 )
264
265 def get_content_summary(self, obj):
266 """
267 The summary of contained content.
268
269 Returns:
270 dict: {'added': {<_type>: {'count': <count>, 'href': <href>},
271 'removed': {<_type>: {'count': <count>, 'href': <href>},
272 'present': {<_type>: {'count': <count>, 'href': <href>},
273 }
274 """
275 to_return = {'added': {}, 'removed': {}, 'present': {}}
276 for count_detail in obj.counts.all():
277 count_type = count_detail.get_count_type_display()
278 item_dict = {'count': count_detail.count, 'href': count_detail.content_href}
279 to_return[count_type][count_detail.content_type] = item_dict
280 return to_return
281
282 class Meta:
283 model = models.RepositoryVersion
284 fields = ModelSerializer.Meta.fields + (
285 '_href', 'number', 'base_version', 'content_summary',
286 )
287
288
289 class RepositoryVersionCreateSerializer(ModelSerializer, NestedHyperlinkedModelSerializer):
290 add_content_units = serializers.ListField(
291 help_text=_('A list of content units to add to a new repository version'),
292 write_only=True
293 )
294 remove_content_units = serializers.ListField(
295 help_text=_('A list of content units to remove from the latest repository version'),
296 write_only=True
297 )
298 base_version = NestedRelatedField(
299 required=False,
300 help_text=_('A repository version whose content will be used as the initial set of content '
301 'for the new repository version'),
302 queryset=models.RepositoryVersion.objects.all(),
303 view_name='versions-detail',
304 lookup_field='number',
305 parent_lookup_kwargs={'repository_pk': 'repository__pk'},
306 )
307
308 class Meta:
309 model = models.RepositoryVersion
310 fields = ['add_content_units', 'remove_content_units', 'base_version']
311
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/app/serializers/repository.py b/pulpcore/app/serializers/repository.py
--- a/pulpcore/app/serializers/repository.py
+++ b/pulpcore/app/serializers/repository.py
@@ -267,10 +267,14 @@
The summary of contained content.
Returns:
- dict: {'added': {<_type>: {'count': <count>, 'href': <href>},
- 'removed': {<_type>: {'count': <count>, 'href': <href>},
- 'present': {<_type>: {'count': <count>, 'href': <href>},
- }
+ dict: The dictionary has the following format.::
+
+ {
+ 'added': {<_type>: {'count': <count>, 'href': <href>},
+ 'removed': {<_type>: {'count': <count>, 'href': <href>},
+ 'present': {<_type>: {'count': <count>, 'href': <href>},
+ }
+
"""
to_return = {'added': {}, 'removed': {}, 'present': {}}
for count_detail in obj.counts.all():
| {"golden_diff": "diff --git a/pulpcore/app/serializers/repository.py b/pulpcore/app/serializers/repository.py\n--- a/pulpcore/app/serializers/repository.py\n+++ b/pulpcore/app/serializers/repository.py\n@@ -267,10 +267,14 @@\n The summary of contained content.\n \n Returns:\n- dict: {'added': {<_type>: {'count': <count>, 'href': <href>},\n- 'removed': {<_type>: {'count': <count>, 'href': <href>},\n- 'present': {<_type>: {'count': <count>, 'href': <href>},\n- }\n+ dict: The dictionary has the following format.::\n+\n+ {\n+ 'added': {<_type>: {'count': <count>, 'href': <href>},\n+ 'removed': {<_type>: {'count': <count>, 'href': <href>},\n+ 'present': {<_type>: {'count': <count>, 'href': <href>},\n+ }\n+\n \"\"\"\n to_return = {'added': {}, 'removed': {}, 'present': {}}\n for count_detail in obj.counts.all():\n", "issue": "[PR #4609/4c4d6aa0 backport][3.28] Works around a sync-time performance regression on PG12\n**This is a backport of PR #4609 as merged into main (4c4d6aa0745b4938e2aeee8ffc28a0b0392cc9ef).**\n\ncloses #4591\r\n\r\nWe still don't really \"know\":\r\n\r\n* Why this only seems to happen on PG12\r\n* Why the original query change (https://github.com/pulp/pulpcore/pull/4275/files) triggered this\r\n* Why it only seems to happen on capsule syncs (this is most mysterious)\n", "before_files": [{"content": "from gettext import gettext as _\n\nfrom rest_framework import serializers, fields\nfrom rest_framework.validators import UniqueValidator\nfrom rest_framework_nested.serializers import NestedHyperlinkedModelSerializer\n\nfrom pulpcore.app import models\nfrom pulpcore.app.serializers import (\n DetailIdentityField,\n IdentityField,\n NestedIdentityField,\n NestedRelatedField,\n LatestVersionField,\n MasterModelSerializer,\n ModelSerializer,\n)\nfrom pulpcore.app.serializers import validate_unknown_fields\n\n\nclass RepositorySerializer(ModelSerializer):\n _href = IdentityField(\n view_name='repositories-detail'\n )\n _versions_href = IdentityField(\n view_name='versions-list',\n lookup_url_kwarg='repository_pk',\n )\n _latest_version_href = LatestVersionField()\n name = serializers.CharField(\n help_text=_('A unique name for this repository.'),\n validators=[UniqueValidator(queryset=models.Repository.objects.all())]\n )\n description = serializers.CharField(\n help_text=_('An optional description.'),\n required=False,\n allow_blank=True\n )\n\n class Meta:\n model = models.Repository\n fields = ModelSerializer.Meta.fields + ('_versions_href', '_latest_version_href', 'name',\n 'description')\n\n\nclass RemoteSerializer(MasterModelSerializer):\n \"\"\"\n Every remote defined by a plugin should have a Remote serializer that inherits from this\n class. 
Please import from `pulpcore.plugin.serializers` rather than from this module directly.\n \"\"\"\n _href = DetailIdentityField()\n name = serializers.CharField(\n help_text=_('A unique name for this remote.'),\n validators=[UniqueValidator(queryset=models.Remote.objects.all())],\n )\n url = serializers.CharField(\n help_text='The URL of an external content source.',\n )\n validate = serializers.BooleanField(\n help_text='If True, the plugin will validate imported artifacts.',\n required=False,\n )\n ssl_ca_certificate = serializers.FileField(\n help_text='A PEM encoded CA certificate used to validate the server '\n 'certificate presented by the remote server.',\n write_only=True,\n required=False,\n )\n ssl_client_certificate = serializers.FileField(\n help_text='A PEM encoded client certificate used for authentication.',\n write_only=True,\n required=False,\n )\n ssl_client_key = serializers.FileField(\n help_text='A PEM encoded private key used for authentication.',\n write_only=True,\n required=False,\n )\n ssl_validation = serializers.BooleanField(\n help_text='If True, SSL peer validation must be performed.',\n required=False,\n )\n proxy_url = serializers.CharField(\n help_text='The proxy URL. Format: scheme://user:password@host:port',\n required=False,\n allow_blank=True,\n )\n username = serializers.CharField(\n help_text='The username to be used for authentication when syncing.',\n write_only=True,\n required=False,\n allow_blank=True,\n )\n password = serializers.CharField(\n help_text='The password to be used for authentication when syncing.',\n write_only=True,\n required=False,\n allow_blank=True,\n )\n _last_updated = serializers.DateTimeField(\n help_text='Timestamp of the most recent update of the remote.',\n read_only=True\n )\n download_concurrency = serializers.IntegerField(\n help_text='Total number of simultaneous connections.',\n required=False,\n min_value=1\n )\n policy = serializers.ChoiceField(\n help_text=\"The policy to use when downloading content. The possible values include: \"\n \"'immediate', 'on_demand', and 'cache_only'. 'immediate' is the default.\",\n choices=models.Remote.POLICY_CHOICES,\n default=models.Remote.IMMEDIATE\n )\n\n class Meta:\n abstract = True\n model = models.Remote\n fields = MasterModelSerializer.Meta.fields + (\n 'name', 'url', 'validate', 'ssl_ca_certificate', 'ssl_client_certificate',\n 'ssl_client_key', 'ssl_validation', 'proxy_url', 'username', 'password',\n '_last_updated', 'download_concurrency', 'policy')\n\n\nclass RepositorySyncURLSerializer(serializers.Serializer):\n repository = serializers.HyperlinkedRelatedField(\n required=True,\n help_text=_('A URI of the repository to be synchronized.'),\n queryset=models.Repository.objects.all(),\n view_name='repositories-detail',\n label=_('Repository'),\n error_messages={\n 'required': _('The repository URI must be specified.')\n }\n )\n\n mirror = fields.BooleanField(\n required=False,\n default=False,\n help_text=_('The synchronization mode, True for \"mirror\" and False for \"additive\" mode.')\n )\n\n\nclass PublisherSerializer(MasterModelSerializer):\n \"\"\"\n Every publisher defined by a plugin should have an Publisher serializer that inherits from this\n class. 
Please import from `pulpcore.plugin.serializers` rather than from this module directly.\n \"\"\"\n _href = DetailIdentityField()\n name = serializers.CharField(\n help_text=_('A unique name for this publisher.'),\n validators=[UniqueValidator(queryset=models.Publisher.objects.all())]\n )\n _last_updated = serializers.DateTimeField(\n help_text=_('Timestamp of the most recent update of the publisher configuration.'),\n read_only=True\n )\n _distributions = serializers.HyperlinkedRelatedField(\n many=True,\n read_only=True,\n view_name='distributions-detail',\n )\n\n class Meta:\n abstract = True\n model = models.Publisher\n fields = MasterModelSerializer.Meta.fields + (\n 'name', '_last_updated', '_distributions',\n )\n\n\nclass RepositoryPublishURLSerializer(serializers.Serializer):\n\n repository = serializers.HyperlinkedRelatedField(\n help_text=_('A URI of the repository to be synchronized.'),\n required=False,\n label=_('Repository'),\n queryset=models.Repository.objects.all(),\n view_name='repositories-detail',\n )\n\n repository_version = NestedRelatedField(\n help_text=_('A URI of the repository version to be published.'),\n required=False,\n label=_('Repository Version'),\n queryset=models.RepositoryVersion.objects.all(),\n view_name='versions-detail',\n lookup_field='number',\n parent_lookup_kwargs={'repository_pk': 'repository__pk'},\n )\n\n def validate(self, data):\n if hasattr(self, 'initial_data'):\n validate_unknown_fields(self.initial_data, self.fields)\n\n repository = data.pop('repository', None)\n repository_version = data.get('repository_version')\n if not repository and not repository_version:\n raise serializers.ValidationError(\n _(\"Either the 'repository' or 'repository_version' need to be specified\"))\n elif not repository and repository_version:\n return data\n elif repository and not repository_version:\n version = models.RepositoryVersion.latest(repository)\n if version:\n new_data = {'repository_version': version}\n new_data.update(data)\n return new_data\n else:\n raise serializers.ValidationError(\n detail=_('Repository has no version available to publish'))\n raise serializers.ValidationError(\n _(\"Either the 'repository' or 'repository_version' need to be specified \"\n \"but not both.\")\n )\n\n\nclass ExporterSerializer(MasterModelSerializer):\n _href = DetailIdentityField()\n name = serializers.CharField(\n help_text=_('The exporter unique name.'),\n validators=[UniqueValidator(queryset=models.Exporter.objects.all())]\n )\n _last_updated = serializers.DateTimeField(\n help_text=_('Timestamp of the last update.'),\n read_only=True\n )\n last_export = serializers.DateTimeField(\n help_text=_('Timestamp of the last export.'),\n read_only=True\n )\n\n class Meta:\n abstract = True\n model = models.Exporter\n fields = MasterModelSerializer.Meta.fields + (\n 'name',\n '_last_updated',\n 'last_export',\n )\n\n\nclass RepositoryVersionSerializer(ModelSerializer, NestedHyperlinkedModelSerializer):\n _href = NestedIdentityField(\n view_name='versions-detail',\n lookup_field='number', parent_lookup_kwargs={'repository_pk': 'repository__pk'},\n )\n number = serializers.IntegerField(\n read_only=True\n )\n base_version = NestedRelatedField(\n required=False,\n help_text=_('A repository version whose content was used as the initial set of content '\n 'for this repository version'),\n queryset=models.RepositoryVersion.objects.all(),\n view_name='versions-detail',\n lookup_field='number',\n parent_lookup_kwargs={'repository_pk': 'repository__pk'},\n )\n 
content_summary = serializers.SerializerMethodField(\n help_text=_('Various count summaries of the content in the version and the HREF to view '\n 'them.'),\n read_only=True,\n )\n\n def get_content_summary(self, obj):\n \"\"\"\n The summary of contained content.\n\n Returns:\n dict: {'added': {<_type>: {'count': <count>, 'href': <href>},\n 'removed': {<_type>: {'count': <count>, 'href': <href>},\n 'present': {<_type>: {'count': <count>, 'href': <href>},\n }\n \"\"\"\n to_return = {'added': {}, 'removed': {}, 'present': {}}\n for count_detail in obj.counts.all():\n count_type = count_detail.get_count_type_display()\n item_dict = {'count': count_detail.count, 'href': count_detail.content_href}\n to_return[count_type][count_detail.content_type] = item_dict\n return to_return\n\n class Meta:\n model = models.RepositoryVersion\n fields = ModelSerializer.Meta.fields + (\n '_href', 'number', 'base_version', 'content_summary',\n )\n\n\nclass RepositoryVersionCreateSerializer(ModelSerializer, NestedHyperlinkedModelSerializer):\n add_content_units = serializers.ListField(\n help_text=_('A list of content units to add to a new repository version'),\n write_only=True\n )\n remove_content_units = serializers.ListField(\n help_text=_('A list of content units to remove from the latest repository version'),\n write_only=True\n )\n base_version = NestedRelatedField(\n required=False,\n help_text=_('A repository version whose content will be used as the initial set of content '\n 'for the new repository version'),\n queryset=models.RepositoryVersion.objects.all(),\n view_name='versions-detail',\n lookup_field='number',\n parent_lookup_kwargs={'repository_pk': 'repository__pk'},\n )\n\n class Meta:\n model = models.RepositoryVersion\n fields = ['add_content_units', 'remove_content_units', 'base_version']\n", "path": "pulpcore/app/serializers/repository.py"}], "after_files": [{"content": "from gettext import gettext as _\n\nfrom rest_framework import serializers, fields\nfrom rest_framework.validators import UniqueValidator\nfrom rest_framework_nested.serializers import NestedHyperlinkedModelSerializer\n\nfrom pulpcore.app import models\nfrom pulpcore.app.serializers import (\n DetailIdentityField,\n IdentityField,\n NestedIdentityField,\n NestedRelatedField,\n LatestVersionField,\n MasterModelSerializer,\n ModelSerializer,\n)\nfrom pulpcore.app.serializers import validate_unknown_fields\n\n\nclass RepositorySerializer(ModelSerializer):\n _href = IdentityField(\n view_name='repositories-detail'\n )\n _versions_href = IdentityField(\n view_name='versions-list',\n lookup_url_kwarg='repository_pk',\n )\n _latest_version_href = LatestVersionField()\n name = serializers.CharField(\n help_text=_('A unique name for this repository.'),\n validators=[UniqueValidator(queryset=models.Repository.objects.all())]\n )\n description = serializers.CharField(\n help_text=_('An optional description.'),\n required=False,\n allow_blank=True\n )\n\n class Meta:\n model = models.Repository\n fields = ModelSerializer.Meta.fields + ('_versions_href', '_latest_version_href', 'name',\n 'description')\n\n\nclass RemoteSerializer(MasterModelSerializer):\n \"\"\"\n Every remote defined by a plugin should have a Remote serializer that inherits from this\n class. 
Please import from `pulpcore.plugin.serializers` rather than from this module directly.\n \"\"\"\n _href = DetailIdentityField()\n name = serializers.CharField(\n help_text=_('A unique name for this remote.'),\n validators=[UniqueValidator(queryset=models.Remote.objects.all())],\n )\n url = serializers.CharField(\n help_text='The URL of an external content source.',\n )\n validate = serializers.BooleanField(\n help_text='If True, the plugin will validate imported artifacts.',\n required=False,\n )\n ssl_ca_certificate = serializers.FileField(\n help_text='A PEM encoded CA certificate used to validate the server '\n 'certificate presented by the remote server.',\n write_only=True,\n required=False,\n )\n ssl_client_certificate = serializers.FileField(\n help_text='A PEM encoded client certificate used for authentication.',\n write_only=True,\n required=False,\n )\n ssl_client_key = serializers.FileField(\n help_text='A PEM encoded private key used for authentication.',\n write_only=True,\n required=False,\n )\n ssl_validation = serializers.BooleanField(\n help_text='If True, SSL peer validation must be performed.',\n required=False,\n )\n proxy_url = serializers.CharField(\n help_text='The proxy URL. Format: scheme://user:password@host:port',\n required=False,\n allow_blank=True,\n )\n username = serializers.CharField(\n help_text='The username to be used for authentication when syncing.',\n write_only=True,\n required=False,\n allow_blank=True,\n )\n password = serializers.CharField(\n help_text='The password to be used for authentication when syncing.',\n write_only=True,\n required=False,\n allow_blank=True,\n )\n _last_updated = serializers.DateTimeField(\n help_text='Timestamp of the most recent update of the remote.',\n read_only=True\n )\n download_concurrency = serializers.IntegerField(\n help_text='Total number of simultaneous connections.',\n required=False,\n min_value=1\n )\n policy = serializers.ChoiceField(\n help_text=\"The policy to use when downloading content. The possible values include: \"\n \"'immediate', 'on_demand', and 'cache_only'. 'immediate' is the default.\",\n choices=models.Remote.POLICY_CHOICES,\n default=models.Remote.IMMEDIATE\n )\n\n class Meta:\n abstract = True\n model = models.Remote\n fields = MasterModelSerializer.Meta.fields + (\n 'name', 'url', 'validate', 'ssl_ca_certificate', 'ssl_client_certificate',\n 'ssl_client_key', 'ssl_validation', 'proxy_url', 'username', 'password',\n '_last_updated', 'download_concurrency', 'policy')\n\n\nclass RepositorySyncURLSerializer(serializers.Serializer):\n repository = serializers.HyperlinkedRelatedField(\n required=True,\n help_text=_('A URI of the repository to be synchronized.'),\n queryset=models.Repository.objects.all(),\n view_name='repositories-detail',\n label=_('Repository'),\n error_messages={\n 'required': _('The repository URI must be specified.')\n }\n )\n\n mirror = fields.BooleanField(\n required=False,\n default=False,\n help_text=_('The synchronization mode, True for \"mirror\" and False for \"additive\" mode.')\n )\n\n\nclass PublisherSerializer(MasterModelSerializer):\n \"\"\"\n Every publisher defined by a plugin should have an Publisher serializer that inherits from this\n class. 
Please import from `pulpcore.plugin.serializers` rather than from this module directly.\n \"\"\"\n _href = DetailIdentityField()\n name = serializers.CharField(\n help_text=_('A unique name for this publisher.'),\n validators=[UniqueValidator(queryset=models.Publisher.objects.all())]\n )\n _last_updated = serializers.DateTimeField(\n help_text=_('Timestamp of the most recent update of the publisher configuration.'),\n read_only=True\n )\n _distributions = serializers.HyperlinkedRelatedField(\n many=True,\n read_only=True,\n view_name='distributions-detail',\n )\n\n class Meta:\n abstract = True\n model = models.Publisher\n fields = MasterModelSerializer.Meta.fields + (\n 'name', '_last_updated', '_distributions',\n )\n\n\nclass RepositoryPublishURLSerializer(serializers.Serializer):\n\n repository = serializers.HyperlinkedRelatedField(\n help_text=_('A URI of the repository to be synchronized.'),\n required=False,\n label=_('Repository'),\n queryset=models.Repository.objects.all(),\n view_name='repositories-detail',\n )\n\n repository_version = NestedRelatedField(\n help_text=_('A URI of the repository version to be published.'),\n required=False,\n label=_('Repository Version'),\n queryset=models.RepositoryVersion.objects.all(),\n view_name='versions-detail',\n lookup_field='number',\n parent_lookup_kwargs={'repository_pk': 'repository__pk'},\n )\n\n def validate(self, data):\n if hasattr(self, 'initial_data'):\n validate_unknown_fields(self.initial_data, self.fields)\n\n repository = data.pop('repository', None)\n repository_version = data.get('repository_version')\n if not repository and not repository_version:\n raise serializers.ValidationError(\n _(\"Either the 'repository' or 'repository_version' need to be specified\"))\n elif not repository and repository_version:\n return data\n elif repository and not repository_version:\n version = models.RepositoryVersion.latest(repository)\n if version:\n new_data = {'repository_version': version}\n new_data.update(data)\n return new_data\n else:\n raise serializers.ValidationError(\n detail=_('Repository has no version available to publish'))\n raise serializers.ValidationError(\n _(\"Either the 'repository' or 'repository_version' need to be specified \"\n \"but not both.\")\n )\n\n\nclass ExporterSerializer(MasterModelSerializer):\n _href = DetailIdentityField()\n name = serializers.CharField(\n help_text=_('The exporter unique name.'),\n validators=[UniqueValidator(queryset=models.Exporter.objects.all())]\n )\n _last_updated = serializers.DateTimeField(\n help_text=_('Timestamp of the last update.'),\n read_only=True\n )\n last_export = serializers.DateTimeField(\n help_text=_('Timestamp of the last export.'),\n read_only=True\n )\n\n class Meta:\n abstract = True\n model = models.Exporter\n fields = MasterModelSerializer.Meta.fields + (\n 'name',\n '_last_updated',\n 'last_export',\n )\n\n\nclass RepositoryVersionSerializer(ModelSerializer, NestedHyperlinkedModelSerializer):\n _href = NestedIdentityField(\n view_name='versions-detail',\n lookup_field='number', parent_lookup_kwargs={'repository_pk': 'repository__pk'},\n )\n number = serializers.IntegerField(\n read_only=True\n )\n base_version = NestedRelatedField(\n required=False,\n help_text=_('A repository version whose content was used as the initial set of content '\n 'for this repository version'),\n queryset=models.RepositoryVersion.objects.all(),\n view_name='versions-detail',\n lookup_field='number',\n parent_lookup_kwargs={'repository_pk': 'repository__pk'},\n )\n 
content_summary = serializers.SerializerMethodField(\n help_text=_('Various count summaries of the content in the version and the HREF to view '\n 'them.'),\n read_only=True,\n )\n\n def get_content_summary(self, obj):\n \"\"\"\n The summary of contained content.\n\n Returns:\n dict: The dictionary has the following format.::\n\n {\n 'added': {<_type>: {'count': <count>, 'href': <href>},\n 'removed': {<_type>: {'count': <count>, 'href': <href>},\n 'present': {<_type>: {'count': <count>, 'href': <href>},\n }\n\n \"\"\"\n to_return = {'added': {}, 'removed': {}, 'present': {}}\n for count_detail in obj.counts.all():\n count_type = count_detail.get_count_type_display()\n item_dict = {'count': count_detail.count, 'href': count_detail.content_href}\n to_return[count_type][count_detail.content_type] = item_dict\n return to_return\n\n class Meta:\n model = models.RepositoryVersion\n fields = ModelSerializer.Meta.fields + (\n '_href', 'number', 'base_version', 'content_summary',\n )\n\n\nclass RepositoryVersionCreateSerializer(ModelSerializer, NestedHyperlinkedModelSerializer):\n add_content_units = serializers.ListField(\n help_text=_('A list of content units to add to a new repository version'),\n write_only=True\n )\n remove_content_units = serializers.ListField(\n help_text=_('A list of content units to remove from the latest repository version'),\n write_only=True\n )\n base_version = NestedRelatedField(\n required=False,\n help_text=_('A repository version whose content will be used as the initial set of content '\n 'for the new repository version'),\n queryset=models.RepositoryVersion.objects.all(),\n view_name='versions-detail',\n lookup_field='number',\n parent_lookup_kwargs={'repository_pk': 'repository__pk'},\n )\n\n class Meta:\n model = models.RepositoryVersion\n fields = ['add_content_units', 'remove_content_units', 'base_version']\n", "path": "pulpcore/app/serializers/repository.py"}]} | 3,423 | 262 |
gh_patches_debug_696 | rasdani/github-patches | git_diff | boto__botocore-658 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pin jmespath dependency version
Can this library pin its jmespath dependency to a specific version? Currently, it depends on the development branch of the jmespath GitHub repo - which is neither stable nor deterministic.
Currently, this project's setup.py requires version 0.7.1 but the upstream GitHub repo/branch does not deliver that version - so this project's dependency graph is disconnected.
This can result in runtime errors for downstream consumers - as my organization experienced today.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import botocore
3 import sys
4
5 from setuptools import setup, find_packages
6
7
8 requires = ['jmespath==0.7.1',
9 'python-dateutil>=2.1,<3.0.0',
10 'docutils>=0.10']
11
12
13 if sys.version_info[:2] == (2, 6):
14 # For python2.6 we have a few other dependencies.
15 # First we need an ordered dictionary so we use the
16 # 2.6 backport.
17 requires.append('ordereddict==1.1')
18 # Then we need simplejson. This is because we need
19 # a json version that allows us to specify we want to
20 # use an ordereddict instead of a normal dict for the
21 # JSON objects. The 2.7 json module has this. For 2.6
22 # we need simplejson.
23 requires.append('simplejson==3.3.0')
24
25
26 setup(
27 name='botocore',
28 version=botocore.__version__,
29 description='Low-level, data-driven core of boto 3.',
30 long_description=open('README.rst').read(),
31 author='Amazon Web Services',
32 url='https://github.com/boto/botocore',
33 scripts=[],
34 packages=find_packages(exclude=['tests*']),
35 package_data={'botocore': ['data/*.json', 'data/*/*.json'],
36 'botocore.vendored.requests': ['*.pem']},
37 include_package_data=True,
38 install_requires=requires,
39 extras_require={
40 ':python_version=="2.6"': [
41 'ordereddict==1.1',
42 'simplejson==3.3.0',
43 ]
44 },
45 license="Apache License 2.0",
46 classifiers=(
47 'Development Status :: 5 - Production/Stable',
48 'Intended Audience :: Developers',
49 'Intended Audience :: System Administrators',
50 'Natural Language :: English',
51 'License :: OSI Approved :: Apache Software License',
52 'Programming Language :: Python',
53 'Programming Language :: Python :: 2.6',
54 'Programming Language :: Python :: 2.7',
55 'Programming Language :: Python :: 3',
56 'Programming Language :: Python :: 3.3',
57 'Programming Language :: Python :: 3.4',
58 ),
59 )
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@
from setuptools import setup, find_packages
-requires = ['jmespath==0.7.1',
+requires = ['jmespath>=0.7.1,<1.0.0',
'python-dateutil>=2.1,<3.0.0',
'docutils>=0.10']
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,7 +5,7 @@\n from setuptools import setup, find_packages\n \n \n-requires = ['jmespath==0.7.1',\n+requires = ['jmespath>=0.7.1,<1.0.0',\n 'python-dateutil>=2.1,<3.0.0',\n 'docutils>=0.10']\n", "issue": "Pin jmespatch dependency version\nCan this library pin its jmespath dependency to a specific version? Currently, it depends on the development branch of the jmespath GitHub repo - which is not stable nor deterministic.\n\nCurrently, this project's setup.py requires version 0.7.1 but the upstream GitHub repo/branch does not deliver that version - so this project's dependency graph is disconnected.\n\nThis can result in runtime errors for downstream consumers - like my organization did today.\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport botocore\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nrequires = ['jmespath==0.7.1',\n 'python-dateutil>=2.1,<3.0.0',\n 'docutils>=0.10']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have a few other dependencies.\n # First we need an ordered dictionary so we use the\n # 2.6 backport.\n requires.append('ordereddict==1.1')\n # Then we need simplejson. This is because we need\n # a json version that allows us to specify we want to\n # use an ordereddict instead of a normal dict for the\n # JSON objects. The 2.7 json module has this. For 2.6\n # we need simplejson.\n requires.append('simplejson==3.3.0')\n\n\nsetup(\n name='botocore',\n version=botocore.__version__,\n description='Low-level, data-driven core of boto 3.',\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='https://github.com/boto/botocore',\n scripts=[],\n packages=find_packages(exclude=['tests*']),\n package_data={'botocore': ['data/*.json', 'data/*/*.json'],\n 'botocore.vendored.requests': ['*.pem']},\n include_package_data=True,\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'ordereddict==1.1',\n 'simplejson==3.3.0',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n ),\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport botocore\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nrequires = ['jmespath>=0.7.1,<1.0.0',\n 'python-dateutil>=2.1,<3.0.0',\n 'docutils>=0.10']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have a few other dependencies.\n # First we need an ordered dictionary so we use the\n # 2.6 backport.\n requires.append('ordereddict==1.1')\n # Then we need simplejson. This is because we need\n # a json version that allows us to specify we want to\n # use an ordereddict instead of a normal dict for the\n # JSON objects. The 2.7 json module has this. 
For 2.6\n # we need simplejson.\n requires.append('simplejson==3.3.0')\n\n\nsetup(\n name='botocore',\n version=botocore.__version__,\n description='Low-level, data-driven core of boto 3.',\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='https://github.com/boto/botocore',\n scripts=[],\n packages=find_packages(exclude=['tests*']),\n package_data={'botocore': ['data/*.json', 'data/*/*.json'],\n 'botocore.vendored.requests': ['*.pem']},\n include_package_data=True,\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'ordereddict==1.1',\n 'simplejson==3.3.0',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n ),\n)\n", "path": "setup.py"}]} | 973 | 99 |
gh_patches_debug_24517 | rasdani/github-patches | git_diff | saleor__saleor-1460 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve "Sort by" label appearance
"Sort by" label is rendered differently on the `demo` branch and on `master`. This is probably due to the fact that on demo it's rendered as React component and on master as Django template. However, we have to make sure that the markup and styling is the same in both cases.
Demo:

- missing space between the colon and the field name
Master:

- missing colon
- too much space between the label and the icon
Also, I think that the arrow icon could be a little bit smaller; currently it's visually too heavy compared to the text.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/core/utils/filters.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from django_countries import countries
4 from django.db.models import Q
5
6
7 def get_sort_by_choices(filter_set):
8 return [(choice[0], choice[1].lower()) for choice in
9 filter_set.filters['sort_by'].field.choices[1::2]]
10
11
12 def get_now_sorted_by(filter_set, fields, default_sort='name'):
13 sort_by = filter_set.form.cleaned_data.get('sort_by')
14 if sort_by:
15 sort_by = fields[sort_by[0].strip('-')]
16 else:
17 sort_by = fields[default_sort]
18 return sort_by
19
20
21 def filter_by_customer(queryset, name, value):
22 return queryset.filter(
23 Q(email__icontains=value) |
24 Q(default_billing_address__first_name__icontains=value) |
25 Q(default_billing_address__last_name__icontains=value))
26
27
28 def filter_by_order_customer(queryset, name, value):
29 return queryset.filter(
30 Q(user__email__icontains=value) |
31 Q(user__default_billing_address__first_name__icontains=value) |
32 Q(user__default_billing_address__last_name__icontains=value))
33
34
35 def filter_by_location(queryset, name, value):
36 q = Q(default_billing_address__city__icontains=value)
37 q |= Q(default_billing_address__country__icontains=value)
38 country_codes = get_mapped_country_codes_from_search(value)
39 for code in country_codes:
40 q |= Q(default_billing_address__country__icontains=code)
41 return queryset.filter(q)
42
43
44 def get_mapped_country_codes_from_search(value):
45 country_codes = []
46 for code, country in dict(countries).items():
47 if value.lower() in country.lower():
48 country_codes.append(code)
49 return country_codes
50
51
52 def filter_by_date_range(queryset, name, value):
53 q = Q()
54 if value.start:
55 q = Q(start_date__gte=value.start)
56 if value.stop:
57 if value.start:
58 q |= Q(end_date__lte=value.stop)
59 else:
60 q = Q(end_date__lte=value.stop)
61 return queryset.filter(q)
62
```
Path: `saleor/product/views.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import datetime
4 import json
5
6 from django.conf import settings
7 from django.http import HttpResponsePermanentRedirect, JsonResponse
8 from django.shortcuts import get_object_or_404, redirect
9 from django.template.response import TemplateResponse
10 from django.urls import reverse
11
12 from ..cart.utils import set_cart_cookie
13 from ..core.utils import get_paginator_items, serialize_decimal
14 from ..core.utils.filters import get_now_sorted_by, get_sort_by_choices
15 from .filters import ProductFilter, SORT_BY_FIELDS
16 from .models import Category
17 from .utils import (
18 get_availability, get_product_attributes_data, get_product_images,
19 get_variant_picker_data, handle_cart_form, product_json_ld,
20 products_for_cart, products_with_availability, products_with_details)
21
22
23 def product_details(request, slug, product_id, form=None):
24 """Product details page
25
26 The following variables are available to the template:
27
28 product:
29 The Product instance itself.
30
31 is_visible:
32 Whether the product is visible to regular users (for cases when an
33 admin is previewing a product before publishing).
34
35 form:
36 The add-to-cart form.
37
38 price_range:
39 The PriceRange for the product including all discounts.
40
41 undiscounted_price_range:
42 The PriceRange excluding all discounts.
43
44 discount:
45 Either a Price instance equal to the discount value or None if no
46 discount was available.
47
48 local_price_range:
49 The same PriceRange from price_range represented in user's local
50 currency. The value will be None if exchange rate is not available or
51 the local currency is the same as site's default currency.
52 """
53 products = products_with_details(user=request.user)
54 product = get_object_or_404(products, id=product_id)
55 if product.get_slug() != slug:
56 return HttpResponsePermanentRedirect(product.get_absolute_url())
57 today = datetime.date.today()
58 is_visible = (
59 product.available_on is None or product.available_on <= today)
60 if form is None:
61 form = handle_cart_form(request, product, create_cart=False)[0]
62 availability = get_availability(product, discounts=request.discounts,
63 local_currency=request.currency)
64 product_images = get_product_images(product)
65 variant_picker_data = get_variant_picker_data(
66 product, request.discounts, request.currency)
67 product_attributes = get_product_attributes_data(product)
68 show_variant_picker = all([v.attributes for v in product.variants.all()])
69 json_ld_data = product_json_ld(product, availability, product_attributes)
70 return TemplateResponse(
71 request, 'product/details.html',
72 {'is_visible': is_visible,
73 'form': form,
74 'availability': availability,
75 'product': product,
76 'product_attributes': product_attributes,
77 'product_images': product_images,
78 'show_variant_picker': show_variant_picker,
79 'variant_picker_data': json.dumps(
80 variant_picker_data, default=serialize_decimal),
81 'json_ld_product_data': json.dumps(
82 json_ld_data, default=serialize_decimal)})
83
84
85 def product_add_to_cart(request, slug, product_id):
86 # types: (int, str, dict) -> None
87
88 if not request.method == 'POST':
89 return redirect(reverse(
90 'product:details',
91 kwargs={'product_id': product_id, 'slug': slug}))
92
93 products = products_for_cart(user=request.user)
94 product = get_object_or_404(products, pk=product_id)
95 form, cart = handle_cart_form(request, product, create_cart=True)
96 if form.is_valid():
97 form.save()
98 if request.is_ajax():
99 response = JsonResponse({'next': reverse('cart:index')}, status=200)
100 else:
101 response = redirect('cart:index')
102 else:
103 if request.is_ajax():
104 response = JsonResponse({'error': form.errors}, status=400)
105 else:
106 response = product_details(request, slug, product_id, form)
107 if not request.user.is_authenticated:
108 set_cart_cookie(cart, response)
109 return response
110
111
112 def category_index(request, path, category_id):
113 category = get_object_or_404(Category, id=category_id)
114 actual_path = category.get_full_path()
115 if actual_path != path:
116 return redirect('product:category', permanent=True, path=actual_path,
117 category_id=category_id)
118 products = (products_with_details(user=request.user)
119 .filter(categories__id=category.id)
120 .order_by('name'))
121 product_filter = ProductFilter(
122 request.GET, queryset=products, category=category)
123 products_paginated = get_paginator_items(
124 product_filter.qs, settings.PAGINATE_BY, request.GET.get('page'))
125 products_and_availability = list(products_with_availability(
126 products_paginated, request.discounts, request.currency))
127 now_sorted_by = get_now_sorted_by(product_filter, SORT_BY_FIELDS)
128 arg_sort_by = request.GET.get('sort_by')
129 is_descending = arg_sort_by.startswith('-') if arg_sort_by else False
130 ctx = {'category': category, 'filter': product_filter,
131 'products': products_and_availability,
132 'products_paginated': products_paginated,
133 'sort_by_choices': get_sort_by_choices(product_filter),
134 'now_sorted_by': now_sorted_by,
135 'is_descending': is_descending}
136 return TemplateResponse(request, 'category/index.html', ctx)
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/core/utils/filters.py b/saleor/core/utils/filters.py
--- a/saleor/core/utils/filters.py
+++ b/saleor/core/utils/filters.py
@@ -9,12 +9,12 @@
filter_set.filters['sort_by'].field.choices[1::2]]
-def get_now_sorted_by(filter_set, fields, default_sort='name'):
+def get_now_sorted_by(filter_set, default_sort='name'):
sort_by = filter_set.form.cleaned_data.get('sort_by')
if sort_by:
- sort_by = fields[sort_by[0].strip('-')]
+ sort_by = sort_by[0].strip('-')
else:
- sort_by = fields[default_sort]
+ sort_by = default_sort
return sort_by
diff --git a/saleor/product/views.py b/saleor/product/views.py
--- a/saleor/product/views.py
+++ b/saleor/product/views.py
@@ -124,7 +124,7 @@
product_filter.qs, settings.PAGINATE_BY, request.GET.get('page'))
products_and_availability = list(products_with_availability(
products_paginated, request.discounts, request.currency))
- now_sorted_by = get_now_sorted_by(product_filter, SORT_BY_FIELDS)
+ now_sorted_by = get_now_sorted_by(product_filter)
arg_sort_by = request.GET.get('sort_by')
is_descending = arg_sort_by.startswith('-') if arg_sort_by else False
ctx = {'category': category, 'filter': product_filter,
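The patch above drops the `SORT_BY_FIELDS` lookup, so `get_now_sorted_by` simply returns the cleaned sort key from the filter form (or the default). Below is a minimal sketch of the patched helper; the `_FakeForm`/`_FakeFilterSet` stand-ins are invented here purely so the snippet runs without Django and are not part of saleor.
```python
# Sketch of the simplified helper from the diff, exercised with stand-in
# objects that mimic a django_filters FilterSet's cleaned form data.


def get_now_sorted_by(filter_set, default_sort='name'):
    sort_by = filter_set.form.cleaned_data.get('sort_by')
    if sort_by:
        # 'name' and '-name' both resolve to the plain field name
        sort_by = sort_by[0].strip('-')
    else:
        sort_by = default_sort
    return sort_by


class _FakeForm:
    def __init__(self, sort_by):
        self.cleaned_data = {'sort_by': sort_by}


class _FakeFilterSet:
    def __init__(self, sort_by):
        self.form = _FakeForm(sort_by)


if __name__ == '__main__':
    print(get_now_sorted_by(_FakeFilterSet(['-price'])))  # price
    print(get_now_sorted_by(_FakeFilterSet(None)))        # name (default)
```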
| {"golden_diff": "diff --git a/saleor/core/utils/filters.py b/saleor/core/utils/filters.py\n--- a/saleor/core/utils/filters.py\n+++ b/saleor/core/utils/filters.py\n@@ -9,12 +9,12 @@\n filter_set.filters['sort_by'].field.choices[1::2]]\n \n \n-def get_now_sorted_by(filter_set, fields, default_sort='name'):\n+def get_now_sorted_by(filter_set, default_sort='name'):\n sort_by = filter_set.form.cleaned_data.get('sort_by')\n if sort_by:\n- sort_by = fields[sort_by[0].strip('-')]\n+ sort_by = sort_by[0].strip('-')\n else:\n- sort_by = fields[default_sort]\n+ sort_by = default_sort\n return sort_by\n \n \ndiff --git a/saleor/product/views.py b/saleor/product/views.py\n--- a/saleor/product/views.py\n+++ b/saleor/product/views.py\n@@ -124,7 +124,7 @@\n product_filter.qs, settings.PAGINATE_BY, request.GET.get('page'))\n products_and_availability = list(products_with_availability(\n products_paginated, request.discounts, request.currency))\n- now_sorted_by = get_now_sorted_by(product_filter, SORT_BY_FIELDS)\n+ now_sorted_by = get_now_sorted_by(product_filter)\n arg_sort_by = request.GET.get('sort_by')\n is_descending = arg_sort_by.startswith('-') if arg_sort_by else False\n ctx = {'category': category, 'filter': product_filter,\n", "issue": "Improve \"Sort by\" label appearance\n\"Sort by\" label is rendered differently on the `demo` branch and on `master`. This is probably due to the fact that on demo it's rendered as React component and on master as Django template. However, we have to make sure that the markup and styling is the same in both cases.\r\n\r\nDemo:\r\n\r\n- missing space between the colon and the field name\r\n\r\nMaster:\r\n\r\n- missing colon\r\n- too much space between the label and the icon\r\n\r\nAlso, I think that the arrow icon could be a little bit smaller; currently it's visually too heavy compared to the text.\r\n\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django_countries import countries\nfrom django.db.models import Q\n\n\ndef get_sort_by_choices(filter_set):\n return [(choice[0], choice[1].lower()) for choice in\n filter_set.filters['sort_by'].field.choices[1::2]]\n\n\ndef get_now_sorted_by(filter_set, fields, default_sort='name'):\n sort_by = filter_set.form.cleaned_data.get('sort_by')\n if sort_by:\n sort_by = fields[sort_by[0].strip('-')]\n else:\n sort_by = fields[default_sort]\n return sort_by\n\n\ndef filter_by_customer(queryset, name, value):\n return queryset.filter(\n Q(email__icontains=value) |\n Q(default_billing_address__first_name__icontains=value) |\n Q(default_billing_address__last_name__icontains=value))\n\n\ndef filter_by_order_customer(queryset, name, value):\n return queryset.filter(\n Q(user__email__icontains=value) |\n Q(user__default_billing_address__first_name__icontains=value) |\n Q(user__default_billing_address__last_name__icontains=value))\n\n\ndef filter_by_location(queryset, name, value):\n q = Q(default_billing_address__city__icontains=value)\n q |= Q(default_billing_address__country__icontains=value)\n country_codes = get_mapped_country_codes_from_search(value)\n for code in country_codes:\n q |= Q(default_billing_address__country__icontains=code)\n return queryset.filter(q)\n\n\ndef get_mapped_country_codes_from_search(value):\n country_codes = []\n for code, country in dict(countries).items():\n if value.lower() in country.lower():\n country_codes.append(code)\n return country_codes\n\n\ndef filter_by_date_range(queryset, name, value):\n q = Q()\n if value.start:\n q = 
Q(start_date__gte=value.start)\n if value.stop:\n if value.start:\n q |= Q(end_date__lte=value.stop)\n else:\n q = Q(end_date__lte=value.stop)\n return queryset.filter(q)\n", "path": "saleor/core/utils/filters.py"}, {"content": "from __future__ import unicode_literals\n\nimport datetime\nimport json\n\nfrom django.conf import settings\nfrom django.http import HttpResponsePermanentRedirect, JsonResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\n\nfrom ..cart.utils import set_cart_cookie\nfrom ..core.utils import get_paginator_items, serialize_decimal\nfrom ..core.utils.filters import get_now_sorted_by, get_sort_by_choices\nfrom .filters import ProductFilter, SORT_BY_FIELDS\nfrom .models import Category\nfrom .utils import (\n get_availability, get_product_attributes_data, get_product_images,\n get_variant_picker_data, handle_cart_form, product_json_ld,\n products_for_cart, products_with_availability, products_with_details)\n\n\ndef product_details(request, slug, product_id, form=None):\n \"\"\"Product details page\n\n The following variables are available to the template:\n\n product:\n The Product instance itself.\n\n is_visible:\n Whether the product is visible to regular users (for cases when an\n admin is previewing a product before publishing).\n\n form:\n The add-to-cart form.\n\n price_range:\n The PriceRange for the product including all discounts.\n\n undiscounted_price_range:\n The PriceRange excluding all discounts.\n\n discount:\n Either a Price instance equal to the discount value or None if no\n discount was available.\n\n local_price_range:\n The same PriceRange from price_range represented in user's local\n currency. The value will be None if exchange rate is not available or\n the local currency is the same as site's default currency.\n \"\"\"\n products = products_with_details(user=request.user)\n product = get_object_or_404(products, id=product_id)\n if product.get_slug() != slug:\n return HttpResponsePermanentRedirect(product.get_absolute_url())\n today = datetime.date.today()\n is_visible = (\n product.available_on is None or product.available_on <= today)\n if form is None:\n form = handle_cart_form(request, product, create_cart=False)[0]\n availability = get_availability(product, discounts=request.discounts,\n local_currency=request.currency)\n product_images = get_product_images(product)\n variant_picker_data = get_variant_picker_data(\n product, request.discounts, request.currency)\n product_attributes = get_product_attributes_data(product)\n show_variant_picker = all([v.attributes for v in product.variants.all()])\n json_ld_data = product_json_ld(product, availability, product_attributes)\n return TemplateResponse(\n request, 'product/details.html',\n {'is_visible': is_visible,\n 'form': form,\n 'availability': availability,\n 'product': product,\n 'product_attributes': product_attributes,\n 'product_images': product_images,\n 'show_variant_picker': show_variant_picker,\n 'variant_picker_data': json.dumps(\n variant_picker_data, default=serialize_decimal),\n 'json_ld_product_data': json.dumps(\n json_ld_data, default=serialize_decimal)})\n\n\ndef product_add_to_cart(request, slug, product_id):\n # types: (int, str, dict) -> None\n\n if not request.method == 'POST':\n return redirect(reverse(\n 'product:details',\n kwargs={'product_id': product_id, 'slug': slug}))\n\n products = products_for_cart(user=request.user)\n product = get_object_or_404(products, 
pk=product_id)\n form, cart = handle_cart_form(request, product, create_cart=True)\n if form.is_valid():\n form.save()\n if request.is_ajax():\n response = JsonResponse({'next': reverse('cart:index')}, status=200)\n else:\n response = redirect('cart:index')\n else:\n if request.is_ajax():\n response = JsonResponse({'error': form.errors}, status=400)\n else:\n response = product_details(request, slug, product_id, form)\n if not request.user.is_authenticated:\n set_cart_cookie(cart, response)\n return response\n\n\ndef category_index(request, path, category_id):\n category = get_object_or_404(Category, id=category_id)\n actual_path = category.get_full_path()\n if actual_path != path:\n return redirect('product:category', permanent=True, path=actual_path,\n category_id=category_id)\n products = (products_with_details(user=request.user)\n .filter(categories__id=category.id)\n .order_by('name'))\n product_filter = ProductFilter(\n request.GET, queryset=products, category=category)\n products_paginated = get_paginator_items(\n product_filter.qs, settings.PAGINATE_BY, request.GET.get('page'))\n products_and_availability = list(products_with_availability(\n products_paginated, request.discounts, request.currency))\n now_sorted_by = get_now_sorted_by(product_filter, SORT_BY_FIELDS)\n arg_sort_by = request.GET.get('sort_by')\n is_descending = arg_sort_by.startswith('-') if arg_sort_by else False\n ctx = {'category': category, 'filter': product_filter,\n 'products': products_and_availability,\n 'products_paginated': products_paginated,\n 'sort_by_choices': get_sort_by_choices(product_filter),\n 'now_sorted_by': now_sorted_by,\n 'is_descending': is_descending}\n return TemplateResponse(request, 'category/index.html', ctx)\n", "path": "saleor/product/views.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom django_countries import countries\nfrom django.db.models import Q\n\n\ndef get_sort_by_choices(filter_set):\n return [(choice[0], choice[1].lower()) for choice in\n filter_set.filters['sort_by'].field.choices[1::2]]\n\n\ndef get_now_sorted_by(filter_set, default_sort='name'):\n sort_by = filter_set.form.cleaned_data.get('sort_by')\n if sort_by:\n sort_by = sort_by[0].strip('-')\n else:\n sort_by = default_sort\n return sort_by\n\n\ndef filter_by_customer(queryset, name, value):\n return queryset.filter(\n Q(email__icontains=value) |\n Q(default_billing_address__first_name__icontains=value) |\n Q(default_billing_address__last_name__icontains=value))\n\n\ndef filter_by_order_customer(queryset, name, value):\n return queryset.filter(\n Q(user__email__icontains=value) |\n Q(user__default_billing_address__first_name__icontains=value) |\n Q(user__default_billing_address__last_name__icontains=value))\n\n\ndef filter_by_location(queryset, name, value):\n q = Q(default_billing_address__city__icontains=value)\n q |= Q(default_billing_address__country__icontains=value)\n country_codes = get_mapped_country_codes_from_search(value)\n for code in country_codes:\n q |= Q(default_billing_address__country__icontains=code)\n return queryset.filter(q)\n\n\ndef get_mapped_country_codes_from_search(value):\n country_codes = []\n for code, country in dict(countries).items():\n if value.lower() in country.lower():\n country_codes.append(code)\n return country_codes\n\n\ndef filter_by_date_range(queryset, name, value):\n q = Q()\n if value.start:\n q = Q(start_date__gte=value.start)\n if value.stop:\n if value.start:\n q |= Q(end_date__lte=value.stop)\n else:\n q = 
Q(end_date__lte=value.stop)\n return queryset.filter(q)\n", "path": "saleor/core/utils/filters.py"}, {"content": "from __future__ import unicode_literals\n\nimport datetime\nimport json\n\nfrom django.conf import settings\nfrom django.http import HttpResponsePermanentRedirect, JsonResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\n\nfrom ..cart.utils import set_cart_cookie\nfrom ..core.utils import get_paginator_items, serialize_decimal\nfrom ..core.utils.filters import get_now_sorted_by, get_sort_by_choices\nfrom .filters import ProductFilter, SORT_BY_FIELDS\nfrom .models import Category\nfrom .utils import (\n get_availability, get_product_attributes_data, get_product_images,\n get_variant_picker_data, handle_cart_form, product_json_ld,\n products_for_cart, products_with_availability, products_with_details)\n\n\ndef product_details(request, slug, product_id, form=None):\n \"\"\"Product details page\n\n The following variables are available to the template:\n\n product:\n The Product instance itself.\n\n is_visible:\n Whether the product is visible to regular users (for cases when an\n admin is previewing a product before publishing).\n\n form:\n The add-to-cart form.\n\n price_range:\n The PriceRange for the product including all discounts.\n\n undiscounted_price_range:\n The PriceRange excluding all discounts.\n\n discount:\n Either a Price instance equal to the discount value or None if no\n discount was available.\n\n local_price_range:\n The same PriceRange from price_range represented in user's local\n currency. The value will be None if exchange rate is not available or\n the local currency is the same as site's default currency.\n \"\"\"\n products = products_with_details(user=request.user)\n product = get_object_or_404(products, id=product_id)\n if product.get_slug() != slug:\n return HttpResponsePermanentRedirect(product.get_absolute_url())\n today = datetime.date.today()\n is_visible = (\n product.available_on is None or product.available_on <= today)\n if form is None:\n form = handle_cart_form(request, product, create_cart=False)[0]\n availability = get_availability(product, discounts=request.discounts,\n local_currency=request.currency)\n product_images = get_product_images(product)\n variant_picker_data = get_variant_picker_data(\n product, request.discounts, request.currency)\n product_attributes = get_product_attributes_data(product)\n show_variant_picker = all([v.attributes for v in product.variants.all()])\n json_ld_data = product_json_ld(product, availability, product_attributes)\n return TemplateResponse(\n request, 'product/details.html',\n {'is_visible': is_visible,\n 'form': form,\n 'availability': availability,\n 'product': product,\n 'product_attributes': product_attributes,\n 'product_images': product_images,\n 'show_variant_picker': show_variant_picker,\n 'variant_picker_data': json.dumps(\n variant_picker_data, default=serialize_decimal),\n 'json_ld_product_data': json.dumps(\n json_ld_data, default=serialize_decimal)})\n\n\ndef product_add_to_cart(request, slug, product_id):\n # types: (int, str, dict) -> None\n\n if not request.method == 'POST':\n return redirect(reverse(\n 'product:details',\n kwargs={'product_id': product_id, 'slug': slug}))\n\n products = products_for_cart(user=request.user)\n product = get_object_or_404(products, pk=product_id)\n form, cart = handle_cart_form(request, product, create_cart=True)\n if form.is_valid():\n form.save()\n if 
request.is_ajax():\n response = JsonResponse({'next': reverse('cart:index')}, status=200)\n else:\n response = redirect('cart:index')\n else:\n if request.is_ajax():\n response = JsonResponse({'error': form.errors}, status=400)\n else:\n response = product_details(request, slug, product_id, form)\n if not request.user.is_authenticated:\n set_cart_cookie(cart, response)\n return response\n\n\ndef category_index(request, path, category_id):\n category = get_object_or_404(Category, id=category_id)\n actual_path = category.get_full_path()\n if actual_path != path:\n return redirect('product:category', permanent=True, path=actual_path,\n category_id=category_id)\n products = (products_with_details(user=request.user)\n .filter(categories__id=category.id)\n .order_by('name'))\n product_filter = ProductFilter(\n request.GET, queryset=products, category=category)\n products_paginated = get_paginator_items(\n product_filter.qs, settings.PAGINATE_BY, request.GET.get('page'))\n products_and_availability = list(products_with_availability(\n products_paginated, request.discounts, request.currency))\n now_sorted_by = get_now_sorted_by(product_filter)\n arg_sort_by = request.GET.get('sort_by')\n is_descending = arg_sort_by.startswith('-') if arg_sort_by else False\n ctx = {'category': category, 'filter': product_filter,\n 'products': products_and_availability,\n 'products_paginated': products_paginated,\n 'sort_by_choices': get_sort_by_choices(product_filter),\n 'now_sorted_by': now_sorted_by,\n 'is_descending': is_descending}\n return TemplateResponse(request, 'category/index.html', ctx)\n", "path": "saleor/product/views.py"}]} | 2,533 | 342 |
gh_patches_debug_5098 | rasdani/github-patches | git_diff | xonsh__xonsh-1427 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
lazyjson - OSError: Bad file descriptor
This issue has been bothering me for too long: a bug that appears 1 out of 5 times when I start up xonsh:
```
$ XONSH_DEBUG=1 python3 -m xonsh
Exception ignored in: <bound method LazyJSON.__del__ of <xonsh.lazyjson.LazyJSON object at 0xb4a89fec>>
Traceback (most recent call last):
File "/home/laerus/.local/lib/python3.4/site-packages/xonsh/lazyjson.py", line 212, in __del__
self.close()
File "/home/laerus/.local/lib/python3.4/site-packages/xonsh/lazyjson.py", line 217, in close
self._f.close()
OSError: [Errno 9] Bad file descriptor
```
Anyone else getting this? Any ideas on debugging it?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xonsh/lazyjson.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Implements a lazy JSON file class that wraps around json data."""
3 import io
4 import json
5 import weakref
6 import contextlib
7 import collections.abc as abc
8
9
10 def _to_json_with_size(obj, offset=0, sort_keys=False):
11 if isinstance(obj, str):
12 s = json.dumps(obj)
13 o = offset
14 n = size = len(s.encode()) # size in bytes
15 elif isinstance(obj, abc.Mapping):
16 s = '{'
17 j = offset + 1
18 o = {}
19 size = {}
20 items = sorted(obj.items()) if sort_keys else obj.items()
21 for key, val in items:
22 s_k, o_k, n_k, size_k = _to_json_with_size(key, offset=j,
23 sort_keys=sort_keys)
24 s += s_k + ': '
25 j += n_k + 2
26 s_v, o_v, n_v, size_v = _to_json_with_size(val, offset=j,
27 sort_keys=sort_keys)
28 o[key] = o_v
29 size[key] = size_v
30 s += s_v + ', '
31 j += n_v + 2
32 if s.endswith(', '):
33 s = s[:-2]
34 s += '}\n'
35 n = len(s)
36 o['__total__'] = offset
37 size['__total__'] = n
38 elif isinstance(obj, abc.Sequence):
39 s = '['
40 j = offset + 1
41 o = []
42 size = []
43 for x in obj:
44 s_x, o_x, n_x, size_x = _to_json_with_size(x, offset=j,
45 sort_keys=sort_keys)
46 o.append(o_x)
47 size.append(size_x)
48 s += s_x + ', '
49 j += n_x + 2
50 if s.endswith(', '):
51 s = s[:-2]
52 s += ']\n'
53 n = len(s)
54 o.append(offset)
55 size.append(n)
56 else:
57 s = json.dumps(obj, sort_keys=sort_keys)
58 o = offset
59 n = size = len(s)
60 return s, o, n, size
61
62
63 def index(obj, sort_keys=False):
64 """Creates an index for a JSON file."""
65 idx = {}
66 json_obj = _to_json_with_size(obj, sort_keys=sort_keys)
67 s, idx['offsets'], _, idx['sizes'] = json_obj
68 return s, idx
69
70
71 JSON_FORMAT = \
72 """{{"locs": [{iloc:>10}, {ilen:>10}, {dloc:>10}, {dlen:>10}],
73 "index": {index},
74 "data": {data}
75 }}
76 """
77
78
79 def dumps(obj, sort_keys=False):
80 """Dumps an object to JSON with an index."""
81 data, idx = index(obj, sort_keys=sort_keys)
82 jdx = json.dumps(idx, sort_keys=sort_keys)
83 iloc = 69
84 ilen = len(jdx)
85 dloc = iloc + ilen + 11
86 dlen = len(data)
87 s = JSON_FORMAT.format(index=jdx, data=data, iloc=iloc, ilen=ilen,
88 dloc=dloc, dlen=dlen)
89 return s
90
91
92 def ljdump(obj, fp, sort_keys=False):
93 """Dumps an object to JSON file."""
94 s = dumps(obj, sort_keys=sort_keys)
95 fp.write(s)
96
97
98 class LJNode(abc.Mapping, abc.Sequence):
99 """A proxy node for JSON nodes. Acts as both sequence and mapping."""
100
101 def __init__(self, offsets, sizes, root):
102 """Parameters
103 ----------
104 offsets : dict, list, or int
105 offsets of corresponding data structure, in bytes
106 sizes : dict, list, or int
107 sizes of corresponding data structure, in bytes
108 root : weakref.proxy of LazyJSON
109 weakref back to root node, which should be a LazyJSON object.
110 """
111 self.offsets = offsets
112 self.sizes = sizes
113 self.root = root
114 self.is_mapping = isinstance(self.offsets, abc.Mapping)
115 self.is_sequence = isinstance(self.offsets, abc.Sequence)
116
117 def __len__(self):
118 # recall that for maps, the '__total__' key is added and for
119 # sequences the last element represents the total size/offset.
120 return len(self.sizes) - 1
121
122 def load(self):
123 """Returns the Python data structure represented by the node."""
124 if self.is_mapping:
125 offset = self.offsets['__total__']
126 size = self.sizes['__total__']
127 elif self.is_sequence:
128 offset = self.offsets[-1]
129 size = self.sizes[-1]
130 elif isinstance(self.offsets, int):
131 offset = self.offsets
132 size = self.sizes
133 return self._load_or_node(offset, size)
134
135 def _load_or_node(self, offset, size):
136 if isinstance(offset, int):
137 with self.root._open(newline='\n') as f:
138 f.seek(self.root.dloc + offset)
139 s = f.read(size)
140 val = json.loads(s)
141 elif isinstance(offset, (abc.Mapping, abc.Sequence)):
142 val = LJNode(offset, size, self.root)
143 else:
144 raise TypeError('incorrect types for offset node')
145 return val
146
147 def _getitem_mapping(self, key):
148 if key == '__total__':
149 raise KeyError('"__total__" is a special LazyJSON key!')
150 offset = self.offsets[key]
151 size = self.sizes[key]
152 return self._load_or_node(offset, size)
153
154 def _getitem_sequence(self, key):
155 if isinstance(key, int):
156 rtn = self._load_or_node(self.offsets[key], self.sizes[key])
157 elif isinstance(key, slice):
158 key = slice(*key.indices(len(self)))
159 rtn = list(map(self._load_or_node, self.offsets[key],
160 self.sizes[key]))
161 else:
162 raise TypeError('only integer indexing available')
163 return rtn
164
165 def __getitem__(self, key):
166 if self.is_mapping:
167 rtn = self._getitem_mapping(key)
168 elif self.is_sequence:
169 rtn = self._getitem_sequence(key)
170 else:
171 raise NotImplementedError
172 return rtn
173
174 def __iter__(self):
175 if self.is_mapping:
176 keys = set(self.offsets.keys())
177 keys.discard('__total__')
178 yield from iter(keys)
179 elif self.is_sequence:
180 i = 0
181 n = len(self)
182 while i < n:
183 yield self._load_or_node(self.offsets[i], self.sizes[i])
184 i += 1
185 else:
186 raise NotImplementedError
187
188
189 class LazyJSON(LJNode):
190 """Represents a lazy json file. Can be used like a normal Python
191 dict or list.
192 """
193
194 def __init__(self, f, reopen=True):
195 """Parameters
196 ----------
197 f : file handle or str
198 JSON file to open.
199 reopen : bool, optional
200 Whether new file handle should be opened for each load.
201 """
202 self._f = f
203 self.reopen = reopen
204 if not reopen and isinstance(f, str):
205 self._f = open(f, 'r', newline='\n')
206 self._load_index()
207 self.root = weakref.proxy(self)
208 self.is_mapping = isinstance(self.offsets, abc.Mapping)
209 self.is_sequence = isinstance(self.offsets, abc.Sequence)
210
211 def __del__(self):
212 self.close()
213
214 def close(self):
215 """Close the file handle, if appropriate."""
216 if not self.reopen and isinstance(self._f, io.IOBase):
217 self._f.close()
218
219 @contextlib.contextmanager
220 def _open(self, *args, **kwargs):
221 if self.reopen and isinstance(self._f, str):
222 f = open(self._f, *args, **kwargs)
223 yield f
224 f.close()
225 else:
226 yield self._f
227
228 def _load_index(self):
229 """Loads the index from the start of the file."""
230 with self._open(newline='\n') as f:
231 # read in the location data
232 f.seek(9)
233 locs = f.read(48)
234 locs = json.loads(locs)
235 self.iloc, self.ilen, self.dloc, self.dlen = locs
236 # read in the index
237 f.seek(self.iloc)
238 idx = f.read(self.ilen)
239 idx = json.loads(idx)
240 self.offsets = idx['offsets']
241 self.sizes = idx['sizes']
242
243 def __enter__(self):
244 return self
245
246 def __exit__(self, exc_type, exc_value, traceback):
247 self.close()
248
249
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xonsh/lazyjson.py b/xonsh/lazyjson.py
--- a/xonsh/lazyjson.py
+++ b/xonsh/lazyjson.py
@@ -214,7 +214,10 @@
def close(self):
"""Close the file handle, if appropriate."""
if not self.reopen and isinstance(self._f, io.IOBase):
- self._f.close()
+ try:
+ self._f.close()
+ except OSError:
+ pass
@contextlib.contextmanager
def _open(self, *args, **kwargs):
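The try/except added above matters because `close()` is reached from `__del__`, often during interpreter shutdown when the underlying file descriptor may already be invalid; closing it then raises `OSError: Bad file descriptor`, which is exactly the traceback in the issue. A self-contained sketch of the failure mode and the guard follows; the `_Handle` class is a stand-in written for this illustration, not xonsh code.
```python
# Demonstrates why an unguarded close() can raise EBADF and how the
# try/except from the patch swallows it.
import io
import os


class _Handle:
    """Stand-in for LazyJSON: owns an open file object."""

    def __init__(self, path):
        self._f = open(path, 'r', newline='\n')

    def close(self):
        if isinstance(self._f, io.IOBase):
            try:
                self._f.close()
            except OSError:
                pass  # descriptor already gone; nothing useful left to do

    def __del__(self):
        self.close()


if __name__ == '__main__':
    h = _Handle(__file__)
    os.close(h._f.fileno())  # simulate the fd being closed out from under us
    h.close()                # without the guard this raises OSError (EBADF)
```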
| {"golden_diff": "diff --git a/xonsh/lazyjson.py b/xonsh/lazyjson.py\n--- a/xonsh/lazyjson.py\n+++ b/xonsh/lazyjson.py\n@@ -214,7 +214,10 @@\n def close(self):\n \"\"\"Close the file handle, if appropriate.\"\"\"\n if not self.reopen and isinstance(self._f, io.IOBase):\n- self._f.close()\n+ try:\n+ self._f.close()\n+ except OSError:\n+ pass\n \n @contextlib.contextmanager\n def _open(self, *args, **kwargs):\n", "issue": "lazyjson - OSError: Bad file descriptor\nthis issue is bothering me for too long, a bug that appears 1 out of 5 times i startup xonsh:\n\n```\n$ XONSH_DEBUG=1 python3 -m xonsh\nException ignored in: <bound method LazyJSON.__del__ of <xonsh.lazyjson.LazyJSON object at 0xb4a89fec>>\nTraceback (most recent call last):\n File \"/home/laerus/.local/lib/python3.4/site-packages/xonsh/lazyjson.py\", line 212, in __del__\n self.close()\n File \"/home/laerus/.local/lib/python3.4/site-packages/xonsh/lazyjson.py\", line 217, in close\n self._f.close()\nOSError: [Errno 9] Bad file descriptor\n```\n\nanyone else getting this? any ideas on debugging it? \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Implements a lazy JSON file class that wraps around json data.\"\"\"\nimport io\nimport json\nimport weakref\nimport contextlib\nimport collections.abc as abc\n\n\ndef _to_json_with_size(obj, offset=0, sort_keys=False):\n if isinstance(obj, str):\n s = json.dumps(obj)\n o = offset\n n = size = len(s.encode()) # size in bytes\n elif isinstance(obj, abc.Mapping):\n s = '{'\n j = offset + 1\n o = {}\n size = {}\n items = sorted(obj.items()) if sort_keys else obj.items()\n for key, val in items:\n s_k, o_k, n_k, size_k = _to_json_with_size(key, offset=j,\n sort_keys=sort_keys)\n s += s_k + ': '\n j += n_k + 2\n s_v, o_v, n_v, size_v = _to_json_with_size(val, offset=j,\n sort_keys=sort_keys)\n o[key] = o_v\n size[key] = size_v\n s += s_v + ', '\n j += n_v + 2\n if s.endswith(', '):\n s = s[:-2]\n s += '}\\n'\n n = len(s)\n o['__total__'] = offset\n size['__total__'] = n\n elif isinstance(obj, abc.Sequence):\n s = '['\n j = offset + 1\n o = []\n size = []\n for x in obj:\n s_x, o_x, n_x, size_x = _to_json_with_size(x, offset=j,\n sort_keys=sort_keys)\n o.append(o_x)\n size.append(size_x)\n s += s_x + ', '\n j += n_x + 2\n if s.endswith(', '):\n s = s[:-2]\n s += ']\\n'\n n = len(s)\n o.append(offset)\n size.append(n)\n else:\n s = json.dumps(obj, sort_keys=sort_keys)\n o = offset\n n = size = len(s)\n return s, o, n, size\n\n\ndef index(obj, sort_keys=False):\n \"\"\"Creates an index for a JSON file.\"\"\"\n idx = {}\n json_obj = _to_json_with_size(obj, sort_keys=sort_keys)\n s, idx['offsets'], _, idx['sizes'] = json_obj\n return s, idx\n\n\nJSON_FORMAT = \\\n\"\"\"{{\"locs\": [{iloc:>10}, {ilen:>10}, {dloc:>10}, {dlen:>10}],\n \"index\": {index},\n \"data\": {data}\n}}\n\"\"\"\n\n\ndef dumps(obj, sort_keys=False):\n \"\"\"Dumps an object to JSON with an index.\"\"\"\n data, idx = index(obj, sort_keys=sort_keys)\n jdx = json.dumps(idx, sort_keys=sort_keys)\n iloc = 69\n ilen = len(jdx)\n dloc = iloc + ilen + 11\n dlen = len(data)\n s = JSON_FORMAT.format(index=jdx, data=data, iloc=iloc, ilen=ilen,\n dloc=dloc, dlen=dlen)\n return s\n\n\ndef ljdump(obj, fp, sort_keys=False):\n \"\"\"Dumps an object to JSON file.\"\"\"\n s = dumps(obj, sort_keys=sort_keys)\n fp.write(s)\n\n\nclass LJNode(abc.Mapping, abc.Sequence):\n \"\"\"A proxy node for JSON nodes. 
Acts as both sequence and mapping.\"\"\"\n\n def __init__(self, offsets, sizes, root):\n \"\"\"Parameters\n ----------\n offsets : dict, list, or int\n offsets of corresponding data structure, in bytes\n sizes : dict, list, or int\n sizes of corresponding data structure, in bytes\n root : weakref.proxy of LazyJSON\n weakref back to root node, which should be a LazyJSON object.\n \"\"\"\n self.offsets = offsets\n self.sizes = sizes\n self.root = root\n self.is_mapping = isinstance(self.offsets, abc.Mapping)\n self.is_sequence = isinstance(self.offsets, abc.Sequence)\n\n def __len__(self):\n # recall that for maps, the '__total__' key is added and for\n # sequences the last element represents the total size/offset.\n return len(self.sizes) - 1\n\n def load(self):\n \"\"\"Returns the Python data structure represented by the node.\"\"\"\n if self.is_mapping:\n offset = self.offsets['__total__']\n size = self.sizes['__total__']\n elif self.is_sequence:\n offset = self.offsets[-1]\n size = self.sizes[-1]\n elif isinstance(self.offsets, int):\n offset = self.offsets\n size = self.sizes\n return self._load_or_node(offset, size)\n\n def _load_or_node(self, offset, size):\n if isinstance(offset, int):\n with self.root._open(newline='\\n') as f:\n f.seek(self.root.dloc + offset)\n s = f.read(size)\n val = json.loads(s)\n elif isinstance(offset, (abc.Mapping, abc.Sequence)):\n val = LJNode(offset, size, self.root)\n else:\n raise TypeError('incorrect types for offset node')\n return val\n\n def _getitem_mapping(self, key):\n if key == '__total__':\n raise KeyError('\"__total__\" is a special LazyJSON key!')\n offset = self.offsets[key]\n size = self.sizes[key]\n return self._load_or_node(offset, size)\n\n def _getitem_sequence(self, key):\n if isinstance(key, int):\n rtn = self._load_or_node(self.offsets[key], self.sizes[key])\n elif isinstance(key, slice):\n key = slice(*key.indices(len(self)))\n rtn = list(map(self._load_or_node, self.offsets[key],\n self.sizes[key]))\n else:\n raise TypeError('only integer indexing available')\n return rtn\n\n def __getitem__(self, key):\n if self.is_mapping:\n rtn = self._getitem_mapping(key)\n elif self.is_sequence:\n rtn = self._getitem_sequence(key)\n else:\n raise NotImplementedError\n return rtn\n\n def __iter__(self):\n if self.is_mapping:\n keys = set(self.offsets.keys())\n keys.discard('__total__')\n yield from iter(keys)\n elif self.is_sequence:\n i = 0\n n = len(self)\n while i < n:\n yield self._load_or_node(self.offsets[i], self.sizes[i])\n i += 1\n else:\n raise NotImplementedError\n\n\nclass LazyJSON(LJNode):\n \"\"\"Represents a lazy json file. 
Can be used like a normal Python\n dict or list.\n \"\"\"\n\n def __init__(self, f, reopen=True):\n \"\"\"Parameters\n ----------\n f : file handle or str\n JSON file to open.\n reopen : bool, optional\n Whether new file handle should be opened for each load.\n \"\"\"\n self._f = f\n self.reopen = reopen\n if not reopen and isinstance(f, str):\n self._f = open(f, 'r', newline='\\n')\n self._load_index()\n self.root = weakref.proxy(self)\n self.is_mapping = isinstance(self.offsets, abc.Mapping)\n self.is_sequence = isinstance(self.offsets, abc.Sequence)\n\n def __del__(self):\n self.close()\n\n def close(self):\n \"\"\"Close the file handle, if appropriate.\"\"\"\n if not self.reopen and isinstance(self._f, io.IOBase):\n self._f.close()\n\n @contextlib.contextmanager\n def _open(self, *args, **kwargs):\n if self.reopen and isinstance(self._f, str):\n f = open(self._f, *args, **kwargs)\n yield f\n f.close()\n else:\n yield self._f\n\n def _load_index(self):\n \"\"\"Loads the index from the start of the file.\"\"\"\n with self._open(newline='\\n') as f:\n # read in the location data\n f.seek(9)\n locs = f.read(48)\n locs = json.loads(locs)\n self.iloc, self.ilen, self.dloc, self.dlen = locs\n # read in the index\n f.seek(self.iloc)\n idx = f.read(self.ilen)\n idx = json.loads(idx)\n self.offsets = idx['offsets']\n self.sizes = idx['sizes']\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n\n", "path": "xonsh/lazyjson.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Implements a lazy JSON file class that wraps around json data.\"\"\"\nimport io\nimport json\nimport weakref\nimport contextlib\nimport collections.abc as abc\n\n\ndef _to_json_with_size(obj, offset=0, sort_keys=False):\n if isinstance(obj, str):\n s = json.dumps(obj)\n o = offset\n n = size = len(s.encode()) # size in bytes\n elif isinstance(obj, abc.Mapping):\n s = '{'\n j = offset + 1\n o = {}\n size = {}\n items = sorted(obj.items()) if sort_keys else obj.items()\n for key, val in items:\n s_k, o_k, n_k, size_k = _to_json_with_size(key, offset=j,\n sort_keys=sort_keys)\n s += s_k + ': '\n j += n_k + 2\n s_v, o_v, n_v, size_v = _to_json_with_size(val, offset=j,\n sort_keys=sort_keys)\n o[key] = o_v\n size[key] = size_v\n s += s_v + ', '\n j += n_v + 2\n if s.endswith(', '):\n s = s[:-2]\n s += '}\\n'\n n = len(s)\n o['__total__'] = offset\n size['__total__'] = n\n elif isinstance(obj, abc.Sequence):\n s = '['\n j = offset + 1\n o = []\n size = []\n for x in obj:\n s_x, o_x, n_x, size_x = _to_json_with_size(x, offset=j,\n sort_keys=sort_keys)\n o.append(o_x)\n size.append(size_x)\n s += s_x + ', '\n j += n_x + 2\n if s.endswith(', '):\n s = s[:-2]\n s += ']\\n'\n n = len(s)\n o.append(offset)\n size.append(n)\n else:\n s = json.dumps(obj, sort_keys=sort_keys)\n o = offset\n n = size = len(s)\n return s, o, n, size\n\n\ndef index(obj, sort_keys=False):\n \"\"\"Creates an index for a JSON file.\"\"\"\n idx = {}\n json_obj = _to_json_with_size(obj, sort_keys=sort_keys)\n s, idx['offsets'], _, idx['sizes'] = json_obj\n return s, idx\n\n\nJSON_FORMAT = \\\n\"\"\"{{\"locs\": [{iloc:>10}, {ilen:>10}, {dloc:>10}, {dlen:>10}],\n \"index\": {index},\n \"data\": {data}\n}}\n\"\"\"\n\n\ndef dumps(obj, sort_keys=False):\n \"\"\"Dumps an object to JSON with an index.\"\"\"\n data, idx = index(obj, sort_keys=sort_keys)\n jdx = json.dumps(idx, sort_keys=sort_keys)\n iloc = 69\n ilen = len(jdx)\n dloc = iloc + ilen + 11\n dlen = len(data)\n s = 
JSON_FORMAT.format(index=jdx, data=data, iloc=iloc, ilen=ilen,\n dloc=dloc, dlen=dlen)\n return s\n\n\ndef ljdump(obj, fp, sort_keys=False):\n \"\"\"Dumps an object to JSON file.\"\"\"\n s = dumps(obj, sort_keys=sort_keys)\n fp.write(s)\n\n\nclass LJNode(abc.Mapping, abc.Sequence):\n \"\"\"A proxy node for JSON nodes. Acts as both sequence and mapping.\"\"\"\n\n def __init__(self, offsets, sizes, root):\n \"\"\"Parameters\n ----------\n offsets : dict, list, or int\n offsets of corresponding data structure, in bytes\n sizes : dict, list, or int\n sizes of corresponding data structure, in bytes\n root : weakref.proxy of LazyJSON\n weakref back to root node, which should be a LazyJSON object.\n \"\"\"\n self.offsets = offsets\n self.sizes = sizes\n self.root = root\n self.is_mapping = isinstance(self.offsets, abc.Mapping)\n self.is_sequence = isinstance(self.offsets, abc.Sequence)\n\n def __len__(self):\n # recall that for maps, the '__total__' key is added and for\n # sequences the last element represents the total size/offset.\n return len(self.sizes) - 1\n\n def load(self):\n \"\"\"Returns the Python data structure represented by the node.\"\"\"\n if self.is_mapping:\n offset = self.offsets['__total__']\n size = self.sizes['__total__']\n elif self.is_sequence:\n offset = self.offsets[-1]\n size = self.sizes[-1]\n elif isinstance(self.offsets, int):\n offset = self.offsets\n size = self.sizes\n return self._load_or_node(offset, size)\n\n def _load_or_node(self, offset, size):\n if isinstance(offset, int):\n with self.root._open(newline='\\n') as f:\n f.seek(self.root.dloc + offset)\n s = f.read(size)\n val = json.loads(s)\n elif isinstance(offset, (abc.Mapping, abc.Sequence)):\n val = LJNode(offset, size, self.root)\n else:\n raise TypeError('incorrect types for offset node')\n return val\n\n def _getitem_mapping(self, key):\n if key == '__total__':\n raise KeyError('\"__total__\" is a special LazyJSON key!')\n offset = self.offsets[key]\n size = self.sizes[key]\n return self._load_or_node(offset, size)\n\n def _getitem_sequence(self, key):\n if isinstance(key, int):\n rtn = self._load_or_node(self.offsets[key], self.sizes[key])\n elif isinstance(key, slice):\n key = slice(*key.indices(len(self)))\n rtn = list(map(self._load_or_node, self.offsets[key],\n self.sizes[key]))\n else:\n raise TypeError('only integer indexing available')\n return rtn\n\n def __getitem__(self, key):\n if self.is_mapping:\n rtn = self._getitem_mapping(key)\n elif self.is_sequence:\n rtn = self._getitem_sequence(key)\n else:\n raise NotImplementedError\n return rtn\n\n def __iter__(self):\n if self.is_mapping:\n keys = set(self.offsets.keys())\n keys.discard('__total__')\n yield from iter(keys)\n elif self.is_sequence:\n i = 0\n n = len(self)\n while i < n:\n yield self._load_or_node(self.offsets[i], self.sizes[i])\n i += 1\n else:\n raise NotImplementedError\n\n\nclass LazyJSON(LJNode):\n \"\"\"Represents a lazy json file. 
Can be used like a normal Python\n dict or list.\n \"\"\"\n\n def __init__(self, f, reopen=True):\n \"\"\"Parameters\n ----------\n f : file handle or str\n JSON file to open.\n reopen : bool, optional\n Whether new file handle should be opened for each load.\n \"\"\"\n self._f = f\n self.reopen = reopen\n if not reopen and isinstance(f, str):\n self._f = open(f, 'r', newline='\\n')\n self._load_index()\n self.root = weakref.proxy(self)\n self.is_mapping = isinstance(self.offsets, abc.Mapping)\n self.is_sequence = isinstance(self.offsets, abc.Sequence)\n\n def __del__(self):\n self.close()\n\n def close(self):\n \"\"\"Close the file handle, if appropriate.\"\"\"\n if not self.reopen and isinstance(self._f, io.IOBase):\n try:\n self._f.close()\n except OSError:\n pass\n\n @contextlib.contextmanager\n def _open(self, *args, **kwargs):\n if self.reopen and isinstance(self._f, str):\n f = open(self._f, *args, **kwargs)\n yield f\n f.close()\n else:\n yield self._f\n\n def _load_index(self):\n \"\"\"Loads the index from the start of the file.\"\"\"\n with self._open(newline='\\n') as f:\n # read in the location data\n f.seek(9)\n locs = f.read(48)\n locs = json.loads(locs)\n self.iloc, self.ilen, self.dloc, self.dlen = locs\n # read in the index\n f.seek(self.iloc)\n idx = f.read(self.ilen)\n idx = json.loads(idx)\n self.offsets = idx['offsets']\n self.sizes = idx['sizes']\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n\n", "path": "xonsh/lazyjson.py"}]} | 3,000 | 132 |
gh_patches_debug_21576 | rasdani/github-patches | git_diff | shuup__shuup-1707 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Admin: Product delete returns 404
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shuup/admin/modules/products/views/delete.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This file is part of Shuup.
3 #
4 # Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
5 #
6 # This source code is licensed under the OSL-3.0 license found in the
7 # LICENSE file in the root directory of this source tree.
8
9 from __future__ import unicode_literals
10
11 from django.contrib import messages
12 from django.core.urlresolvers import reverse
13 from django.http.response import HttpResponseRedirect
14 from django.utils.translation import ugettext as _
15 from django.views.generic import DetailView
16
17 from shuup.admin.utils.urls import get_model_url
18 from shuup.core.models import Product
19
20
21 class ProductDeleteView(DetailView):
22 model = Product
23 context_object_name = "product"
24
25 def get(self, request, *args, **kwargs):
26 return HttpResponseRedirect(get_model_url(self.get_object(), shop=self.request.shop))
27
28 def post(self, request, *args, **kwargs):
29 product = self.get_object()
30 product.soft_delete(user=request.user)
31 messages.success(request, _(u"%s has been marked deleted.") % product)
32 return HttpResponseRedirect(reverse("shuup_admin:shop_product.list"))
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shuup/admin/modules/products/views/delete.py b/shuup/admin/modules/products/views/delete.py
--- a/shuup/admin/modules/products/views/delete.py
+++ b/shuup/admin/modules/products/views/delete.py
@@ -15,18 +15,19 @@
from django.views.generic import DetailView
from shuup.admin.utils.urls import get_model_url
-from shuup.core.models import Product
+from shuup.core.models import ShopProduct
class ProductDeleteView(DetailView):
- model = Product
+ model = ShopProduct
context_object_name = "product"
def get(self, request, *args, **kwargs):
- return HttpResponseRedirect(get_model_url(self.get_object(), shop=self.request.shop))
+ product = self.get_object().product
+ return HttpResponseRedirect(get_model_url(product, shop=self.request.shop))
def post(self, request, *args, **kwargs):
- product = self.get_object()
+ product = self.get_object().product
product.soft_delete(user=request.user)
messages.success(request, _(u"%s has been marked deleted.") % product)
return HttpResponseRedirect(reverse("shuup_admin:shop_product.list"))
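The fix above suggests the admin URL passes a `ShopProduct` primary key while the view was looking it up as a plain `Product`, hence the 404; the patch queries `ShopProduct` and then dereferences `.product`. Below is a sketch of the patched view with explanatory comments, assuming the imports already present in `delete.py` (with `Product` swapped for `ShopProduct`); it naturally needs Django and Shuup installed to run.
```python
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http.response import HttpResponseRedirect
from django.utils.translation import ugettext as _
from django.views.generic import DetailView

from shuup.admin.utils.urls import get_model_url
from shuup.core.models import ShopProduct


class ProductDeleteView(DetailView):
    # The pk in the admin URL refers to a ShopProduct, so that is the model
    # the DetailView must query (looking it up as Product is what appears
    # to have produced the 404).
    model = ShopProduct
    context_object_name = "product"

    def get(self, request, *args, **kwargs):
        # GET just bounces back to the edit page of the underlying Product.
        product = self.get_object().product
        return HttpResponseRedirect(get_model_url(product, shop=self.request.shop))

    def post(self, request, *args, **kwargs):
        # POST soft-deletes the catalogue Product behind the ShopProduct.
        product = self.get_object().product
        product.soft_delete(user=request.user)
        messages.success(request, _(u"%s has been marked deleted.") % product)
        return HttpResponseRedirect(reverse("shuup_admin:shop_product.list"))
```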
| {"golden_diff": "diff --git a/shuup/admin/modules/products/views/delete.py b/shuup/admin/modules/products/views/delete.py\n--- a/shuup/admin/modules/products/views/delete.py\n+++ b/shuup/admin/modules/products/views/delete.py\n@@ -15,18 +15,19 @@\n from django.views.generic import DetailView\n \n from shuup.admin.utils.urls import get_model_url\n-from shuup.core.models import Product\n+from shuup.core.models import ShopProduct\n \n \n class ProductDeleteView(DetailView):\n- model = Product\n+ model = ShopProduct\n context_object_name = \"product\"\n \n def get(self, request, *args, **kwargs):\n- return HttpResponseRedirect(get_model_url(self.get_object(), shop=self.request.shop))\n+ product = self.get_object().product\n+ return HttpResponseRedirect(get_model_url(product, shop=self.request.shop))\n \n def post(self, request, *args, **kwargs):\n- product = self.get_object()\n+ product = self.get_object().product\n product.soft_delete(user=request.user)\n messages.success(request, _(u\"%s has been marked deleted.\") % product)\n return HttpResponseRedirect(reverse(\"shuup_admin:shop_product.list\"))\n", "issue": "Admin: Product delete returns 404\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom __future__ import unicode_literals\n\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse\nfrom django.http.response import HttpResponseRedirect\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import DetailView\n\nfrom shuup.admin.utils.urls import get_model_url\nfrom shuup.core.models import Product\n\n\nclass ProductDeleteView(DetailView):\n model = Product\n context_object_name = \"product\"\n\n def get(self, request, *args, **kwargs):\n return HttpResponseRedirect(get_model_url(self.get_object(), shop=self.request.shop))\n\n def post(self, request, *args, **kwargs):\n product = self.get_object()\n product.soft_delete(user=request.user)\n messages.success(request, _(u\"%s has been marked deleted.\") % product)\n return HttpResponseRedirect(reverse(\"shuup_admin:shop_product.list\"))\n", "path": "shuup/admin/modules/products/views/delete.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2019, Shoop Commerce Ltd. 
All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom __future__ import unicode_literals\n\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse\nfrom django.http.response import HttpResponseRedirect\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import DetailView\n\nfrom shuup.admin.utils.urls import get_model_url\nfrom shuup.core.models import ShopProduct\n\n\nclass ProductDeleteView(DetailView):\n model = ShopProduct\n context_object_name = \"product\"\n\n def get(self, request, *args, **kwargs):\n product = self.get_object().product\n return HttpResponseRedirect(get_model_url(product, shop=self.request.shop))\n\n def post(self, request, *args, **kwargs):\n product = self.get_object().product\n product.soft_delete(user=request.user)\n messages.success(request, _(u\"%s has been marked deleted.\") % product)\n return HttpResponseRedirect(reverse(\"shuup_admin:shop_product.list\"))\n", "path": "shuup/admin/modules/products/views/delete.py"}]} | 589 | 257 |
gh_patches_debug_9949 | rasdani/github-patches | git_diff | inducer__relate-607 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update pysaml2 to 4.6.0 (or higher) to fix CVE-2017-1000246
* https://nvd.nist.gov/vuln/detail/CVE-2017-1000246
* https://github.com/IdentityPython/pysaml2/issues/417
Currently blocked on https://github.com/knaperek/djangosaml2/issues/129.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from setuptools import setup, find_packages
5
6 # This script (for now) is only intended to install the 'relate' content helper
7 # script. relate-validate. Its use is not needed for (and unrelated to)
8 # deploying RELATE as a web service.
9
10 # Use 'pip install -r requirements.txt' first to install prerequisites.
11
12 setup(name="relate-courseware",
13 version="2016.1",
14 description="RELATE courseware",
15 long_description=open("README.rst", "rt").read(),
16
17 scripts=["bin/relate"],
18 author="Andreas Kloeckner",
19 url="https://github.com/inducer/relate",
20 author_email="[email protected]",
21 license="MIT",
22 packages=find_packages(exclude=['tests']),
23 install_requires=[
24 "django>=1.10,<2.1",
25 "django-crispy-forms>=1.5.1",
26 "colorama",
27 "markdown",
28 "dulwich",
29 "pyyaml",
30 "nbconvert>=5.2.1",
31 "pymbolic",
32 "sympy",
33 ],
34 package_data={
35 "relate": [
36 "templates/*.html",
37 ],
38 "course": [
39 "templates/course/*.html",
40 "templates/course/jinja2/*.tpl",
41 ],
42 },
43 )
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,10 +4,11 @@
from setuptools import setup, find_packages
# This script (for now) is only intended to install the 'relate' content helper
-# script. relate-validate. Its use is not needed for (and unrelated to)
-# deploying RELATE as a web service.
+# script. Its use is not needed for (and unrelated to) deploying RELATE as a
+# web service.
-# Use 'pip install -r requirements.txt' first to install prerequisites.
+# Use 'pip install -r requirements.txt' to install prerequisites for RELATE as
+# a web service.
setup(name="relate-courseware",
version="2016.1",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,10 +4,11 @@\n from setuptools import setup, find_packages\n \n # This script (for now) is only intended to install the 'relate' content helper\n-# script. relate-validate. Its use is not needed for (and unrelated to)\n-# deploying RELATE as a web service.\n+# script. Its use is not needed for (and unrelated to) deploying RELATE as a\n+# web service.\n \n-# Use 'pip install -r requirements.txt' first to install prerequisites.\n+# Use 'pip install -r requirements.txt' to install prerequisites for RELATE as\n+# a web service.\n \n setup(name=\"relate-courseware\",\n version=\"2016.1\",\n", "issue": "Update pysaml2 to 4.6.0 (or higher) to fix CVE-2017-1000246\n* https://nvd.nist.gov/vuln/detail/CVE-2017-1000246\r\n* https://github.com/IdentityPython/pysaml2/issues/417\r\n\r\nCurrently blocked on https://github.com/knaperek/djangosaml2/issues/129.\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n# This script (for now) is only intended to install the 'relate' content helper\n# script. relate-validate. Its use is not needed for (and unrelated to)\n# deploying RELATE as a web service.\n\n# Use 'pip install -r requirements.txt' first to install prerequisites.\n\nsetup(name=\"relate-courseware\",\n version=\"2016.1\",\n description=\"RELATE courseware\",\n long_description=open(\"README.rst\", \"rt\").read(),\n\n scripts=[\"bin/relate\"],\n author=\"Andreas Kloeckner\",\n url=\"https://github.com/inducer/relate\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n packages=find_packages(exclude=['tests']),\n install_requires=[\n \"django>=1.10,<2.1\",\n \"django-crispy-forms>=1.5.1\",\n \"colorama\",\n \"markdown\",\n \"dulwich\",\n \"pyyaml\",\n \"nbconvert>=5.2.1\",\n \"pymbolic\",\n \"sympy\",\n ],\n package_data={\n \"relate\": [\n \"templates/*.html\",\n ],\n \"course\": [\n \"templates/course/*.html\",\n \"templates/course/jinja2/*.tpl\",\n ],\n },\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n# This script (for now) is only intended to install the 'relate' content helper\n# script. Its use is not needed for (and unrelated to) deploying RELATE as a\n# web service.\n\n# Use 'pip install -r requirements.txt' to install prerequisites for RELATE as\n# a web service.\n\nsetup(name=\"relate-courseware\",\n version=\"2016.1\",\n description=\"RELATE courseware\",\n long_description=open(\"README.rst\", \"rt\").read(),\n\n scripts=[\"bin/relate\"],\n author=\"Andreas Kloeckner\",\n url=\"https://github.com/inducer/relate\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n packages=find_packages(exclude=['tests']),\n install_requires=[\n \"django>=1.10,<2.1\",\n \"django-crispy-forms>=1.5.1\",\n \"colorama\",\n \"markdown\",\n \"dulwich\",\n \"pyyaml\",\n \"nbconvert>=5.2.1\",\n \"pymbolic\",\n \"sympy\",\n ],\n package_data={\n \"relate\": [\n \"templates/*.html\",\n ],\n \"course\": [\n \"templates/course/*.html\",\n \"templates/course/jinja2/*.tpl\",\n ],\n },\n )\n", "path": "setup.py"}]} | 745 | 172 |
gh_patches_debug_34167 | rasdani/github-patches | git_diff | PyGithub__PyGithub-1810 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Attributes "maintain" and "triage" missing in class "Permissions"
The current GitHub API returns permission values for `admin`, `push`, `maintain`, `triage` and `pull` (see <https://docs.github.com/en/free-pro-team@latest/rest/reference/teams#check-team-permissions-for-a-repository>).
PyGithub so far only supports `admin`, `push` and `pull`.
This is a feature request to add the two missing attributes to class `github.Permissions.Permissions`.
https://github.com/PyGithub/PyGithub/blob/34d097ce473601624722b90fc5d0396011dd3acb/github/Permissions.py#L34
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `github/Permissions.py`
Content:
```
1 ############################ Copyrights and license ############################
2 # #
3 # Copyright 2012 Vincent Jacques <[email protected]> #
4 # Copyright 2012 Zearin <[email protected]> #
5 # Copyright 2013 AKFish <[email protected]> #
6 # Copyright 2013 Vincent Jacques <[email protected]> #
7 # Copyright 2014 Vincent Jacques <[email protected]> #
8 # Copyright 2016 Jannis Gebauer <[email protected]> #
9 # Copyright 2016 Peter Buckley <[email protected]> #
10 # Copyright 2018 Wan Liuyang <[email protected]> #
11 # Copyright 2018 sfdye <[email protected]> #
12 # #
13 # This file is part of PyGithub. #
14 # http://pygithub.readthedocs.io/ #
15 # #
16 # PyGithub is free software: you can redistribute it and/or modify it under #
17 # the terms of the GNU Lesser General Public License as published by the Free #
18 # Software Foundation, either version 3 of the License, or (at your option) #
19 # any later version. #
20 # #
21 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
22 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
23 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
24 # details. #
25 # #
26 # You should have received a copy of the GNU Lesser General Public License #
27 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
28 # #
29 ################################################################################
30
31 import github.GithubObject
32
33
34 class Permissions(github.GithubObject.NonCompletableGithubObject):
35 """
36 This class represents Permissions
37 """
38
39 def __repr__(self):
40 return self.get__repr__(
41 {
42 "admin": self._admin.value,
43 "pull": self._pull.value,
44 "push": self._push.value,
45 }
46 )
47
48 @property
49 def admin(self):
50 """
51 :type: bool
52 """
53 return self._admin.value
54
55 @property
56 def pull(self):
57 """
58 :type: bool
59 """
60 return self._pull.value
61
62 @property
63 def push(self):
64 """
65 :type: bool
66 """
67 return self._push.value
68
69 def _initAttributes(self):
70 self._admin = github.GithubObject.NotSet
71 self._pull = github.GithubObject.NotSet
72 self._push = github.GithubObject.NotSet
73
74 def _useAttributes(self, attributes):
75 if "admin" in attributes: # pragma no branch
76 self._admin = self._makeBoolAttribute(attributes["admin"])
77 if "pull" in attributes: # pragma no branch
78 self._pull = self._makeBoolAttribute(attributes["pull"])
79 if "push" in attributes: # pragma no branch
80 self._push = self._makeBoolAttribute(attributes["push"])
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/github/Permissions.py b/github/Permissions.py
--- a/github/Permissions.py
+++ b/github/Permissions.py
@@ -40,8 +40,10 @@
return self.get__repr__(
{
"admin": self._admin.value,
+ "maintain": self._maintain.value,
"pull": self._pull.value,
"push": self._push.value,
+ "triage": self._triage.value,
}
)
@@ -52,6 +54,13 @@
"""
return self._admin.value
+ @property
+ def maintain(self):
+ """
+ :type: bool
+ """
+ return self._maintain.value
+
@property
def pull(self):
"""
@@ -66,15 +75,28 @@
"""
return self._push.value
+ @property
+ def triage(self):
+ """
+ :type: bool
+ """
+ return self._triage.value
+
def _initAttributes(self):
self._admin = github.GithubObject.NotSet
+ self._maintain = github.GithubObject.NotSet
self._pull = github.GithubObject.NotSet
self._push = github.GithubObject.NotSet
+ self._triage = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "admin" in attributes: # pragma no branch
self._admin = self._makeBoolAttribute(attributes["admin"])
+ if "maintain" in attributes: # pragma no branch
+ self._maintain = self._makeBoolAttribute(attributes["maintain"])
if "pull" in attributes: # pragma no branch
self._pull = self._makeBoolAttribute(attributes["pull"])
if "push" in attributes: # pragma no branch
self._push = self._makeBoolAttribute(attributes["push"])
+ if "triage" in attributes: # pragma no branch
+ self._triage = self._makeBoolAttribute(attributes["triage"])
| {"golden_diff": "diff --git a/github/Permissions.py b/github/Permissions.py\n--- a/github/Permissions.py\n+++ b/github/Permissions.py\n@@ -40,8 +40,10 @@\n return self.get__repr__(\n {\n \"admin\": self._admin.value,\n+ \"maintain\": self._maintain.value,\n \"pull\": self._pull.value,\n \"push\": self._push.value,\n+ \"triage\": self._triage.value,\n }\n )\n \n@@ -52,6 +54,13 @@\n \"\"\"\n return self._admin.value\n \n+ @property\n+ def maintain(self):\n+ \"\"\"\n+ :type: bool\n+ \"\"\"\n+ return self._maintain.value\n+\n @property\n def pull(self):\n \"\"\"\n@@ -66,15 +75,28 @@\n \"\"\"\n return self._push.value\n \n+ @property\n+ def triage(self):\n+ \"\"\"\n+ :type: bool\n+ \"\"\"\n+ return self._triage.value\n+\n def _initAttributes(self):\n self._admin = github.GithubObject.NotSet\n+ self._maintain = github.GithubObject.NotSet\n self._pull = github.GithubObject.NotSet\n self._push = github.GithubObject.NotSet\n+ self._triage = github.GithubObject.NotSet\n \n def _useAttributes(self, attributes):\n if \"admin\" in attributes: # pragma no branch\n self._admin = self._makeBoolAttribute(attributes[\"admin\"])\n+ if \"maintain\" in attributes: # pragma no branch\n+ self._maintain = self._makeBoolAttribute(attributes[\"maintain\"])\n if \"pull\" in attributes: # pragma no branch\n self._pull = self._makeBoolAttribute(attributes[\"pull\"])\n if \"push\" in attributes: # pragma no branch\n self._push = self._makeBoolAttribute(attributes[\"push\"])\n+ if \"triage\" in attributes: # pragma no branch\n+ self._triage = self._makeBoolAttribute(attributes[\"triage\"])\n", "issue": "Attributes \"maintain\" and \"triage\" missing in class \"Permissions\"\nCurrent Github API returns a permission values for `admin`, `push`, `maintain`, `triage` and `pull` (see <https://docs.github.com/en/free-pro-team@latest/rest/reference/teams#check-team-permissions-for-a-repository>).\r\nPyGithub so far only supports `admin`, `push` and `pull`.\r\n\r\nThis is a feature request to add the two missing attributes to class `github.Permissions.Permissions`.\r\nhttps://github.com/PyGithub/PyGithub/blob/34d097ce473601624722b90fc5d0396011dd3acb/github/Permissions.py#L34\r\n\n", "before_files": [{"content": "############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2016 Jannis Gebauer <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n################################################################################\n\nimport github.GithubObject\n\n\nclass Permissions(github.GithubObject.NonCompletableGithubObject):\n \"\"\"\n This class represents Permissions\n \"\"\"\n\n def __repr__(self):\n return self.get__repr__(\n {\n \"admin\": self._admin.value,\n \"pull\": self._pull.value,\n \"push\": self._push.value,\n }\n )\n\n @property\n def admin(self):\n \"\"\"\n :type: bool\n \"\"\"\n return self._admin.value\n\n @property\n def pull(self):\n \"\"\"\n :type: bool\n \"\"\"\n return self._pull.value\n\n @property\n def push(self):\n \"\"\"\n :type: bool\n \"\"\"\n return self._push.value\n\n def _initAttributes(self):\n self._admin = github.GithubObject.NotSet\n self._pull = github.GithubObject.NotSet\n self._push = github.GithubObject.NotSet\n\n def _useAttributes(self, attributes):\n if \"admin\" in attributes: # pragma no branch\n self._admin = self._makeBoolAttribute(attributes[\"admin\"])\n if \"pull\" in attributes: # pragma no branch\n self._pull = self._makeBoolAttribute(attributes[\"pull\"])\n if \"push\" in attributes: # pragma no branch\n self._push = self._makeBoolAttribute(attributes[\"push\"])\n", "path": "github/Permissions.py"}], "after_files": [{"content": "############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2016 Jannis Gebauer <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n################################################################################\n\nimport github.GithubObject\n\n\nclass Permissions(github.GithubObject.NonCompletableGithubObject):\n \"\"\"\n This class represents Permissions\n \"\"\"\n\n def __repr__(self):\n return self.get__repr__(\n {\n \"admin\": self._admin.value,\n \"maintain\": self._maintain.value,\n \"pull\": self._pull.value,\n \"push\": self._push.value,\n \"triage\": self._triage.value,\n }\n )\n\n @property\n def admin(self):\n \"\"\"\n :type: bool\n \"\"\"\n return self._admin.value\n\n @property\n def maintain(self):\n \"\"\"\n :type: bool\n \"\"\"\n return self._maintain.value\n\n @property\n def pull(self):\n \"\"\"\n :type: bool\n \"\"\"\n return self._pull.value\n\n @property\n def push(self):\n \"\"\"\n :type: bool\n \"\"\"\n return self._push.value\n\n @property\n def triage(self):\n \"\"\"\n :type: bool\n \"\"\"\n return self._triage.value\n\n def _initAttributes(self):\n self._admin = github.GithubObject.NotSet\n self._maintain = github.GithubObject.NotSet\n self._pull = github.GithubObject.NotSet\n self._push = github.GithubObject.NotSet\n self._triage = github.GithubObject.NotSet\n\n def _useAttributes(self, attributes):\n if \"admin\" in attributes: # pragma no branch\n self._admin = self._makeBoolAttribute(attributes[\"admin\"])\n if \"maintain\" in attributes: # pragma no branch\n self._maintain = self._makeBoolAttribute(attributes[\"maintain\"])\n if \"pull\" in attributes: # pragma no branch\n self._pull = self._makeBoolAttribute(attributes[\"pull\"])\n if \"push\" in attributes: # pragma no branch\n self._push = self._makeBoolAttribute(attributes[\"push\"])\n if \"triage\" in attributes: # pragma no branch\n self._triage = self._makeBoolAttribute(attributes[\"triage\"])\n", "path": "github/Permissions.py"}]} | 1,270 | 461 |
gh_patches_debug_31773 | rasdani/github-patches | git_diff | mkdocs__mkdocs-1922 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Asyncio NotImplemented Error after Python 3.8.0
Using a new install of Python 3.8.0, I pip installed the latest version of MkDocs and then tried to run `mkdocs serve`, which encountered the error below.
I tried again on a new docs directory (`mkdocs new testing`), but encountered the same issue again.
Python version: 3.8.0
OS: Windows 10 Pro v1709
error:
```
C:\dev\testing
λ mkdocs serve
INFO - Building documentation...
INFO - Cleaning site directory
[I 191017 14:49:48 server:296] Serving on http://127.0.0.1:8000
Traceback (most recent call last):
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\runpy.py", line 192, in _run_module_as_main
return _run_code(code, main_globals, None,
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Users\aleksandr.skobelev\AppData\Local\Programs\Python\Python38\Scripts\mkdocs.exe\__main__.py", line 9, in <module>
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\site-packages\click\core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\site-packages\click\core.py", line 717, in main
rv = self.invoke(ctx)
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\site-packages\click\core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\site-packages\click\core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\site-packages\click\core.py", line 555, in invoke
return callback(*args, **kwargs)
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\site-packages\mkdocs\__main__.py", line 128, in serve_command
serve.serve(
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\site-packages\mkdocs\commands\serve.py", line 124, in serve
_livereload(host, port, config, builder, site_dir)
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\site-packages\mkdocs\commands\serve.py", line 58, in _livereload
server.serve(root=site_dir, host=host, port=port, restart_delay=0)
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\site-packages\livereload\server.py", line 298, in serve
self.application(
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\site-packages\livereload\server.py", line 253, in application
app.listen(port, address=host)
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\site-packages\tornado\web.py", line 2112, in listen
server.listen(port, address)
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\site-packages\tornado\tcpserver.py", line 152, in listen
self.add_sockets(sockets)
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\site-packages\tornado\tcpserver.py", line 165, in add_sockets
self._handlers[sock.fileno()] = add_accept_handler(
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\site-packages\tornado\netutil.py", line 279, in add_accept_handler
io_loop.add_handler(sock, accept_handler, IOLoop.READ)
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\site-packages\tornado\platform\asyncio.py", line 99, in add_handler
self.asyncio_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ)
File "c:\users\aleksandr.skobelev\appdata\local\programs\python\python38\lib\asyncio\events.py", line 501, in add_reader
raise NotImplementedError
NotImplementedError
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/commands/serve.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import logging
4 import shutil
5 import tempfile
6
7 from os.path import isfile, join
8 from mkdocs.commands.build import build
9 from mkdocs.config import load_config
10
11 log = logging.getLogger(__name__)
12
13
14 def _get_handler(site_dir, StaticFileHandler):
15
16 from tornado.template import Loader
17
18 class WebHandler(StaticFileHandler):
19
20 def write_error(self, status_code, **kwargs):
21
22 if status_code in (404, 500):
23 error_page = '{}.html'.format(status_code)
24 if isfile(join(site_dir, error_page)):
25 self.write(Loader(site_dir).load(error_page).generate())
26 else:
27 super(WebHandler, self).write_error(status_code, **kwargs)
28
29 return WebHandler
30
31
32 def _livereload(host, port, config, builder, site_dir):
33
34 # We are importing here for anyone that has issues with livereload. Even if
35 # this fails, the --no-livereload alternative should still work.
36 from livereload import Server
37 import livereload.handlers
38
39 class LiveReloadServer(Server):
40
41 def get_web_handlers(self, script):
42 handlers = super(LiveReloadServer, self).get_web_handlers(script)
43 # replace livereload handler
44 return [(handlers[0][0], _get_handler(site_dir, livereload.handlers.StaticFileHandler), handlers[0][2],)]
45
46 server = LiveReloadServer()
47
48 # Watch the documentation files, the config file and the theme files.
49 server.watch(config['docs_dir'], builder)
50 server.watch(config['config_file_path'], builder)
51
52 for d in config['theme'].dirs:
53 server.watch(d, builder)
54
55 # Run `serve` plugin events.
56 server = config['plugins'].run_event('serve', server, config=config)
57
58 server.serve(root=site_dir, host=host, port=port, restart_delay=0)
59
60
61 def _static_server(host, port, site_dir):
62
63 # Importing here to seperate the code paths from the --livereload
64 # alternative.
65 from tornado import ioloop
66 from tornado import web
67
68 application = web.Application([
69 (r"/(.*)", _get_handler(site_dir, web.StaticFileHandler), {
70 "path": site_dir,
71 "default_filename": "index.html"
72 }),
73 ])
74 application.listen(port=port, address=host)
75
76 log.info('Running at: http://%s:%s/', host, port)
77 log.info('Hold ctrl+c to quit.')
78 try:
79 ioloop.IOLoop.instance().start()
80 except KeyboardInterrupt:
81 log.info('Stopping server...')
82
83
84 def serve(config_file=None, dev_addr=None, strict=None, theme=None,
85 theme_dir=None, livereload='livereload'):
86 """
87 Start the MkDocs development server
88
89 By default it will serve the documentation on http://localhost:8000/ and
90 it will rebuild the documentation and refresh the page automatically
91 whenever a file is edited.
92 """
93
94 # Create a temporary build directory, and set some options to serve it
95 # PY2 returns a byte string by default. The Unicode prefix ensures a Unicode
96 # string is returned. And it makes MkDocs temp dirs easier to identify.
97 site_dir = tempfile.mkdtemp(prefix='mkdocs_')
98
99 def builder():
100 log.info("Building documentation...")
101 config = load_config(
102 config_file=config_file,
103 dev_addr=dev_addr,
104 strict=strict,
105 theme=theme,
106 theme_dir=theme_dir,
107 site_dir=site_dir
108 )
109 # Override a few config settings after validation
110 config['site_url'] = 'http://{0}/'.format(config['dev_addr'])
111
112 live_server = livereload in ['dirty', 'livereload']
113 dirty = livereload == 'dirty'
114 build(config, live_server=live_server, dirty=dirty)
115 return config
116
117 try:
118 # Perform the initial build
119 config = builder()
120
121 host, port = config['dev_addr']
122
123 if livereload in ['livereload', 'dirty']:
124 _livereload(host, port, config, builder, site_dir)
125 else:
126 _static_server(host, port, site_dir)
127 finally:
128 shutil.rmtree(site_dir)
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mkdocs/commands/serve.py b/mkdocs/commands/serve.py
--- a/mkdocs/commands/serve.py
+++ b/mkdocs/commands/serve.py
@@ -3,6 +3,7 @@
import logging
import shutil
import tempfile
+import sys
from os.path import isfile, join
from mkdocs.commands.build import build
@@ -11,6 +12,26 @@
log = logging.getLogger(__name__)
+def _init_asyncio_patch():
+ """
+ Select compatible event loop for Tornado 5+.
+
+ As of Python 3.8, the default event loop on Windows is `proactor`,
+ however Tornado requires the old default "selector" event loop.
+ As Tornado has decided to leave this to users to set, MkDocs needs
+ to set it. See https://github.com/tornadoweb/tornado/issues/2608.
+ """
+ if sys.platform.startswith("win") and sys.version_info >= (3, 8):
+ import asyncio
+ try:
+ from asyncio import WindowsSelectorEventLoopPolicy
+ except ImportError:
+ pass # Can't assign a policy which doesn't exist.
+ else:
+ if not isinstance(asyncio.get_event_loop_policy(), WindowsSelectorEventLoopPolicy):
+ asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
+
+
def _get_handler(site_dir, StaticFileHandler):
from tornado.template import Loader
@@ -33,6 +54,7 @@
# We are importing here for anyone that has issues with livereload. Even if
# this fails, the --no-livereload alternative should still work.
+ _init_asyncio_patch()
from livereload import Server
import livereload.handlers
@@ -62,6 +84,7 @@
# Importing here to seperate the code paths from the --livereload
# alternative.
+ _init_asyncio_patch()
from tornado import ioloop
from tornado import web
| {"golden_diff": "diff --git a/mkdocs/commands/serve.py b/mkdocs/commands/serve.py\n--- a/mkdocs/commands/serve.py\n+++ b/mkdocs/commands/serve.py\n@@ -3,6 +3,7 @@\n import logging\n import shutil\n import tempfile\n+import sys\n \n from os.path import isfile, join\n from mkdocs.commands.build import build\n@@ -11,6 +12,26 @@\n log = logging.getLogger(__name__)\n \n \n+def _init_asyncio_patch():\n+ \"\"\"\n+ Select compatible event loop for Tornado 5+.\n+\n+ As of Python 3.8, the default event loop on Windows is `proactor`,\n+ however Tornado requires the old default \"selector\" event loop.\n+ As Tornado has decided to leave this to users to set, MkDocs needs\n+ to set it. See https://github.com/tornadoweb/tornado/issues/2608.\n+ \"\"\"\n+ if sys.platform.startswith(\"win\") and sys.version_info >= (3, 8):\n+ import asyncio\n+ try:\n+ from asyncio import WindowsSelectorEventLoopPolicy\n+ except ImportError:\n+ pass # Can't assign a policy which doesn't exist.\n+ else:\n+ if not isinstance(asyncio.get_event_loop_policy(), WindowsSelectorEventLoopPolicy):\n+ asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())\n+\n+\n def _get_handler(site_dir, StaticFileHandler):\n \n from tornado.template import Loader\n@@ -33,6 +54,7 @@\n \n # We are importing here for anyone that has issues with livereload. Even if\n # this fails, the --no-livereload alternative should still work.\n+ _init_asyncio_patch()\n from livereload import Server\n import livereload.handlers\n \n@@ -62,6 +84,7 @@\n \n # Importing here to seperate the code paths from the --livereload\n # alternative.\n+ _init_asyncio_patch()\n from tornado import ioloop\n from tornado import web\n", "issue": "Asyncio NotImplemented Error after Python 3.8.0\nUsing a new install of Python 3.8.0, pip installed latest version of MkDocs, and then tried to run `mkdocs serve`, which encountered the below error. \r\n\r\nI tried again on a new docs directory (`mkdocs new testing`), but encountered the same issue again. 
\r\n\r\nPython version: 3.8.0\r\nOS: Windows 10 Pro v1709\r\n\r\nerror:\r\n```\r\nC:\\dev\\testing\r\n\u03bb mkdocs serve\r\nINFO - Building documentation...\r\nINFO - Cleaning site directory\r\n[I 191017 14:49:48 server:296] Serving on http://127.0.0.1:8000\r\nTraceback (most recent call last):\r\n File \"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\runpy.py\", line 192, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\aleksandr.skobelev\\AppData\\Local\\Programs\\Python\\Python38\\Scripts\\mkdocs.exe\\__main__.py\", line 9, in <module>\r\n File \"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\site-packages\\click\\core.py\", line 764, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\site-packages\\click\\core.py\", line 717, in main\r\n rv = self.invoke(ctx)\r\n File \"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\site-packages\\click\\core.py\", line 1137, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\site-packages\\click\\core.py\", line 956, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\site-packages\\click\\core.py\", line 555, in invoke\r\n return callback(*args, **kwargs)\r\n File \"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\site-packages\\mkdocs\\__main__.py\", line 128, in serve_command\r\n serve.serve(\r\n File \"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\site-packages\\mkdocs\\commands\\serve.py\", line 124, in serve\r\n _livereload(host, port, config, builder, site_dir)\r\n File \"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\site-packages\\mkdocs\\commands\\serve.py\", line 58, in _livereload\r\n server.serve(root=site_dir, host=host, port=port, restart_delay=0)\r\n File \"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\site-packages\\livereload\\server.py\", line 298, in serve\r\n self.application(\r\n File \"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\site-packages\\livereload\\server.py\", line 253, in application\r\n app.listen(port, address=host)\r\n File \"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\site-packages\\tornado\\web.py\", line 2112, in listen\r\n server.listen(port, address)\r\n File \"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\site-packages\\tornado\\tcpserver.py\", line 152, in listen\r\n self.add_sockets(sockets)\r\n File \"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\site-packages\\tornado\\tcpserver.py\", line 165, in add_sockets\r\n self._handlers[sock.fileno()] = add_accept_handler(\r\n File \"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\site-packages\\tornado\\netutil.py\", line 279, in add_accept_handler\r\n io_loop.add_handler(sock, accept_handler, IOLoop.READ)\r\n File 
\"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\site-packages\\tornado\\platform\\asyncio.py\", line 99, in add_handler\r\n self.asyncio_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ)\r\n File \"c:\\users\\aleksandr.skobelev\\appdata\\local\\programs\\python\\python38\\lib\\asyncio\\events.py\", line 501, in add_reader\r\n raise NotImplementedError\r\nNotImplementedError\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport shutil\nimport tempfile\n\nfrom os.path import isfile, join\nfrom mkdocs.commands.build import build\nfrom mkdocs.config import load_config\n\nlog = logging.getLogger(__name__)\n\n\ndef _get_handler(site_dir, StaticFileHandler):\n\n from tornado.template import Loader\n\n class WebHandler(StaticFileHandler):\n\n def write_error(self, status_code, **kwargs):\n\n if status_code in (404, 500):\n error_page = '{}.html'.format(status_code)\n if isfile(join(site_dir, error_page)):\n self.write(Loader(site_dir).load(error_page).generate())\n else:\n super(WebHandler, self).write_error(status_code, **kwargs)\n\n return WebHandler\n\n\ndef _livereload(host, port, config, builder, site_dir):\n\n # We are importing here for anyone that has issues with livereload. Even if\n # this fails, the --no-livereload alternative should still work.\n from livereload import Server\n import livereload.handlers\n\n class LiveReloadServer(Server):\n\n def get_web_handlers(self, script):\n handlers = super(LiveReloadServer, self).get_web_handlers(script)\n # replace livereload handler\n return [(handlers[0][0], _get_handler(site_dir, livereload.handlers.StaticFileHandler), handlers[0][2],)]\n\n server = LiveReloadServer()\n\n # Watch the documentation files, the config file and the theme files.\n server.watch(config['docs_dir'], builder)\n server.watch(config['config_file_path'], builder)\n\n for d in config['theme'].dirs:\n server.watch(d, builder)\n\n # Run `serve` plugin events.\n server = config['plugins'].run_event('serve', server, config=config)\n\n server.serve(root=site_dir, host=host, port=port, restart_delay=0)\n\n\ndef _static_server(host, port, site_dir):\n\n # Importing here to seperate the code paths from the --livereload\n # alternative.\n from tornado import ioloop\n from tornado import web\n\n application = web.Application([\n (r\"/(.*)\", _get_handler(site_dir, web.StaticFileHandler), {\n \"path\": site_dir,\n \"default_filename\": \"index.html\"\n }),\n ])\n application.listen(port=port, address=host)\n\n log.info('Running at: http://%s:%s/', host, port)\n log.info('Hold ctrl+c to quit.')\n try:\n ioloop.IOLoop.instance().start()\n except KeyboardInterrupt:\n log.info('Stopping server...')\n\n\ndef serve(config_file=None, dev_addr=None, strict=None, theme=None,\n theme_dir=None, livereload='livereload'):\n \"\"\"\n Start the MkDocs development server\n\n By default it will serve the documentation on http://localhost:8000/ and\n it will rebuild the documentation and refresh the page automatically\n whenever a file is edited.\n \"\"\"\n\n # Create a temporary build directory, and set some options to serve it\n # PY2 returns a byte string by default. The Unicode prefix ensures a Unicode\n # string is returned. 
And it makes MkDocs temp dirs easier to identify.\n site_dir = tempfile.mkdtemp(prefix='mkdocs_')\n\n def builder():\n log.info(\"Building documentation...\")\n config = load_config(\n config_file=config_file,\n dev_addr=dev_addr,\n strict=strict,\n theme=theme,\n theme_dir=theme_dir,\n site_dir=site_dir\n )\n # Override a few config settings after validation\n config['site_url'] = 'http://{0}/'.format(config['dev_addr'])\n\n live_server = livereload in ['dirty', 'livereload']\n dirty = livereload == 'dirty'\n build(config, live_server=live_server, dirty=dirty)\n return config\n\n try:\n # Perform the initial build\n config = builder()\n\n host, port = config['dev_addr']\n\n if livereload in ['livereload', 'dirty']:\n _livereload(host, port, config, builder, site_dir)\n else:\n _static_server(host, port, site_dir)\n finally:\n shutil.rmtree(site_dir)\n", "path": "mkdocs/commands/serve.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport shutil\nimport tempfile\nimport sys\n\nfrom os.path import isfile, join\nfrom mkdocs.commands.build import build\nfrom mkdocs.config import load_config\n\nlog = logging.getLogger(__name__)\n\n\ndef _init_asyncio_patch():\n \"\"\"\n Select compatible event loop for Tornado 5+.\n\n As of Python 3.8, the default event loop on Windows is `proactor`,\n however Tornado requires the old default \"selector\" event loop.\n As Tornado has decided to leave this to users to set, MkDocs needs\n to set it. See https://github.com/tornadoweb/tornado/issues/2608.\n \"\"\"\n if sys.platform.startswith(\"win\") and sys.version_info >= (3, 8):\n import asyncio\n try:\n from asyncio import WindowsSelectorEventLoopPolicy\n except ImportError:\n pass # Can't assign a policy which doesn't exist.\n else:\n if not isinstance(asyncio.get_event_loop_policy(), WindowsSelectorEventLoopPolicy):\n asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())\n\n\ndef _get_handler(site_dir, StaticFileHandler):\n\n from tornado.template import Loader\n\n class WebHandler(StaticFileHandler):\n\n def write_error(self, status_code, **kwargs):\n\n if status_code in (404, 500):\n error_page = '{}.html'.format(status_code)\n if isfile(join(site_dir, error_page)):\n self.write(Loader(site_dir).load(error_page).generate())\n else:\n super(WebHandler, self).write_error(status_code, **kwargs)\n\n return WebHandler\n\n\ndef _livereload(host, port, config, builder, site_dir):\n\n # We are importing here for anyone that has issues with livereload. 
Even if\n # this fails, the --no-livereload alternative should still work.\n _init_asyncio_patch()\n from livereload import Server\n import livereload.handlers\n\n class LiveReloadServer(Server):\n\n def get_web_handlers(self, script):\n handlers = super(LiveReloadServer, self).get_web_handlers(script)\n # replace livereload handler\n return [(handlers[0][0], _get_handler(site_dir, livereload.handlers.StaticFileHandler), handlers[0][2],)]\n\n server = LiveReloadServer()\n\n # Watch the documentation files, the config file and the theme files.\n server.watch(config['docs_dir'], builder)\n server.watch(config['config_file_path'], builder)\n\n for d in config['theme'].dirs:\n server.watch(d, builder)\n\n # Run `serve` plugin events.\n server = config['plugins'].run_event('serve', server, config=config)\n\n server.serve(root=site_dir, host=host, port=port, restart_delay=0)\n\n\ndef _static_server(host, port, site_dir):\n\n # Importing here to seperate the code paths from the --livereload\n # alternative.\n _init_asyncio_patch()\n from tornado import ioloop\n from tornado import web\n\n application = web.Application([\n (r\"/(.*)\", _get_handler(site_dir, web.StaticFileHandler), {\n \"path\": site_dir,\n \"default_filename\": \"index.html\"\n }),\n ])\n application.listen(port=port, address=host)\n\n log.info('Running at: http://%s:%s/', host, port)\n log.info('Hold ctrl+c to quit.')\n try:\n ioloop.IOLoop.instance().start()\n except KeyboardInterrupt:\n log.info('Stopping server...')\n\n\ndef serve(config_file=None, dev_addr=None, strict=None, theme=None,\n theme_dir=None, livereload='livereload'):\n \"\"\"\n Start the MkDocs development server\n\n By default it will serve the documentation on http://localhost:8000/ and\n it will rebuild the documentation and refresh the page automatically\n whenever a file is edited.\n \"\"\"\n\n # Create a temporary build directory, and set some options to serve it\n # PY2 returns a byte string by default. The Unicode prefix ensures a Unicode\n # string is returned. And it makes MkDocs temp dirs easier to identify.\n site_dir = tempfile.mkdtemp(prefix='mkdocs_')\n\n def builder():\n log.info(\"Building documentation...\")\n config = load_config(\n config_file=config_file,\n dev_addr=dev_addr,\n strict=strict,\n theme=theme,\n theme_dir=theme_dir,\n site_dir=site_dir\n )\n # Override a few config settings after validation\n config['site_url'] = 'http://{0}/'.format(config['dev_addr'])\n\n live_server = livereload in ['dirty', 'livereload']\n dirty = livereload == 'dirty'\n build(config, live_server=live_server, dirty=dirty)\n return config\n\n try:\n # Perform the initial build\n config = builder()\n\n host, port = config['dev_addr']\n\n if livereload in ['livereload', 'dirty']:\n _livereload(host, port, config, builder, site_dir)\n else:\n _static_server(host, port, site_dir)\n finally:\n shutil.rmtree(site_dir)\n", "path": "mkdocs/commands/serve.py"}]} | 2,747 | 453 |
gh_patches_debug_41209 | rasdani/github-patches | git_diff | learningequality__kolibri-1581 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reinstate 'all' on recommendations page with more efficient pagination
## Summary
Currently we use page-based pagination to show all content on the learn page. This can result in very slow API requests for large channels.

For now, this has been fixed by removing the `all` section from the learn page, but it should be reinstated for the anonymous user case, alongside cursor-based pagination that will allow for more efficient queries.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/content/api.py`
Content:
```
1 import os
2 from functools import reduce
3 from random import sample
4
5 from django.core.cache import cache
6 from django.db.models import Q
7 from django.db.models.aggregates import Count
8 from kolibri.content import models, serializers
9 from kolibri.content.content_db_router import get_active_content_database
10 from kolibri.logger.models import ContentSessionLog, ContentSummaryLog
11 from le_utils.constants import content_kinds
12 from rest_framework import filters, pagination, viewsets
13 from rest_framework.decorators import detail_route
14 from rest_framework.response import Response
15
16 from .permissions import OnlyDeviceOwnerCanDelete
17 from .utils.search import fuzz
18 from .utils.paths import get_content_database_file_path
19
20
21 def _join_with_logical_operator(lst, operator):
22 op = ") {operator} (".format(operator=operator)
23 return "(({items}))".format(items=op.join(lst))
24
25
26 class ChannelMetadataCacheViewSet(viewsets.ModelViewSet):
27 permission_classes = (OnlyDeviceOwnerCanDelete,)
28 serializer_class = serializers.ChannelMetadataCacheSerializer
29
30 def get_queryset(self):
31 return models.ChannelMetadataCache.objects.all()
32
33 def destroy(self, request, pk=None):
34 """
35 Destroys the ChannelMetadata object and its associated sqlite3 file on
36 the filesystem.
37 """
38 super(ChannelMetadataCacheViewSet, self).destroy(request)
39
40 if self.delete_content_db_file(pk):
41 response_msg = 'Channel {} removed from device'.format(pk)
42 else:
43 response_msg = 'Channel {} removed, but no content database was found'.format(pk)
44
45 return Response(response_msg)
46
47 def delete_content_db_file(self, channel_id):
48 try:
49 os.remove(get_content_database_file_path(channel_id))
50 return True
51 except OSError:
52 return False
53
54
55 class ContentNodeFilter(filters.FilterSet):
56 search = filters.django_filters.MethodFilter(action='title_description_filter')
57 recommendations_for = filters.django_filters.MethodFilter()
58 next_steps = filters.django_filters.MethodFilter()
59 popular = filters.django_filters.MethodFilter()
60 resume = filters.django_filters.MethodFilter()
61 kind = filters.django_filters.MethodFilter()
62 ids = filters.django_filters.MethodFilter()
63
64 class Meta:
65 model = models.ContentNode
66 fields = ['parent', 'search', 'prerequisite_for', 'has_prerequisite', 'related', 'recommendations_for', 'ids', 'content_id']
67
68 def title_description_filter(self, queryset, value):
69 """
70 search for title or description that contains the keywords that are not necessary in adjacent
71 """
72 exact_match = queryset.filter(Q(parent__isnull=False), Q(title__icontains=value) | Q(description__icontains=value))
73 if exact_match:
74 return exact_match
75 # if no exact match, fuzzy search using the stemmed_metaphone field in ContentNode that covers the title and description
76 fuzzed_tokens = [fuzz(word) for word in value.split()]
77 if not fuzzed_tokens[0]:
78 return []
79 token_queries = [reduce(lambda x, y: x | y, [Q(stemmed_metaphone__contains=token) for token in tokens]) for tokens in fuzzed_tokens]
80 return queryset.filter(
81 Q(parent__isnull=False),
82 reduce(lambda x, y: x & y, token_queries))
83
84 def filter_recommendations_for(self, queryset, value):
85 """
86 Recommend items that are similar to this piece of content.
87 """
88 recc_node = queryset.get(pk=value)
89 descendants = recc_node.get_descendants(include_self=False).exclude(kind__in=['topic', ''])
90 siblings = recc_node.get_siblings(include_self=False).exclude(kind__in=['topic', ''])
91 data = descendants | siblings # concatenates different querysets
92 return data
93
94 def filter_next_steps(self, queryset, value):
95 """
96 Recommend uncompleted content, content that has user completed content as a prerequisite.
97
98 :param queryset: all content nodes for this channel
99 :param value: id of currently logged in user, or none if user is anonymous
100 :return: uncompleted content nodes, or empty queryset if user is anonymous
101 """
102
103 # if user is anonymous, don't return any nodes
104 if not value:
105 return queryset.none()
106
107 tables = [
108 '"{summarylog_table}" AS "complete_log"',
109 '"{summarylog_table}" AS "incomplete_log"',
110 '"{content_table}" AS "complete_node"',
111 '"{content_table}" AS "incomplete_node"',
112 ]
113 table_names = {
114 "summarylog_table": ContentSummaryLog._meta.db_table,
115 "content_table": models.ContentNode._meta.db_table,
116 }
117 # aliases for sql table names
118 sql_tables_and_aliases = [table.format(**table_names) for table in tables]
119 # where conditions joined by ANDs
120 where_statements = ["NOT (incomplete_log.progress < 1 AND incomplete_log.content_id = incomplete_node.content_id)",
121 "complete_log.user_id = '{user_id}'".format(user_id=value),
122 "incomplete_log.user_id = '{user_id}'".format(user_id=value),
123 "complete_log.progress = 1",
124 "complete_node.rght = incomplete_node.lft - 1",
125 "complete_log.content_id = complete_node.content_id"]
126 # custom SQL query to get uncompleted content based on mptt algorithm
127 next_steps_recommendations = "SELECT incomplete_node.* FROM {tables} WHERE {where}".format(
128 tables=", ".join(sql_tables_and_aliases),
129 where=_join_with_logical_operator(where_statements, "AND")
130 )
131 return models.ContentNode.objects.raw(next_steps_recommendations)
132
133 def filter_popular(self, queryset, value):
134 """
135 Recommend content that is popular with all users.
136
137 :param queryset: all content nodes for this channel
138 :param value: id of currently logged in user, or none if user is anonymous
139 :return: 10 most popular content nodes
140 """
141 if ContentSessionLog.objects.count() < 50:
142 # return 25 random content nodes if not enough session logs
143 pks = queryset.values_list('pk', flat=True).exclude(kind__in=['topic', ''])
144 count = min(pks.count(), 25)
145 return queryset.filter(pk__in=sample(list(pks), count))
146
147 cache_key = 'popular_for_{}'.format(get_active_content_database())
148 if cache.get(cache_key):
149 return cache.get(cache_key)
150
151 # get the most accessed content nodes
152 content_counts_sorted = ContentSessionLog.objects \
153 .filter(channel_id=get_active_content_database()) \
154 .values_list('content_id', flat=True) \
155 .annotate(Count('content_id')) \
156 .order_by('-content_id__count')
157
158 most_popular = queryset.filter(content_id__in=list(content_counts_sorted[:10]))
159
160 # cache the popular results queryset for 10 minutes, for efficiency
161 cache.set(cache_key, most_popular, 60 * 10)
162 return most_popular
163
164 def filter_resume(self, queryset, value):
165 """
166 Recommend content that the user has recently engaged with, but not finished.
167
168 :param queryset: all content nodes for this channel
169 :param value: id of currently logged in user, or none if user is anonymous
170 :return: 10 most recently viewed content nodes
171 """
172
173 # if user is anonymous, return no nodes
174 if not value:
175 return queryset.none()
176
177 # get the most recently viewed, but not finished, content nodes
178 content_ids = ContentSummaryLog.objects \
179 .filter(user=value, channel_id=get_active_content_database()) \
180 .exclude(progress=1) \
181 .order_by('end_timestamp') \
182 .values_list('content_id', flat=True) \
183 .distinct()
184
185 resume = queryset.filter(content_id__in=list(content_ids[:10]))
186
187 return resume
188
189 def filter_kind(self, queryset, value):
190 """
191 Show only content of a given kind.
192
193 :param queryset: all content nodes for this channel
194 :param value: 'content' for everything except topics, or one of the content kind constants
195 :return: content nodes of the given kind
196 """
197 if value == 'content':
198 return queryset.exclude(kind=content_kinds.TOPIC).order_by("lft")
199 return queryset.filter(kind=value).order_by("lft")
200
201 def filter_ids(self, queryset, value):
202 return queryset.filter(pk__in=value.split(','))
203
204
205 class OptionalPageNumberPagination(pagination.PageNumberPagination):
206 """
207 Pagination class that allows for page number-style pagination, when requested.
208 To activate, the `page_size` argument must be set. For example, to request the first 20 records:
209 `?page_size=20&page=1`
210 """
211 page_size = None
212 page_size_query_param = "page_size"
213
214
215 class ContentNodeViewset(viewsets.ModelViewSet):
216 serializer_class = serializers.ContentNodeSerializer
217 filter_backends = (filters.DjangoFilterBackend,)
218 filter_class = ContentNodeFilter
219 pagination_class = OptionalPageNumberPagination
220
221 def get_queryset(self):
222 return models.ContentNode.objects.all().select_related(
223 'parent',
224 'license',
225 ).prefetch_related(
226 'assessmentmetadata',
227 'files',
228 )
229
230 @detail_route(methods=['get'])
231 def descendants(self, request, **kwargs):
232 node = self.get_object()
233 kind = self.request.query_params.get('descendant_kind', None)
234 descendants = node.get_descendants()
235 if kind:
236 descendants = descendants.filter(kind=kind)
237
238 serializer = self.get_serializer(descendants, many=True)
239 return Response(serializer.data)
240
241 @detail_route(methods=['get'])
242 def ancestors(self, request, **kwargs):
243 return Response(self.get_object().get_ancestors().values('pk', 'title'))
244
245 @detail_route(methods=['get'])
246 def next_content(self, request, **kwargs):
247 # retrieve the "next" content node, according to depth-first tree traversal
248 this_item = self.get_object()
249 next_item = models.ContentNode.objects.filter(tree_id=this_item.tree_id, lft__gt=this_item.rght).order_by("lft").first()
250 if not next_item:
251 next_item = this_item.get_root()
252 return Response({'kind': next_item.kind, 'id': next_item.id, 'title': next_item.title})
253
254
255 class FileViewset(viewsets.ModelViewSet):
256 serializer_class = serializers.FileSerializer
257 pagination_class = OptionalPageNumberPagination
258
259 def get_queryset(self):
260 return models.File.objects.all()
261
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/content/api.py b/kolibri/content/api.py
--- a/kolibri/content/api.py
+++ b/kolibri/content/api.py
@@ -1,16 +1,19 @@
import os
+
+from collections import OrderedDict
from functools import reduce
from random import sample
from django.core.cache import cache
from django.db.models import Q
from django.db.models.aggregates import Count
+from future.moves.urllib.parse import parse_qs, urlparse
from kolibri.content import models, serializers
from kolibri.content.content_db_router import get_active_content_database
from kolibri.logger.models import ContentSessionLog, ContentSummaryLog
from le_utils.constants import content_kinds
from rest_framework import filters, pagination, viewsets
-from rest_framework.decorators import detail_route
+from rest_framework.decorators import detail_route, list_route
from rest_framework.response import Response
from .permissions import OnlyDeviceOwnerCanDelete
@@ -212,6 +215,39 @@
page_size_query_param = "page_size"
+class AllContentCursorPagination(pagination.CursorPagination):
+ page_size = 10
+ ordering = 'lft'
+ cursor_query_param = 'cursor'
+
+ def get_paginated_response(self, data):
+ """
+ By default the get_paginated_response method of the CursorPagination class returns the url link
+ to the next and previous queries of they exist.
+ For Kolibri this is not very helpful, as we construct our URLs on the client side rather than
+ directly querying passed in URLs.
+ Instead, return the cursor value that points to the next and previous items, so that they can be put
+ in a GET parameter in future queries.
+ """
+ if self.has_next:
+ # The CursorPagination class has no internal methods to just return the cursor value, only
+ # the url of the next and previous, so we have to generate the URL, parse it, and then
+ # extract the cursor parameter from it to return in the Response.
+ next_item = parse_qs(urlparse(self.get_next_link()).query).get(self.cursor_query_param)
+ else:
+ next_item = None
+ if self.has_previous:
+ # Similarly to next, we have to create the previous link and then parse it to get the cursor value
+ prev_item = parse_qs(urlparse(self.get_previous_link()).query).get(self.cursor_query_param)
+ else:
+ prev_item = None
+
+ return Response(OrderedDict([
+ ('next', next_item),
+ ('previous', prev_item),
+ ('results', data)
+ ]))
+
class ContentNodeViewset(viewsets.ModelViewSet):
serializer_class = serializers.ContentNodeSerializer
filter_backends = (filters.DjangoFilterBackend,)
@@ -251,6 +287,17 @@
next_item = this_item.get_root()
return Response({'kind': next_item.kind, 'id': next_item.id, 'title': next_item.title})
+ @list_route(methods=['get'], pagination_class=AllContentCursorPagination)
+ def all_content(self, request, **kwargs):
+ queryset = self.get_queryset().exclude(kind=content_kinds.TOPIC)
+ page = self.paginate_queryset(queryset)
+ if page is not None:
+ serializer = self.get_serializer(page, many=True)
+ return self.get_paginated_response(serializer.data)
+
+ serializer = self.get_serializer(queryset, many=True)
+ return Response(serializer.data)
+
class FileViewset(viewsets.ModelViewSet):
serializer_class = serializers.FileSerializer
| {"golden_diff": "diff --git a/kolibri/content/api.py b/kolibri/content/api.py\n--- a/kolibri/content/api.py\n+++ b/kolibri/content/api.py\n@@ -1,16 +1,19 @@\n import os\n+\n+from collections import OrderedDict\n from functools import reduce\n from random import sample\n \n from django.core.cache import cache\n from django.db.models import Q\n from django.db.models.aggregates import Count\n+from future.moves.urllib.parse import parse_qs, urlparse\n from kolibri.content import models, serializers\n from kolibri.content.content_db_router import get_active_content_database\n from kolibri.logger.models import ContentSessionLog, ContentSummaryLog\n from le_utils.constants import content_kinds\n from rest_framework import filters, pagination, viewsets\n-from rest_framework.decorators import detail_route\n+from rest_framework.decorators import detail_route, list_route\n from rest_framework.response import Response\n \n from .permissions import OnlyDeviceOwnerCanDelete\n@@ -212,6 +215,39 @@\n page_size_query_param = \"page_size\"\n \n \n+class AllContentCursorPagination(pagination.CursorPagination):\n+ page_size = 10\n+ ordering = 'lft'\n+ cursor_query_param = 'cursor'\n+\n+ def get_paginated_response(self, data):\n+ \"\"\"\n+ By default the get_paginated_response method of the CursorPagination class returns the url link\n+ to the next and previous queries of they exist.\n+ For Kolibri this is not very helpful, as we construct our URLs on the client side rather than\n+ directly querying passed in URLs.\n+ Instead, return the cursor value that points to the next and previous items, so that they can be put\n+ in a GET parameter in future queries.\n+ \"\"\"\n+ if self.has_next:\n+ # The CursorPagination class has no internal methods to just return the cursor value, only\n+ # the url of the next and previous, so we have to generate the URL, parse it, and then\n+ # extract the cursor parameter from it to return in the Response.\n+ next_item = parse_qs(urlparse(self.get_next_link()).query).get(self.cursor_query_param)\n+ else:\n+ next_item = None\n+ if self.has_previous:\n+ # Similarly to next, we have to create the previous link and then parse it to get the cursor value\n+ prev_item = parse_qs(urlparse(self.get_previous_link()).query).get(self.cursor_query_param)\n+ else:\n+ prev_item = None\n+\n+ return Response(OrderedDict([\n+ ('next', next_item),\n+ ('previous', prev_item),\n+ ('results', data)\n+ ]))\n+\n class ContentNodeViewset(viewsets.ModelViewSet):\n serializer_class = serializers.ContentNodeSerializer\n filter_backends = (filters.DjangoFilterBackend,)\n@@ -251,6 +287,17 @@\n next_item = this_item.get_root()\n return Response({'kind': next_item.kind, 'id': next_item.id, 'title': next_item.title})\n \n+ @list_route(methods=['get'], pagination_class=AllContentCursorPagination)\n+ def all_content(self, request, **kwargs):\n+ queryset = self.get_queryset().exclude(kind=content_kinds.TOPIC)\n+ page = self.paginate_queryset(queryset)\n+ if page is not None:\n+ serializer = self.get_serializer(page, many=True)\n+ return self.get_paginated_response(serializer.data)\n+\n+ serializer = self.get_serializer(queryset, many=True)\n+ return Response(serializer.data)\n+\n \n class FileViewset(viewsets.ModelViewSet):\n serializer_class = serializers.FileSerializer\n", "issue": "Reinstate 'all' on recommendations page with more efficient pagination\n## Summary\r\n\r\nCurrently we use page based pagination to show all content on the learn page. 
This can result in very slow API requests for large channels.\r\n\r\nFor now, this has been fixed by removing the `all` section from the learn page, but it should be reinstated for the anonymous user case, alongside a cursor based pagination that will allow for more efficient queries.\n", "before_files": [{"content": "import os\nfrom functools import reduce\nfrom random import sample\n\nfrom django.core.cache import cache\nfrom django.db.models import Q\nfrom django.db.models.aggregates import Count\nfrom kolibri.content import models, serializers\nfrom kolibri.content.content_db_router import get_active_content_database\nfrom kolibri.logger.models import ContentSessionLog, ContentSummaryLog\nfrom le_utils.constants import content_kinds\nfrom rest_framework import filters, pagination, viewsets\nfrom rest_framework.decorators import detail_route\nfrom rest_framework.response import Response\n\nfrom .permissions import OnlyDeviceOwnerCanDelete\nfrom .utils.search import fuzz\nfrom .utils.paths import get_content_database_file_path\n\n\ndef _join_with_logical_operator(lst, operator):\n op = \") {operator} (\".format(operator=operator)\n return \"(({items}))\".format(items=op.join(lst))\n\n\nclass ChannelMetadataCacheViewSet(viewsets.ModelViewSet):\n permission_classes = (OnlyDeviceOwnerCanDelete,)\n serializer_class = serializers.ChannelMetadataCacheSerializer\n\n def get_queryset(self):\n return models.ChannelMetadataCache.objects.all()\n\n def destroy(self, request, pk=None):\n \"\"\"\n Destroys the ChannelMetadata object and its associated sqlite3 file on\n the filesystem.\n \"\"\"\n super(ChannelMetadataCacheViewSet, self).destroy(request)\n\n if self.delete_content_db_file(pk):\n response_msg = 'Channel {} removed from device'.format(pk)\n else:\n response_msg = 'Channel {} removed, but no content database was found'.format(pk)\n\n return Response(response_msg)\n\n def delete_content_db_file(self, channel_id):\n try:\n os.remove(get_content_database_file_path(channel_id))\n return True\n except OSError:\n return False\n\n\nclass ContentNodeFilter(filters.FilterSet):\n search = filters.django_filters.MethodFilter(action='title_description_filter')\n recommendations_for = filters.django_filters.MethodFilter()\n next_steps = filters.django_filters.MethodFilter()\n popular = filters.django_filters.MethodFilter()\n resume = filters.django_filters.MethodFilter()\n kind = filters.django_filters.MethodFilter()\n ids = filters.django_filters.MethodFilter()\n\n class Meta:\n model = models.ContentNode\n fields = ['parent', 'search', 'prerequisite_for', 'has_prerequisite', 'related', 'recommendations_for', 'ids', 'content_id']\n\n def title_description_filter(self, queryset, value):\n \"\"\"\n search for title or description that contains the keywords that are not necessary in adjacent\n \"\"\"\n exact_match = queryset.filter(Q(parent__isnull=False), Q(title__icontains=value) | Q(description__icontains=value))\n if exact_match:\n return exact_match\n # if no exact match, fuzzy search using the stemmed_metaphone field in ContentNode that covers the title and description\n fuzzed_tokens = [fuzz(word) for word in value.split()]\n if not fuzzed_tokens[0]:\n return []\n token_queries = [reduce(lambda x, y: x | y, [Q(stemmed_metaphone__contains=token) for token in tokens]) for tokens in fuzzed_tokens]\n return queryset.filter(\n Q(parent__isnull=False),\n reduce(lambda x, y: x & y, token_queries))\n\n def filter_recommendations_for(self, queryset, value):\n \"\"\"\n Recommend items that are similar to 
this piece of content.\n \"\"\"\n recc_node = queryset.get(pk=value)\n descendants = recc_node.get_descendants(include_self=False).exclude(kind__in=['topic', ''])\n siblings = recc_node.get_siblings(include_self=False).exclude(kind__in=['topic', ''])\n data = descendants | siblings # concatenates different querysets\n return data\n\n def filter_next_steps(self, queryset, value):\n \"\"\"\n Recommend uncompleted content, content that has user completed content as a prerequisite.\n\n :param queryset: all content nodes for this channel\n :param value: id of currently logged in user, or none if user is anonymous\n :return: uncompleted content nodes, or empty queryset if user is anonymous\n \"\"\"\n\n # if user is anonymous, don't return any nodes\n if not value:\n return queryset.none()\n\n tables = [\n '\"{summarylog_table}\" AS \"complete_log\"',\n '\"{summarylog_table}\" AS \"incomplete_log\"',\n '\"{content_table}\" AS \"complete_node\"',\n '\"{content_table}\" AS \"incomplete_node\"',\n ]\n table_names = {\n \"summarylog_table\": ContentSummaryLog._meta.db_table,\n \"content_table\": models.ContentNode._meta.db_table,\n }\n # aliases for sql table names\n sql_tables_and_aliases = [table.format(**table_names) for table in tables]\n # where conditions joined by ANDs\n where_statements = [\"NOT (incomplete_log.progress < 1 AND incomplete_log.content_id = incomplete_node.content_id)\",\n \"complete_log.user_id = '{user_id}'\".format(user_id=value),\n \"incomplete_log.user_id = '{user_id}'\".format(user_id=value),\n \"complete_log.progress = 1\",\n \"complete_node.rght = incomplete_node.lft - 1\",\n \"complete_log.content_id = complete_node.content_id\"]\n # custom SQL query to get uncompleted content based on mptt algorithm\n next_steps_recommendations = \"SELECT incomplete_node.* FROM {tables} WHERE {where}\".format(\n tables=\", \".join(sql_tables_and_aliases),\n where=_join_with_logical_operator(where_statements, \"AND\")\n )\n return models.ContentNode.objects.raw(next_steps_recommendations)\n\n def filter_popular(self, queryset, value):\n \"\"\"\n Recommend content that is popular with all users.\n\n :param queryset: all content nodes for this channel\n :param value: id of currently logged in user, or none if user is anonymous\n :return: 10 most popular content nodes\n \"\"\"\n if ContentSessionLog.objects.count() < 50:\n # return 25 random content nodes if not enough session logs\n pks = queryset.values_list('pk', flat=True).exclude(kind__in=['topic', ''])\n count = min(pks.count(), 25)\n return queryset.filter(pk__in=sample(list(pks), count))\n\n cache_key = 'popular_for_{}'.format(get_active_content_database())\n if cache.get(cache_key):\n return cache.get(cache_key)\n\n # get the most accessed content nodes\n content_counts_sorted = ContentSessionLog.objects \\\n .filter(channel_id=get_active_content_database()) \\\n .values_list('content_id', flat=True) \\\n .annotate(Count('content_id')) \\\n .order_by('-content_id__count')\n\n most_popular = queryset.filter(content_id__in=list(content_counts_sorted[:10]))\n\n # cache the popular results queryset for 10 minutes, for efficiency\n cache.set(cache_key, most_popular, 60 * 10)\n return most_popular\n\n def filter_resume(self, queryset, value):\n \"\"\"\n Recommend content that the user has recently engaged with, but not finished.\n\n :param queryset: all content nodes for this channel\n :param value: id of currently logged in user, or none if user is anonymous\n :return: 10 most recently viewed content nodes\n \"\"\"\n\n # if user is 
anonymous, return no nodes\n if not value:\n return queryset.none()\n\n # get the most recently viewed, but not finished, content nodes\n content_ids = ContentSummaryLog.objects \\\n .filter(user=value, channel_id=get_active_content_database()) \\\n .exclude(progress=1) \\\n .order_by('end_timestamp') \\\n .values_list('content_id', flat=True) \\\n .distinct()\n\n resume = queryset.filter(content_id__in=list(content_ids[:10]))\n\n return resume\n\n def filter_kind(self, queryset, value):\n \"\"\"\n Show only content of a given kind.\n\n :param queryset: all content nodes for this channel\n :param value: 'content' for everything except topics, or one of the content kind constants\n :return: content nodes of the given kind\n \"\"\"\n if value == 'content':\n return queryset.exclude(kind=content_kinds.TOPIC).order_by(\"lft\")\n return queryset.filter(kind=value).order_by(\"lft\")\n\n def filter_ids(self, queryset, value):\n return queryset.filter(pk__in=value.split(','))\n\n\nclass OptionalPageNumberPagination(pagination.PageNumberPagination):\n \"\"\"\n Pagination class that allows for page number-style pagination, when requested.\n To activate, the `page_size` argument must be set. For example, to request the first 20 records:\n `?page_size=20&page=1`\n \"\"\"\n page_size = None\n page_size_query_param = \"page_size\"\n\n\nclass ContentNodeViewset(viewsets.ModelViewSet):\n serializer_class = serializers.ContentNodeSerializer\n filter_backends = (filters.DjangoFilterBackend,)\n filter_class = ContentNodeFilter\n pagination_class = OptionalPageNumberPagination\n\n def get_queryset(self):\n return models.ContentNode.objects.all().select_related(\n 'parent',\n 'license',\n ).prefetch_related(\n 'assessmentmetadata',\n 'files',\n )\n\n @detail_route(methods=['get'])\n def descendants(self, request, **kwargs):\n node = self.get_object()\n kind = self.request.query_params.get('descendant_kind', None)\n descendants = node.get_descendants()\n if kind:\n descendants = descendants.filter(kind=kind)\n\n serializer = self.get_serializer(descendants, many=True)\n return Response(serializer.data)\n\n @detail_route(methods=['get'])\n def ancestors(self, request, **kwargs):\n return Response(self.get_object().get_ancestors().values('pk', 'title'))\n\n @detail_route(methods=['get'])\n def next_content(self, request, **kwargs):\n # retrieve the \"next\" content node, according to depth-first tree traversal\n this_item = self.get_object()\n next_item = models.ContentNode.objects.filter(tree_id=this_item.tree_id, lft__gt=this_item.rght).order_by(\"lft\").first()\n if not next_item:\n next_item = this_item.get_root()\n return Response({'kind': next_item.kind, 'id': next_item.id, 'title': next_item.title})\n\n\nclass FileViewset(viewsets.ModelViewSet):\n serializer_class = serializers.FileSerializer\n pagination_class = OptionalPageNumberPagination\n\n def get_queryset(self):\n return models.File.objects.all()\n", "path": "kolibri/content/api.py"}], "after_files": [{"content": "import os\n\nfrom collections import OrderedDict\nfrom functools import reduce\nfrom random import sample\n\nfrom django.core.cache import cache\nfrom django.db.models import Q\nfrom django.db.models.aggregates import Count\nfrom future.moves.urllib.parse import parse_qs, urlparse\nfrom kolibri.content import models, serializers\nfrom kolibri.content.content_db_router import get_active_content_database\nfrom kolibri.logger.models import ContentSessionLog, ContentSummaryLog\nfrom le_utils.constants import content_kinds\nfrom rest_framework 
import filters, pagination, viewsets\nfrom rest_framework.decorators import detail_route, list_route\nfrom rest_framework.response import Response\n\nfrom .permissions import OnlyDeviceOwnerCanDelete\nfrom .utils.search import fuzz\nfrom .utils.paths import get_content_database_file_path\n\n\ndef _join_with_logical_operator(lst, operator):\n op = \") {operator} (\".format(operator=operator)\n return \"(({items}))\".format(items=op.join(lst))\n\n\nclass ChannelMetadataCacheViewSet(viewsets.ModelViewSet):\n permission_classes = (OnlyDeviceOwnerCanDelete,)\n serializer_class = serializers.ChannelMetadataCacheSerializer\n\n def get_queryset(self):\n return models.ChannelMetadataCache.objects.all()\n\n def destroy(self, request, pk=None):\n \"\"\"\n Destroys the ChannelMetadata object and its associated sqlite3 file on\n the filesystem.\n \"\"\"\n super(ChannelMetadataCacheViewSet, self).destroy(request)\n\n if self.delete_content_db_file(pk):\n response_msg = 'Channel {} removed from device'.format(pk)\n else:\n response_msg = 'Channel {} removed, but no content database was found'.format(pk)\n\n return Response(response_msg)\n\n def delete_content_db_file(self, channel_id):\n try:\n os.remove(get_content_database_file_path(channel_id))\n return True\n except OSError:\n return False\n\n\nclass ContentNodeFilter(filters.FilterSet):\n search = filters.django_filters.MethodFilter(action='title_description_filter')\n recommendations_for = filters.django_filters.MethodFilter()\n next_steps = filters.django_filters.MethodFilter()\n popular = filters.django_filters.MethodFilter()\n resume = filters.django_filters.MethodFilter()\n kind = filters.django_filters.MethodFilter()\n ids = filters.django_filters.MethodFilter()\n\n class Meta:\n model = models.ContentNode\n fields = ['parent', 'search', 'prerequisite_for', 'has_prerequisite', 'related', 'recommendations_for', 'ids', 'content_id']\n\n def title_description_filter(self, queryset, value):\n \"\"\"\n search for title or description that contains the keywords that are not necessary in adjacent\n \"\"\"\n exact_match = queryset.filter(Q(parent__isnull=False), Q(title__icontains=value) | Q(description__icontains=value))\n if exact_match:\n return exact_match\n # if no exact match, fuzzy search using the stemmed_metaphone field in ContentNode that covers the title and description\n fuzzed_tokens = [fuzz(word) for word in value.split()]\n if not fuzzed_tokens[0]:\n return []\n token_queries = [reduce(lambda x, y: x | y, [Q(stemmed_metaphone__contains=token) for token in tokens]) for tokens in fuzzed_tokens]\n return queryset.filter(\n Q(parent__isnull=False),\n reduce(lambda x, y: x & y, token_queries))\n\n def filter_recommendations_for(self, queryset, value):\n \"\"\"\n Recommend items that are similar to this piece of content.\n \"\"\"\n recc_node = queryset.get(pk=value)\n descendants = recc_node.get_descendants(include_self=False).exclude(kind__in=['topic', ''])\n siblings = recc_node.get_siblings(include_self=False).exclude(kind__in=['topic', ''])\n data = descendants | siblings # concatenates different querysets\n return data\n\n def filter_next_steps(self, queryset, value):\n \"\"\"\n Recommend uncompleted content, content that has user completed content as a prerequisite.\n\n :param queryset: all content nodes for this channel\n :param value: id of currently logged in user, or none if user is anonymous\n :return: uncompleted content nodes, or empty queryset if user is anonymous\n \"\"\"\n\n # if user is anonymous, don't return any nodes\n if 
not value:\n return queryset.none()\n\n tables = [\n '\"{summarylog_table}\" AS \"complete_log\"',\n '\"{summarylog_table}\" AS \"incomplete_log\"',\n '\"{content_table}\" AS \"complete_node\"',\n '\"{content_table}\" AS \"incomplete_node\"',\n ]\n table_names = {\n \"summarylog_table\": ContentSummaryLog._meta.db_table,\n \"content_table\": models.ContentNode._meta.db_table,\n }\n # aliases for sql table names\n sql_tables_and_aliases = [table.format(**table_names) for table in tables]\n # where conditions joined by ANDs\n where_statements = [\"NOT (incomplete_log.progress < 1 AND incomplete_log.content_id = incomplete_node.content_id)\",\n \"complete_log.user_id = '{user_id}'\".format(user_id=value),\n \"incomplete_log.user_id = '{user_id}'\".format(user_id=value),\n \"complete_log.progress = 1\",\n \"complete_node.rght = incomplete_node.lft - 1\",\n \"complete_log.content_id = complete_node.content_id\"]\n # custom SQL query to get uncompleted content based on mptt algorithm\n next_steps_recommendations = \"SELECT incomplete_node.* FROM {tables} WHERE {where}\".format(\n tables=\", \".join(sql_tables_and_aliases),\n where=_join_with_logical_operator(where_statements, \"AND\")\n )\n return models.ContentNode.objects.raw(next_steps_recommendations)\n\n def filter_popular(self, queryset, value):\n \"\"\"\n Recommend content that is popular with all users.\n\n :param queryset: all content nodes for this channel\n :param value: id of currently logged in user, or none if user is anonymous\n :return: 10 most popular content nodes\n \"\"\"\n if ContentSessionLog.objects.count() < 50:\n # return 25 random content nodes if not enough session logs\n pks = queryset.values_list('pk', flat=True).exclude(kind__in=['topic', ''])\n count = min(pks.count(), 25)\n return queryset.filter(pk__in=sample(list(pks), count))\n\n cache_key = 'popular_for_{}'.format(get_active_content_database())\n if cache.get(cache_key):\n return cache.get(cache_key)\n\n # get the most accessed content nodes\n content_counts_sorted = ContentSessionLog.objects \\\n .filter(channel_id=get_active_content_database()) \\\n .values_list('content_id', flat=True) \\\n .annotate(Count('content_id')) \\\n .order_by('-content_id__count')\n\n most_popular = queryset.filter(content_id__in=list(content_counts_sorted[:10]))\n\n # cache the popular results queryset for 10 minutes, for efficiency\n cache.set(cache_key, most_popular, 60 * 10)\n return most_popular\n\n def filter_resume(self, queryset, value):\n \"\"\"\n Recommend content that the user has recently engaged with, but not finished.\n\n :param queryset: all content nodes for this channel\n :param value: id of currently logged in user, or none if user is anonymous\n :return: 10 most recently viewed content nodes\n \"\"\"\n\n # if user is anonymous, return no nodes\n if not value:\n return queryset.none()\n\n # get the most recently viewed, but not finished, content nodes\n content_ids = ContentSummaryLog.objects \\\n .filter(user=value, channel_id=get_active_content_database()) \\\n .exclude(progress=1) \\\n .order_by('end_timestamp') \\\n .values_list('content_id', flat=True) \\\n .distinct()\n\n resume = queryset.filter(content_id__in=list(content_ids[:10]))\n\n return resume\n\n def filter_kind(self, queryset, value):\n \"\"\"\n Show only content of a given kind.\n\n :param queryset: all content nodes for this channel\n :param value: 'content' for everything except topics, or one of the content kind constants\n :return: content nodes of the given kind\n \"\"\"\n if value == 
'content':\n return queryset.exclude(kind=content_kinds.TOPIC).order_by(\"lft\")\n return queryset.filter(kind=value).order_by(\"lft\")\n\n def filter_ids(self, queryset, value):\n return queryset.filter(pk__in=value.split(','))\n\n\nclass OptionalPageNumberPagination(pagination.PageNumberPagination):\n \"\"\"\n Pagination class that allows for page number-style pagination, when requested.\n To activate, the `page_size` argument must be set. For example, to request the first 20 records:\n `?page_size=20&page=1`\n \"\"\"\n page_size = None\n page_size_query_param = \"page_size\"\n\n\nclass AllContentCursorPagination(pagination.CursorPagination):\n page_size = 10\n ordering = 'lft'\n cursor_query_param = 'cursor'\n\n def get_paginated_response(self, data):\n \"\"\"\n By default the get_paginated_response method of the CursorPagination class returns the url link\n to the next and previous queries of they exist.\n For Kolibri this is not very helpful, as we construct our URLs on the client side rather than\n directly querying passed in URLs.\n Instead, return the cursor value that points to the next and previous items, so that they can be put\n in a GET parameter in future queries.\n \"\"\"\n if self.has_next:\n # The CursorPagination class has no internal methods to just return the cursor value, only\n # the url of the next and previous, so we have to generate the URL, parse it, and then\n # extract the cursor parameter from it to return in the Response.\n next_item = parse_qs(urlparse(self.get_next_link()).query).get(self.cursor_query_param)\n else:\n next_item = None\n if self.has_previous:\n # Similarly to next, we have to create the previous link and then parse it to get the cursor value\n prev_item = parse_qs(urlparse(self.get_previous_link()).query).get(self.cursor_query_param)\n else:\n prev_item = None\n\n return Response(OrderedDict([\n ('next', next_item),\n ('previous', prev_item),\n ('results', data)\n ]))\n\nclass ContentNodeViewset(viewsets.ModelViewSet):\n serializer_class = serializers.ContentNodeSerializer\n filter_backends = (filters.DjangoFilterBackend,)\n filter_class = ContentNodeFilter\n pagination_class = OptionalPageNumberPagination\n\n def get_queryset(self):\n return models.ContentNode.objects.all().select_related(\n 'parent',\n 'license',\n ).prefetch_related(\n 'assessmentmetadata',\n 'files',\n )\n\n @detail_route(methods=['get'])\n def descendants(self, request, **kwargs):\n node = self.get_object()\n kind = self.request.query_params.get('descendant_kind', None)\n descendants = node.get_descendants()\n if kind:\n descendants = descendants.filter(kind=kind)\n\n serializer = self.get_serializer(descendants, many=True)\n return Response(serializer.data)\n\n @detail_route(methods=['get'])\n def ancestors(self, request, **kwargs):\n return Response(self.get_object().get_ancestors().values('pk', 'title'))\n\n @detail_route(methods=['get'])\n def next_content(self, request, **kwargs):\n # retrieve the \"next\" content node, according to depth-first tree traversal\n this_item = self.get_object()\n next_item = models.ContentNode.objects.filter(tree_id=this_item.tree_id, lft__gt=this_item.rght).order_by(\"lft\").first()\n if not next_item:\n next_item = this_item.get_root()\n return Response({'kind': next_item.kind, 'id': next_item.id, 'title': next_item.title})\n\n @list_route(methods=['get'], pagination_class=AllContentCursorPagination)\n def all_content(self, request, **kwargs):\n queryset = self.get_queryset().exclude(kind=content_kinds.TOPIC)\n page = 
self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n\n\nclass FileViewset(viewsets.ModelViewSet):\n serializer_class = serializers.FileSerializer\n pagination_class = OptionalPageNumberPagination\n\n def get_queryset(self):\n return models.File.objects.all()\n", "path": "kolibri/content/api.py"}]} | 3,293 | 786 |
gh_patches_debug_6379 | rasdani/github-patches | git_diff | cowrie__cowrie-993 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pinging a domain crashes
From within cowrie (docker current 7 Dec 2018), when the you ping a domain (not an IP) a stack trace errors shown below. Cowrie freezes., and since many bots use ping as info gathering/sandbox detection, cowry may miss everything subsequent to this command.
Environment:
Ubuntu 16.04
Python 3.5.2
docker pull cowrie/cowrie
sudo iptables -t nat -A PREROUTING -p tcp --dport 22 -j REDIRECT --to-port 2222
docker run -it -p 2222:2222 -p 2223:2223 cowrie/cowrie
```
2018-12-07T04:56:59+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,2,172.17.0.1] gett
ing shell
2018-12-07T04:57:07+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,2,172.17.0.1] CMD:
**ping google.com**
2018-12-07T04:57:07+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,2,172.17.0.1] Comm
and found: ping google.com
2018-12-07T04:57:07+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,2,172.17.0.1] Unha
ndled Error
Traceback (most recent call last):
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py", line 122, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py", line 85, in callWithContext
return func(*args,**kw)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/service.py", line 45, in packetReceived
return f(packet)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/connection.py", line 249, in ssh_CHANNEL_D
ATA
log.callWithLogger(channel, channel.dataReceived, data)
--- <exception caught here> ---
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/log.py", line 103, in callWithLogger
return callWithContext({"system": lp}, func, *args, **kw)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/log.py", line 86, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py", line 122, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py", line 85, in callWithContext
return func(*args,**kw)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/session.py", line 112, in dataReceived
self.client.transport.write(data)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/session.py", line 163, in write
self.proto.dataReceived(data)
File "/cowrie/cowrie-git/src/cowrie/insults/insults.py", line 104, in dataReceived
insults.ServerProtocol.dataReceived(self, data)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/insults/insults.py", line 537, in dataReceived
self.terminalProtocol.keystrokeReceived(ch, None)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/recvline.py", line 225, in keystrokeReceived
m()
File "/cowrie/cowrie-git/src/cowrie/shell/protocol.py", line 325, in handle_RETURN
return recvline.RecvLine.handle_RETURN(self)
File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/recvline.py", line 292, in handle_RETURN
self.lineReceived(line)
File "/cowrie/cowrie-git/src/cowrie/shell/protocol.py", line 185, in lineReceived
self.cmdstack[-1].lineReceived(line)
File "/cowrie/cowrie-git/src/cowrie/shell/honeypot.py", line 106, in lineReceived
self.runCommand()
File "/cowrie/cowrie-git/src/cowrie/shell/honeypot.py", line 215, in runCommand
self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
File "/cowrie/cowrie-git/src/cowrie/shell/protocol.py", line 306, in call_command
HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
File "/cowrie/cowrie-git/src/cowrie/shell/protocol.py", line 194, in call_command
obj.start()
File "/cowrie/cowrie-git/src/cowrie/commands/ping.py", line 70, in start
s = hashlib.md5(self.host).hexdigest()
builtins.TypeError: Unicode-objects must be encoded before hashing
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cowrie/commands/ping.py`
Content:
```
1 # Copyright (c) 2009 Upi Tamminen <[email protected]>
2 # See the COPYRIGHT file for more information
3
4 from __future__ import absolute_import, division
5
6 import getopt
7 import hashlib
8 import random
9 import re
10 import socket
11
12 from twisted.internet import reactor
13
14 from cowrie.shell.command import HoneyPotCommand
15
16 commands = {}
17
18
19 class command_ping(HoneyPotCommand):
20
21 def valid_ip(self, address):
22 try:
23 socket.inet_aton(address)
24 return True
25 except Exception:
26 return False
27
28 def start(self):
29 self.host = None
30 self.max = 0
31 self.running = False
32
33 try:
34 optlist, args = getopt.gnu_getopt(self.args, "c:")
35 except getopt.GetoptError as err:
36 self.write('ping: %s\n' % (err,))
37 self.exit()
38 return
39
40 for opt in optlist:
41 if opt[0] == '-c':
42 try:
43 self.max = int(opt[1])
44 except Exception:
45 self.max = 0
46 if self.max <= 0:
47 self.write('ping: bad number of packets to transmit.\n')
48 self.exit()
49 return
50
51 if len(args) == 0:
52 for l in (
53 'Usage: ping [-LRUbdfnqrvVaA] [-c count] [-i interval] [-w deadline]',
54 ' [-p pattern] [-s packetsize] [-t ttl] [-I interface or address]',
55 ' [-M mtu discovery hint] [-S sndbuf]',
56 ' [ -T timestamp option ] [ -Q tos ] [hop1 ...] destination',
57 ):
58 self.write('{0}\n'.format(l))
59 self.exit()
60 return
61 self.host = args[0].strip()
62
63 if re.match('^[0-9.]+$', self.host):
64 if self.valid_ip(self.host):
65 self.ip = self.host
66 else:
67 self.write('ping: unknown host %s\n' % (self.host,))
68 self.exit()
69 else:
70 s = hashlib.md5(self.host).hexdigest()
71 self.ip = '.'.join([str(int(x, 16)) for x in (s[0:2], s[2:4], s[4:6], s[6:8])])
72
73 self.running = True
74 self.write('PING %s (%s) 56(84) bytes of data.\n' % (self.host, self.ip))
75 self.scheduled = reactor.callLater(0.2, self.showreply)
76 self.count = 0
77
78 def showreply(self):
79 ms = 40 + random.random() * 10
80 self.write(
81 '64 bytes from {} ({}): icmp_seq={} ttl=50 time={:.1f} ms\n'.format(self.host, self.ip, self.count + 1, ms))
82 self.count += 1
83 if self.count == self.max:
84 self.running = False
85 self.write('\n')
86 self.printstatistics()
87 self.exit()
88 else:
89 self.scheduled = reactor.callLater(1, self.showreply)
90
91 def printstatistics(self):
92 self.write('--- %s ping statistics ---\n' % (self.host,))
93 self.write('%d packets transmitted, %d received, 0%% packet loss, time 907ms\n' % (self.count, self.count))
94 self.write('rtt min/avg/max/mdev = 48.264/50.352/52.441/2.100 ms\n')
95
96 def handle_CTRL_C(self):
97 if self.running is False:
98 return HoneyPotCommand.handle_CTRL_C(self)
99 else:
100 self.write('^C\n')
101 self.scheduled.cancel()
102 self.printstatistics()
103 self.exit()
104
105
106 commands['/bin/ping'] = command_ping
107 commands['ping'] = command_ping
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cowrie/commands/ping.py b/src/cowrie/commands/ping.py
--- a/src/cowrie/commands/ping.py
+++ b/src/cowrie/commands/ping.py
@@ -67,7 +67,7 @@
self.write('ping: unknown host %s\n' % (self.host,))
self.exit()
else:
- s = hashlib.md5(self.host).hexdigest()
+ s = hashlib.md5((self.host).encode("utf-8")).hexdigest()
self.ip = '.'.join([str(int(x, 16)) for x in (s[0:2], s[2:4], s[4:6], s[6:8])])
self.running = True
| {"golden_diff": "diff --git a/src/cowrie/commands/ping.py b/src/cowrie/commands/ping.py\n--- a/src/cowrie/commands/ping.py\n+++ b/src/cowrie/commands/ping.py\n@@ -67,7 +67,7 @@\n self.write('ping: unknown host %s\\n' % (self.host,))\n self.exit()\n else:\n- s = hashlib.md5(self.host).hexdigest()\n+ s = hashlib.md5((self.host).encode(\"utf-8\")).hexdigest()\n self.ip = '.'.join([str(int(x, 16)) for x in (s[0:2], s[2:4], s[4:6], s[6:8])])\n \n self.running = True\n", "issue": "pinging a domain crashes\nFrom within cowrie (docker current 7 Dec 2018), when the you ping a domain (not an IP) a stack trace errors shown below. Cowrie freezes., and since many bots use ping as info gathering/sandbox detection, cowry may miss everything subsequent to this command.\r\n\r\nEnvironment:\r\nUbuntu 16.04\r\nPython 3.5.2\r\ndocker pull cowrie/cowrie\r\nsudo iptables -t nat -A PREROUTING -p tcp --dport 22 -j REDIRECT --to-port 2222\r\ndocker run -it -p 2222:2222 -p 2223:2223 cowrie/cowrie\r\n\r\n\r\n```\r\n2018-12-07T04:56:59+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,2,172.17.0.1] gett\r\ning shell\r\n2018-12-07T04:57:07+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,2,172.17.0.1] CMD:\r\n **ping google.com**\r\n2018-12-07T04:57:07+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,2,172.17.0.1] Comm\r\nand found: ping google.com\r\n2018-12-07T04:57:07+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,2,172.17.0.1] Unha\r\nndled Error\r\n Traceback (most recent call last):\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py\", line 122, in callWithContext\r\n return self.currentContext().callWithContext(ctx, func, *args, **kw)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py\", line 85, in callWithContext\r\n return func(*args,**kw)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/service.py\", line 45, in packetReceived\r\n return f(packet)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/connection.py\", line 249, in ssh_CHANNEL_D\r\nATA\r\n log.callWithLogger(channel, channel.dataReceived, data)\r\n --- <exception caught here> ---\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/log.py\", line 103, in callWithLogger\r\n return callWithContext({\"system\": lp}, func, *args, **kw)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/log.py\", line 86, in callWithContext\r\n return context.call({ILogContext: newCtx}, func, *args, **kw)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py\", line 122, in callWithContext\r\n return self.currentContext().callWithContext(ctx, func, *args, **kw)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py\", line 85, in callWithContext\r\n return func(*args,**kw)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/session.py\", line 112, in dataReceived\r\n self.client.transport.write(data)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/session.py\", line 163, in write\r\n self.proto.dataReceived(data)\r\n File \"/cowrie/cowrie-git/src/cowrie/insults/insults.py\", line 104, in dataReceived\r\n insults.ServerProtocol.dataReceived(self, data)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/insults/insults.py\", line 537, in 
dataReceived\r\n self.terminalProtocol.keystrokeReceived(ch, None)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/recvline.py\", line 225, in keystrokeReceived\r\n m()\r\n File \"/cowrie/cowrie-git/src/cowrie/shell/protocol.py\", line 325, in handle_RETURN\r\n return recvline.RecvLine.handle_RETURN(self)\r\n File \"/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/recvline.py\", line 292, in handle_RETURN\r\n self.lineReceived(line)\r\n File \"/cowrie/cowrie-git/src/cowrie/shell/protocol.py\", line 185, in lineReceived\r\n self.cmdstack[-1].lineReceived(line)\r\n File \"/cowrie/cowrie-git/src/cowrie/shell/honeypot.py\", line 106, in lineReceived\r\n self.runCommand()\r\n File \"/cowrie/cowrie-git/src/cowrie/shell/honeypot.py\", line 215, in runCommand\r\n self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])\r\n File \"/cowrie/cowrie-git/src/cowrie/shell/protocol.py\", line 306, in call_command\r\n HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)\r\n File \"/cowrie/cowrie-git/src/cowrie/shell/protocol.py\", line 194, in call_command\r\n obj.start()\r\n File \"/cowrie/cowrie-git/src/cowrie/commands/ping.py\", line 70, in start\r\n s = hashlib.md5(self.host).hexdigest()\r\n builtins.TypeError: Unicode-objects must be encoded before hashing\r\n```\r\n\n", "before_files": [{"content": "# Copyright (c) 2009 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\nfrom __future__ import absolute_import, division\n\nimport getopt\nimport hashlib\nimport random\nimport re\nimport socket\n\nfrom twisted.internet import reactor\n\nfrom cowrie.shell.command import HoneyPotCommand\n\ncommands = {}\n\n\nclass command_ping(HoneyPotCommand):\n\n def valid_ip(self, address):\n try:\n socket.inet_aton(address)\n return True\n except Exception:\n return False\n\n def start(self):\n self.host = None\n self.max = 0\n self.running = False\n\n try:\n optlist, args = getopt.gnu_getopt(self.args, \"c:\")\n except getopt.GetoptError as err:\n self.write('ping: %s\\n' % (err,))\n self.exit()\n return\n\n for opt in optlist:\n if opt[0] == '-c':\n try:\n self.max = int(opt[1])\n except Exception:\n self.max = 0\n if self.max <= 0:\n self.write('ping: bad number of packets to transmit.\\n')\n self.exit()\n return\n\n if len(args) == 0:\n for l in (\n 'Usage: ping [-LRUbdfnqrvVaA] [-c count] [-i interval] [-w deadline]',\n ' [-p pattern] [-s packetsize] [-t ttl] [-I interface or address]',\n ' [-M mtu discovery hint] [-S sndbuf]',\n ' [ -T timestamp option ] [ -Q tos ] [hop1 ...] 
destination',\n ):\n self.write('{0}\\n'.format(l))\n self.exit()\n return\n self.host = args[0].strip()\n\n if re.match('^[0-9.]+$', self.host):\n if self.valid_ip(self.host):\n self.ip = self.host\n else:\n self.write('ping: unknown host %s\\n' % (self.host,))\n self.exit()\n else:\n s = hashlib.md5(self.host).hexdigest()\n self.ip = '.'.join([str(int(x, 16)) for x in (s[0:2], s[2:4], s[4:6], s[6:8])])\n\n self.running = True\n self.write('PING %s (%s) 56(84) bytes of data.\\n' % (self.host, self.ip))\n self.scheduled = reactor.callLater(0.2, self.showreply)\n self.count = 0\n\n def showreply(self):\n ms = 40 + random.random() * 10\n self.write(\n '64 bytes from {} ({}): icmp_seq={} ttl=50 time={:.1f} ms\\n'.format(self.host, self.ip, self.count + 1, ms))\n self.count += 1\n if self.count == self.max:\n self.running = False\n self.write('\\n')\n self.printstatistics()\n self.exit()\n else:\n self.scheduled = reactor.callLater(1, self.showreply)\n\n def printstatistics(self):\n self.write('--- %s ping statistics ---\\n' % (self.host,))\n self.write('%d packets transmitted, %d received, 0%% packet loss, time 907ms\\n' % (self.count, self.count))\n self.write('rtt min/avg/max/mdev = 48.264/50.352/52.441/2.100 ms\\n')\n\n def handle_CTRL_C(self):\n if self.running is False:\n return HoneyPotCommand.handle_CTRL_C(self)\n else:\n self.write('^C\\n')\n self.scheduled.cancel()\n self.printstatistics()\n self.exit()\n\n\ncommands['/bin/ping'] = command_ping\ncommands['ping'] = command_ping\n", "path": "src/cowrie/commands/ping.py"}], "after_files": [{"content": "# Copyright (c) 2009 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\nfrom __future__ import absolute_import, division\n\nimport getopt\nimport hashlib\nimport random\nimport re\nimport socket\n\nfrom twisted.internet import reactor\n\nfrom cowrie.shell.command import HoneyPotCommand\n\ncommands = {}\n\n\nclass command_ping(HoneyPotCommand):\n\n def valid_ip(self, address):\n try:\n socket.inet_aton(address)\n return True\n except Exception:\n return False\n\n def start(self):\n self.host = None\n self.max = 0\n self.running = False\n\n try:\n optlist, args = getopt.gnu_getopt(self.args, \"c:\")\n except getopt.GetoptError as err:\n self.write('ping: %s\\n' % (err,))\n self.exit()\n return\n\n for opt in optlist:\n if opt[0] == '-c':\n try:\n self.max = int(opt[1])\n except Exception:\n self.max = 0\n if self.max <= 0:\n self.write('ping: bad number of packets to transmit.\\n')\n self.exit()\n return\n\n if len(args) == 0:\n for l in (\n 'Usage: ping [-LRUbdfnqrvVaA] [-c count] [-i interval] [-w deadline]',\n ' [-p pattern] [-s packetsize] [-t ttl] [-I interface or address]',\n ' [-M mtu discovery hint] [-S sndbuf]',\n ' [ -T timestamp option ] [ -Q tos ] [hop1 ...] 
destination',\n ):\n self.write('{0}\\n'.format(l))\n self.exit()\n return\n self.host = args[0].strip()\n\n if re.match('^[0-9.]+$', self.host):\n if self.valid_ip(self.host):\n self.ip = self.host\n else:\n self.write('ping: unknown host %s\\n' % (self.host,))\n self.exit()\n else:\n s = hashlib.md5((self.host).encode(\"utf-8\")).hexdigest()\n self.ip = '.'.join([str(int(x, 16)) for x in (s[0:2], s[2:4], s[4:6], s[6:8])])\n\n self.running = True\n self.write('PING %s (%s) 56(84) bytes of data.\\n' % (self.host, self.ip))\n self.scheduled = reactor.callLater(0.2, self.showreply)\n self.count = 0\n\n def showreply(self):\n ms = 40 + random.random() * 10\n self.write(\n '64 bytes from {} ({}): icmp_seq={} ttl=50 time={:.1f} ms\\n'.format(self.host, self.ip, self.count + 1, ms))\n self.count += 1\n if self.count == self.max:\n self.running = False\n self.write('\\n')\n self.printstatistics()\n self.exit()\n else:\n self.scheduled = reactor.callLater(1, self.showreply)\n\n def printstatistics(self):\n self.write('--- %s ping statistics ---\\n' % (self.host,))\n self.write('%d packets transmitted, %d received, 0%% packet loss, time 907ms\\n' % (self.count, self.count))\n self.write('rtt min/avg/max/mdev = 48.264/50.352/52.441/2.100 ms\\n')\n\n def handle_CTRL_C(self):\n if self.running is False:\n return HoneyPotCommand.handle_CTRL_C(self)\n else:\n self.write('^C\\n')\n self.scheduled.cancel()\n self.printstatistics()\n self.exit()\n\n\ncommands['/bin/ping'] = command_ping\ncommands['ping'] = command_ping\n", "path": "src/cowrie/commands/ping.py"}]} | 2,734 | 167 |
gh_patches_debug_7969 | rasdani/github-patches | git_diff | urllib3__urllib3-3034 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add OpenGraph information to our documentation
Would be good to fill in some open graph fields for our documentation so when we link to things they show up with rich output / images rather than as a standard link. Really works nicely for Social Media, Slack, etc.
More information: https://ogp.me/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 from __future__ import annotations
2
3 import os
4 import sys
5 from datetime import date
6
7 # If extensions (or modules to document with autodoc) are in another directory,
8 # add these directories to sys.path here. If the directory is relative to the
9 # documentation root, use os.path.abspath to make it absolute, like shown here.
10
11 root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
12 sys.path.insert(0, root_path)
13
14 # https://docs.readthedocs.io/en/stable/builds.html#build-environment
15 if "READTHEDOCS" in os.environ:
16 import glob
17
18 if glob.glob("../changelog/*.*.rst"):
19 print("-- Found changes; running towncrier --", flush=True)
20 import subprocess
21
22 subprocess.run(
23 ["towncrier", "--yes", "--date", "not released yet"], cwd="..", check=True
24 )
25
26 import urllib3
27
28 # -- General configuration -----------------------------------------------------
29
30
31 # Add any Sphinx extension module names here, as strings. They can be extensions
32 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
33 extensions = [
34 "sphinx.ext.autodoc",
35 "sphinx_copybutton",
36 "sphinx.ext.doctest",
37 "sphinx.ext.intersphinx",
38 ]
39
40 # Test code blocks only when explicitly specified
41 doctest_test_doctest_blocks = ""
42
43 # Add any paths that contain templates here, relative to this directory.
44 templates_path = ["_templates"]
45
46 # The suffix of source filenames.
47 source_suffix = ".rst"
48
49 # The master toctree document.
50 master_doc = "index"
51
52 # General information about the project.
53 project = "urllib3"
54 copyright = f"{date.today().year}, Andrey Petrov"
55
56 # The short X.Y version.
57 version = urllib3.__version__
58 # The full version, including alpha/beta/rc tags.
59 release = version
60
61 # List of patterns, relative to source directory, that match files and
62 # directories to ignore when looking for source files.
63 exclude_patterns = ["_build"]
64
65 # The name of the Pygments (syntax highlighting) style to use.
66 pygments_style = "friendly"
67
68 # The theme to use for HTML and HTML Help pages. See the documentation for
69 # a list of builtin themes.
70 html_theme = "furo"
71 html_favicon = "images/favicon.png"
72
73 html_static_path = ["_static"]
74 html_theme_options = {
75 "announcement": """
76 <a style=\"text-decoration: none; color: white;\"
77 href=\"https://github.com/sponsors/urllib3\">
78 <img src=\"/en/latest/_static/favicon.png\"/> Support urllib3 on GitHub Sponsors
79 </a>
80 """,
81 "sidebar_hide_name": True,
82 "light_logo": "banner.svg",
83 "dark_logo": "dark-logo.svg",
84 }
85
86 intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
87
88 # Show typehints as content of the function or method
89 autodoc_typehints = "description"
90
91 # Warn about all references to unknown targets
92 nitpicky = True
93 # Except for these ones, which we expect to point to unknown targets:
94 nitpick_ignore = [
95 ("py:class", "_TYPE_SOCKS_OPTIONS"),
96 ("py:class", "_TYPE_SOCKET_OPTIONS"),
97 ("py:class", "_TYPE_TIMEOUT"),
98 ("py:class", "_TYPE_FIELD_VALUE"),
99 ("py:class", "_TYPE_BODY"),
100 ("py:class", "_HttplibHTTPResponse"),
101 ("py:class", "_HttplibHTTPMessage"),
102 ("py:class", "TracebackType"),
103 ("py:class", "Literal"),
104 ("py:class", "email.errors.MessageDefect"),
105 ("py:class", "MessageDefect"),
106 ("py:class", "http.client.HTTPMessage"),
107 ("py:class", "RequestHistory"),
108 ("py:class", "SSLTransportType"),
109 ("py:class", "VerifyMode"),
110 ("py:class", "_ssl._SSLContext"),
111 ("py:class", "urllib3._collections.HTTPHeaderDict"),
112 ("py:class", "urllib3._collections.RecentlyUsedContainer"),
113 ("py:class", "urllib3._request_methods.RequestMethods"),
114 ("py:class", "urllib3.contrib.socks._TYPE_SOCKS_OPTIONS"),
115 ("py:class", "urllib3.util.timeout._TYPE_DEFAULT"),
116 ("py:class", "BaseHTTPConnection"),
117 ]
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -35,8 +35,16 @@
"sphinx_copybutton",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
+ "sphinxext.opengraph",
]
+# Open Graph metadata
+ogp_title = "urllib3 documentation"
+ogp_site_url = "https://urllib3.readthedocs.io"
+ogp_type = "website"
+ogp_image = "https://github.com/urllib3/urllib3/raw/main/docs/_static/banner_github.svg"
+ogp_description = "urllib3 is a user-friendly HTTP client library for Python."
+
# Test code blocks only when explicitly specified
doctest_test_doctest_blocks = ""
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -35,8 +35,16 @@\n \"sphinx_copybutton\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n+ \"sphinxext.opengraph\",\n ]\n \n+# Open Graph metadata\n+ogp_title = \"urllib3 documentation\"\n+ogp_site_url = \"https://urllib3.readthedocs.io\"\n+ogp_type = \"website\"\n+ogp_image = \"https://github.com/urllib3/urllib3/raw/main/docs/_static/banner_github.svg\"\n+ogp_description = \"urllib3 is a user-friendly HTTP client library for Python.\"\n+\n # Test code blocks only when explicitly specified\n doctest_test_doctest_blocks = \"\"\n", "issue": "Add OpenGraph information to our documentation\nWould be good to fill in some open graph fields for our documentation so when we link to things they show up with rich output / images rather than as a standard link. Really works nicely for Social Media, Slack, etc.\r\n\r\nMore information: https://ogp.me/\n", "before_files": [{"content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom datetime import date\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nroot_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\nsys.path.insert(0, root_path)\n\n# https://docs.readthedocs.io/en/stable/builds.html#build-environment\nif \"READTHEDOCS\" in os.environ:\n import glob\n\n if glob.glob(\"../changelog/*.*.rst\"):\n print(\"-- Found changes; running towncrier --\", flush=True)\n import subprocess\n\n subprocess.run(\n [\"towncrier\", \"--yes\", \"--date\", \"not released yet\"], cwd=\"..\", check=True\n )\n\nimport urllib3\n\n# -- General configuration -----------------------------------------------------\n\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx_copybutton\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n]\n\n# Test code blocks only when explicitly specified\ndoctest_test_doctest_blocks = \"\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"urllib3\"\ncopyright = f\"{date.today().year}, Andrey Petrov\"\n\n# The short X.Y version.\nversion = urllib3.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"friendly\"\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\nhtml_theme = \"furo\"\nhtml_favicon = \"images/favicon.png\"\n\nhtml_static_path = [\"_static\"]\nhtml_theme_options = {\n \"announcement\": \"\"\"\n <a style=\\\"text-decoration: none; color: white;\\\" \n href=\\\"https://github.com/sponsors/urllib3\\\">\n <img src=\\\"/en/latest/_static/favicon.png\\\"/> Support urllib3 on GitHub Sponsors\n </a>\n \"\"\",\n \"sidebar_hide_name\": True,\n \"light_logo\": \"banner.svg\",\n \"dark_logo\": \"dark-logo.svg\",\n}\n\nintersphinx_mapping = {\"python\": (\"https://docs.python.org/3\", None)}\n\n# Show typehints as content of the function or method\nautodoc_typehints = \"description\"\n\n# Warn about all references to unknown targets\nnitpicky = True\n# Except for these ones, which we expect to point to unknown targets:\nnitpick_ignore = [\n (\"py:class\", \"_TYPE_SOCKS_OPTIONS\"),\n (\"py:class\", \"_TYPE_SOCKET_OPTIONS\"),\n (\"py:class\", \"_TYPE_TIMEOUT\"),\n (\"py:class\", \"_TYPE_FIELD_VALUE\"),\n (\"py:class\", \"_TYPE_BODY\"),\n (\"py:class\", \"_HttplibHTTPResponse\"),\n (\"py:class\", \"_HttplibHTTPMessage\"),\n (\"py:class\", \"TracebackType\"),\n (\"py:class\", \"Literal\"),\n (\"py:class\", \"email.errors.MessageDefect\"),\n (\"py:class\", \"MessageDefect\"),\n (\"py:class\", \"http.client.HTTPMessage\"),\n (\"py:class\", \"RequestHistory\"),\n (\"py:class\", \"SSLTransportType\"),\n (\"py:class\", \"VerifyMode\"),\n (\"py:class\", \"_ssl._SSLContext\"),\n (\"py:class\", \"urllib3._collections.HTTPHeaderDict\"),\n (\"py:class\", \"urllib3._collections.RecentlyUsedContainer\"),\n (\"py:class\", \"urllib3._request_methods.RequestMethods\"),\n (\"py:class\", \"urllib3.contrib.socks._TYPE_SOCKS_OPTIONS\"),\n (\"py:class\", \"urllib3.util.timeout._TYPE_DEFAULT\"),\n (\"py:class\", \"BaseHTTPConnection\"),\n]\n", "path": "docs/conf.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom datetime import date\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nroot_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\nsys.path.insert(0, root_path)\n\n# https://docs.readthedocs.io/en/stable/builds.html#build-environment\nif \"READTHEDOCS\" in os.environ:\n import glob\n\n if glob.glob(\"../changelog/*.*.rst\"):\n print(\"-- Found changes; running towncrier --\", flush=True)\n import subprocess\n\n subprocess.run(\n [\"towncrier\", \"--yes\", \"--date\", \"not released yet\"], cwd=\"..\", check=True\n )\n\nimport urllib3\n\n# -- General configuration -----------------------------------------------------\n\n\n# Add any Sphinx extension module names here, as strings. 
They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx_copybutton\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinxext.opengraph\",\n]\n\n# Open Graph metadata\nogp_title = \"urllib3 documentation\"\nogp_site_url = \"https://urllib3.readthedocs.io\"\nogp_type = \"website\"\nogp_image = \"https://github.com/urllib3/urllib3/raw/main/docs/_static/banner_github.svg\"\nogp_description = \"urllib3 is a user-friendly HTTP client library for Python.\"\n\n# Test code blocks only when explicitly specified\ndoctest_test_doctest_blocks = \"\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"urllib3\"\ncopyright = f\"{date.today().year}, Andrey Petrov\"\n\n# The short X.Y version.\nversion = urllib3.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"friendly\"\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"furo\"\nhtml_favicon = \"images/favicon.png\"\n\nhtml_static_path = [\"_static\"]\nhtml_theme_options = {\n \"announcement\": \"\"\"\n <a style=\\\"text-decoration: none; color: white;\\\" \n href=\\\"https://github.com/sponsors/urllib3\\\">\n <img src=\\\"/en/latest/_static/favicon.png\\\"/> Support urllib3 on GitHub Sponsors\n </a>\n \"\"\",\n \"sidebar_hide_name\": True,\n \"light_logo\": \"banner.svg\",\n \"dark_logo\": \"dark-logo.svg\",\n}\n\nintersphinx_mapping = {\"python\": (\"https://docs.python.org/3\", None)}\n\n# Show typehints as content of the function or method\nautodoc_typehints = \"description\"\n\n# Warn about all references to unknown targets\nnitpicky = True\n# Except for these ones, which we expect to point to unknown targets:\nnitpick_ignore = [\n (\"py:class\", \"_TYPE_SOCKS_OPTIONS\"),\n (\"py:class\", \"_TYPE_SOCKET_OPTIONS\"),\n (\"py:class\", \"_TYPE_TIMEOUT\"),\n (\"py:class\", \"_TYPE_FIELD_VALUE\"),\n (\"py:class\", \"_TYPE_BODY\"),\n (\"py:class\", \"_HttplibHTTPResponse\"),\n (\"py:class\", \"_HttplibHTTPMessage\"),\n (\"py:class\", \"TracebackType\"),\n (\"py:class\", \"Literal\"),\n (\"py:class\", \"email.errors.MessageDefect\"),\n (\"py:class\", \"MessageDefect\"),\n (\"py:class\", \"http.client.HTTPMessage\"),\n (\"py:class\", \"RequestHistory\"),\n (\"py:class\", \"SSLTransportType\"),\n (\"py:class\", \"VerifyMode\"),\n (\"py:class\", \"_ssl._SSLContext\"),\n (\"py:class\", \"urllib3._collections.HTTPHeaderDict\"),\n (\"py:class\", \"urllib3._collections.RecentlyUsedContainer\"),\n (\"py:class\", \"urllib3._request_methods.RequestMethods\"),\n (\"py:class\", \"urllib3.contrib.socks._TYPE_SOCKS_OPTIONS\"),\n (\"py:class\", \"urllib3.util.timeout._TYPE_DEFAULT\"),\n (\"py:class\", \"BaseHTTPConnection\"),\n]\n", "path": "docs/conf.py"}]} | 1,513 | 178 |
gh_patches_debug_2498 | rasdani/github-patches | git_diff | DDMAL__CantusDB-1116 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Admin Area, Change Source page: we should hide "Source Status" field
Debra got tripped up trying to publish a source - instead of checking the "Published" checkbox, she selected something in the "Source Status" dropdown.
~~Everything regarding source visibility is handled by our `published` field, so I think "Source Status" is vestigal and no longer needed. We may not even use it anywhere in our templates/views. With this in mind, it may be worth considering removing this field entirely. (Any data related to this field that was in OldCantus when we last synced is saved in sources' `json_info` field, so we wouldn't really need to worry about losing data.)~~ (it was decided that this information may eventually be useful, so we'll not remove the field from the model)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/admin.py`
Content:
```
1 from django.contrib import admin
2 from main_app.models import *
3 from main_app.forms import (
4 AdminCenturyForm,
5 AdminChantForm,
6 AdminFeastForm,
7 AdminGenreForm,
8 AdminNotationForm,
9 AdminOfficeForm,
10 AdminProvenanceForm,
11 AdminRismSiglumForm,
12 AdminSegmentForm,
13 AdminSequenceForm,
14 AdminSourceForm,
15 )
16
17 # these fields should not be editable by all classes
18 EXCLUDE = (
19 "created_by",
20 "last_updated_by",
21 "json_info",
22 )
23
24
25 class BaseModelAdmin(admin.ModelAdmin):
26 exclude = EXCLUDE
27
28 # if an object is created in the admin interface, assign the user to the created_by field
29 # else if an object is updated in the admin interface, assign the user to the last_updated_by field
30 def save_model(self, request, obj, form, change):
31 if change:
32 obj.last_updated_by = request.user
33 else:
34 obj.created_by = request.user
35 super().save_model(request, obj, form, change)
36
37
38 class CenturyAdmin(BaseModelAdmin):
39 search_fields = ("name",)
40 form = AdminCenturyForm
41
42
43 class ChantAdmin(BaseModelAdmin):
44 @admin.display(description="Source Siglum")
45 def get_source_siglum(self, obj):
46 if obj.source:
47 return obj.source.siglum
48
49 list_display = (
50 "incipit",
51 "get_source_siglum",
52 "genre",
53 )
54 search_fields = (
55 "title",
56 "incipit",
57 "cantus_id",
58 "id",
59 )
60
61 readonly_fields = (
62 "date_created",
63 "date_updated",
64 )
65
66 list_filter = (
67 "genre",
68 "office",
69 )
70 exclude = EXCLUDE + (
71 "col1",
72 "col2",
73 "col3",
74 "next_chant",
75 "s_sequence",
76 "is_last_chant_in_feast",
77 "visible_status",
78 "date",
79 "volpiano_notes",
80 "volpiano_intervals",
81 "title",
82 "differentiae_database",
83 )
84 form = AdminChantForm
85 raw_id_fields = (
86 "source",
87 "feast",
88 )
89 ordering = ("source__siglum",)
90
91
92 class DifferentiaAdmin(BaseModelAdmin):
93 search_fields = (
94 "differentia_id",
95 "id",
96 )
97
98
99 class FeastAdmin(BaseModelAdmin):
100 search_fields = (
101 "name",
102 "feast_code",
103 )
104 list_display = (
105 "name",
106 "month",
107 "day",
108 "feast_code",
109 )
110 form = AdminFeastForm
111
112
113 class GenreAdmin(BaseModelAdmin):
114 search_fields = ("name",)
115 form = AdminGenreForm
116
117
118 class NotationAdmin(BaseModelAdmin):
119 search_fields = ("name",)
120 form = AdminNotationForm
121
122
123 class OfficeAdmin(BaseModelAdmin):
124 search_fields = ("name",)
125 form = AdminOfficeForm
126
127
128 class ProvenanceAdmin(BaseModelAdmin):
129 search_fields = ("name",)
130 form = AdminProvenanceForm
131
132
133 class RismSiglumAdmin(BaseModelAdmin):
134 search_fields = ("name",)
135 form = AdminRismSiglumForm
136
137
138 class SegmentAdmin(BaseModelAdmin):
139 search_fields = ("name",)
140 form = AdminSegmentForm
141
142
143 class SequenceAdmin(BaseModelAdmin):
144 @admin.display(description="Source Siglum")
145 def get_source_siglum(self, obj):
146 if obj.source:
147 return obj.source.siglum
148
149 search_fields = (
150 "title",
151 "incipit",
152 "cantus_id",
153 "id",
154 )
155 exclude = EXCLUDE + (
156 "c_sequence",
157 "next_chant",
158 "is_last_chant_in_feast",
159 "visible_status",
160 )
161 list_display = ("incipit", "get_source_siglum", "genre")
162 list_filter = (
163 "genre",
164 "office",
165 )
166 raw_id_fields = (
167 "source",
168 "feast",
169 )
170 ordering = ("source__siglum",)
171 form = AdminSequenceForm
172
173
174 class SourceAdmin(BaseModelAdmin):
175 # These search fields are also available on the user-source inline relationship in the user admin page
176 search_fields = (
177 "siglum",
178 "title",
179 "id",
180 )
181 readonly_fields = (
182 "number_of_chants",
183 "number_of_melodies",
184 "date_created",
185 "date_updated",
186 )
187 # from the Django docs:
188 # Adding a ManyToManyField to this list will instead use a nifty unobtrusive JavaScript “filter” interface
189 # that allows searching within the options. The unselected and selected options appear in two boxes side by side.
190 filter_horizontal = (
191 "century",
192 "notation",
193 "current_editors",
194 "inventoried_by",
195 "full_text_entered_by",
196 "melodies_entered_by",
197 "proofreaders",
198 "other_editors",
199 )
200
201 list_display = (
202 "title",
203 "siglum",
204 "id",
205 )
206
207 list_filter = (
208 "full_source",
209 "segment",
210 "source_status",
211 "published",
212 "century",
213 )
214
215 ordering = ("siglum",)
216
217 form = AdminSourceForm
218
219
220 admin.site.register(Century, CenturyAdmin)
221 admin.site.register(Chant, ChantAdmin)
222 admin.site.register(Differentia, DifferentiaAdmin)
223 admin.site.register(Feast, FeastAdmin)
224 admin.site.register(Genre, GenreAdmin)
225 admin.site.register(Notation, NotationAdmin)
226 admin.site.register(Office, OfficeAdmin)
227 admin.site.register(Provenance, ProvenanceAdmin)
228 admin.site.register(RismSiglum, RismSiglumAdmin)
229 admin.site.register(Segment, SegmentAdmin)
230 admin.site.register(Sequence, SequenceAdmin)
231 admin.site.register(Source, SourceAdmin)
232
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django/cantusdb_project/main_app/admin.py b/django/cantusdb_project/main_app/admin.py
--- a/django/cantusdb_project/main_app/admin.py
+++ b/django/cantusdb_project/main_app/admin.py
@@ -172,6 +172,8 @@
class SourceAdmin(BaseModelAdmin):
+ exclude = ("source_status",)
+
# These search fields are also available on the user-source inline relationship in the user admin page
search_fields = (
"siglum",
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/admin.py b/django/cantusdb_project/main_app/admin.py\n--- a/django/cantusdb_project/main_app/admin.py\n+++ b/django/cantusdb_project/main_app/admin.py\n@@ -172,6 +172,8 @@\n \n \n class SourceAdmin(BaseModelAdmin):\n+ exclude = (\"source_status\",)\n+\n # These search fields are also available on the user-source inline relationship in the user admin page\n search_fields = (\n \"siglum\",\n", "issue": "Admin Area, Change Source page: we should hide \"Source Status\" field\nDebra got tripped up trying to publish a source - instead of checking the \"Published\" checkbox, she selected something in the \"Source Status\" dropdown.\r\n\r\n~~Everything regarding source visibility is handled by our `published` field, so I think \"Source Status\" is vestigal and no longer needed. We may not even use it anywhere in our templates/views. With this in mind, it may be worth considering removing this field entirely. (Any data related to this field that was in OldCantus when we last synced is saved in sources' `json_info` field, so we wouldn't really need to worry about losing data.)~~ (it was decided that this information may eventually be useful, so we'll not remove the field from the model)\n", "before_files": [{"content": "from django.contrib import admin\nfrom main_app.models import *\nfrom main_app.forms import (\n AdminCenturyForm,\n AdminChantForm,\n AdminFeastForm,\n AdminGenreForm,\n AdminNotationForm,\n AdminOfficeForm,\n AdminProvenanceForm,\n AdminRismSiglumForm,\n AdminSegmentForm,\n AdminSequenceForm,\n AdminSourceForm,\n)\n\n# these fields should not be editable by all classes\nEXCLUDE = (\n \"created_by\",\n \"last_updated_by\",\n \"json_info\",\n)\n\n\nclass BaseModelAdmin(admin.ModelAdmin):\n exclude = EXCLUDE\n\n # if an object is created in the admin interface, assign the user to the created_by field\n # else if an object is updated in the admin interface, assign the user to the last_updated_by field\n def save_model(self, request, obj, form, change):\n if change:\n obj.last_updated_by = request.user\n else:\n obj.created_by = request.user\n super().save_model(request, obj, form, change)\n\n\nclass CenturyAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminCenturyForm\n\n\nclass ChantAdmin(BaseModelAdmin):\n @admin.display(description=\"Source Siglum\")\n def get_source_siglum(self, obj):\n if obj.source:\n return obj.source.siglum\n\n list_display = (\n \"incipit\",\n \"get_source_siglum\",\n \"genre\",\n )\n search_fields = (\n \"title\",\n \"incipit\",\n \"cantus_id\",\n \"id\",\n )\n\n readonly_fields = (\n \"date_created\",\n \"date_updated\",\n )\n\n list_filter = (\n \"genre\",\n \"office\",\n )\n exclude = EXCLUDE + (\n \"col1\",\n \"col2\",\n \"col3\",\n \"next_chant\",\n \"s_sequence\",\n \"is_last_chant_in_feast\",\n \"visible_status\",\n \"date\",\n \"volpiano_notes\",\n \"volpiano_intervals\",\n \"title\",\n \"differentiae_database\",\n )\n form = AdminChantForm\n raw_id_fields = (\n \"source\",\n \"feast\",\n )\n ordering = (\"source__siglum\",)\n\n\nclass DifferentiaAdmin(BaseModelAdmin):\n search_fields = (\n \"differentia_id\",\n \"id\",\n )\n\n\nclass FeastAdmin(BaseModelAdmin):\n search_fields = (\n \"name\",\n \"feast_code\",\n )\n list_display = (\n \"name\",\n \"month\",\n \"day\",\n \"feast_code\",\n )\n form = AdminFeastForm\n\n\nclass GenreAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminGenreForm\n\n\nclass NotationAdmin(BaseModelAdmin):\n search_fields = 
(\"name\",)\n form = AdminNotationForm\n\n\nclass OfficeAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminOfficeForm\n\n\nclass ProvenanceAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminProvenanceForm\n\n\nclass RismSiglumAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminRismSiglumForm\n\n\nclass SegmentAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminSegmentForm\n\n\nclass SequenceAdmin(BaseModelAdmin):\n @admin.display(description=\"Source Siglum\")\n def get_source_siglum(self, obj):\n if obj.source:\n return obj.source.siglum\n\n search_fields = (\n \"title\",\n \"incipit\",\n \"cantus_id\",\n \"id\",\n )\n exclude = EXCLUDE + (\n \"c_sequence\",\n \"next_chant\",\n \"is_last_chant_in_feast\",\n \"visible_status\",\n )\n list_display = (\"incipit\", \"get_source_siglum\", \"genre\")\n list_filter = (\n \"genre\",\n \"office\",\n )\n raw_id_fields = (\n \"source\",\n \"feast\",\n )\n ordering = (\"source__siglum\",)\n form = AdminSequenceForm\n\n\nclass SourceAdmin(BaseModelAdmin):\n # These search fields are also available on the user-source inline relationship in the user admin page\n search_fields = (\n \"siglum\",\n \"title\",\n \"id\",\n )\n readonly_fields = (\n \"number_of_chants\",\n \"number_of_melodies\",\n \"date_created\",\n \"date_updated\",\n )\n # from the Django docs:\n # Adding a ManyToManyField to this list will instead use a nifty unobtrusive JavaScript \u201cfilter\u201d interface\n # that allows searching within the options. The unselected and selected options appear in two boxes side by side.\n filter_horizontal = (\n \"century\",\n \"notation\",\n \"current_editors\",\n \"inventoried_by\",\n \"full_text_entered_by\",\n \"melodies_entered_by\",\n \"proofreaders\",\n \"other_editors\",\n )\n\n list_display = (\n \"title\",\n \"siglum\",\n \"id\",\n )\n\n list_filter = (\n \"full_source\",\n \"segment\",\n \"source_status\",\n \"published\",\n \"century\",\n )\n\n ordering = (\"siglum\",)\n\n form = AdminSourceForm\n\n\nadmin.site.register(Century, CenturyAdmin)\nadmin.site.register(Chant, ChantAdmin)\nadmin.site.register(Differentia, DifferentiaAdmin)\nadmin.site.register(Feast, FeastAdmin)\nadmin.site.register(Genre, GenreAdmin)\nadmin.site.register(Notation, NotationAdmin)\nadmin.site.register(Office, OfficeAdmin)\nadmin.site.register(Provenance, ProvenanceAdmin)\nadmin.site.register(RismSiglum, RismSiglumAdmin)\nadmin.site.register(Segment, SegmentAdmin)\nadmin.site.register(Sequence, SequenceAdmin)\nadmin.site.register(Source, SourceAdmin)\n", "path": "django/cantusdb_project/main_app/admin.py"}], "after_files": [{"content": "from django.contrib import admin\nfrom main_app.models import *\nfrom main_app.forms import (\n AdminCenturyForm,\n AdminChantForm,\n AdminFeastForm,\n AdminGenreForm,\n AdminNotationForm,\n AdminOfficeForm,\n AdminProvenanceForm,\n AdminRismSiglumForm,\n AdminSegmentForm,\n AdminSequenceForm,\n AdminSourceForm,\n)\n\n# these fields should not be editable by all classes\nEXCLUDE = (\n \"created_by\",\n \"last_updated_by\",\n \"json_info\",\n)\n\n\nclass BaseModelAdmin(admin.ModelAdmin):\n exclude = EXCLUDE\n\n # if an object is created in the admin interface, assign the user to the created_by field\n # else if an object is updated in the admin interface, assign the user to the last_updated_by field\n def save_model(self, request, obj, form, change):\n if change:\n obj.last_updated_by = request.user\n else:\n obj.created_by = request.user\n super().save_model(request, 
obj, form, change)\n\n\nclass CenturyAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminCenturyForm\n\n\nclass ChantAdmin(BaseModelAdmin):\n @admin.display(description=\"Source Siglum\")\n def get_source_siglum(self, obj):\n if obj.source:\n return obj.source.siglum\n\n list_display = (\n \"incipit\",\n \"get_source_siglum\",\n \"genre\",\n )\n search_fields = (\n \"title\",\n \"incipit\",\n \"cantus_id\",\n \"id\",\n )\n\n readonly_fields = (\n \"date_created\",\n \"date_updated\",\n )\n\n list_filter = (\n \"genre\",\n \"office\",\n )\n exclude = EXCLUDE + (\n \"col1\",\n \"col2\",\n \"col3\",\n \"next_chant\",\n \"s_sequence\",\n \"is_last_chant_in_feast\",\n \"visible_status\",\n \"date\",\n \"volpiano_notes\",\n \"volpiano_intervals\",\n \"title\",\n \"differentiae_database\",\n )\n form = AdminChantForm\n raw_id_fields = (\n \"source\",\n \"feast\",\n )\n ordering = (\"source__siglum\",)\n\n\nclass DifferentiaAdmin(BaseModelAdmin):\n search_fields = (\n \"differentia_id\",\n \"id\",\n )\n\n\nclass FeastAdmin(BaseModelAdmin):\n search_fields = (\n \"name\",\n \"feast_code\",\n )\n list_display = (\n \"name\",\n \"month\",\n \"day\",\n \"feast_code\",\n )\n form = AdminFeastForm\n\n\nclass GenreAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminGenreForm\n\n\nclass NotationAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminNotationForm\n\n\nclass OfficeAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminOfficeForm\n\n\nclass ProvenanceAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminProvenanceForm\n\n\nclass RismSiglumAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminRismSiglumForm\n\n\nclass SegmentAdmin(BaseModelAdmin):\n search_fields = (\"name\",)\n form = AdminSegmentForm\n\n\nclass SequenceAdmin(BaseModelAdmin):\n @admin.display(description=\"Source Siglum\")\n def get_source_siglum(self, obj):\n if obj.source:\n return obj.source.siglum\n\n search_fields = (\n \"title\",\n \"incipit\",\n \"cantus_id\",\n \"id\",\n )\n exclude = EXCLUDE + (\n \"c_sequence\",\n \"next_chant\",\n \"is_last_chant_in_feast\",\n \"visible_status\",\n )\n list_display = (\"incipit\", \"get_source_siglum\", \"genre\")\n list_filter = (\n \"genre\",\n \"office\",\n )\n raw_id_fields = (\n \"source\",\n \"feast\",\n )\n ordering = (\"source__siglum\",)\n form = AdminSequenceForm\n\n\nclass SourceAdmin(BaseModelAdmin):\n exclude = (\"source_status\",)\n\n # These search fields are also available on the user-source inline relationship in the user admin page\n search_fields = (\n \"siglum\",\n \"title\",\n \"id\",\n )\n readonly_fields = (\n \"number_of_chants\",\n \"number_of_melodies\",\n \"date_created\",\n \"date_updated\",\n )\n # from the Django docs:\n # Adding a ManyToManyField to this list will instead use a nifty unobtrusive JavaScript \u201cfilter\u201d interface\n # that allows searching within the options. 
The unselected and selected options appear in two boxes side by side.\n filter_horizontal = (\n \"century\",\n \"notation\",\n \"current_editors\",\n \"inventoried_by\",\n \"full_text_entered_by\",\n \"melodies_entered_by\",\n \"proofreaders\",\n \"other_editors\",\n )\n\n list_display = (\n \"title\",\n \"siglum\",\n \"id\",\n )\n\n list_filter = (\n \"full_source\",\n \"segment\",\n \"source_status\",\n \"published\",\n \"century\",\n )\n\n ordering = (\"siglum\",)\n\n form = AdminSourceForm\n\n\nadmin.site.register(Century, CenturyAdmin)\nadmin.site.register(Chant, ChantAdmin)\nadmin.site.register(Differentia, DifferentiaAdmin)\nadmin.site.register(Feast, FeastAdmin)\nadmin.site.register(Genre, GenreAdmin)\nadmin.site.register(Notation, NotationAdmin)\nadmin.site.register(Office, OfficeAdmin)\nadmin.site.register(Provenance, ProvenanceAdmin)\nadmin.site.register(RismSiglum, RismSiglumAdmin)\nadmin.site.register(Segment, SegmentAdmin)\nadmin.site.register(Sequence, SequenceAdmin)\nadmin.site.register(Source, SourceAdmin)\n", "path": "django/cantusdb_project/main_app/admin.py"}]} | 2,330 | 119 |
gh_patches_debug_22207 | rasdani/github-patches | git_diff | getsentry__sentry-python-1314 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Falcon sentry integration catch all http errors
Hi,
it seems that the Falcon integration catches all HTTP errors, not just the 500 error code as the documentation says: https://docs.sentry.io/platforms/python/falcon/#behavior
https://github.com/getsentry/sentry-python/blob/7d482b5bfa1d4f58eb090818496eba8fee8e63aa/sentry_sdk/integrations/falcon.py#L156
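
For illustration only (not part of the original report): a minimal sketch of the distinction the linked documentation draws, assuming a plain Falcon app with the Sentry integration enabled; the resource name and route below are made up.

```python
# Hypothetical Falcon app used only to illustrate the documented behaviour:
# per the docs, only 500-level errors should be reported to Sentry, while
# routine 4xx responses should not be.
import falcon


class ExampleResource(object):
    def on_get(self, req, resp):
        # 400 -- per the docs this should *not* be sent to Sentry
        raise falcon.HTTPBadRequest(title="Bad request", description="Invalid input")

    def on_post(self, req, resp):
        # 500 -- per the docs this should be reported to Sentry
        raise falcon.HTTPInternalServerError(title="Server error", description="Boom")


app = falcon.API()
app.add_route("/example", ExampleResource())
```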
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/falcon.py`
Content:
```
1 from __future__ import absolute_import
2
3 from sentry_sdk.hub import Hub
4 from sentry_sdk.integrations import Integration, DidNotEnable
5 from sentry_sdk.integrations._wsgi_common import RequestExtractor
6 from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
7 from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
8
9 from sentry_sdk._types import MYPY
10
11 if MYPY:
12 from typing import Any
13 from typing import Dict
14 from typing import Optional
15
16 from sentry_sdk._types import EventProcessor
17
18 try:
19 import falcon # type: ignore
20 import falcon.api_helpers # type: ignore
21
22 from falcon import __version__ as FALCON_VERSION
23 except ImportError:
24 raise DidNotEnable("Falcon not installed")
25
26
27 class FalconRequestExtractor(RequestExtractor):
28 def env(self):
29 # type: () -> Dict[str, Any]
30 return self.request.env
31
32 def cookies(self):
33 # type: () -> Dict[str, Any]
34 return self.request.cookies
35
36 def form(self):
37 # type: () -> None
38 return None # No such concept in Falcon
39
40 def files(self):
41 # type: () -> None
42 return None # No such concept in Falcon
43
44 def raw_data(self):
45 # type: () -> Optional[str]
46
47 # As request data can only be read once we won't make this available
48 # to Sentry. Just send back a dummy string in case there was a
49 # content length.
50 # TODO(jmagnusson): Figure out if there's a way to support this
51 content_length = self.content_length()
52 if content_length > 0:
53 return "[REQUEST_CONTAINING_RAW_DATA]"
54 else:
55 return None
56
57 def json(self):
58 # type: () -> Optional[Dict[str, Any]]
59 try:
60 return self.request.media
61 except falcon.errors.HTTPBadRequest:
62 # NOTE(jmagnusson): We return `falcon.Request._media` here because
63 # falcon 1.4 doesn't do proper type checking in
64 # `falcon.Request.media`. This has been fixed in 2.0.
65 # Relevant code: https://github.com/falconry/falcon/blob/1.4.1/falcon/request.py#L953
66 return self.request._media
67
68
69 class SentryFalconMiddleware(object):
70 """Captures exceptions in Falcon requests and send to Sentry"""
71
72 def process_request(self, req, resp, *args, **kwargs):
73 # type: (Any, Any, *Any, **Any) -> None
74 hub = Hub.current
75 integration = hub.get_integration(FalconIntegration)
76 if integration is None:
77 return
78
79 with hub.configure_scope() as scope:
80 scope._name = "falcon"
81 scope.add_event_processor(_make_request_event_processor(req, integration))
82
83
84 TRANSACTION_STYLE_VALUES = ("uri_template", "path")
85
86
87 class FalconIntegration(Integration):
88 identifier = "falcon"
89
90 transaction_style = None
91
92 def __init__(self, transaction_style="uri_template"):
93 # type: (str) -> None
94 if transaction_style not in TRANSACTION_STYLE_VALUES:
95 raise ValueError(
96 "Invalid value for transaction_style: %s (must be in %s)"
97 % (transaction_style, TRANSACTION_STYLE_VALUES)
98 )
99 self.transaction_style = transaction_style
100
101 @staticmethod
102 def setup_once():
103 # type: () -> None
104 try:
105 version = tuple(map(int, FALCON_VERSION.split(".")))
106 except (ValueError, TypeError):
107 raise DidNotEnable("Unparsable Falcon version: {}".format(FALCON_VERSION))
108
109 if version < (1, 4):
110 raise DidNotEnable("Falcon 1.4 or newer required.")
111
112 _patch_wsgi_app()
113 _patch_handle_exception()
114 _patch_prepare_middleware()
115
116
117 def _patch_wsgi_app():
118 # type: () -> None
119 original_wsgi_app = falcon.API.__call__
120
121 def sentry_patched_wsgi_app(self, env, start_response):
122 # type: (falcon.API, Any, Any) -> Any
123 hub = Hub.current
124 integration = hub.get_integration(FalconIntegration)
125 if integration is None:
126 return original_wsgi_app(self, env, start_response)
127
128 sentry_wrapped = SentryWsgiMiddleware(
129 lambda envi, start_resp: original_wsgi_app(self, envi, start_resp)
130 )
131
132 return sentry_wrapped(env, start_response)
133
134 falcon.API.__call__ = sentry_patched_wsgi_app
135
136
137 def _patch_handle_exception():
138 # type: () -> None
139 original_handle_exception = falcon.API._handle_exception
140
141 def sentry_patched_handle_exception(self, *args):
142 # type: (falcon.API, *Any) -> Any
143 # NOTE(jmagnusson): falcon 2.0 changed falcon.API._handle_exception
144 # method signature from `(ex, req, resp, params)` to
145 # `(req, resp, ex, params)`
146 if isinstance(args[0], Exception):
147 ex = args[0]
148 else:
149 ex = args[2]
150
151 was_handled = original_handle_exception(self, *args)
152
153 hub = Hub.current
154 integration = hub.get_integration(FalconIntegration)
155
156 if integration is not None and not _is_falcon_http_error(ex):
157 # If an integration is there, a client has to be there.
158 client = hub.client # type: Any
159
160 event, hint = event_from_exception(
161 ex,
162 client_options=client.options,
163 mechanism={"type": "falcon", "handled": False},
164 )
165 hub.capture_event(event, hint=hint)
166
167 return was_handled
168
169 falcon.API._handle_exception = sentry_patched_handle_exception
170
171
172 def _patch_prepare_middleware():
173 # type: () -> None
174 original_prepare_middleware = falcon.api_helpers.prepare_middleware
175
176 def sentry_patched_prepare_middleware(
177 middleware=None, independent_middleware=False
178 ):
179 # type: (Any, Any) -> Any
180 hub = Hub.current
181 integration = hub.get_integration(FalconIntegration)
182 if integration is not None:
183 middleware = [SentryFalconMiddleware()] + (middleware or [])
184 return original_prepare_middleware(middleware, independent_middleware)
185
186 falcon.api_helpers.prepare_middleware = sentry_patched_prepare_middleware
187
188
189 def _is_falcon_http_error(ex):
190 # type: (BaseException) -> bool
191 return isinstance(ex, (falcon.HTTPError, falcon.http_status.HTTPStatus))
192
193
194 def _make_request_event_processor(req, integration):
195 # type: (falcon.Request, FalconIntegration) -> EventProcessor
196
197 def inner(event, hint):
198 # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
199 if integration.transaction_style == "uri_template":
200 event["transaction"] = req.uri_template
201 elif integration.transaction_style == "path":
202 event["transaction"] = req.path
203
204 with capture_internal_exceptions():
205 FalconRequestExtractor(req).extract_into_event(event)
206
207 return event
208
209 return inner
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sentry_sdk/integrations/falcon.py b/sentry_sdk/integrations/falcon.py
--- a/sentry_sdk/integrations/falcon.py
+++ b/sentry_sdk/integrations/falcon.py
@@ -153,7 +153,7 @@
hub = Hub.current
integration = hub.get_integration(FalconIntegration)
- if integration is not None and not _is_falcon_http_error(ex):
+ if integration is not None and _exception_leads_to_http_5xx(ex):
# If an integration is there, a client has to be there.
client = hub.client # type: Any
@@ -186,9 +186,15 @@
falcon.api_helpers.prepare_middleware = sentry_patched_prepare_middleware
-def _is_falcon_http_error(ex):
- # type: (BaseException) -> bool
- return isinstance(ex, (falcon.HTTPError, falcon.http_status.HTTPStatus))
+def _exception_leads_to_http_5xx(ex):
+ # type: (Exception) -> bool
+ is_server_error = isinstance(ex, falcon.HTTPError) and (ex.status or "").startswith(
+ "5"
+ )
+ is_unhandled_error = not isinstance(
+ ex, (falcon.HTTPError, falcon.http_status.HTTPStatus)
+ )
+ return is_server_error or is_unhandled_error
def _make_request_event_processor(req, integration):
| {"golden_diff": "diff --git a/sentry_sdk/integrations/falcon.py b/sentry_sdk/integrations/falcon.py\n--- a/sentry_sdk/integrations/falcon.py\n+++ b/sentry_sdk/integrations/falcon.py\n@@ -153,7 +153,7 @@\n hub = Hub.current\n integration = hub.get_integration(FalconIntegration)\n \n- if integration is not None and not _is_falcon_http_error(ex):\n+ if integration is not None and _exception_leads_to_http_5xx(ex):\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n \n@@ -186,9 +186,15 @@\n falcon.api_helpers.prepare_middleware = sentry_patched_prepare_middleware\n \n \n-def _is_falcon_http_error(ex):\n- # type: (BaseException) -> bool\n- return isinstance(ex, (falcon.HTTPError, falcon.http_status.HTTPStatus))\n+def _exception_leads_to_http_5xx(ex):\n+ # type: (Exception) -> bool\n+ is_server_error = isinstance(ex, falcon.HTTPError) and (ex.status or \"\").startswith(\n+ \"5\"\n+ )\n+ is_unhandled_error = not isinstance(\n+ ex, (falcon.HTTPError, falcon.http_status.HTTPStatus)\n+ )\n+ return is_server_error or is_unhandled_error\n \n \n def _make_request_event_processor(req, integration):\n", "issue": "Falcon sentry integration catch all http errors\nHi,\r\nseems that the falcon integration catches all the http errors and not just the 500 http error code as the documentation says, https://docs.sentry.io/platforms/python/falcon/#behavior\r\n\r\nhttps://github.com/getsentry/sentry-python/blob/7d482b5bfa1d4f58eb090818496eba8fee8e63aa/sentry_sdk/integrations/falcon.py#L156\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.integrations import Integration, DidNotEnable\nfrom sentry_sdk.integrations._wsgi_common import RequestExtractor\nfrom sentry_sdk.integrations.wsgi import SentryWsgiMiddleware\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any\n from typing import Dict\n from typing import Optional\n\n from sentry_sdk._types import EventProcessor\n\ntry:\n import falcon # type: ignore\n import falcon.api_helpers # type: ignore\n\n from falcon import __version__ as FALCON_VERSION\nexcept ImportError:\n raise DidNotEnable(\"Falcon not installed\")\n\n\nclass FalconRequestExtractor(RequestExtractor):\n def env(self):\n # type: () -> Dict[str, Any]\n return self.request.env\n\n def cookies(self):\n # type: () -> Dict[str, Any]\n return self.request.cookies\n\n def form(self):\n # type: () -> None\n return None # No such concept in Falcon\n\n def files(self):\n # type: () -> None\n return None # No such concept in Falcon\n\n def raw_data(self):\n # type: () -> Optional[str]\n\n # As request data can only be read once we won't make this available\n # to Sentry. Just send back a dummy string in case there was a\n # content length.\n # TODO(jmagnusson): Figure out if there's a way to support this\n content_length = self.content_length()\n if content_length > 0:\n return \"[REQUEST_CONTAINING_RAW_DATA]\"\n else:\n return None\n\n def json(self):\n # type: () -> Optional[Dict[str, Any]]\n try:\n return self.request.media\n except falcon.errors.HTTPBadRequest:\n # NOTE(jmagnusson): We return `falcon.Request._media` here because\n # falcon 1.4 doesn't do proper type checking in\n # `falcon.Request.media`. 
This has been fixed in 2.0.\n # Relevant code: https://github.com/falconry/falcon/blob/1.4.1/falcon/request.py#L953\n return self.request._media\n\n\nclass SentryFalconMiddleware(object):\n \"\"\"Captures exceptions in Falcon requests and send to Sentry\"\"\"\n\n def process_request(self, req, resp, *args, **kwargs):\n # type: (Any, Any, *Any, **Any) -> None\n hub = Hub.current\n integration = hub.get_integration(FalconIntegration)\n if integration is None:\n return\n\n with hub.configure_scope() as scope:\n scope._name = \"falcon\"\n scope.add_event_processor(_make_request_event_processor(req, integration))\n\n\nTRANSACTION_STYLE_VALUES = (\"uri_template\", \"path\")\n\n\nclass FalconIntegration(Integration):\n identifier = \"falcon\"\n\n transaction_style = None\n\n def __init__(self, transaction_style=\"uri_template\"):\n # type: (str) -> None\n if transaction_style not in TRANSACTION_STYLE_VALUES:\n raise ValueError(\n \"Invalid value for transaction_style: %s (must be in %s)\"\n % (transaction_style, TRANSACTION_STYLE_VALUES)\n )\n self.transaction_style = transaction_style\n\n @staticmethod\n def setup_once():\n # type: () -> None\n try:\n version = tuple(map(int, FALCON_VERSION.split(\".\")))\n except (ValueError, TypeError):\n raise DidNotEnable(\"Unparsable Falcon version: {}\".format(FALCON_VERSION))\n\n if version < (1, 4):\n raise DidNotEnable(\"Falcon 1.4 or newer required.\")\n\n _patch_wsgi_app()\n _patch_handle_exception()\n _patch_prepare_middleware()\n\n\ndef _patch_wsgi_app():\n # type: () -> None\n original_wsgi_app = falcon.API.__call__\n\n def sentry_patched_wsgi_app(self, env, start_response):\n # type: (falcon.API, Any, Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(FalconIntegration)\n if integration is None:\n return original_wsgi_app(self, env, start_response)\n\n sentry_wrapped = SentryWsgiMiddleware(\n lambda envi, start_resp: original_wsgi_app(self, envi, start_resp)\n )\n\n return sentry_wrapped(env, start_response)\n\n falcon.API.__call__ = sentry_patched_wsgi_app\n\n\ndef _patch_handle_exception():\n # type: () -> None\n original_handle_exception = falcon.API._handle_exception\n\n def sentry_patched_handle_exception(self, *args):\n # type: (falcon.API, *Any) -> Any\n # NOTE(jmagnusson): falcon 2.0 changed falcon.API._handle_exception\n # method signature from `(ex, req, resp, params)` to\n # `(req, resp, ex, params)`\n if isinstance(args[0], Exception):\n ex = args[0]\n else:\n ex = args[2]\n\n was_handled = original_handle_exception(self, *args)\n\n hub = Hub.current\n integration = hub.get_integration(FalconIntegration)\n\n if integration is not None and not _is_falcon_http_error(ex):\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n event, hint = event_from_exception(\n ex,\n client_options=client.options,\n mechanism={\"type\": \"falcon\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n return was_handled\n\n falcon.API._handle_exception = sentry_patched_handle_exception\n\n\ndef _patch_prepare_middleware():\n # type: () -> None\n original_prepare_middleware = falcon.api_helpers.prepare_middleware\n\n def sentry_patched_prepare_middleware(\n middleware=None, independent_middleware=False\n ):\n # type: (Any, Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(FalconIntegration)\n if integration is not None:\n middleware = [SentryFalconMiddleware()] + (middleware or [])\n return original_prepare_middleware(middleware, independent_middleware)\n\n 
falcon.api_helpers.prepare_middleware = sentry_patched_prepare_middleware\n\n\ndef _is_falcon_http_error(ex):\n # type: (BaseException) -> bool\n return isinstance(ex, (falcon.HTTPError, falcon.http_status.HTTPStatus))\n\n\ndef _make_request_event_processor(req, integration):\n # type: (falcon.Request, FalconIntegration) -> EventProcessor\n\n def inner(event, hint):\n # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]\n if integration.transaction_style == \"uri_template\":\n event[\"transaction\"] = req.uri_template\n elif integration.transaction_style == \"path\":\n event[\"transaction\"] = req.path\n\n with capture_internal_exceptions():\n FalconRequestExtractor(req).extract_into_event(event)\n\n return event\n\n return inner\n", "path": "sentry_sdk/integrations/falcon.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.integrations import Integration, DidNotEnable\nfrom sentry_sdk.integrations._wsgi_common import RequestExtractor\nfrom sentry_sdk.integrations.wsgi import SentryWsgiMiddleware\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any\n from typing import Dict\n from typing import Optional\n\n from sentry_sdk._types import EventProcessor\n\ntry:\n import falcon # type: ignore\n import falcon.api_helpers # type: ignore\n\n from falcon import __version__ as FALCON_VERSION\nexcept ImportError:\n raise DidNotEnable(\"Falcon not installed\")\n\n\nclass FalconRequestExtractor(RequestExtractor):\n def env(self):\n # type: () -> Dict[str, Any]\n return self.request.env\n\n def cookies(self):\n # type: () -> Dict[str, Any]\n return self.request.cookies\n\n def form(self):\n # type: () -> None\n return None # No such concept in Falcon\n\n def files(self):\n # type: () -> None\n return None # No such concept in Falcon\n\n def raw_data(self):\n # type: () -> Optional[str]\n\n # As request data can only be read once we won't make this available\n # to Sentry. Just send back a dummy string in case there was a\n # content length.\n # TODO(jmagnusson): Figure out if there's a way to support this\n content_length = self.content_length()\n if content_length > 0:\n return \"[REQUEST_CONTAINING_RAW_DATA]\"\n else:\n return None\n\n def json(self):\n # type: () -> Optional[Dict[str, Any]]\n try:\n return self.request.media\n except falcon.errors.HTTPBadRequest:\n # NOTE(jmagnusson): We return `falcon.Request._media` here because\n # falcon 1.4 doesn't do proper type checking in\n # `falcon.Request.media`. 
This has been fixed in 2.0.\n # Relevant code: https://github.com/falconry/falcon/blob/1.4.1/falcon/request.py#L953\n return self.request._media\n\n\nclass SentryFalconMiddleware(object):\n \"\"\"Captures exceptions in Falcon requests and send to Sentry\"\"\"\n\n def process_request(self, req, resp, *args, **kwargs):\n # type: (Any, Any, *Any, **Any) -> None\n hub = Hub.current\n integration = hub.get_integration(FalconIntegration)\n if integration is None:\n return\n\n with hub.configure_scope() as scope:\n scope._name = \"falcon\"\n scope.add_event_processor(_make_request_event_processor(req, integration))\n\n\nTRANSACTION_STYLE_VALUES = (\"uri_template\", \"path\")\n\n\nclass FalconIntegration(Integration):\n identifier = \"falcon\"\n\n transaction_style = None\n\n def __init__(self, transaction_style=\"uri_template\"):\n # type: (str) -> None\n if transaction_style not in TRANSACTION_STYLE_VALUES:\n raise ValueError(\n \"Invalid value for transaction_style: %s (must be in %s)\"\n % (transaction_style, TRANSACTION_STYLE_VALUES)\n )\n self.transaction_style = transaction_style\n\n @staticmethod\n def setup_once():\n # type: () -> None\n try:\n version = tuple(map(int, FALCON_VERSION.split(\".\")))\n except (ValueError, TypeError):\n raise DidNotEnable(\"Unparsable Falcon version: {}\".format(FALCON_VERSION))\n\n if version < (1, 4):\n raise DidNotEnable(\"Falcon 1.4 or newer required.\")\n\n _patch_wsgi_app()\n _patch_handle_exception()\n _patch_prepare_middleware()\n\n\ndef _patch_wsgi_app():\n # type: () -> None\n original_wsgi_app = falcon.API.__call__\n\n def sentry_patched_wsgi_app(self, env, start_response):\n # type: (falcon.API, Any, Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(FalconIntegration)\n if integration is None:\n return original_wsgi_app(self, env, start_response)\n\n sentry_wrapped = SentryWsgiMiddleware(\n lambda envi, start_resp: original_wsgi_app(self, envi, start_resp)\n )\n\n return sentry_wrapped(env, start_response)\n\n falcon.API.__call__ = sentry_patched_wsgi_app\n\n\ndef _patch_handle_exception():\n # type: () -> None\n original_handle_exception = falcon.API._handle_exception\n\n def sentry_patched_handle_exception(self, *args):\n # type: (falcon.API, *Any) -> Any\n # NOTE(jmagnusson): falcon 2.0 changed falcon.API._handle_exception\n # method signature from `(ex, req, resp, params)` to\n # `(req, resp, ex, params)`\n if isinstance(args[0], Exception):\n ex = args[0]\n else:\n ex = args[2]\n\n was_handled = original_handle_exception(self, *args)\n\n hub = Hub.current\n integration = hub.get_integration(FalconIntegration)\n\n if integration is not None and _exception_leads_to_http_5xx(ex):\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n event, hint = event_from_exception(\n ex,\n client_options=client.options,\n mechanism={\"type\": \"falcon\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n return was_handled\n\n falcon.API._handle_exception = sentry_patched_handle_exception\n\n\ndef _patch_prepare_middleware():\n # type: () -> None\n original_prepare_middleware = falcon.api_helpers.prepare_middleware\n\n def sentry_patched_prepare_middleware(\n middleware=None, independent_middleware=False\n ):\n # type: (Any, Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(FalconIntegration)\n if integration is not None:\n middleware = [SentryFalconMiddleware()] + (middleware or [])\n return original_prepare_middleware(middleware, independent_middleware)\n\n 
falcon.api_helpers.prepare_middleware = sentry_patched_prepare_middleware\n\n\ndef _exception_leads_to_http_5xx(ex):\n # type: (Exception) -> bool\n is_server_error = isinstance(ex, falcon.HTTPError) and (ex.status or \"\").startswith(\n \"5\"\n )\n is_unhandled_error = not isinstance(\n ex, (falcon.HTTPError, falcon.http_status.HTTPStatus)\n )\n return is_server_error or is_unhandled_error\n\n\ndef _make_request_event_processor(req, integration):\n # type: (falcon.Request, FalconIntegration) -> EventProcessor\n\n def inner(event, hint):\n # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]\n if integration.transaction_style == \"uri_template\":\n event[\"transaction\"] = req.uri_template\n elif integration.transaction_style == \"path\":\n event[\"transaction\"] = req.path\n\n with capture_internal_exceptions():\n FalconRequestExtractor(req).extract_into_event(event)\n\n return event\n\n return inner\n", "path": "sentry_sdk/integrations/falcon.py"}]} | 2,485 | 321 |
gh_patches_debug_54057 | rasdani/github-patches | git_diff | docker__docker-py-1819 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Log message not replacing string format placeholder
[This line](https://github.com/docker/docker-py/blob/e9fab1432b974ceaa888b371e382dfcf2f6556e4/docker/auth.py#L205) in the `parse_auth` function in `auth.py` is using a string format placeholder but is not calling `format` on the string to replace it.
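
For illustration only (stand-alone snippet, not the docker-py source): with Python's `logging`, the message string is emitted as-is unless `.format()` is actually called, so the `{0}` placeholder shows up literally in the log output.

```python
# Minimal sketch of the effect described above.
import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("example")

registry = "registry.example.com"

# What the code at the linked line effectively does -- the placeholder is
# never substituted, so the log line contains the literal text "{0}":
log.debug(
    'Auth data for {0} is absent. Client might be using a '
    'credentials store instead.'
)

# With .format(registry) applied, the registry name appears instead:
log.debug(
    'Auth data for {0} is absent. Client might be using a '
    'credentials store instead.'.format(registry)
)
```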
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/auth.py`
Content:
```
1 import base64
2 import json
3 import logging
4 import os
5
6 import dockerpycreds
7 import six
8
9 from . import errors
10 from .constants import IS_WINDOWS_PLATFORM
11
12 INDEX_NAME = 'docker.io'
13 INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
14 DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
15 LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
16 TOKEN_USERNAME = '<token>'
17
18 log = logging.getLogger(__name__)
19
20
21 def resolve_repository_name(repo_name):
22 if '://' in repo_name:
23 raise errors.InvalidRepository(
24 'Repository name cannot contain a scheme ({0})'.format(repo_name)
25 )
26
27 index_name, remote_name = split_repo_name(repo_name)
28 if index_name[0] == '-' or index_name[-1] == '-':
29 raise errors.InvalidRepository(
30 'Invalid index name ({0}). Cannot begin or end with a'
31 ' hyphen.'.format(index_name)
32 )
33 return resolve_index_name(index_name), remote_name
34
35
36 def resolve_index_name(index_name):
37 index_name = convert_to_hostname(index_name)
38 if index_name == 'index.' + INDEX_NAME:
39 index_name = INDEX_NAME
40 return index_name
41
42
43 def get_config_header(client, registry):
44 log.debug('Looking for auth config')
45 if not client._auth_configs:
46 log.debug(
47 "No auth config in memory - loading from filesystem"
48 )
49 client._auth_configs = load_config()
50 authcfg = resolve_authconfig(client._auth_configs, registry)
51 # Do not fail here if no authentication exists for this
52 # specific registry as we can have a readonly pull. Just
53 # put the header if we can.
54 if authcfg:
55 log.debug('Found auth config')
56 # auth_config needs to be a dict in the format used by
57 # auth.py username , password, serveraddress, email
58 return encode_header(authcfg)
59 log.debug('No auth config found')
60 return None
61
62
63 def split_repo_name(repo_name):
64 parts = repo_name.split('/', 1)
65 if len(parts) == 1 or (
66 '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
67 ):
68 # This is a docker index repo (ex: username/foobar or ubuntu)
69 return INDEX_NAME, repo_name
70 return tuple(parts)
71
72
73 def get_credential_store(authconfig, registry):
74 if not registry or registry == INDEX_NAME:
75 registry = 'https://index.docker.io/v1/'
76
77 return authconfig.get('credHelpers', {}).get(registry) or authconfig.get(
78 'credsStore'
79 )
80
81
82 def resolve_authconfig(authconfig, registry=None):
83 """
84 Returns the authentication data from the given auth configuration for a
85 specific registry. As with the Docker client, legacy entries in the config
86 with full URLs are stripped down to hostnames before checking for a match.
87 Returns None if no match was found.
88 """
89
90 if 'credHelpers' in authconfig or 'credsStore' in authconfig:
91 store_name = get_credential_store(authconfig, registry)
92 if store_name is not None:
93 log.debug(
94 'Using credentials store "{0}"'.format(store_name)
95 )
96 return _resolve_authconfig_credstore(
97 authconfig, registry, store_name
98 )
99
100 # Default to the public index server
101 registry = resolve_index_name(registry) if registry else INDEX_NAME
102 log.debug("Looking for auth entry for {0}".format(repr(registry)))
103
104 if registry in authconfig:
105 log.debug("Found {0}".format(repr(registry)))
106 return authconfig[registry]
107
108 for key, config in six.iteritems(authconfig):
109 if resolve_index_name(key) == registry:
110 log.debug("Found {0}".format(repr(key)))
111 return config
112
113 log.debug("No entry found")
114 return None
115
116
117 def _resolve_authconfig_credstore(authconfig, registry, credstore_name):
118 if not registry or registry == INDEX_NAME:
119 # The ecosystem is a little schizophrenic with index.docker.io VS
120 # docker.io - in that case, it seems the full URL is necessary.
121 registry = INDEX_URL
122 log.debug("Looking for auth entry for {0}".format(repr(registry)))
123 store = dockerpycreds.Store(credstore_name)
124 try:
125 data = store.get(registry)
126 res = {
127 'ServerAddress': registry,
128 }
129 if data['Username'] == TOKEN_USERNAME:
130 res['IdentityToken'] = data['Secret']
131 else:
132 res.update({
133 'Username': data['Username'],
134 'Password': data['Secret'],
135 })
136 return res
137 except dockerpycreds.CredentialsNotFound as e:
138 log.debug('No entry found')
139 return None
140 except dockerpycreds.StoreError as e:
141 raise errors.DockerException(
142 'Credentials store error: {0}'.format(repr(e))
143 )
144
145
146 def convert_to_hostname(url):
147 return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
148
149
150 def decode_auth(auth):
151 if isinstance(auth, six.string_types):
152 auth = auth.encode('ascii')
153 s = base64.b64decode(auth)
154 login, pwd = s.split(b':', 1)
155 return login.decode('utf8'), pwd.decode('utf8')
156
157
158 def encode_header(auth):
159 auth_json = json.dumps(auth).encode('ascii')
160 return base64.urlsafe_b64encode(auth_json)
161
162
163 def parse_auth(entries, raise_on_error=False):
164 """
165 Parses authentication entries
166
167 Args:
168 entries: Dict of authentication entries.
169 raise_on_error: If set to true, an invalid format will raise
170 InvalidConfigFile
171
172 Returns:
173 Authentication registry.
174 """
175
176 conf = {}
177 for registry, entry in six.iteritems(entries):
178 if not isinstance(entry, dict):
179 log.debug(
180 'Config entry for key {0} is not auth config'.format(registry)
181 )
182 # We sometimes fall back to parsing the whole config as if it was
183 # the auth config by itself, for legacy purposes. In that case, we
184 # fail silently and return an empty conf if any of the keys is not
185 # formatted properly.
186 if raise_on_error:
187 raise errors.InvalidConfigFile(
188 'Invalid configuration for registry {0}'.format(registry)
189 )
190 return {}
191 if 'identitytoken' in entry:
192 log.debug('Found an IdentityToken entry for registry {0}'.format(
193 registry
194 ))
195 conf[registry] = {
196 'IdentityToken': entry['identitytoken']
197 }
198 continue # Other values are irrelevant if we have a token, skip.
199
200 if 'auth' not in entry:
201 # Starting with engine v1.11 (API 1.23), an empty dictionary is
202 # a valid value in the auths config.
203 # https://github.com/docker/compose/issues/3265
204 log.debug(
205 'Auth data for {0} is absent. Client might be using a '
206 'credentials store instead.'
207 )
208 conf[registry] = {}
209 continue
210
211 username, password = decode_auth(entry['auth'])
212 log.debug(
213 'Found entry (registry={0}, username={1})'
214 .format(repr(registry), repr(username))
215 )
216
217 conf[registry] = {
218 'username': username,
219 'password': password,
220 'email': entry.get('email'),
221 'serveraddress': registry,
222 }
223 return conf
224
225
226 def find_config_file(config_path=None):
227 paths = list(filter(None, [
228 config_path, # 1
229 config_path_from_environment(), # 2
230 os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3
231 os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
232 ]))
233
234 log.debug("Trying paths: {0}".format(repr(paths)))
235
236 for path in paths:
237 if os.path.exists(path):
238 log.debug("Found file at path: {0}".format(path))
239 return path
240
241 log.debug("No config file found")
242
243 return None
244
245
246 def config_path_from_environment():
247 config_dir = os.environ.get('DOCKER_CONFIG')
248 if not config_dir:
249 return None
250 return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
251
252
253 def home_dir():
254 """
255 Get the user's home directory, using the same logic as the Docker Engine
256 client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
257 """
258 if IS_WINDOWS_PLATFORM:
259 return os.environ.get('USERPROFILE', '')
260 else:
261 return os.path.expanduser('~')
262
263
264 def load_config(config_path=None):
265 """
266 Loads authentication data from a Docker configuration file in the given
267 root directory or if config_path is passed use given path.
268 Lookup priority:
269 explicit config_path parameter > DOCKER_CONFIG environment variable >
270 ~/.docker/config.json > ~/.dockercfg
271 """
272 config_file = find_config_file(config_path)
273
274 if not config_file:
275 return {}
276
277 try:
278 with open(config_file) as f:
279 data = json.load(f)
280 res = {}
281 if data.get('auths'):
282 log.debug("Found 'auths' section")
283 res.update(parse_auth(data['auths'], raise_on_error=True))
284 if data.get('HttpHeaders'):
285 log.debug("Found 'HttpHeaders' section")
286 res.update({'HttpHeaders': data['HttpHeaders']})
287 if data.get('credsStore'):
288 log.debug("Found 'credsStore' section")
289 res.update({'credsStore': data['credsStore']})
290 if data.get('credHelpers'):
291 log.debug("Found 'credHelpers' section")
292 res.update({'credHelpers': data['credHelpers']})
293 if res:
294 return res
295 else:
296 log.debug("Couldn't find 'auths' or 'HttpHeaders' sections")
297 f.seek(0)
298 return parse_auth(json.load(f))
299 except (IOError, KeyError, ValueError) as e:
300 # Likely missing new Docker config file or it's in an
301 # unknown format, continue to attempt to read old location
302 # and format.
303 log.debug(e)
304
305 log.debug("Attempting to parse legacy auth file format")
306 try:
307 data = []
308 with open(config_file) as f:
309 for line in f.readlines():
310 data.append(line.strip().split(' = ')[1])
311 if len(data) < 2:
312 # Not enough data
313 raise errors.InvalidConfigFile(
314 'Invalid or empty configuration file!'
315 )
316
317 username, password = decode_auth(data[0])
318 return {
319 INDEX_NAME: {
320 'username': username,
321 'password': password,
322 'email': data[1],
323 'serveraddress': INDEX_URL,
324 }
325 }
326 except Exception as e:
327 log.debug(e)
328 pass
329
330 log.debug("All parsing attempts failed - returning empty config")
331 return {}
332
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/auth.py b/docker/auth.py
--- a/docker/auth.py
+++ b/docker/auth.py
@@ -203,7 +203,7 @@
# https://github.com/docker/compose/issues/3265
log.debug(
'Auth data for {0} is absent. Client might be using a '
- 'credentials store instead.'
+ 'credentials store instead.'.format(registry)
)
conf[registry] = {}
continue
| {"golden_diff": "diff --git a/docker/auth.py b/docker/auth.py\n--- a/docker/auth.py\n+++ b/docker/auth.py\n@@ -203,7 +203,7 @@\n # https://github.com/docker/compose/issues/3265\n log.debug(\n 'Auth data for {0} is absent. Client might be using a '\n- 'credentials store instead.'\n+ 'credentials store instead.'.format(registry)\n )\n conf[registry] = {}\n continue\n", "issue": "Log message not replacing string format placeholder\n[This line](https://github.com/docker/docker-py/blob/e9fab1432b974ceaa888b371e382dfcf2f6556e4/docker/auth.py#L205) in the `parse_auth` function in `auth.py` is using a string format placeholder but is not calling `format` on the string to replace it.\n", "before_files": [{"content": "import base64\nimport json\nimport logging\nimport os\n\nimport dockerpycreds\nimport six\n\nfrom . import errors\nfrom .constants import IS_WINDOWS_PLATFORM\n\nINDEX_NAME = 'docker.io'\nINDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)\nDOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')\nLEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'\nTOKEN_USERNAME = '<token>'\n\nlog = logging.getLogger(__name__)\n\n\ndef resolve_repository_name(repo_name):\n if '://' in repo_name:\n raise errors.InvalidRepository(\n 'Repository name cannot contain a scheme ({0})'.format(repo_name)\n )\n\n index_name, remote_name = split_repo_name(repo_name)\n if index_name[0] == '-' or index_name[-1] == '-':\n raise errors.InvalidRepository(\n 'Invalid index name ({0}). Cannot begin or end with a'\n ' hyphen.'.format(index_name)\n )\n return resolve_index_name(index_name), remote_name\n\n\ndef resolve_index_name(index_name):\n index_name = convert_to_hostname(index_name)\n if index_name == 'index.' + INDEX_NAME:\n index_name = INDEX_NAME\n return index_name\n\n\ndef get_config_header(client, registry):\n log.debug('Looking for auth config')\n if not client._auth_configs:\n log.debug(\n \"No auth config in memory - loading from filesystem\"\n )\n client._auth_configs = load_config()\n authcfg = resolve_authconfig(client._auth_configs, registry)\n # Do not fail here if no authentication exists for this\n # specific registry as we can have a readonly pull. Just\n # put the header if we can.\n if authcfg:\n log.debug('Found auth config')\n # auth_config needs to be a dict in the format used by\n # auth.py username , password, serveraddress, email\n return encode_header(authcfg)\n log.debug('No auth config found')\n return None\n\n\ndef split_repo_name(repo_name):\n parts = repo_name.split('/', 1)\n if len(parts) == 1 or (\n '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'\n ):\n # This is a docker index repo (ex: username/foobar or ubuntu)\n return INDEX_NAME, repo_name\n return tuple(parts)\n\n\ndef get_credential_store(authconfig, registry):\n if not registry or registry == INDEX_NAME:\n registry = 'https://index.docker.io/v1/'\n\n return authconfig.get('credHelpers', {}).get(registry) or authconfig.get(\n 'credsStore'\n )\n\n\ndef resolve_authconfig(authconfig, registry=None):\n \"\"\"\n Returns the authentication data from the given auth configuration for a\n specific registry. 
As with the Docker client, legacy entries in the config\n with full URLs are stripped down to hostnames before checking for a match.\n Returns None if no match was found.\n \"\"\"\n\n if 'credHelpers' in authconfig or 'credsStore' in authconfig:\n store_name = get_credential_store(authconfig, registry)\n if store_name is not None:\n log.debug(\n 'Using credentials store \"{0}\"'.format(store_name)\n )\n return _resolve_authconfig_credstore(\n authconfig, registry, store_name\n )\n\n # Default to the public index server\n registry = resolve_index_name(registry) if registry else INDEX_NAME\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n\n if registry in authconfig:\n log.debug(\"Found {0}\".format(repr(registry)))\n return authconfig[registry]\n\n for key, config in six.iteritems(authconfig):\n if resolve_index_name(key) == registry:\n log.debug(\"Found {0}\".format(repr(key)))\n return config\n\n log.debug(\"No entry found\")\n return None\n\n\ndef _resolve_authconfig_credstore(authconfig, registry, credstore_name):\n if not registry or registry == INDEX_NAME:\n # The ecosystem is a little schizophrenic with index.docker.io VS\n # docker.io - in that case, it seems the full URL is necessary.\n registry = INDEX_URL\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n store = dockerpycreds.Store(credstore_name)\n try:\n data = store.get(registry)\n res = {\n 'ServerAddress': registry,\n }\n if data['Username'] == TOKEN_USERNAME:\n res['IdentityToken'] = data['Secret']\n else:\n res.update({\n 'Username': data['Username'],\n 'Password': data['Secret'],\n })\n return res\n except dockerpycreds.CredentialsNotFound as e:\n log.debug('No entry found')\n return None\n except dockerpycreds.StoreError as e:\n raise errors.DockerException(\n 'Credentials store error: {0}'.format(repr(e))\n )\n\n\ndef convert_to_hostname(url):\n return url.replace('http://', '').replace('https://', '').split('/', 1)[0]\n\n\ndef decode_auth(auth):\n if isinstance(auth, six.string_types):\n auth = auth.encode('ascii')\n s = base64.b64decode(auth)\n login, pwd = s.split(b':', 1)\n return login.decode('utf8'), pwd.decode('utf8')\n\n\ndef encode_header(auth):\n auth_json = json.dumps(auth).encode('ascii')\n return base64.urlsafe_b64encode(auth_json)\n\n\ndef parse_auth(entries, raise_on_error=False):\n \"\"\"\n Parses authentication entries\n\n Args:\n entries: Dict of authentication entries.\n raise_on_error: If set to true, an invalid format will raise\n InvalidConfigFile\n\n Returns:\n Authentication registry.\n \"\"\"\n\n conf = {}\n for registry, entry in six.iteritems(entries):\n if not isinstance(entry, dict):\n log.debug(\n 'Config entry for key {0} is not auth config'.format(registry)\n )\n # We sometimes fall back to parsing the whole config as if it was\n # the auth config by itself, for legacy purposes. 
In that case, we\n # fail silently and return an empty conf if any of the keys is not\n # formatted properly.\n if raise_on_error:\n raise errors.InvalidConfigFile(\n 'Invalid configuration for registry {0}'.format(registry)\n )\n return {}\n if 'identitytoken' in entry:\n log.debug('Found an IdentityToken entry for registry {0}'.format(\n registry\n ))\n conf[registry] = {\n 'IdentityToken': entry['identitytoken']\n }\n continue # Other values are irrelevant if we have a token, skip.\n\n if 'auth' not in entry:\n # Starting with engine v1.11 (API 1.23), an empty dictionary is\n # a valid value in the auths config.\n # https://github.com/docker/compose/issues/3265\n log.debug(\n 'Auth data for {0} is absent. Client might be using a '\n 'credentials store instead.'\n )\n conf[registry] = {}\n continue\n\n username, password = decode_auth(entry['auth'])\n log.debug(\n 'Found entry (registry={0}, username={1})'\n .format(repr(registry), repr(username))\n )\n\n conf[registry] = {\n 'username': username,\n 'password': password,\n 'email': entry.get('email'),\n 'serveraddress': registry,\n }\n return conf\n\n\ndef find_config_file(config_path=None):\n paths = list(filter(None, [\n config_path, # 1\n config_path_from_environment(), # 2\n os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3\n os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4\n ]))\n\n log.debug(\"Trying paths: {0}\".format(repr(paths)))\n\n for path in paths:\n if os.path.exists(path):\n log.debug(\"Found file at path: {0}\".format(path))\n return path\n\n log.debug(\"No config file found\")\n\n return None\n\n\ndef config_path_from_environment():\n config_dir = os.environ.get('DOCKER_CONFIG')\n if not config_dir:\n return None\n return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))\n\n\ndef home_dir():\n \"\"\"\n Get the user's home directory, using the same logic as the Docker Engine\n client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.\n \"\"\"\n if IS_WINDOWS_PLATFORM:\n return os.environ.get('USERPROFILE', '')\n else:\n return os.path.expanduser('~')\n\n\ndef load_config(config_path=None):\n \"\"\"\n Loads authentication data from a Docker configuration file in the given\n root directory or if config_path is passed use given path.\n Lookup priority:\n explicit config_path parameter > DOCKER_CONFIG environment variable >\n ~/.docker/config.json > ~/.dockercfg\n \"\"\"\n config_file = find_config_file(config_path)\n\n if not config_file:\n return {}\n\n try:\n with open(config_file) as f:\n data = json.load(f)\n res = {}\n if data.get('auths'):\n log.debug(\"Found 'auths' section\")\n res.update(parse_auth(data['auths'], raise_on_error=True))\n if data.get('HttpHeaders'):\n log.debug(\"Found 'HttpHeaders' section\")\n res.update({'HttpHeaders': data['HttpHeaders']})\n if data.get('credsStore'):\n log.debug(\"Found 'credsStore' section\")\n res.update({'credsStore': data['credsStore']})\n if data.get('credHelpers'):\n log.debug(\"Found 'credHelpers' section\")\n res.update({'credHelpers': data['credHelpers']})\n if res:\n return res\n else:\n log.debug(\"Couldn't find 'auths' or 'HttpHeaders' sections\")\n f.seek(0)\n return parse_auth(json.load(f))\n except (IOError, KeyError, ValueError) as e:\n # Likely missing new Docker config file or it's in an\n # unknown format, continue to attempt to read old location\n # and format.\n log.debug(e)\n\n log.debug(\"Attempting to parse legacy auth file format\")\n try:\n data = []\n with open(config_file) as f:\n for line in f.readlines():\n 
data.append(line.strip().split(' = ')[1])\n if len(data) < 2:\n # Not enough data\n raise errors.InvalidConfigFile(\n 'Invalid or empty configuration file!'\n )\n\n username, password = decode_auth(data[0])\n return {\n INDEX_NAME: {\n 'username': username,\n 'password': password,\n 'email': data[1],\n 'serveraddress': INDEX_URL,\n }\n }\n except Exception as e:\n log.debug(e)\n pass\n\n log.debug(\"All parsing attempts failed - returning empty config\")\n return {}\n", "path": "docker/auth.py"}], "after_files": [{"content": "import base64\nimport json\nimport logging\nimport os\n\nimport dockerpycreds\nimport six\n\nfrom . import errors\nfrom .constants import IS_WINDOWS_PLATFORM\n\nINDEX_NAME = 'docker.io'\nINDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)\nDOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')\nLEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'\nTOKEN_USERNAME = '<token>'\n\nlog = logging.getLogger(__name__)\n\n\ndef resolve_repository_name(repo_name):\n if '://' in repo_name:\n raise errors.InvalidRepository(\n 'Repository name cannot contain a scheme ({0})'.format(repo_name)\n )\n\n index_name, remote_name = split_repo_name(repo_name)\n if index_name[0] == '-' or index_name[-1] == '-':\n raise errors.InvalidRepository(\n 'Invalid index name ({0}). Cannot begin or end with a'\n ' hyphen.'.format(index_name)\n )\n return resolve_index_name(index_name), remote_name\n\n\ndef resolve_index_name(index_name):\n index_name = convert_to_hostname(index_name)\n if index_name == 'index.' + INDEX_NAME:\n index_name = INDEX_NAME\n return index_name\n\n\ndef get_config_header(client, registry):\n log.debug('Looking for auth config')\n if not client._auth_configs:\n log.debug(\n \"No auth config in memory - loading from filesystem\"\n )\n client._auth_configs = load_config()\n authcfg = resolve_authconfig(client._auth_configs, registry)\n # Do not fail here if no authentication exists for this\n # specific registry as we can have a readonly pull. Just\n # put the header if we can.\n if authcfg:\n log.debug('Found auth config')\n # auth_config needs to be a dict in the format used by\n # auth.py username , password, serveraddress, email\n return encode_header(authcfg)\n log.debug('No auth config found')\n return None\n\n\ndef split_repo_name(repo_name):\n parts = repo_name.split('/', 1)\n if len(parts) == 1 or (\n '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'\n ):\n # This is a docker index repo (ex: username/foobar or ubuntu)\n return INDEX_NAME, repo_name\n return tuple(parts)\n\n\ndef get_credential_store(authconfig, registry):\n if not registry or registry == INDEX_NAME:\n registry = 'https://index.docker.io/v1/'\n\n return authconfig.get('credHelpers', {}).get(registry) or authconfig.get(\n 'credsStore'\n )\n\n\ndef resolve_authconfig(authconfig, registry=None):\n \"\"\"\n Returns the authentication data from the given auth configuration for a\n specific registry. 
As with the Docker client, legacy entries in the config\n with full URLs are stripped down to hostnames before checking for a match.\n Returns None if no match was found.\n \"\"\"\n\n if 'credHelpers' in authconfig or 'credsStore' in authconfig:\n store_name = get_credential_store(authconfig, registry)\n if store_name is not None:\n log.debug(\n 'Using credentials store \"{0}\"'.format(store_name)\n )\n return _resolve_authconfig_credstore(\n authconfig, registry, store_name\n )\n\n # Default to the public index server\n registry = resolve_index_name(registry) if registry else INDEX_NAME\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n\n if registry in authconfig:\n log.debug(\"Found {0}\".format(repr(registry)))\n return authconfig[registry]\n\n for key, config in six.iteritems(authconfig):\n if resolve_index_name(key) == registry:\n log.debug(\"Found {0}\".format(repr(key)))\n return config\n\n log.debug(\"No entry found\")\n return None\n\n\ndef _resolve_authconfig_credstore(authconfig, registry, credstore_name):\n if not registry or registry == INDEX_NAME:\n # The ecosystem is a little schizophrenic with index.docker.io VS\n # docker.io - in that case, it seems the full URL is necessary.\n registry = INDEX_URL\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n store = dockerpycreds.Store(credstore_name)\n try:\n data = store.get(registry)\n res = {\n 'ServerAddress': registry,\n }\n if data['Username'] == TOKEN_USERNAME:\n res['IdentityToken'] = data['Secret']\n else:\n res.update({\n 'Username': data['Username'],\n 'Password': data['Secret'],\n })\n return res\n except dockerpycreds.CredentialsNotFound as e:\n log.debug('No entry found')\n return None\n except dockerpycreds.StoreError as e:\n raise errors.DockerException(\n 'Credentials store error: {0}'.format(repr(e))\n )\n\n\ndef convert_to_hostname(url):\n return url.replace('http://', '').replace('https://', '').split('/', 1)[0]\n\n\ndef decode_auth(auth):\n if isinstance(auth, six.string_types):\n auth = auth.encode('ascii')\n s = base64.b64decode(auth)\n login, pwd = s.split(b':', 1)\n return login.decode('utf8'), pwd.decode('utf8')\n\n\ndef encode_header(auth):\n auth_json = json.dumps(auth).encode('ascii')\n return base64.urlsafe_b64encode(auth_json)\n\n\ndef parse_auth(entries, raise_on_error=False):\n \"\"\"\n Parses authentication entries\n\n Args:\n entries: Dict of authentication entries.\n raise_on_error: If set to true, an invalid format will raise\n InvalidConfigFile\n\n Returns:\n Authentication registry.\n \"\"\"\n\n conf = {}\n for registry, entry in six.iteritems(entries):\n if not isinstance(entry, dict):\n log.debug(\n 'Config entry for key {0} is not auth config'.format(registry)\n )\n # We sometimes fall back to parsing the whole config as if it was\n # the auth config by itself, for legacy purposes. 
In that case, we\n # fail silently and return an empty conf if any of the keys is not\n # formatted properly.\n if raise_on_error:\n raise errors.InvalidConfigFile(\n 'Invalid configuration for registry {0}'.format(registry)\n )\n return {}\n if 'identitytoken' in entry:\n log.debug('Found an IdentityToken entry for registry {0}'.format(\n registry\n ))\n conf[registry] = {\n 'IdentityToken': entry['identitytoken']\n }\n continue # Other values are irrelevant if we have a token, skip.\n\n if 'auth' not in entry:\n # Starting with engine v1.11 (API 1.23), an empty dictionary is\n # a valid value in the auths config.\n # https://github.com/docker/compose/issues/3265\n log.debug(\n 'Auth data for {0} is absent. Client might be using a '\n 'credentials store instead.'.format(registry)\n )\n conf[registry] = {}\n continue\n\n username, password = decode_auth(entry['auth'])\n log.debug(\n 'Found entry (registry={0}, username={1})'\n .format(repr(registry), repr(username))\n )\n\n conf[registry] = {\n 'username': username,\n 'password': password,\n 'email': entry.get('email'),\n 'serveraddress': registry,\n }\n return conf\n\n\ndef find_config_file(config_path=None):\n paths = list(filter(None, [\n config_path, # 1\n config_path_from_environment(), # 2\n os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3\n os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4\n ]))\n\n log.debug(\"Trying paths: {0}\".format(repr(paths)))\n\n for path in paths:\n if os.path.exists(path):\n log.debug(\"Found file at path: {0}\".format(path))\n return path\n\n log.debug(\"No config file found\")\n\n return None\n\n\ndef config_path_from_environment():\n config_dir = os.environ.get('DOCKER_CONFIG')\n if not config_dir:\n return None\n return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))\n\n\ndef home_dir():\n \"\"\"\n Get the user's home directory, using the same logic as the Docker Engine\n client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.\n \"\"\"\n if IS_WINDOWS_PLATFORM:\n return os.environ.get('USERPROFILE', '')\n else:\n return os.path.expanduser('~')\n\n\ndef load_config(config_path=None):\n \"\"\"\n Loads authentication data from a Docker configuration file in the given\n root directory or if config_path is passed use given path.\n Lookup priority:\n explicit config_path parameter > DOCKER_CONFIG environment variable >\n ~/.docker/config.json > ~/.dockercfg\n \"\"\"\n config_file = find_config_file(config_path)\n\n if not config_file:\n return {}\n\n try:\n with open(config_file) as f:\n data = json.load(f)\n res = {}\n if data.get('auths'):\n log.debug(\"Found 'auths' section\")\n res.update(parse_auth(data['auths'], raise_on_error=True))\n if data.get('HttpHeaders'):\n log.debug(\"Found 'HttpHeaders' section\")\n res.update({'HttpHeaders': data['HttpHeaders']})\n if data.get('credsStore'):\n log.debug(\"Found 'credsStore' section\")\n res.update({'credsStore': data['credsStore']})\n if data.get('credHelpers'):\n log.debug(\"Found 'credHelpers' section\")\n res.update({'credHelpers': data['credHelpers']})\n if res:\n return res\n else:\n log.debug(\"Couldn't find 'auths' or 'HttpHeaders' sections\")\n f.seek(0)\n return parse_auth(json.load(f))\n except (IOError, KeyError, ValueError) as e:\n # Likely missing new Docker config file or it's in an\n # unknown format, continue to attempt to read old location\n # and format.\n log.debug(e)\n\n log.debug(\"Attempting to parse legacy auth file format\")\n try:\n data = []\n with open(config_file) as f:\n for line in 
f.readlines():\n data.append(line.strip().split(' = ')[1])\n if len(data) < 2:\n # Not enough data\n raise errors.InvalidConfigFile(\n 'Invalid or empty configuration file!'\n )\n\n username, password = decode_auth(data[0])\n return {\n INDEX_NAME: {\n 'username': username,\n 'password': password,\n 'email': data[1],\n 'serveraddress': INDEX_URL,\n }\n }\n except Exception as e:\n log.debug(e)\n pass\n\n log.debug(\"All parsing attempts failed - returning empty config\")\n return {}\n", "path": "docker/auth.py"}]} | 3,644 | 105 |
gh_patches_debug_36993 | rasdani/github-patches | git_diff | sanic-org__sanic-917 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sanic exceptions
How are Sanic exceptions supposed to work? The docs state that
> Exceptions can be thrown from within request handlers and will automatically be handled by Sanic. Exceptions take a message as their first argument, and can also take a status code to be passed back in the HTTP response.
This is my route
```python
@app.route("/")
async def test(request):
abort(401)
```
When I make a request on that path I get this response:

> Internal Server Error
> The server encountered an internal error and cannot complete your request.
`2017-08-24 10:18:43 - (sanic)[ERROR]: Traceback (most recent call last):
File "/home/nikos/.virtualenvs/3.6/lib/python3.6/site-packages/sanic/app.py", line 503, in handle_request
response = await response
File "/home/nikos/Desktop/Side Projects/micro/test2.py", line 15, in test
abort(401)
File "/home/nikos/.virtualenvs/3.6/lib/python3.6/site-packages/sanic/exceptions.py", line 262, in abort
raise sanic_exception(message=message, status_code=status_code)
TypeError: __init__() missing 1 required positional argument: 'scheme'`
Also after a bit the connection times out and the log trace is
`2017-08-24 10:18:43 - (network)[INFO][127.0.0.1:34734]: GET http://0.0.0.0:8001/ 500 144
2017-08-24 10:19:43 - (sanic)[ERROR]: Traceback (most recent call last):
File "/home/nikos/.virtualenvs/3.6/lib/python3.6/site-packages/sanic/server.py", line 143, in connection_timeout
raise RequestTimeout('Request Timeout')
sanic.exceptions.RequestTimeout: Request Timeout`
--- END ISSUE ---
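Reading the traceback against the `abort()` helper in `sanic/exceptions.py` (listed below) already localizes the failure: `abort(401)` resolves the status code to the registered `Unauthorized` class and instantiates it with only `message` and `status_code`, while that class currently requires a positional `scheme`. A minimal, self-contained reproduction of just that call (outside any running app; the message string here is arbitrary) might look like:

```python
from sanic.exceptions import Unauthorized

# abort(401) effectively performs this call. Unauthorized.__init__ is
# (self, message, scheme, **kwargs), so 'scheme' is missing and
# 'status_code' falls into **kwargs -- hence the TypeError from the issue:
# "__init__() missing 1 required positional argument: 'scheme'"
Unauthorized(message="Unauthorized", status_code=401)
```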
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sanic/exceptions.py`
Content:
```
1 from sanic.response import ALL_STATUS_CODES, COMMON_STATUS_CODES
2
3 TRACEBACK_STYLE = '''
4 <style>
5 body {
6 padding: 20px;
7 font-family: Arial, sans-serif;
8 }
9
10 p {
11 margin: 0;
12 }
13
14 .summary {
15 padding: 10px;
16 }
17
18 h1 {
19 margin-bottom: 0;
20 }
21
22 h3 {
23 margin-top: 10px;
24 }
25
26 h3 code {
27 font-size: 24px;
28 }
29
30 .frame-line > * {
31 padding: 5px 10px;
32 }
33
34 .frame-line {
35 margin-bottom: 5px;
36 }
37
38 .frame-code {
39 font-size: 16px;
40 padding-left: 30px;
41 }
42
43 .tb-wrapper {
44 border: 1px solid #f3f3f3;
45 }
46
47 .tb-header {
48 background-color: #f3f3f3;
49 padding: 5px 10px;
50 }
51
52 .tb-border {
53 padding-top: 20px;
54 }
55
56 .frame-descriptor {
57 background-color: #e2eafb;
58 }
59
60 .frame-descriptor {
61 font-size: 14px;
62 }
63 </style>
64 '''
65
66 TRACEBACK_WRAPPER_HTML = '''
67 <html>
68 <head>
69 {style}
70 </head>
71 <body>
72 {inner_html}
73 <div class="summary">
74 <p>
75 <b>{exc_name}: {exc_value}</b>
76 while handling path <code>{path}</code>
77 </p>
78 </div>
79 </body>
80 </html>
81 '''
82
83 TRACEBACK_WRAPPER_INNER_HTML = '''
84 <h1>{exc_name}</h1>
85 <h3><code>{exc_value}</code></h3>
86 <div class="tb-wrapper">
87 <p class="tb-header">Traceback (most recent call last):</p>
88 {frame_html}
89 </div>
90 '''
91
92 TRACEBACK_BORDER = '''
93 <div class="tb-border">
94 <b><i>
95 The above exception was the direct cause of the
96 following exception:
97 </i></b>
98 </div>
99 '''
100
101 TRACEBACK_LINE_HTML = '''
102 <div class="frame-line">
103 <p class="frame-descriptor">
104 File {0.filename}, line <i>{0.lineno}</i>,
105 in <code><b>{0.name}</b></code>
106 </p>
107 <p class="frame-code"><code>{0.line}</code></p>
108 </div>
109 '''
110
111 INTERNAL_SERVER_ERROR_HTML = '''
112 <h1>Internal Server Error</h1>
113 <p>
114 The server encountered an internal error and cannot complete
115 your request.
116 </p>
117 '''
118
119
120 _sanic_exceptions = {}
121
122
123 def add_status_code(code):
124 """
125 Decorator used for adding exceptions to _sanic_exceptions.
126 """
127 def class_decorator(cls):
128 cls.status_code = code
129 _sanic_exceptions[code] = cls
130 return cls
131 return class_decorator
132
133
134 class SanicException(Exception):
135
136 def __init__(self, message, status_code=None):
137 super().__init__(message)
138
139 if status_code is not None:
140 self.status_code = status_code
141
142
143 @add_status_code(404)
144 class NotFound(SanicException):
145 pass
146
147
148 @add_status_code(400)
149 class InvalidUsage(SanicException):
150 pass
151
152
153 @add_status_code(500)
154 class ServerError(SanicException):
155 pass
156
157
158 class URLBuildError(ServerError):
159 pass
160
161
162 class FileNotFound(NotFound):
163 pass
164
165 def __init__(self, message, path, relative_url):
166 super().__init__(message)
167 self.path = path
168 self.relative_url = relative_url
169
170
171 @add_status_code(408)
172 class RequestTimeout(SanicException):
173 pass
174
175
176 @add_status_code(413)
177 class PayloadTooLarge(SanicException):
178 pass
179
180
181 class HeaderNotFound(InvalidUsage):
182 pass
183
184
185 @add_status_code(416)
186 class ContentRangeError(SanicException):
187 pass
188
189 def __init__(self, message, content_range):
190 super().__init__(message)
191 self.headers = {
192 'Content-Type': 'text/plain',
193 "Content-Range": "bytes */%s" % (content_range.total,)
194 }
195
196
197 @add_status_code(403)
198 class Forbidden(SanicException):
199 pass
200
201
202 class InvalidRangeType(ContentRangeError):
203 pass
204
205
206 @add_status_code(401)
207 class Unauthorized(SanicException):
208 """
209 Unauthorized exception (401 HTTP status code).
210
211 :param message: Message describing the exception.
212 :param scheme: Name of the authentication scheme to be used.
213
214 When present, kwargs is used to complete the WWW-Authentication header.
215
216 Examples::
217
218 # With a Basic auth-scheme, realm MUST be present:
219 raise Unauthorized("Auth required.", "Basic", realm="Restricted Area")
220
221 # With a Digest auth-scheme, things are a bit more complicated:
222 raise Unauthorized("Auth required.",
223 "Digest",
224 realm="Restricted Area",
225 qop="auth, auth-int",
226 algorithm="MD5",
227 nonce="abcdef",
228 opaque="zyxwvu")
229
230 # With a Bearer auth-scheme, realm is optional so you can write:
231 raise Unauthorized("Auth required.", "Bearer")
232
233 # or, if you want to specify the realm:
234 raise Unauthorized("Auth required.", "Bearer", realm="Restricted Area")
235 """
236 def __init__(self, message, scheme, **kwargs):
237 super().__init__(message)
238
239 values = ["{!s}={!r}".format(k, v) for k, v in kwargs.items()]
240 challenge = ', '.join(values)
241
242 self.headers = {
243 "WWW-Authenticate": "{} {}".format(scheme, challenge).rstrip()
244 }
245
246
247 def abort(status_code, message=None):
248 """
249 Raise an exception based on SanicException. Returns the HTTP response
250 message appropriate for the given status code, unless provided.
251
252 :param status_code: The HTTP status code to return.
253 :param message: The HTTP response body. Defaults to the messages
254 in response.py for the given status code.
255 """
256 if message is None:
257 message = COMMON_STATUS_CODES.get(status_code,
258 ALL_STATUS_CODES.get(status_code))
259 # These are stored as bytes in the STATUS_CODES dict
260 message = message.decode('utf8')
261 sanic_exception = _sanic_exceptions.get(status_code, SanicException)
262 raise sanic_exception(message=message, status_code=status_code)
263
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sanic/exceptions.py b/sanic/exceptions.py
--- a/sanic/exceptions.py
+++ b/sanic/exceptions.py
@@ -209,6 +209,7 @@
Unauthorized exception (401 HTTP status code).
:param message: Message describing the exception.
+ :param status_code: HTTP Status code.
:param scheme: Name of the authentication scheme to be used.
When present, kwargs is used to complete the WWW-Authentication header.
@@ -216,11 +217,13 @@
Examples::
# With a Basic auth-scheme, realm MUST be present:
- raise Unauthorized("Auth required.", "Basic", realm="Restricted Area")
+ raise Unauthorized("Auth required.",
+ scheme="Basic",
+ realm="Restricted Area")
# With a Digest auth-scheme, things are a bit more complicated:
raise Unauthorized("Auth required.",
- "Digest",
+ scheme="Digest",
realm="Restricted Area",
qop="auth, auth-int",
algorithm="MD5",
@@ -228,20 +231,24 @@
opaque="zyxwvu")
# With a Bearer auth-scheme, realm is optional so you can write:
- raise Unauthorized("Auth required.", "Bearer")
+ raise Unauthorized("Auth required.", scheme="Bearer")
# or, if you want to specify the realm:
- raise Unauthorized("Auth required.", "Bearer", realm="Restricted Area")
+ raise Unauthorized("Auth required.",
+ scheme="Bearer",
+ realm="Restricted Area")
"""
- def __init__(self, message, scheme, **kwargs):
- super().__init__(message)
+ def __init__(self, message, status_code=None, scheme=None, **kwargs):
+ super().__init__(message, status_code)
- values = ["{!s}={!r}".format(k, v) for k, v in kwargs.items()]
- challenge = ', '.join(values)
+ # if auth-scheme is specified, set "WWW-Authenticate" header
+ if scheme is not None:
+ values = ["{!s}={!r}".format(k, v) for k, v in kwargs.items()]
+ challenge = ', '.join(values)
- self.headers = {
- "WWW-Authenticate": "{} {}".format(scheme, challenge).rstrip()
- }
+ self.headers = {
+ "WWW-Authenticate": "{} {}".format(scheme, challenge).rstrip()
+ }
def abort(status_code, message=None):
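
With this patch, `Unauthorized` accepts the same `(message, status_code)` signature as every other `SanicException`, and only builds the `WWW-Authenticate` header when a `scheme` is passed, so both the failing `abort(401)` call and the documented challenge-style usage work. A small usage sketch of the patched behaviour (the app and route names below are illustrative, not part of the patch):

```python
from sanic import Sanic
from sanic.exceptions import Unauthorized, abort

app = Sanic(__name__)

@app.route("/plain")
async def plain(request):
    # Previously raised TypeError; with the patch this returns a 401 response.
    abort(401)

@app.route("/challenge")
async def challenge(request):
    # Passing a scheme still produces the WWW-Authenticate header as before.
    raise Unauthorized("Auth required.", scheme="Bearer", realm="Restricted Area")
```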
| {"golden_diff": "diff --git a/sanic/exceptions.py b/sanic/exceptions.py\n--- a/sanic/exceptions.py\n+++ b/sanic/exceptions.py\n@@ -209,6 +209,7 @@\n Unauthorized exception (401 HTTP status code).\n \n :param message: Message describing the exception.\n+ :param status_code: HTTP Status code.\n :param scheme: Name of the authentication scheme to be used.\n \n When present, kwargs is used to complete the WWW-Authentication header.\n@@ -216,11 +217,13 @@\n Examples::\n \n # With a Basic auth-scheme, realm MUST be present:\n- raise Unauthorized(\"Auth required.\", \"Basic\", realm=\"Restricted Area\")\n+ raise Unauthorized(\"Auth required.\",\n+ scheme=\"Basic\",\n+ realm=\"Restricted Area\")\n \n # With a Digest auth-scheme, things are a bit more complicated:\n raise Unauthorized(\"Auth required.\",\n- \"Digest\",\n+ scheme=\"Digest\",\n realm=\"Restricted Area\",\n qop=\"auth, auth-int\",\n algorithm=\"MD5\",\n@@ -228,20 +231,24 @@\n opaque=\"zyxwvu\")\n \n # With a Bearer auth-scheme, realm is optional so you can write:\n- raise Unauthorized(\"Auth required.\", \"Bearer\")\n+ raise Unauthorized(\"Auth required.\", scheme=\"Bearer\")\n \n # or, if you want to specify the realm:\n- raise Unauthorized(\"Auth required.\", \"Bearer\", realm=\"Restricted Area\")\n+ raise Unauthorized(\"Auth required.\",\n+ scheme=\"Bearer\",\n+ realm=\"Restricted Area\")\n \"\"\"\n- def __init__(self, message, scheme, **kwargs):\n- super().__init__(message)\n+ def __init__(self, message, status_code=None, scheme=None, **kwargs):\n+ super().__init__(message, status_code)\n \n- values = [\"{!s}={!r}\".format(k, v) for k, v in kwargs.items()]\n- challenge = ', '.join(values)\n+ # if auth-scheme is specified, set \"WWW-Authenticate\" header\n+ if scheme is not None:\n+ values = [\"{!s}={!r}\".format(k, v) for k, v in kwargs.items()]\n+ challenge = ', '.join(values)\n \n- self.headers = {\n- \"WWW-Authenticate\": \"{} {}\".format(scheme, challenge).rstrip()\n- }\n+ self.headers = {\n+ \"WWW-Authenticate\": \"{} {}\".format(scheme, challenge).rstrip()\n+ }\n \n \n def abort(status_code, message=None):\n", "issue": "Sanic exceptions\nHow does sanic exceptions are supposed to work? The docs states that \r\n\r\n> Exceptions can be thrown from within request handlers and will automatically be handled by Sanic. 
Exceptions take a message as their first argument, and can also take a status code to be passed back in the HTTP response.\r\n\r\nThis is my route\r\n```python\r\[email protected](\"/\")\r\nasync def test(request):\r\n abort(401)\r\n```\r\n\r\nWhen I make a request on the path I get a response of : \r\n\r\n>Internal Server Error\r\nThe server encountered an internal error and cannot complete your request.\r\n\r\n`2017-08-24 10:18:43 - (sanic)[ERROR]: Traceback (most recent call last):\r\n File \"/home/nikos/.virtualenvs/3.6/lib/python3.6/site-packages/sanic/app.py\", line 503, in handle_request\r\n response = await response\r\n File \"/home/nikos/Desktop/Side Projects/micro/test2.py\", line 15, in test\r\n abort(401)\r\n File \"/home/nikos/.virtualenvs/3.6/lib/python3.6/site-packages/sanic/exceptions.py\", line 262, in abort\r\n raise sanic_exception(message=message, status_code=status_code)\r\nTypeError: __init__() missing 1 required positional argument: 'scheme'`\r\n\r\nAlso after a bit the connection times out and the log trace is \r\n\r\n`2017-08-24 10:18:43 - (network)[INFO][127.0.0.1:34734]: GET http://0.0.0.0:8001/ 500 144\r\n2017-08-24 10:19:43 - (sanic)[ERROR]: Traceback (most recent call last):\r\n File \"/home/nikos/.virtualenvs/3.6/lib/python3.6/site-packages/sanic/server.py\", line 143, in connection_timeout\r\n raise RequestTimeout('Request Timeout')\r\nsanic.exceptions.RequestTimeout: Request Timeout`\n", "before_files": [{"content": "from sanic.response import ALL_STATUS_CODES, COMMON_STATUS_CODES\n\nTRACEBACK_STYLE = '''\n <style>\n body {\n padding: 20px;\n font-family: Arial, sans-serif;\n }\n\n p {\n margin: 0;\n }\n\n .summary {\n padding: 10px;\n }\n\n h1 {\n margin-bottom: 0;\n }\n\n h3 {\n margin-top: 10px;\n }\n\n h3 code {\n font-size: 24px;\n }\n\n .frame-line > * {\n padding: 5px 10px;\n }\n\n .frame-line {\n margin-bottom: 5px;\n }\n\n .frame-code {\n font-size: 16px;\n padding-left: 30px;\n }\n\n .tb-wrapper {\n border: 1px solid #f3f3f3;\n }\n\n .tb-header {\n background-color: #f3f3f3;\n padding: 5px 10px;\n }\n\n .tb-border {\n padding-top: 20px;\n }\n\n .frame-descriptor {\n background-color: #e2eafb;\n }\n\n .frame-descriptor {\n font-size: 14px;\n }\n </style>\n'''\n\nTRACEBACK_WRAPPER_HTML = '''\n <html>\n <head>\n {style}\n </head>\n <body>\n {inner_html}\n <div class=\"summary\">\n <p>\n <b>{exc_name}: {exc_value}</b>\n while handling path <code>{path}</code>\n </p>\n </div>\n </body>\n </html>\n'''\n\nTRACEBACK_WRAPPER_INNER_HTML = '''\n <h1>{exc_name}</h1>\n <h3><code>{exc_value}</code></h3>\n <div class=\"tb-wrapper\">\n <p class=\"tb-header\">Traceback (most recent call last):</p>\n {frame_html}\n </div>\n'''\n\nTRACEBACK_BORDER = '''\n <div class=\"tb-border\">\n <b><i>\n The above exception was the direct cause of the\n following exception:\n </i></b>\n </div>\n'''\n\nTRACEBACK_LINE_HTML = '''\n <div class=\"frame-line\">\n <p class=\"frame-descriptor\">\n File {0.filename}, line <i>{0.lineno}</i>,\n in <code><b>{0.name}</b></code>\n </p>\n <p class=\"frame-code\"><code>{0.line}</code></p>\n </div>\n'''\n\nINTERNAL_SERVER_ERROR_HTML = '''\n <h1>Internal Server Error</h1>\n <p>\n The server encountered an internal error and cannot complete\n your request.\n </p>\n'''\n\n\n_sanic_exceptions = {}\n\n\ndef add_status_code(code):\n \"\"\"\n Decorator used for adding exceptions to _sanic_exceptions.\n \"\"\"\n def class_decorator(cls):\n cls.status_code = code\n _sanic_exceptions[code] = cls\n return cls\n return class_decorator\n\n\nclass 
SanicException(Exception):\n\n def __init__(self, message, status_code=None):\n super().__init__(message)\n\n if status_code is not None:\n self.status_code = status_code\n\n\n@add_status_code(404)\nclass NotFound(SanicException):\n pass\n\n\n@add_status_code(400)\nclass InvalidUsage(SanicException):\n pass\n\n\n@add_status_code(500)\nclass ServerError(SanicException):\n pass\n\n\nclass URLBuildError(ServerError):\n pass\n\n\nclass FileNotFound(NotFound):\n pass\n\n def __init__(self, message, path, relative_url):\n super().__init__(message)\n self.path = path\n self.relative_url = relative_url\n\n\n@add_status_code(408)\nclass RequestTimeout(SanicException):\n pass\n\n\n@add_status_code(413)\nclass PayloadTooLarge(SanicException):\n pass\n\n\nclass HeaderNotFound(InvalidUsage):\n pass\n\n\n@add_status_code(416)\nclass ContentRangeError(SanicException):\n pass\n\n def __init__(self, message, content_range):\n super().__init__(message)\n self.headers = {\n 'Content-Type': 'text/plain',\n \"Content-Range\": \"bytes */%s\" % (content_range.total,)\n }\n\n\n@add_status_code(403)\nclass Forbidden(SanicException):\n pass\n\n\nclass InvalidRangeType(ContentRangeError):\n pass\n\n\n@add_status_code(401)\nclass Unauthorized(SanicException):\n \"\"\"\n Unauthorized exception (401 HTTP status code).\n\n :param message: Message describing the exception.\n :param scheme: Name of the authentication scheme to be used.\n\n When present, kwargs is used to complete the WWW-Authentication header.\n\n Examples::\n\n # With a Basic auth-scheme, realm MUST be present:\n raise Unauthorized(\"Auth required.\", \"Basic\", realm=\"Restricted Area\")\n\n # With a Digest auth-scheme, things are a bit more complicated:\n raise Unauthorized(\"Auth required.\",\n \"Digest\",\n realm=\"Restricted Area\",\n qop=\"auth, auth-int\",\n algorithm=\"MD5\",\n nonce=\"abcdef\",\n opaque=\"zyxwvu\")\n\n # With a Bearer auth-scheme, realm is optional so you can write:\n raise Unauthorized(\"Auth required.\", \"Bearer\")\n\n # or, if you want to specify the realm:\n raise Unauthorized(\"Auth required.\", \"Bearer\", realm=\"Restricted Area\")\n \"\"\"\n def __init__(self, message, scheme, **kwargs):\n super().__init__(message)\n\n values = [\"{!s}={!r}\".format(k, v) for k, v in kwargs.items()]\n challenge = ', '.join(values)\n\n self.headers = {\n \"WWW-Authenticate\": \"{} {}\".format(scheme, challenge).rstrip()\n }\n\n\ndef abort(status_code, message=None):\n \"\"\"\n Raise an exception based on SanicException. Returns the HTTP response\n message appropriate for the given status code, unless provided.\n\n :param status_code: The HTTP status code to return.\n :param message: The HTTP response body. 
Defaults to the messages\n in response.py for the given status code.\n \"\"\"\n if message is None:\n message = COMMON_STATUS_CODES.get(status_code,\n ALL_STATUS_CODES.get(status_code))\n # These are stored as bytes in the STATUS_CODES dict\n message = message.decode('utf8')\n sanic_exception = _sanic_exceptions.get(status_code, SanicException)\n raise sanic_exception(message=message, status_code=status_code)\n", "path": "sanic/exceptions.py"}], "after_files": [{"content": "from sanic.response import ALL_STATUS_CODES, COMMON_STATUS_CODES\n\nTRACEBACK_STYLE = '''\n <style>\n body {\n padding: 20px;\n font-family: Arial, sans-serif;\n }\n\n p {\n margin: 0;\n }\n\n .summary {\n padding: 10px;\n }\n\n h1 {\n margin-bottom: 0;\n }\n\n h3 {\n margin-top: 10px;\n }\n\n h3 code {\n font-size: 24px;\n }\n\n .frame-line > * {\n padding: 5px 10px;\n }\n\n .frame-line {\n margin-bottom: 5px;\n }\n\n .frame-code {\n font-size: 16px;\n padding-left: 30px;\n }\n\n .tb-wrapper {\n border: 1px solid #f3f3f3;\n }\n\n .tb-header {\n background-color: #f3f3f3;\n padding: 5px 10px;\n }\n\n .tb-border {\n padding-top: 20px;\n }\n\n .frame-descriptor {\n background-color: #e2eafb;\n }\n\n .frame-descriptor {\n font-size: 14px;\n }\n </style>\n'''\n\nTRACEBACK_WRAPPER_HTML = '''\n <html>\n <head>\n {style}\n </head>\n <body>\n {inner_html}\n <div class=\"summary\">\n <p>\n <b>{exc_name}: {exc_value}</b>\n while handling path <code>{path}</code>\n </p>\n </div>\n </body>\n </html>\n'''\n\nTRACEBACK_WRAPPER_INNER_HTML = '''\n <h1>{exc_name}</h1>\n <h3><code>{exc_value}</code></h3>\n <div class=\"tb-wrapper\">\n <p class=\"tb-header\">Traceback (most recent call last):</p>\n {frame_html}\n </div>\n'''\n\nTRACEBACK_BORDER = '''\n <div class=\"tb-border\">\n <b><i>\n The above exception was the direct cause of the\n following exception:\n </i></b>\n </div>\n'''\n\nTRACEBACK_LINE_HTML = '''\n <div class=\"frame-line\">\n <p class=\"frame-descriptor\">\n File {0.filename}, line <i>{0.lineno}</i>,\n in <code><b>{0.name}</b></code>\n </p>\n <p class=\"frame-code\"><code>{0.line}</code></p>\n </div>\n'''\n\nINTERNAL_SERVER_ERROR_HTML = '''\n <h1>Internal Server Error</h1>\n <p>\n The server encountered an internal error and cannot complete\n your request.\n </p>\n'''\n\n\n_sanic_exceptions = {}\n\n\ndef add_status_code(code):\n \"\"\"\n Decorator used for adding exceptions to _sanic_exceptions.\n \"\"\"\n def class_decorator(cls):\n cls.status_code = code\n _sanic_exceptions[code] = cls\n return cls\n return class_decorator\n\n\nclass SanicException(Exception):\n\n def __init__(self, message, status_code=None):\n super().__init__(message)\n\n if status_code is not None:\n self.status_code = status_code\n\n\n@add_status_code(404)\nclass NotFound(SanicException):\n pass\n\n\n@add_status_code(400)\nclass InvalidUsage(SanicException):\n pass\n\n\n@add_status_code(500)\nclass ServerError(SanicException):\n pass\n\n\nclass URLBuildError(ServerError):\n pass\n\n\nclass FileNotFound(NotFound):\n pass\n\n def __init__(self, message, path, relative_url):\n super().__init__(message)\n self.path = path\n self.relative_url = relative_url\n\n\n@add_status_code(408)\nclass RequestTimeout(SanicException):\n pass\n\n\n@add_status_code(413)\nclass PayloadTooLarge(SanicException):\n pass\n\n\nclass HeaderNotFound(InvalidUsage):\n pass\n\n\n@add_status_code(416)\nclass ContentRangeError(SanicException):\n pass\n\n def __init__(self, message, content_range):\n super().__init__(message)\n self.headers = {\n 'Content-Type': 'text/plain',\n 
\"Content-Range\": \"bytes */%s\" % (content_range.total,)\n }\n\n\n@add_status_code(403)\nclass Forbidden(SanicException):\n pass\n\n\nclass InvalidRangeType(ContentRangeError):\n pass\n\n\n@add_status_code(401)\nclass Unauthorized(SanicException):\n \"\"\"\n Unauthorized exception (401 HTTP status code).\n\n :param message: Message describing the exception.\n :param status_code: HTTP Status code.\n :param scheme: Name of the authentication scheme to be used.\n\n When present, kwargs is used to complete the WWW-Authentication header.\n\n Examples::\n\n # With a Basic auth-scheme, realm MUST be present:\n raise Unauthorized(\"Auth required.\",\n scheme=\"Basic\",\n realm=\"Restricted Area\")\n\n # With a Digest auth-scheme, things are a bit more complicated:\n raise Unauthorized(\"Auth required.\",\n scheme=\"Digest\",\n realm=\"Restricted Area\",\n qop=\"auth, auth-int\",\n algorithm=\"MD5\",\n nonce=\"abcdef\",\n opaque=\"zyxwvu\")\n\n # With a Bearer auth-scheme, realm is optional so you can write:\n raise Unauthorized(\"Auth required.\", scheme=\"Bearer\")\n\n # or, if you want to specify the realm:\n raise Unauthorized(\"Auth required.\",\n scheme=\"Bearer\",\n realm=\"Restricted Area\")\n \"\"\"\n def __init__(self, message, status_code=None, scheme=None, **kwargs):\n super().__init__(message, status_code)\n\n # if auth-scheme is specified, set \"WWW-Authenticate\" header\n if scheme is not None:\n values = [\"{!s}={!r}\".format(k, v) for k, v in kwargs.items()]\n challenge = ', '.join(values)\n\n self.headers = {\n \"WWW-Authenticate\": \"{} {}\".format(scheme, challenge).rstrip()\n }\n\n\ndef abort(status_code, message=None):\n \"\"\"\n Raise an exception based on SanicException. Returns the HTTP response\n message appropriate for the given status code, unless provided.\n\n :param status_code: The HTTP status code to return.\n :param message: The HTTP response body. Defaults to the messages\n in response.py for the given status code.\n \"\"\"\n if message is None:\n message = COMMON_STATUS_CODES.get(status_code,\n ALL_STATUS_CODES.get(status_code))\n # These are stored as bytes in the STATUS_CODES dict\n message = message.decode('utf8')\n sanic_exception = _sanic_exceptions.get(status_code, SanicException)\n raise sanic_exception(message=message, status_code=status_code)\n", "path": "sanic/exceptions.py"}]} | 2,883 | 565 |
gh_patches_debug_25263 | rasdani/github-patches | git_diff | pallets__werkzeug-2468 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
2.2.0 breaks werkzeug.routing.ValidationError import
```
(venv-test) $ pip install werkzeug
Collecting werkzeug
Downloading Werkzeug-2.2.0-py3-none-any.whl (232 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 232.2/232.2 KB 3.5 MB/s eta 0:00:00
Collecting MarkupSafe>=2.1.1
Using cached MarkupSafe-2.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (25 kB)
Installing collected packages: MarkupSafe, werkzeug
Successfully installed MarkupSafe-2.1.1 werkzeug-2.2.0
(venv-test) $ python
Python 3.10.4 (main, Jun 29 2022, 12:14:53) [GCC 11.2.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> from werkzeug.routing import ValidationError
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ImportError: cannot import name 'ValidationError' from 'werkzeug.routing' (/tmp/venv-test/lib/python3.10/site-packages/werkzeug/routing/__init__.py)
```
This is a documented API for 2.2, e.g. https://werkzeug.palletsprojects.com/en/2.2.x/routing/#custom-converters example code.
Environment:
- Python version: 3.10
- Werkzeug version: 2.2.0
--- END ISSUE ---
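For context, the routing docs the report points to rely on exactly this re-export: a custom converter subclasses `BaseConverter` and raises `ValidationError` from `to_python()` when a value should not match. The converter below is a hedged illustration of that pattern (not the exact docs code); both names must be importable from `werkzeug.routing` for it to work:

```python
from werkzeug.routing import BaseConverter, Map, Rule, ValidationError

class BooleanConverter(BaseConverter):
    """Illustrative converter: maps 'yes'/'no' path segments to bool."""

    def to_python(self, value):
        if value not in ("yes", "no"):
            raise ValidationError()  # signals "this rule does not match"
        return value == "yes"

    def to_url(self, value):
        return "yes" if value else "no"

url_map = Map(
    [Rule("/feature/<bool:active>", endpoint="feature")],
    converters={"bool": BooleanConverter},
)
```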
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/werkzeug/routing/__init__.py`
Content:
```
1 """When it comes to combining multiple controller or view functions
2 (however you want to call them) you need a dispatcher. A simple way
3 would be applying regular expression tests on the ``PATH_INFO`` and
4 calling registered callback functions that return the value then.
5
6 This module implements a much more powerful system than simple regular
7 expression matching because it can also convert values in the URLs and
8 build URLs.
9
10 Here a simple example that creates a URL map for an application with
11 two subdomains (www and kb) and some URL rules:
12
13 .. code-block:: python
14
15 m = Map([
16 # Static URLs
17 Rule('/', endpoint='static/index'),
18 Rule('/about', endpoint='static/about'),
19 Rule('/help', endpoint='static/help'),
20 # Knowledge Base
21 Subdomain('kb', [
22 Rule('/', endpoint='kb/index'),
23 Rule('/browse/', endpoint='kb/browse'),
24 Rule('/browse/<int:id>/', endpoint='kb/browse'),
25 Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
26 ])
27 ], default_subdomain='www')
28
29 If the application doesn't use subdomains it's perfectly fine to not set
30 the default subdomain and not use the `Subdomain` rule factory. The
31 endpoint in the rules can be anything, for example import paths or
32 unique identifiers. The WSGI application can use those endpoints to get the
33 handler for that URL. It doesn't have to be a string at all but it's
34 recommended.
35
36 Now it's possible to create a URL adapter for one of the subdomains and
37 build URLs:
38
39 .. code-block:: python
40
41 c = m.bind('example.com')
42
43 c.build("kb/browse", dict(id=42))
44 'http://kb.example.com/browse/42/'
45
46 c.build("kb/browse", dict())
47 'http://kb.example.com/browse/'
48
49 c.build("kb/browse", dict(id=42, page=3))
50 'http://kb.example.com/browse/42/3'
51
52 c.build("static/about")
53 '/about'
54
55 c.build("static/index", force_external=True)
56 'http://www.example.com/'
57
58 c = m.bind('example.com', subdomain='kb')
59
60 c.build("static/about")
61 'http://www.example.com/about'
62
63 The first argument to bind is the server name *without* the subdomain.
64 Per default it will assume that the script is mounted on the root, but
65 often that's not the case so you can provide the real mount point as
66 second argument:
67
68 .. code-block:: python
69
70 c = m.bind('example.com', '/applications/example')
71
72 The third argument can be the subdomain, if not given the default
73 subdomain is used. For more details about binding have a look at the
74 documentation of the `MapAdapter`.
75
76 And here is how you can match URLs:
77
78 .. code-block:: python
79
80 c = m.bind('example.com')
81
82 c.match("/")
83 ('static/index', {})
84
85 c.match("/about")
86 ('static/about', {})
87
88 c = m.bind('example.com', '/', 'kb')
89
90 c.match("/")
91 ('kb/index', {})
92
93 c.match("/browse/42/23")
94 ('kb/browse', {'id': 42, 'page': 23})
95
96 If matching fails you get a ``NotFound`` exception, if the rule thinks
97 it's a good idea to redirect (for example because the URL was defined
98 to have a slash at the end but the request was missing that slash) it
99 will raise a ``RequestRedirect`` exception. Both are subclasses of
100 ``HTTPException`` so you can use those errors as responses in the
101 application.
102
103 If matching succeeded but the URL rule was incompatible to the given
104 method (for example there were only rules for ``GET`` and ``HEAD`` but
105 routing tried to match a ``POST`` request) a ``MethodNotAllowed``
106 exception is raised.
107 """
108 from .converters import AnyConverter
109 from .converters import BaseConverter
110 from .converters import FloatConverter
111 from .converters import IntegerConverter
112 from .converters import PathConverter
113 from .converters import UnicodeConverter
114 from .converters import UUIDConverter
115 from .exceptions import BuildError
116 from .exceptions import NoMatch
117 from .exceptions import RequestAliasRedirect
118 from .exceptions import RequestPath
119 from .exceptions import RequestRedirect
120 from .exceptions import RoutingException
121 from .exceptions import WebsocketMismatch
122 from .map import Map
123 from .map import MapAdapter
124 from .matcher import StateMachineMatcher
125 from .rules import EndpointPrefix
126 from .rules import parse_converter_args
127 from .rules import Rule
128 from .rules import RuleFactory
129 from .rules import RuleTemplate
130 from .rules import RuleTemplateFactory
131 from .rules import Subdomain
132 from .rules import Submount
133
```
Path: `src/werkzeug/routing/matcher.py`
Content:
```
1 import re
2 import typing as t
3 from dataclasses import dataclass
4 from dataclasses import field
5
6 from .converters import ValidationError
7 from .exceptions import NoMatch
8 from .exceptions import RequestAliasRedirect
9 from .exceptions import RequestPath
10 from .rules import Rule
11 from .rules import RulePart
12
13
14 class SlashRequired(Exception):
15 pass
16
17
18 @dataclass
19 class State:
20 """A representation of a rule state.
21
22 This includes the *rules* that correspond to the state and the
23 possible *static* and *dynamic* transitions to the next state.
24 """
25
26 dynamic: t.List[t.Tuple[RulePart, "State"]] = field(default_factory=list)
27 rules: t.List[Rule] = field(default_factory=list)
28 static: t.Dict[str, "State"] = field(default_factory=dict)
29
30
31 class StateMachineMatcher:
32 def __init__(self, merge_slashes: bool) -> None:
33 self._root = State()
34 self.merge_slashes = merge_slashes
35
36 def add(self, rule: Rule) -> None:
37 state = self._root
38 for part in rule._parts:
39 if part.static:
40 state.static.setdefault(part.content, State())
41 state = state.static[part.content]
42 else:
43 for test_part, new_state in state.dynamic:
44 if test_part == part:
45 state = new_state
46 break
47 else:
48 new_state = State()
49 state.dynamic.append((part, new_state))
50 state = new_state
51 state.rules.append(rule)
52
53 def update(self) -> None:
54 # For every state the dynamic transitions should be sorted by
55 # the weight of the transition
56 state = self._root
57
58 def _update_state(state: State) -> None:
59 state.dynamic.sort(key=lambda entry: entry[0].weight)
60 for new_state in state.static.values():
61 _update_state(new_state)
62 for _, new_state in state.dynamic:
63 _update_state(new_state)
64
65 _update_state(state)
66
67 def match(
68 self, domain: str, path: str, method: str, websocket: bool
69 ) -> t.Tuple[Rule, t.MutableMapping[str, t.Any]]:
70 # To match to a rule we need to start at the root state and
71 # try to follow the transitions until we find a match, or find
72 # there is no transition to follow.
73
74 have_match_for = set()
75 websocket_mismatch = False
76
77 def _match(
78 state: State, parts: t.List[str], values: t.List[str]
79 ) -> t.Optional[t.Tuple[Rule, t.List[str]]]:
80 # This function is meant to be called recursively, and will attempt
81 # to match the head part to the state's transitions.
82 nonlocal have_match_for, websocket_mismatch
83
84 # The base case is when all parts have been matched via
85 # transitions. Hence if there is a rule with methods &
86 # websocket that work return it and the dynamic values
87 # extracted.
88 if parts == []:
89 for rule in state.rules:
90 if rule.methods is not None and method not in rule.methods:
91 have_match_for.update(rule.methods)
92 elif rule.websocket != websocket:
93 websocket_mismatch = True
94 else:
95 return rule, values
96
97 # Test if there is a match with this path with a
98 # trailing slash, if so raise an exception to report
99 # that matching is possible with an additional slash
100 if "" in state.static:
101 for rule in state.static[""].rules:
102 if (
103 rule.strict_slashes
104 and websocket == rule.websocket
105 and (rule.methods is None or method in rule.methods)
106 ):
107 raise SlashRequired()
108 return None
109
110 part = parts[0]
111 # To match this part try the static transitions first
112 if part in state.static:
113 rv = _match(state.static[part], parts[1:], values)
114 if rv is not None:
115 return rv
116 # No match via the static transitions, so try the dynamic
117 # ones.
118 for test_part, new_state in state.dynamic:
119 target = part
120 remaining = parts[1:]
121 # A final part indicates a transition that always
122 # consumes the remaining parts i.e. transitions to a
123 # final state.
124 if test_part.final:
125 target = "/".join(parts)
126 remaining = []
127 match = re.compile(test_part.content).match(target)
128 if match is not None:
129 rv = _match(new_state, remaining, values + list(match.groups()))
130 if rv is not None:
131 return rv
132 return None
133
134 try:
135 rv = _match(self._root, [domain, *path.split("/")], [])
136 except SlashRequired:
137 raise RequestPath(f"{path}/") from None
138
139 if self.merge_slashes and rv is None:
140 # Try to match again, but with slashes merged
141 path = re.sub("/{2,}?", "/", path)
142 try:
143 rv = _match(self._root, [domain, *path.split("/")], [])
144 except SlashRequired:
145 raise RequestPath(f"{path}/") from None
146 if rv is None:
147 raise NoMatch(have_match_for, websocket_mismatch)
148 else:
149 raise RequestPath(f"{path}")
150 elif rv is not None:
151 rule, values = rv
152
153 result = {}
154 for name, value in zip(rule._converters.keys(), values):
155 try:
156 value = rule._converters[name].to_python(value)
157 except ValidationError:
158 raise NoMatch(have_match_for, websocket_mismatch) from None
159 result[str(name)] = value
160 if rule.defaults:
161 result.update(rule.defaults)
162
163 if rule.alias and rule.map.redirect_defaults:
164 raise RequestAliasRedirect(result, rule.endpoint)
165
166 return rule, result
167
168 raise NoMatch(have_match_for, websocket_mismatch)
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/werkzeug/routing/__init__.py b/src/werkzeug/routing/__init__.py
--- a/src/werkzeug/routing/__init__.py
+++ b/src/werkzeug/routing/__init__.py
@@ -112,6 +112,7 @@
from .converters import PathConverter
from .converters import UnicodeConverter
from .converters import UUIDConverter
+from .converters import ValidationError
from .exceptions import BuildError
from .exceptions import NoMatch
from .exceptions import RequestAliasRedirect
diff --git a/src/werkzeug/routing/matcher.py b/src/werkzeug/routing/matcher.py
--- a/src/werkzeug/routing/matcher.py
+++ b/src/werkzeug/routing/matcher.py
@@ -129,6 +129,22 @@
rv = _match(new_state, remaining, values + list(match.groups()))
if rv is not None:
return rv
+
+ # If there is no match and the only part left is a
+ # trailing slash ("") consider rules that aren't
+ # strict-slashes as these should match if there is a final
+ # slash part.
+ if parts == [""]:
+ for rule in state.rules:
+ if rule.strict_slashes:
+ continue
+ if rule.methods is not None and method not in rule.methods:
+ have_match_for.update(rule.methods)
+ elif rule.websocket != websocket:
+ websocket_mismatch = True
+ else:
+ return rule, values
+
return None
try:
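
The first hunk is the part that addresses the reported `ImportError`: it re-exports `ValidationError` from `werkzeug.routing.converters` at the package level. (The second hunk changes the matcher's handling of trailing slashes for non-strict-slash rules, which is not what the traceback in the issue exercises.) A minimal hedged check of the re-export, not taken from the project's test suite:

```python
# Illustrative check only: after the patch the documented import path and the
# defining module expose the same class.
from werkzeug.routing import ValidationError
from werkzeug.routing import converters

assert ValidationError is converters.ValidationError
```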
| {"golden_diff": "diff --git a/src/werkzeug/routing/__init__.py b/src/werkzeug/routing/__init__.py\n--- a/src/werkzeug/routing/__init__.py\n+++ b/src/werkzeug/routing/__init__.py\n@@ -112,6 +112,7 @@\n from .converters import PathConverter\n from .converters import UnicodeConverter\n from .converters import UUIDConverter\n+from .converters import ValidationError\n from .exceptions import BuildError\n from .exceptions import NoMatch\n from .exceptions import RequestAliasRedirect\ndiff --git a/src/werkzeug/routing/matcher.py b/src/werkzeug/routing/matcher.py\n--- a/src/werkzeug/routing/matcher.py\n+++ b/src/werkzeug/routing/matcher.py\n@@ -129,6 +129,22 @@\n rv = _match(new_state, remaining, values + list(match.groups()))\n if rv is not None:\n return rv\n+\n+ # If there is no match and the only part left is a\n+ # trailing slash (\"\") consider rules that aren't\n+ # strict-slashes as these should match if there is a final\n+ # slash part.\n+ if parts == [\"\"]:\n+ for rule in state.rules:\n+ if rule.strict_slashes:\n+ continue\n+ if rule.methods is not None and method not in rule.methods:\n+ have_match_for.update(rule.methods)\n+ elif rule.websocket != websocket:\n+ websocket_mismatch = True\n+ else:\n+ return rule, values\n+\n return None\n \n try:\n", "issue": "2.2.0 breaks werkzeug.routing.ValidationError import\n```\r\n(venv-test) $ pip install werkzeug\r\nCollecting werkzeug\r\n Downloading Werkzeug-2.2.0-py3-none-any.whl (232 kB)\r\n \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 232.2/232.2 KB 3.5 MB/s eta 0:00:00\r\nCollecting MarkupSafe>=2.1.1\r\n Using cached MarkupSafe-2.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (25 kB)\r\nInstalling collected packages: MarkupSafe, werkzeug\r\nSuccessfully installed MarkupSafe-2.1.1 werkzeug-2.2.0\r\n(venv-test) $ python\r\nPython 3.10.4 (main, Jun 29 2022, 12:14:53) [GCC 11.2.0] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> from werkzeug.routing import ValidationError\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\nImportError: cannot import name 'ValidationError' from 'werkzeug.routing' (/tmp/venv-test/lib/python3.10/site-packages/werkzeug/routing/__init__.py)\r\n```\r\n\r\nThis is a documented API for 2.2, e.g. https://werkzeug.palletsprojects.com/en/2.2.x/routing/#custom-converters example code.\r\n\r\nEnvironment:\r\n\r\n- Python version: 3.10\r\n- Werkzeug version: 2.2.0\r\n\n", "before_files": [{"content": "\"\"\"When it comes to combining multiple controller or view functions\n(however you want to call them) you need a dispatcher. A simple way\nwould be applying regular expression tests on the ``PATH_INFO`` and\ncalling registered callback functions that return the value then.\n\nThis module implements a much more powerful system than simple regular\nexpression matching because it can also convert values in the URLs and\nbuild URLs.\n\nHere a simple example that creates a URL map for an application with\ntwo subdomains (www and kb) and some URL rules:\n\n.. 
code-block:: python\n\n m = Map([\n # Static URLs\n Rule('/', endpoint='static/index'),\n Rule('/about', endpoint='static/about'),\n Rule('/help', endpoint='static/help'),\n # Knowledge Base\n Subdomain('kb', [\n Rule('/', endpoint='kb/index'),\n Rule('/browse/', endpoint='kb/browse'),\n Rule('/browse/<int:id>/', endpoint='kb/browse'),\n Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')\n ])\n ], default_subdomain='www')\n\nIf the application doesn't use subdomains it's perfectly fine to not set\nthe default subdomain and not use the `Subdomain` rule factory. The\nendpoint in the rules can be anything, for example import paths or\nunique identifiers. The WSGI application can use those endpoints to get the\nhandler for that URL. It doesn't have to be a string at all but it's\nrecommended.\n\nNow it's possible to create a URL adapter for one of the subdomains and\nbuild URLs:\n\n.. code-block:: python\n\n c = m.bind('example.com')\n\n c.build(\"kb/browse\", dict(id=42))\n 'http://kb.example.com/browse/42/'\n\n c.build(\"kb/browse\", dict())\n 'http://kb.example.com/browse/'\n\n c.build(\"kb/browse\", dict(id=42, page=3))\n 'http://kb.example.com/browse/42/3'\n\n c.build(\"static/about\")\n '/about'\n\n c.build(\"static/index\", force_external=True)\n 'http://www.example.com/'\n\n c = m.bind('example.com', subdomain='kb')\n\n c.build(\"static/about\")\n 'http://www.example.com/about'\n\nThe first argument to bind is the server name *without* the subdomain.\nPer default it will assume that the script is mounted on the root, but\noften that's not the case so you can provide the real mount point as\nsecond argument:\n\n.. code-block:: python\n\n c = m.bind('example.com', '/applications/example')\n\nThe third argument can be the subdomain, if not given the default\nsubdomain is used. For more details about binding have a look at the\ndocumentation of the `MapAdapter`.\n\nAnd here is how you can match URLs:\n\n.. code-block:: python\n\n c = m.bind('example.com')\n\n c.match(\"/\")\n ('static/index', {})\n\n c.match(\"/about\")\n ('static/about', {})\n\n c = m.bind('example.com', '/', 'kb')\n\n c.match(\"/\")\n ('kb/index', {})\n\n c.match(\"/browse/42/23\")\n ('kb/browse', {'id': 42, 'page': 23})\n\nIf matching fails you get a ``NotFound`` exception, if the rule thinks\nit's a good idea to redirect (for example because the URL was defined\nto have a slash at the end but the request was missing that slash) it\nwill raise a ``RequestRedirect`` exception. 
Both are subclasses of\n``HTTPException`` so you can use those errors as responses in the\napplication.\n\nIf matching succeeded but the URL rule was incompatible to the given\nmethod (for example there were only rules for ``GET`` and ``HEAD`` but\nrouting tried to match a ``POST`` request) a ``MethodNotAllowed``\nexception is raised.\n\"\"\"\nfrom .converters import AnyConverter\nfrom .converters import BaseConverter\nfrom .converters import FloatConverter\nfrom .converters import IntegerConverter\nfrom .converters import PathConverter\nfrom .converters import UnicodeConverter\nfrom .converters import UUIDConverter\nfrom .exceptions import BuildError\nfrom .exceptions import NoMatch\nfrom .exceptions import RequestAliasRedirect\nfrom .exceptions import RequestPath\nfrom .exceptions import RequestRedirect\nfrom .exceptions import RoutingException\nfrom .exceptions import WebsocketMismatch\nfrom .map import Map\nfrom .map import MapAdapter\nfrom .matcher import StateMachineMatcher\nfrom .rules import EndpointPrefix\nfrom .rules import parse_converter_args\nfrom .rules import Rule\nfrom .rules import RuleFactory\nfrom .rules import RuleTemplate\nfrom .rules import RuleTemplateFactory\nfrom .rules import Subdomain\nfrom .rules import Submount\n", "path": "src/werkzeug/routing/__init__.py"}, {"content": "import re\nimport typing as t\nfrom dataclasses import dataclass\nfrom dataclasses import field\n\nfrom .converters import ValidationError\nfrom .exceptions import NoMatch\nfrom .exceptions import RequestAliasRedirect\nfrom .exceptions import RequestPath\nfrom .rules import Rule\nfrom .rules import RulePart\n\n\nclass SlashRequired(Exception):\n pass\n\n\n@dataclass\nclass State:\n \"\"\"A representation of a rule state.\n\n This includes the *rules* that correspond to the state and the\n possible *static* and *dynamic* transitions to the next state.\n \"\"\"\n\n dynamic: t.List[t.Tuple[RulePart, \"State\"]] = field(default_factory=list)\n rules: t.List[Rule] = field(default_factory=list)\n static: t.Dict[str, \"State\"] = field(default_factory=dict)\n\n\nclass StateMachineMatcher:\n def __init__(self, merge_slashes: bool) -> None:\n self._root = State()\n self.merge_slashes = merge_slashes\n\n def add(self, rule: Rule) -> None:\n state = self._root\n for part in rule._parts:\n if part.static:\n state.static.setdefault(part.content, State())\n state = state.static[part.content]\n else:\n for test_part, new_state in state.dynamic:\n if test_part == part:\n state = new_state\n break\n else:\n new_state = State()\n state.dynamic.append((part, new_state))\n state = new_state\n state.rules.append(rule)\n\n def update(self) -> None:\n # For every state the dynamic transitions should be sorted by\n # the weight of the transition\n state = self._root\n\n def _update_state(state: State) -> None:\n state.dynamic.sort(key=lambda entry: entry[0].weight)\n for new_state in state.static.values():\n _update_state(new_state)\n for _, new_state in state.dynamic:\n _update_state(new_state)\n\n _update_state(state)\n\n def match(\n self, domain: str, path: str, method: str, websocket: bool\n ) -> t.Tuple[Rule, t.MutableMapping[str, t.Any]]:\n # To match to a rule we need to start at the root state and\n # try to follow the transitions until we find a match, or find\n # there is no transition to follow.\n\n have_match_for = set()\n websocket_mismatch = False\n\n def _match(\n state: State, parts: t.List[str], values: t.List[str]\n ) -> t.Optional[t.Tuple[Rule, t.List[str]]]:\n # This function is meant to be 
called recursively, and will attempt\n # to match the head part to the state's transitions.\n nonlocal have_match_for, websocket_mismatch\n\n # The base case is when all parts have been matched via\n # transitions. Hence if there is a rule with methods &\n # websocket that work return it and the dynamic values\n # extracted.\n if parts == []:\n for rule in state.rules:\n if rule.methods is not None and method not in rule.methods:\n have_match_for.update(rule.methods)\n elif rule.websocket != websocket:\n websocket_mismatch = True\n else:\n return rule, values\n\n # Test if there is a match with this path with a\n # trailing slash, if so raise an exception to report\n # that matching is possible with an additional slash\n if \"\" in state.static:\n for rule in state.static[\"\"].rules:\n if (\n rule.strict_slashes\n and websocket == rule.websocket\n and (rule.methods is None or method in rule.methods)\n ):\n raise SlashRequired()\n return None\n\n part = parts[0]\n # To match this part try the static transitions first\n if part in state.static:\n rv = _match(state.static[part], parts[1:], values)\n if rv is not None:\n return rv\n # No match via the static transitions, so try the dynamic\n # ones.\n for test_part, new_state in state.dynamic:\n target = part\n remaining = parts[1:]\n # A final part indicates a transition that always\n # consumes the remaining parts i.e. transitions to a\n # final state.\n if test_part.final:\n target = \"/\".join(parts)\n remaining = []\n match = re.compile(test_part.content).match(target)\n if match is not None:\n rv = _match(new_state, remaining, values + list(match.groups()))\n if rv is not None:\n return rv\n return None\n\n try:\n rv = _match(self._root, [domain, *path.split(\"/\")], [])\n except SlashRequired:\n raise RequestPath(f\"{path}/\") from None\n\n if self.merge_slashes and rv is None:\n # Try to match again, but with slashes merged\n path = re.sub(\"/{2,}?\", \"/\", path)\n try:\n rv = _match(self._root, [domain, *path.split(\"/\")], [])\n except SlashRequired:\n raise RequestPath(f\"{path}/\") from None\n if rv is None:\n raise NoMatch(have_match_for, websocket_mismatch)\n else:\n raise RequestPath(f\"{path}\")\n elif rv is not None:\n rule, values = rv\n\n result = {}\n for name, value in zip(rule._converters.keys(), values):\n try:\n value = rule._converters[name].to_python(value)\n except ValidationError:\n raise NoMatch(have_match_for, websocket_mismatch) from None\n result[str(name)] = value\n if rule.defaults:\n result.update(rule.defaults)\n\n if rule.alias and rule.map.redirect_defaults:\n raise RequestAliasRedirect(result, rule.endpoint)\n\n return rule, result\n\n raise NoMatch(have_match_for, websocket_mismatch)\n", "path": "src/werkzeug/routing/matcher.py"}], "after_files": [{"content": "\"\"\"When it comes to combining multiple controller or view functions\n(however you want to call them) you need a dispatcher. A simple way\nwould be applying regular expression tests on the ``PATH_INFO`` and\ncalling registered callback functions that return the value then.\n\nThis module implements a much more powerful system than simple regular\nexpression matching because it can also convert values in the URLs and\nbuild URLs.\n\nHere a simple example that creates a URL map for an application with\ntwo subdomains (www and kb) and some URL rules:\n\n.. 
code-block:: python\n\n m = Map([\n # Static URLs\n Rule('/', endpoint='static/index'),\n Rule('/about', endpoint='static/about'),\n Rule('/help', endpoint='static/help'),\n # Knowledge Base\n Subdomain('kb', [\n Rule('/', endpoint='kb/index'),\n Rule('/browse/', endpoint='kb/browse'),\n Rule('/browse/<int:id>/', endpoint='kb/browse'),\n Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')\n ])\n ], default_subdomain='www')\n\nIf the application doesn't use subdomains it's perfectly fine to not set\nthe default subdomain and not use the `Subdomain` rule factory. The\nendpoint in the rules can be anything, for example import paths or\nunique identifiers. The WSGI application can use those endpoints to get the\nhandler for that URL. It doesn't have to be a string at all but it's\nrecommended.\n\nNow it's possible to create a URL adapter for one of the subdomains and\nbuild URLs:\n\n.. code-block:: python\n\n c = m.bind('example.com')\n\n c.build(\"kb/browse\", dict(id=42))\n 'http://kb.example.com/browse/42/'\n\n c.build(\"kb/browse\", dict())\n 'http://kb.example.com/browse/'\n\n c.build(\"kb/browse\", dict(id=42, page=3))\n 'http://kb.example.com/browse/42/3'\n\n c.build(\"static/about\")\n '/about'\n\n c.build(\"static/index\", force_external=True)\n 'http://www.example.com/'\n\n c = m.bind('example.com', subdomain='kb')\n\n c.build(\"static/about\")\n 'http://www.example.com/about'\n\nThe first argument to bind is the server name *without* the subdomain.\nPer default it will assume that the script is mounted on the root, but\noften that's not the case so you can provide the real mount point as\nsecond argument:\n\n.. code-block:: python\n\n c = m.bind('example.com', '/applications/example')\n\nThe third argument can be the subdomain, if not given the default\nsubdomain is used. For more details about binding have a look at the\ndocumentation of the `MapAdapter`.\n\nAnd here is how you can match URLs:\n\n.. code-block:: python\n\n c = m.bind('example.com')\n\n c.match(\"/\")\n ('static/index', {})\n\n c.match(\"/about\")\n ('static/about', {})\n\n c = m.bind('example.com', '/', 'kb')\n\n c.match(\"/\")\n ('kb/index', {})\n\n c.match(\"/browse/42/23\")\n ('kb/browse', {'id': 42, 'page': 23})\n\nIf matching fails you get a ``NotFound`` exception, if the rule thinks\nit's a good idea to redirect (for example because the URL was defined\nto have a slash at the end but the request was missing that slash) it\nwill raise a ``RequestRedirect`` exception. 
Both are subclasses of\n``HTTPException`` so you can use those errors as responses in the\napplication.\n\nIf matching succeeded but the URL rule was incompatible to the given\nmethod (for example there were only rules for ``GET`` and ``HEAD`` but\nrouting tried to match a ``POST`` request) a ``MethodNotAllowed``\nexception is raised.\n\"\"\"\nfrom .converters import AnyConverter\nfrom .converters import BaseConverter\nfrom .converters import FloatConverter\nfrom .converters import IntegerConverter\nfrom .converters import PathConverter\nfrom .converters import UnicodeConverter\nfrom .converters import UUIDConverter\nfrom .converters import ValidationError\nfrom .exceptions import BuildError\nfrom .exceptions import NoMatch\nfrom .exceptions import RequestAliasRedirect\nfrom .exceptions import RequestPath\nfrom .exceptions import RequestRedirect\nfrom .exceptions import RoutingException\nfrom .exceptions import WebsocketMismatch\nfrom .map import Map\nfrom .map import MapAdapter\nfrom .matcher import StateMachineMatcher\nfrom .rules import EndpointPrefix\nfrom .rules import parse_converter_args\nfrom .rules import Rule\nfrom .rules import RuleFactory\nfrom .rules import RuleTemplate\nfrom .rules import RuleTemplateFactory\nfrom .rules import Subdomain\nfrom .rules import Submount\n", "path": "src/werkzeug/routing/__init__.py"}, {"content": "import re\nimport typing as t\nfrom dataclasses import dataclass\nfrom dataclasses import field\n\nfrom .converters import ValidationError\nfrom .exceptions import NoMatch\nfrom .exceptions import RequestAliasRedirect\nfrom .exceptions import RequestPath\nfrom .rules import Rule\nfrom .rules import RulePart\n\n\nclass SlashRequired(Exception):\n pass\n\n\n@dataclass\nclass State:\n \"\"\"A representation of a rule state.\n\n This includes the *rules* that correspond to the state and the\n possible *static* and *dynamic* transitions to the next state.\n \"\"\"\n\n dynamic: t.List[t.Tuple[RulePart, \"State\"]] = field(default_factory=list)\n rules: t.List[Rule] = field(default_factory=list)\n static: t.Dict[str, \"State\"] = field(default_factory=dict)\n\n\nclass StateMachineMatcher:\n def __init__(self, merge_slashes: bool) -> None:\n self._root = State()\n self.merge_slashes = merge_slashes\n\n def add(self, rule: Rule) -> None:\n state = self._root\n for part in rule._parts:\n if part.static:\n state.static.setdefault(part.content, State())\n state = state.static[part.content]\n else:\n for test_part, new_state in state.dynamic:\n if test_part == part:\n state = new_state\n break\n else:\n new_state = State()\n state.dynamic.append((part, new_state))\n state = new_state\n state.rules.append(rule)\n\n def update(self) -> None:\n # For every state the dynamic transitions should be sorted by\n # the weight of the transition\n state = self._root\n\n def _update_state(state: State) -> None:\n state.dynamic.sort(key=lambda entry: entry[0].weight)\n for new_state in state.static.values():\n _update_state(new_state)\n for _, new_state in state.dynamic:\n _update_state(new_state)\n\n _update_state(state)\n\n def match(\n self, domain: str, path: str, method: str, websocket: bool\n ) -> t.Tuple[Rule, t.MutableMapping[str, t.Any]]:\n # To match to a rule we need to start at the root state and\n # try to follow the transitions until we find a match, or find\n # there is no transition to follow.\n\n have_match_for = set()\n websocket_mismatch = False\n\n def _match(\n state: State, parts: t.List[str], values: t.List[str]\n ) -> t.Optional[t.Tuple[Rule, 
t.List[str]]]:\n # This function is meant to be called recursively, and will attempt\n # to match the head part to the state's transitions.\n nonlocal have_match_for, websocket_mismatch\n\n # The base case is when all parts have been matched via\n # transitions. Hence if there is a rule with methods &\n # websocket that work return it and the dynamic values\n # extracted.\n if parts == []:\n for rule in state.rules:\n if rule.methods is not None and method not in rule.methods:\n have_match_for.update(rule.methods)\n elif rule.websocket != websocket:\n websocket_mismatch = True\n else:\n return rule, values\n\n # Test if there is a match with this path with a\n # trailing slash, if so raise an exception to report\n # that matching is possible with an additional slash\n if \"\" in state.static:\n for rule in state.static[\"\"].rules:\n if (\n rule.strict_slashes\n and websocket == rule.websocket\n and (rule.methods is None or method in rule.methods)\n ):\n raise SlashRequired()\n return None\n\n part = parts[0]\n # To match this part try the static transitions first\n if part in state.static:\n rv = _match(state.static[part], parts[1:], values)\n if rv is not None:\n return rv\n # No match via the static transitions, so try the dynamic\n # ones.\n for test_part, new_state in state.dynamic:\n target = part\n remaining = parts[1:]\n # A final part indicates a transition that always\n # consumes the remaining parts i.e. transitions to a\n # final state.\n if test_part.final:\n target = \"/\".join(parts)\n remaining = []\n match = re.compile(test_part.content).match(target)\n if match is not None:\n rv = _match(new_state, remaining, values + list(match.groups()))\n if rv is not None:\n return rv\n\n # If there is no match and the only part left is a\n # trailing slash (\"\") consider rules that aren't\n # strict-slashes as these should match if there is a final\n # slash part.\n if parts == [\"\"]:\n for rule in state.rules:\n if rule.strict_slashes:\n continue\n if rule.methods is not None and method not in rule.methods:\n have_match_for.update(rule.methods)\n elif rule.websocket != websocket:\n websocket_mismatch = True\n else:\n return rule, values\n\n return None\n\n try:\n rv = _match(self._root, [domain, *path.split(\"/\")], [])\n except SlashRequired:\n raise RequestPath(f\"{path}/\") from None\n\n if self.merge_slashes and rv is None:\n # Try to match again, but with slashes merged\n path = re.sub(\"/{2,}?\", \"/\", path)\n try:\n rv = _match(self._root, [domain, *path.split(\"/\")], [])\n except SlashRequired:\n raise RequestPath(f\"{path}/\") from None\n if rv is None:\n raise NoMatch(have_match_for, websocket_mismatch)\n else:\n raise RequestPath(f\"{path}\")\n elif rv is not None:\n rule, values = rv\n\n result = {}\n for name, value in zip(rule._converters.keys(), values):\n try:\n value = rule._converters[name].to_python(value)\n except ValidationError:\n raise NoMatch(have_match_for, websocket_mismatch) from None\n result[str(name)] = value\n if rule.defaults:\n result.update(rule.defaults)\n\n if rule.alias and rule.map.redirect_defaults:\n raise RequestAliasRedirect(result, rule.endpoint)\n\n return rule, result\n\n raise NoMatch(have_match_for, websocket_mismatch)\n", "path": "src/werkzeug/routing/matcher.py"}]} | 3,680 | 348 |
gh_patches_debug_2436 | rasdani/github-patches | git_diff | facebookresearch__CompilerGym-592 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Is running CompilerGym intended to leave cache directories behind?
## ❓ Questions and Help
Not sure if this is a bug or not, so submitting as a question. Running a CompilerGym experiment leaves behind many cache directories. When running a large experiment, this can create problems through the sheer number of directories in `COMPILER_GYM_CACHE`. I expected the `COMPILER_GYM_CACHE` to not have anything after the experiment exited cleanly.
Is there a way to avoid the experiments leaving the directories behind?
## Steps to reproduce
Running the following on my machine leaves behind about 270 cache directories.
```python
import compiler_gym
import compiler_gym.wrappers
from ray import tune
from ray.rllib.agents.ppo import PPOTrainer
def make_env(env_config):
env = compiler_gym.make(env_config['cgym_id'])
env = compiler_gym.wrappers.TimeLimit(env, env_config['timelimit'])
dataset = env.datasets[env_config['dataset']]
env = compiler_gym.wrappers.CycleOverBenchmarks(
env, dataset.benchmarks())
return env
config = {
"env_config": {
"cgym_id": "llvm-autophase-ic-v0",
"timelimit": 45,
"dataset": "benchmark://cbench-v1",
},
"env": "CompilerGym",
}
stop = {
"timesteps_total": 10_000,
}
tune.register_env("CompilerGym", make_env)
tune.run(
PPOTrainer,
config=config,
stop=stop,
name='cgym_cache_dir_demo',
)
```
## Environment
Please fill in this checklist:
- CompilerGym: 0.2.2
- How you installed CompilerGym (conda, pip, source): pip
- OS: Ubuntu 20.04.1 LTS (x86_64)
- Python version: 3.9.7
- Build command you used (if compiling from source): N/A
- GCC/clang version (if compiling from source): N/A
- Bazel version (if compiling from source): N/A
- Versions of any other relevant libraries: ray: 1.10.0, gym: 0.20.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `compiler_gym/service/runtime/create_and_run_compiler_gym_service.py`
Content:
```
1 #! /usr/bin/env python3
2 #
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 #
5 # This source code is licensed under the MIT license found in the
6 # LICENSE file in the root directory of this source tree.
7 """An example CompilerGym service in python."""
8 import os
9 import sys
10 from concurrent import futures
11 from multiprocessing import cpu_count
12 from pathlib import Path
13 from signal import SIGTERM, signal
14 from tempfile import mkdtemp
15 from threading import Event, Thread
16 from typing import Type
17
18 import grpc
19 from absl import app, flags, logging
20
21 from compiler_gym.service import connection
22 from compiler_gym.service.compilation_session import CompilationSession
23 from compiler_gym.service.proto import compiler_gym_service_pb2_grpc
24 from compiler_gym.service.runtime.compiler_gym_service import CompilerGymService
25 from compiler_gym.util import debug_util as dbg
26 from compiler_gym.util.filesystem import atomic_file_write
27 from compiler_gym.util.shell_format import plural
28
29 flags.DEFINE_string("working_dir", "", "Path to use as service working directory")
30 flags.DEFINE_integer("port", 0, "The service listening port")
31 flags.DEFINE_integer(
32 "rpc_service_threads", cpu_count(), "The number of server worker threads"
33 )
34 flags.DEFINE_integer("logbuflevel", 0, "Flag for compatability with C++ service.")
35 FLAGS = flags.FLAGS
36
37 MAX_MESSAGE_SIZE_IN_BYTES = 512 * 1024 * 1024
38
39
40 shutdown_signal = Event()
41
42
43 # NOTE(cummins): This script is executed in a subprocess, so code coverage
44 # tracking does not work. As such we use "# pragma: no cover" annotation for all
45 # functions.
46 def _shutdown_handler(signal_number, stack_frame): # pragma: no cover
47 del stack_frame # Unused
48 logging.info("Service received signal: %d", signal_number)
49 shutdown_signal.set()
50
51
52 def create_and_run_compiler_gym_service(
53 compilation_session_type: Type[CompilationSession],
54 ): # pragma: no cover
55 """Create and run an RPC service for the given compilation session.
56
57 This should be called on its own in a self contained script to implement a
58 compilation service. Example:
59
60 .. code-block:: python
61
62 from compiler_gym.service import runtime
63 from my_compiler_service import MyCompilationSession
64
65 if __name__ == "__main__":
66 runtime.create_and_run_compiler_gym_service(MyCompilationSession)
67
68 This function never returns.
69
70 :param compilation_session_type: A sublass of :class:`CompilationSession
71 <compiler_gym.service.CompilationSession>` that provides implementations
72 of the abstract methods.
73 """
74
75 def main(argv):
76 # Register a signal handler for SIGTERM that will set the shutdownSignal
77 # future value.
78 signal(SIGTERM, _shutdown_handler)
79
80 argv = [x for x in argv if x.strip()]
81 if len(argv) > 1:
82 print(
83 f"ERROR: Unrecognized command line argument '{argv[1]}'",
84 file=sys.stderr,
85 )
86 sys.exit(1)
87
88 working_dir = Path(FLAGS.working_dir or mkdtemp(prefix="compiler_gym-service-"))
89 (working_dir / "logs").mkdir(exist_ok=True, parents=True)
90
91 FLAGS.log_dir = str(working_dir / "logs")
92 logging.get_absl_handler().use_absl_log_file()
93 logging.set_verbosity(dbg.get_logging_level())
94
95 # Create the service.
96 server = grpc.server(
97 futures.ThreadPoolExecutor(max_workers=FLAGS.rpc_service_threads),
98 options=connection.GRPC_CHANNEL_OPTIONS,
99 )
100 service = CompilerGymService(
101 working_directory=working_dir,
102 compilation_session_type=compilation_session_type,
103 )
104 compiler_gym_service_pb2_grpc.add_CompilerGymServiceServicer_to_server(
105 service, server
106 )
107
108 address = f"0.0.0.0:{FLAGS.port}" if FLAGS.port else "0.0.0.0:0"
109 port = server.add_insecure_port(address)
110
111 with atomic_file_write(working_dir / "port.txt", fileobj=True, mode="w") as f:
112 f.write(str(port))
113
114 with atomic_file_write(working_dir / "pid.txt", fileobj=True, mode="w") as f:
115 f.write(str(os.getpid()))
116
117 logging.info(
118 "Service %s listening on %d, PID = %d", working_dir, port, os.getpid()
119 )
120
121 server.start()
122
123 # Block on the RPC service in a separate thread. This enables the
124 # current thread to handle the shutdown routine.
125 server_thread = Thread(target=server.wait_for_termination)
126 server_thread.start()
127
128 # Block until the shutdown signal is received.
129 shutdown_signal.wait()
130 logging.info("Shutting down the RPC service")
131 server.stop(60).wait()
132 server_thread.join()
133
134 if len(service.sessions):
135 print(
136 "ERROR: Killing a service with",
137 plural(len(service.session), "active session", "active sessions"),
138 file=sys.stderr,
139 )
140 sys.exit(6)
141
142 app.run(main)
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/compiler_gym/service/runtime/create_and_run_compiler_gym_service.py b/compiler_gym/service/runtime/create_and_run_compiler_gym_service.py
--- a/compiler_gym/service/runtime/create_and_run_compiler_gym_service.py
+++ b/compiler_gym/service/runtime/create_and_run_compiler_gym_service.py
@@ -130,6 +130,7 @@
logging.info("Shutting down the RPC service")
server.stop(60).wait()
server_thread.join()
+ logging.info("Service closed")
if len(service.sessions):
print(
| {"golden_diff": "diff --git a/compiler_gym/service/runtime/create_and_run_compiler_gym_service.py b/compiler_gym/service/runtime/create_and_run_compiler_gym_service.py\n--- a/compiler_gym/service/runtime/create_and_run_compiler_gym_service.py\n+++ b/compiler_gym/service/runtime/create_and_run_compiler_gym_service.py\n@@ -130,6 +130,7 @@\n logging.info(\"Shutting down the RPC service\")\n server.stop(60).wait()\n server_thread.join()\n+ logging.info(\"Service closed\")\n \n if len(service.sessions):\n print(\n", "issue": "Is running CompilerGym intended to leave cache directories behind?\n## \u2753 Questions and Help\r\n\r\nNot sure if this is a bug or not, so submitting as a question. Running a CompilerGym experiment leaves behind many cache directories. When running a large experiment, this can create problems through the sheer number of directories in `COMPILER_GYM_CACHE`. I expected the `COMPILER_GYM_CACHE` to not have anything after the experiment exited cleanly.\r\n\r\nIs there a way to avoid the experiments leaving the directories behind?\r\n\r\n## Steps to reproduce\r\nRunning the following on my machine leaves behind about 270 cache directories.\r\n\r\n```python\r\nimport compiler_gym\r\nimport compiler_gym.wrappers\r\nfrom ray import tune\r\nfrom ray.rllib.agents.ppo import PPOTrainer\r\n\r\n\r\ndef make_env(env_config):\r\n env = compiler_gym.make(env_config['cgym_id'])\r\n env = compiler_gym.wrappers.TimeLimit(env, env_config['timelimit'])\r\n dataset = env.datasets[env_config['dataset']]\r\n env = compiler_gym.wrappers.CycleOverBenchmarks(\r\n env, dataset.benchmarks())\r\n return env\r\n\r\n\r\nconfig = {\r\n \"env_config\": {\r\n \"cgym_id\": \"llvm-autophase-ic-v0\",\r\n \"timelimit\": 45,\r\n \"dataset\": \"benchmark://cbench-v1\",\r\n },\r\n \"env\": \"CompilerGym\",\r\n}\r\n\r\nstop = {\r\n \"timesteps_total\": 10_000,\r\n}\r\n\r\ntune.register_env(\"CompilerGym\", make_env)\r\ntune.run(\r\n PPOTrainer,\r\n config=config,\r\n stop=stop,\r\n name='cgym_cache_dir_demo',\r\n)\r\n```\r\n\r\n## Environment\r\n\r\nPlease fill in this checklist:\r\n\r\n- CompilerGym: 0.2.2\r\n- How you installed CompilerGym (conda, pip, source): pip\r\n- OS: Ubuntu 20.04.1 LTS (x86_64)\r\n- Python version: 3.9.7\r\n- Build command you used (if compiling from source): N/A\r\n- GCC/clang version (if compiling from source): N/A\r\n- Bazel version (if compiling from source): N/A\r\n- Versions of any other relevant libraries: ray: 1.10.0, gym: 0.20.0\r\n\n", "before_files": [{"content": "#! /usr/bin/env python3\n#\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"An example CompilerGym service in python.\"\"\"\nimport os\nimport sys\nfrom concurrent import futures\nfrom multiprocessing import cpu_count\nfrom pathlib import Path\nfrom signal import SIGTERM, signal\nfrom tempfile import mkdtemp\nfrom threading import Event, Thread\nfrom typing import Type\n\nimport grpc\nfrom absl import app, flags, logging\n\nfrom compiler_gym.service import connection\nfrom compiler_gym.service.compilation_session import CompilationSession\nfrom compiler_gym.service.proto import compiler_gym_service_pb2_grpc\nfrom compiler_gym.service.runtime.compiler_gym_service import CompilerGymService\nfrom compiler_gym.util import debug_util as dbg\nfrom compiler_gym.util.filesystem import atomic_file_write\nfrom compiler_gym.util.shell_format import plural\n\nflags.DEFINE_string(\"working_dir\", \"\", \"Path to use as service working directory\")\nflags.DEFINE_integer(\"port\", 0, \"The service listening port\")\nflags.DEFINE_integer(\n \"rpc_service_threads\", cpu_count(), \"The number of server worker threads\"\n)\nflags.DEFINE_integer(\"logbuflevel\", 0, \"Flag for compatability with C++ service.\")\nFLAGS = flags.FLAGS\n\nMAX_MESSAGE_SIZE_IN_BYTES = 512 * 1024 * 1024\n\n\nshutdown_signal = Event()\n\n\n# NOTE(cummins): This script is executed in a subprocess, so code coverage\n# tracking does not work. As such we use \"# pragma: no cover\" annotation for all\n# functions.\ndef _shutdown_handler(signal_number, stack_frame): # pragma: no cover\n del stack_frame # Unused\n logging.info(\"Service received signal: %d\", signal_number)\n shutdown_signal.set()\n\n\ndef create_and_run_compiler_gym_service(\n compilation_session_type: Type[CompilationSession],\n): # pragma: no cover\n \"\"\"Create and run an RPC service for the given compilation session.\n\n This should be called on its own in a self contained script to implement a\n compilation service. Example:\n\n .. 
code-block:: python\n\n from compiler_gym.service import runtime\n from my_compiler_service import MyCompilationSession\n\n if __name__ == \"__main__\":\n runtime.create_and_run_compiler_gym_service(MyCompilationSession)\n\n This function never returns.\n\n :param compilation_session_type: A sublass of :class:`CompilationSession\n <compiler_gym.service.CompilationSession>` that provides implementations\n of the abstract methods.\n \"\"\"\n\n def main(argv):\n # Register a signal handler for SIGTERM that will set the shutdownSignal\n # future value.\n signal(SIGTERM, _shutdown_handler)\n\n argv = [x for x in argv if x.strip()]\n if len(argv) > 1:\n print(\n f\"ERROR: Unrecognized command line argument '{argv[1]}'\",\n file=sys.stderr,\n )\n sys.exit(1)\n\n working_dir = Path(FLAGS.working_dir or mkdtemp(prefix=\"compiler_gym-service-\"))\n (working_dir / \"logs\").mkdir(exist_ok=True, parents=True)\n\n FLAGS.log_dir = str(working_dir / \"logs\")\n logging.get_absl_handler().use_absl_log_file()\n logging.set_verbosity(dbg.get_logging_level())\n\n # Create the service.\n server = grpc.server(\n futures.ThreadPoolExecutor(max_workers=FLAGS.rpc_service_threads),\n options=connection.GRPC_CHANNEL_OPTIONS,\n )\n service = CompilerGymService(\n working_directory=working_dir,\n compilation_session_type=compilation_session_type,\n )\n compiler_gym_service_pb2_grpc.add_CompilerGymServiceServicer_to_server(\n service, server\n )\n\n address = f\"0.0.0.0:{FLAGS.port}\" if FLAGS.port else \"0.0.0.0:0\"\n port = server.add_insecure_port(address)\n\n with atomic_file_write(working_dir / \"port.txt\", fileobj=True, mode=\"w\") as f:\n f.write(str(port))\n\n with atomic_file_write(working_dir / \"pid.txt\", fileobj=True, mode=\"w\") as f:\n f.write(str(os.getpid()))\n\n logging.info(\n \"Service %s listening on %d, PID = %d\", working_dir, port, os.getpid()\n )\n\n server.start()\n\n # Block on the RPC service in a separate thread. This enables the\n # current thread to handle the shutdown routine.\n server_thread = Thread(target=server.wait_for_termination)\n server_thread.start()\n\n # Block until the shutdown signal is received.\n shutdown_signal.wait()\n logging.info(\"Shutting down the RPC service\")\n server.stop(60).wait()\n server_thread.join()\n\n if len(service.sessions):\n print(\n \"ERROR: Killing a service with\",\n plural(len(service.session), \"active session\", \"active sessions\"),\n file=sys.stderr,\n )\n sys.exit(6)\n\n app.run(main)\n", "path": "compiler_gym/service/runtime/create_and_run_compiler_gym_service.py"}], "after_files": [{"content": "#! /usr/bin/env python3\n#\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"An example CompilerGym service in python.\"\"\"\nimport os\nimport sys\nfrom concurrent import futures\nfrom multiprocessing import cpu_count\nfrom pathlib import Path\nfrom signal import SIGTERM, signal\nfrom tempfile import mkdtemp\nfrom threading import Event, Thread\nfrom typing import Type\n\nimport grpc\nfrom absl import app, flags, logging\n\nfrom compiler_gym.service import connection\nfrom compiler_gym.service.compilation_session import CompilationSession\nfrom compiler_gym.service.proto import compiler_gym_service_pb2_grpc\nfrom compiler_gym.service.runtime.compiler_gym_service import CompilerGymService\nfrom compiler_gym.util import debug_util as dbg\nfrom compiler_gym.util.filesystem import atomic_file_write\nfrom compiler_gym.util.shell_format import plural\n\nflags.DEFINE_string(\"working_dir\", \"\", \"Path to use as service working directory\")\nflags.DEFINE_integer(\"port\", 0, \"The service listening port\")\nflags.DEFINE_integer(\n \"rpc_service_threads\", cpu_count(), \"The number of server worker threads\"\n)\nflags.DEFINE_integer(\"logbuflevel\", 0, \"Flag for compatability with C++ service.\")\nFLAGS = flags.FLAGS\n\nMAX_MESSAGE_SIZE_IN_BYTES = 512 * 1024 * 1024\n\n\nshutdown_signal = Event()\n\n\n# NOTE(cummins): This script is executed in a subprocess, so code coverage\n# tracking does not work. As such we use \"# pragma: no cover\" annotation for all\n# functions.\ndef _shutdown_handler(signal_number, stack_frame): # pragma: no cover\n del stack_frame # Unused\n logging.info(\"Service received signal: %d\", signal_number)\n shutdown_signal.set()\n\n\ndef create_and_run_compiler_gym_service(\n compilation_session_type: Type[CompilationSession],\n): # pragma: no cover\n \"\"\"Create and run an RPC service for the given compilation session.\n\n This should be called on its own in a self contained script to implement a\n compilation service. Example:\n\n .. 
code-block:: python\n\n from compiler_gym.service import runtime\n from my_compiler_service import MyCompilationSession\n\n if __name__ == \"__main__\":\n runtime.create_and_run_compiler_gym_service(MyCompilationSession)\n\n This function never returns.\n\n :param compilation_session_type: A sublass of :class:`CompilationSession\n <compiler_gym.service.CompilationSession>` that provides implementations\n of the abstract methods.\n \"\"\"\n\n def main(argv):\n # Register a signal handler for SIGTERM that will set the shutdownSignal\n # future value.\n signal(SIGTERM, _shutdown_handler)\n\n argv = [x for x in argv if x.strip()]\n if len(argv) > 1:\n print(\n f\"ERROR: Unrecognized command line argument '{argv[1]}'\",\n file=sys.stderr,\n )\n sys.exit(1)\n\n working_dir = Path(FLAGS.working_dir or mkdtemp(prefix=\"compiler_gym-service-\"))\n (working_dir / \"logs\").mkdir(exist_ok=True, parents=True)\n\n FLAGS.log_dir = str(working_dir / \"logs\")\n logging.get_absl_handler().use_absl_log_file()\n logging.set_verbosity(dbg.get_logging_level())\n\n # Create the service.\n server = grpc.server(\n futures.ThreadPoolExecutor(max_workers=FLAGS.rpc_service_threads),\n options=connection.GRPC_CHANNEL_OPTIONS,\n )\n service = CompilerGymService(\n working_directory=working_dir,\n compilation_session_type=compilation_session_type,\n )\n compiler_gym_service_pb2_grpc.add_CompilerGymServiceServicer_to_server(\n service, server\n )\n\n address = f\"0.0.0.0:{FLAGS.port}\" if FLAGS.port else \"0.0.0.0:0\"\n port = server.add_insecure_port(address)\n\n with atomic_file_write(working_dir / \"port.txt\", fileobj=True, mode=\"w\") as f:\n f.write(str(port))\n\n with atomic_file_write(working_dir / \"pid.txt\", fileobj=True, mode=\"w\") as f:\n f.write(str(os.getpid()))\n\n logging.info(\n \"Service %s listening on %d, PID = %d\", working_dir, port, os.getpid()\n )\n\n server.start()\n\n # Block on the RPC service in a separate thread. This enables the\n # current thread to handle the shutdown routine.\n server_thread = Thread(target=server.wait_for_termination)\n server_thread.start()\n\n # Block until the shutdown signal is received.\n shutdown_signal.wait()\n logging.info(\"Shutting down the RPC service\")\n server.stop(60).wait()\n server_thread.join()\n logging.info(\"Service closed\")\n\n if len(service.sessions):\n print(\n \"ERROR: Killing a service with\",\n plural(len(service.session), \"active session\", \"active sessions\"),\n file=sys.stderr,\n )\n sys.exit(6)\n\n app.run(main)\n", "path": "compiler_gym/service/runtime/create_and_run_compiler_gym_service.py"}]} | 2,200 | 121 |
gh_patches_debug_17514 | rasdani/github-patches | git_diff | pydantic__pydantic-7883 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
@validate_call on instance method of class with `__slots__` raises "AttributeError: object attribute is read-only"
### Initial Checks
- [X] I confirm that I'm using Pydantic V2
### Description
When the @validate_call decorator is applied to an instance method of a class with `__slots__` set, calls to the instance method raise an AttributeError:
```
Traceback (most recent call last):
File "./test_validate_call.py", line 25, in <module>
c.any_instance_method()
File "./.venv/lib/python3.8/site-packages/pydantic/_internal/_validate_call.py", line 118, in __get__
object.__setattr__(obj, self._name, result)
AttributeError: 'ClassWithSlots' object attribute 'any_instance_method' is read-only
```
This error is not raised when using the `@validate_arguments` decorator instead of `@validate_call`.
### Example Code
```Python
from pydantic import validate_call
class ClassWithSlots():
__slots__ = {}
@validate_call
def any_instance_method(self):
pass
c = ClassWithSlots()
c.any_instance_method()
```
### Python, Pydantic & OS Version
```Text
pydantic version: 2.3.0
pydantic-core version: 2.6.3
pydantic-core build: profile=release pgo=true
install path: ./.venv/lib/python3.8/site-packages/pydantic
python version: 3.8.10 (default, May 26 2023, 14:05:08) [GCC 9.4.0]
platform: Linux-5.15.0-83-generic-x86_64-with-glibc2.29
optional deps. installed: ['typing-extensions']
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydantic/_internal/_validate_call.py`
Content:
```
1 from __future__ import annotations as _annotations
2
3 import inspect
4 from dataclasses import dataclass
5 from functools import partial
6 from typing import Any, Awaitable, Callable
7
8 import pydantic_core
9
10 from ..config import ConfigDict
11 from ..plugin._schema_validator import create_schema_validator
12 from . import _generate_schema, _typing_extra
13 from ._config import ConfigWrapper
14
15
16 @dataclass
17 class CallMarker:
18 function: Callable[..., Any]
19 validate_return: bool
20
21
22 class ValidateCallWrapper:
23 """This is a wrapper around a function that validates the arguments passed to it, and optionally the return value.
24
25 It's partially inspired by `wraps` which in turn uses `partial`, but extended to be a descriptor so
26 these functions can be applied to instance methods, class methods, static methods, as well as normal functions.
27 """
28
29 __slots__ = (
30 'raw_function',
31 '_config',
32 '_validate_return',
33 '__pydantic_core_schema__',
34 '__pydantic_validator__',
35 '__signature__',
36 '__name__',
37 '__qualname__',
38 '__annotations__',
39 '__dict__', # required for __module__
40 )
41
42 def __init__(self, function: Callable[..., Any], config: ConfigDict | None, validate_return: bool):
43 self.raw_function = function
44 self._config = config
45 self._validate_return = validate_return
46 self.__signature__ = inspect.signature(function)
47 if isinstance(function, partial):
48 func = function.func
49 self.__name__ = f'partial({func.__name__})'
50 self.__qualname__ = f'partial({func.__qualname__})'
51 self.__annotations__ = func.__annotations__
52 self.__module__ = func.__module__
53 self.__doc__ = func.__doc__
54 else:
55 self.__name__ = function.__name__
56 self.__qualname__ = function.__qualname__
57 self.__annotations__ = function.__annotations__
58 self.__module__ = function.__module__
59 self.__doc__ = function.__doc__
60
61 namespace = _typing_extra.add_module_globals(function, None)
62 config_wrapper = ConfigWrapper(config)
63 gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)
64 schema = gen_schema.clean_schema(gen_schema.generate_schema(function))
65 self.__pydantic_core_schema__ = schema
66 core_config = config_wrapper.core_config(self)
67 self.__pydantic_validator__ = create_schema_validator(schema, core_config, config_wrapper.plugin_settings)
68
69 if self._validate_return:
70 return_type = (
71 self.__signature__.return_annotation
72 if self.__signature__.return_annotation is not self.__signature__.empty
73 else Any
74 )
75 gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)
76 schema = gen_schema.clean_schema(gen_schema.generate_schema(return_type))
77 self.__return_pydantic_core_schema__ = schema
78 validator = create_schema_validator(schema, core_config, config_wrapper.plugin_settings)
79 if inspect.iscoroutinefunction(self.raw_function):
80
81 async def return_val_wrapper(aw: Awaitable[Any]) -> None:
82 return validator.validate_python(await aw)
83
84 self.__return_pydantic_validator__ = return_val_wrapper
85 else:
86 self.__return_pydantic_validator__ = validator.validate_python
87 else:
88 self.__return_pydantic_core_schema__ = None
89 self.__return_pydantic_validator__ = None
90
91 self._name: str | None = None # set by __get__, used to set the instance attribute when decorating methods
92
93 def __call__(self, *args: Any, **kwargs: Any) -> Any:
94 res = self.__pydantic_validator__.validate_python(pydantic_core.ArgsKwargs(args, kwargs))
95 if self.__return_pydantic_validator__:
96 return self.__return_pydantic_validator__(res)
97 return res
98
99 def __get__(self, obj: Any, objtype: type[Any] | None = None) -> ValidateCallWrapper:
100 """Bind the raw function and return another ValidateCallWrapper wrapping that."""
101 if obj is None:
102 try:
103 # Handle the case where a method is accessed as a class attribute
104 return objtype.__getattribute__(objtype, self._name) # type: ignore
105 except AttributeError:
106 # This will happen the first time the attribute is accessed
107 pass
108
109 bound_function = self.raw_function.__get__(obj, objtype)
110 result = self.__class__(bound_function, self._config, self._validate_return)
111 if self._name is not None:
112 if obj is not None:
113 object.__setattr__(obj, self._name, result)
114 else:
115 object.__setattr__(objtype, self._name, result)
116 return result
117
118 def __set_name__(self, owner: Any, name: str) -> None:
119 self._name = name
120
121 def __repr__(self) -> str:
122 return f'ValidateCallWrapper({self.raw_function})'
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pydantic/_internal/_validate_call.py b/pydantic/_internal/_validate_call.py
--- a/pydantic/_internal/_validate_call.py
+++ b/pydantic/_internal/_validate_call.py
@@ -108,6 +108,11 @@
bound_function = self.raw_function.__get__(obj, objtype)
result = self.__class__(bound_function, self._config, self._validate_return)
+
+ # skip binding to instance when obj or objtype has __slots__ attribute
+ if hasattr(obj, '__slots__') or hasattr(objtype, '__slots__'):
+ return result
+
if self._name is not None:
if obj is not None:
object.__setattr__(obj, self._name, result)
@@ -120,3 +125,6 @@
def __repr__(self) -> str:
return f'ValidateCallWrapper({self.raw_function})'
+
+ def __eq__(self, other):
+ return self.raw_function == other.raw_function
| {"golden_diff": "diff --git a/pydantic/_internal/_validate_call.py b/pydantic/_internal/_validate_call.py\n--- a/pydantic/_internal/_validate_call.py\n+++ b/pydantic/_internal/_validate_call.py\n@@ -108,6 +108,11 @@\n \n bound_function = self.raw_function.__get__(obj, objtype)\n result = self.__class__(bound_function, self._config, self._validate_return)\n+\n+ # skip binding to instance when obj or objtype has __slots__ attribute\n+ if hasattr(obj, '__slots__') or hasattr(objtype, '__slots__'):\n+ return result\n+\n if self._name is not None:\n if obj is not None:\n object.__setattr__(obj, self._name, result)\n@@ -120,3 +125,6 @@\n \n def __repr__(self) -> str:\n return f'ValidateCallWrapper({self.raw_function})'\n+\n+ def __eq__(self, other):\n+ return self.raw_function == other.raw_function\n", "issue": "@validate_call on instance method of class with `__slots__` raises \"AttributeError: object attribute is read-only\"\n### Initial Checks\n\n- [X] I confirm that I'm using Pydantic V2\n\n### Description\n\nWhen the @validate_call decorator is applied to an instance method of a class with `__slots__` set, calls to the instance method raise an AttributeError:\r\n```\r\nTraceback (most recent call last):\r\n File \"./test_validate_call.py\", line 25, in <module>\r\n c.any_instance_method()\r\n File \"./.venv/lib/python3.8/site-packages/pydantic/_internal/_validate_call.py\", line 118, in __get__\r\n object.__setattr__(obj, self._name, result)\r\nAttributeError: 'ClassWithSlots' object attribute 'any_instance_method' is read-only\r\n```\r\n\r\nThis error is not raised when using the `@validate_arguments` decorator instead of `@validate_call`.\n\n### Example Code\n\n```Python\nfrom pydantic import validate_call\r\n\r\nclass ClassWithSlots():\r\n __slots__ = {}\r\n\r\n @validate_call\r\n def any_instance_method(self):\r\n pass\r\n\r\nc = ClassWithSlots()\r\nc.any_instance_method()\n```\n\n\n### Python, Pydantic & OS Version\n\n```Text\npydantic version: 2.3.0\r\n pydantic-core version: 2.6.3\r\n pydantic-core build: profile=release pgo=true\r\n install path: ./.venv/lib/python3.8/site-packages/pydantic\r\n python version: 3.8.10 (default, May 26 2023, 14:05:08) [GCC 9.4.0]\r\n platform: Linux-5.15.0-83-generic-x86_64-with-glibc2.29\r\n optional deps. installed: ['typing-extensions']\n```\n\n", "before_files": [{"content": "from __future__ import annotations as _annotations\n\nimport inspect\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Any, Awaitable, Callable\n\nimport pydantic_core\n\nfrom ..config import ConfigDict\nfrom ..plugin._schema_validator import create_schema_validator\nfrom . 
import _generate_schema, _typing_extra\nfrom ._config import ConfigWrapper\n\n\n@dataclass\nclass CallMarker:\n function: Callable[..., Any]\n validate_return: bool\n\n\nclass ValidateCallWrapper:\n \"\"\"This is a wrapper around a function that validates the arguments passed to it, and optionally the return value.\n\n It's partially inspired by `wraps` which in turn uses `partial`, but extended to be a descriptor so\n these functions can be applied to instance methods, class methods, static methods, as well as normal functions.\n \"\"\"\n\n __slots__ = (\n 'raw_function',\n '_config',\n '_validate_return',\n '__pydantic_core_schema__',\n '__pydantic_validator__',\n '__signature__',\n '__name__',\n '__qualname__',\n '__annotations__',\n '__dict__', # required for __module__\n )\n\n def __init__(self, function: Callable[..., Any], config: ConfigDict | None, validate_return: bool):\n self.raw_function = function\n self._config = config\n self._validate_return = validate_return\n self.__signature__ = inspect.signature(function)\n if isinstance(function, partial):\n func = function.func\n self.__name__ = f'partial({func.__name__})'\n self.__qualname__ = f'partial({func.__qualname__})'\n self.__annotations__ = func.__annotations__\n self.__module__ = func.__module__\n self.__doc__ = func.__doc__\n else:\n self.__name__ = function.__name__\n self.__qualname__ = function.__qualname__\n self.__annotations__ = function.__annotations__\n self.__module__ = function.__module__\n self.__doc__ = function.__doc__\n\n namespace = _typing_extra.add_module_globals(function, None)\n config_wrapper = ConfigWrapper(config)\n gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)\n schema = gen_schema.clean_schema(gen_schema.generate_schema(function))\n self.__pydantic_core_schema__ = schema\n core_config = config_wrapper.core_config(self)\n self.__pydantic_validator__ = create_schema_validator(schema, core_config, config_wrapper.plugin_settings)\n\n if self._validate_return:\n return_type = (\n self.__signature__.return_annotation\n if self.__signature__.return_annotation is not self.__signature__.empty\n else Any\n )\n gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)\n schema = gen_schema.clean_schema(gen_schema.generate_schema(return_type))\n self.__return_pydantic_core_schema__ = schema\n validator = create_schema_validator(schema, core_config, config_wrapper.plugin_settings)\n if inspect.iscoroutinefunction(self.raw_function):\n\n async def return_val_wrapper(aw: Awaitable[Any]) -> None:\n return validator.validate_python(await aw)\n\n self.__return_pydantic_validator__ = return_val_wrapper\n else:\n self.__return_pydantic_validator__ = validator.validate_python\n else:\n self.__return_pydantic_core_schema__ = None\n self.__return_pydantic_validator__ = None\n\n self._name: str | None = None # set by __get__, used to set the instance attribute when decorating methods\n\n def __call__(self, *args: Any, **kwargs: Any) -> Any:\n res = self.__pydantic_validator__.validate_python(pydantic_core.ArgsKwargs(args, kwargs))\n if self.__return_pydantic_validator__:\n return self.__return_pydantic_validator__(res)\n return res\n\n def __get__(self, obj: Any, objtype: type[Any] | None = None) -> ValidateCallWrapper:\n \"\"\"Bind the raw function and return another ValidateCallWrapper wrapping that.\"\"\"\n if obj is None:\n try:\n # Handle the case where a method is accessed as a class attribute\n return objtype.__getattribute__(objtype, self._name) # type: ignore\n except 
AttributeError:\n # This will happen the first time the attribute is accessed\n pass\n\n bound_function = self.raw_function.__get__(obj, objtype)\n result = self.__class__(bound_function, self._config, self._validate_return)\n if self._name is not None:\n if obj is not None:\n object.__setattr__(obj, self._name, result)\n else:\n object.__setattr__(objtype, self._name, result)\n return result\n\n def __set_name__(self, owner: Any, name: str) -> None:\n self._name = name\n\n def __repr__(self) -> str:\n return f'ValidateCallWrapper({self.raw_function})'\n", "path": "pydantic/_internal/_validate_call.py"}], "after_files": [{"content": "from __future__ import annotations as _annotations\n\nimport inspect\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Any, Awaitable, Callable\n\nimport pydantic_core\n\nfrom ..config import ConfigDict\nfrom ..plugin._schema_validator import create_schema_validator\nfrom . import _generate_schema, _typing_extra\nfrom ._config import ConfigWrapper\n\n\n@dataclass\nclass CallMarker:\n function: Callable[..., Any]\n validate_return: bool\n\n\nclass ValidateCallWrapper:\n \"\"\"This is a wrapper around a function that validates the arguments passed to it, and optionally the return value.\n\n It's partially inspired by `wraps` which in turn uses `partial`, but extended to be a descriptor so\n these functions can be applied to instance methods, class methods, static methods, as well as normal functions.\n \"\"\"\n\n __slots__ = (\n 'raw_function',\n '_config',\n '_validate_return',\n '__pydantic_core_schema__',\n '__pydantic_validator__',\n '__signature__',\n '__name__',\n '__qualname__',\n '__annotations__',\n '__dict__', # required for __module__\n )\n\n def __init__(self, function: Callable[..., Any], config: ConfigDict | None, validate_return: bool):\n self.raw_function = function\n self._config = config\n self._validate_return = validate_return\n self.__signature__ = inspect.signature(function)\n if isinstance(function, partial):\n func = function.func\n self.__name__ = f'partial({func.__name__})'\n self.__qualname__ = f'partial({func.__qualname__})'\n self.__annotations__ = func.__annotations__\n self.__module__ = func.__module__\n self.__doc__ = func.__doc__\n else:\n self.__name__ = function.__name__\n self.__qualname__ = function.__qualname__\n self.__annotations__ = function.__annotations__\n self.__module__ = function.__module__\n self.__doc__ = function.__doc__\n\n namespace = _typing_extra.add_module_globals(function, None)\n config_wrapper = ConfigWrapper(config)\n gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)\n schema = gen_schema.clean_schema(gen_schema.generate_schema(function))\n self.__pydantic_core_schema__ = schema\n core_config = config_wrapper.core_config(self)\n self.__pydantic_validator__ = create_schema_validator(schema, core_config, config_wrapper.plugin_settings)\n\n if self._validate_return:\n return_type = (\n self.__signature__.return_annotation\n if self.__signature__.return_annotation is not self.__signature__.empty\n else Any\n )\n gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)\n schema = gen_schema.clean_schema(gen_schema.generate_schema(return_type))\n self.__return_pydantic_core_schema__ = schema\n validator = create_schema_validator(schema, core_config, config_wrapper.plugin_settings)\n if inspect.iscoroutinefunction(self.raw_function):\n\n async def return_val_wrapper(aw: Awaitable[Any]) -> None:\n return validator.validate_python(await 
aw)\n\n self.__return_pydantic_validator__ = return_val_wrapper\n else:\n self.__return_pydantic_validator__ = validator.validate_python\n else:\n self.__return_pydantic_core_schema__ = None\n self.__return_pydantic_validator__ = None\n\n self._name: str | None = None # set by __get__, used to set the instance attribute when decorating methods\n\n def __call__(self, *args: Any, **kwargs: Any) -> Any:\n res = self.__pydantic_validator__.validate_python(pydantic_core.ArgsKwargs(args, kwargs))\n if self.__return_pydantic_validator__:\n return self.__return_pydantic_validator__(res)\n return res\n\n def __get__(self, obj: Any, objtype: type[Any] | None = None) -> ValidateCallWrapper:\n \"\"\"Bind the raw function and return another ValidateCallWrapper wrapping that.\"\"\"\n if obj is None:\n try:\n # Handle the case where a method is accessed as a class attribute\n return objtype.__getattribute__(objtype, self._name) # type: ignore\n except AttributeError:\n # This will happen the first time the attribute is accessed\n pass\n\n bound_function = self.raw_function.__get__(obj, objtype)\n result = self.__class__(bound_function, self._config, self._validate_return)\n\n # skip binding to instance when obj or objtype has __slots__ attribute\n if hasattr(obj, '__slots__') or hasattr(objtype, '__slots__'):\n return result\n\n if self._name is not None:\n if obj is not None:\n object.__setattr__(obj, self._name, result)\n else:\n object.__setattr__(objtype, self._name, result)\n return result\n\n def __set_name__(self, owner: Any, name: str) -> None:\n self._name = name\n\n def __repr__(self) -> str:\n return f'ValidateCallWrapper({self.raw_function})'\n\n def __eq__(self, other):\n return self.raw_function == other.raw_function\n", "path": "pydantic/_internal/_validate_call.py"}]} | 2,015 | 231 |
gh_patches_debug_31126 | rasdani/github-patches | git_diff | mlcommons__GaNDLF-480 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Vips installation is a pain and should be made optional, or replaced with something else entirely
**Is your feature request related to a problem? Please describe.**
VIPS (libvips/pyvips) is a major pain to install and use, since not all of its dependencies can be installed from PyPI.
**Describe the solution you'd like**
Currently, we are only using VIPS for a resize operation in the unit tests, and this should either be made optional or changed to use another library.
**Describe alternatives you've considered**
Nothing else, since VIPS is just a pain.
**Additional context**
N.A.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `GANDLF/utils/generic.py`
Content:
```
1 import os, datetime, sys
2 import numpy as np
3
4
5 def checkPatchDivisibility(patch_size, number=16):
6 """
7 This function checks the divisibility of a numpy array or integer for architectural integrity
8
9 Args:
10 patch_size (numpy.array): The patch size for checking.
11 number (int, optional): The number to check divisibility for. Defaults to 16.
12
13 Returns:
14 bool: If all elements of array are divisible or not, after taking 2D patches into account.
15 """
16 if isinstance(patch_size, int):
17 patch_size_to_check = np.array(patch_size)
18 else:
19 patch_size_to_check = patch_size
20 # for 2D, don't check divisibility of last dimension
21 if patch_size_to_check[-1] == 1:
22 patch_size_to_check = patch_size_to_check[:-1]
23 # for 2D, don't check divisibility of first dimension
24 elif patch_size_to_check[0] == 1:
25 patch_size_to_check = patch_size_to_check[1:]
26 if np.count_nonzero(np.remainder(patch_size_to_check, number)) > 0:
27 return False
28
29 # adding check to address https://github.com/CBICA/GaNDLF/issues/53
30 # there is quite possibly a better way to do this
31 unique = np.unique(patch_size_to_check)
32 if (unique.shape[0] == 1) and (unique[0] <= number):
33 return False
34 return True
35
36
37 def fix_paths(cwd):
38 """
39 This function takes the current working directory of the script (which is required for VIPS) and sets up all the paths correctly
40
41 Args:
42 cwd (str): The current working directory.
43 """
44 if os.name == "nt": # proceed for windows
45 vipshome = os.path.join(cwd, "vips/vips-dev-8.10/bin")
46 os.environ["PATH"] = vipshome + ";" + os.environ["PATH"]
47
48
49 def get_date_time():
50 """
51 Get a well-parsed date string
52
53 Returns:
54 str: The date in format YYYY/MM/DD::HH:MM:SS
55 """
56 now = datetime.datetime.now().strftime("%Y/%m/%d::%H:%M:%S")
57 return now
58
59
60 def get_unique_timestamp():
61 """
62 Get a well-parsed timestamp string to be used for unique filenames
63
64 Returns:
65 str: The date in format YYYYMMDD_HHMMSS
66 """
67 now = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
68 return now
69
70
71 def get_filename_extension_sanitized(filename):
72 """
73 This function returns the extension of the filename with leading and trailing characters removed.
74 Args:
75 filename (str): The filename to be processed.
76 Returns:
77 str: The filename with extension removed.
78 """
79 _, ext = os.path.splitext(filename)
80 # if .gz or .nii file is detected, always return .nii.gz
81 if (ext == ".gz") or (ext == ".nii"):
82 ext = ".nii.gz"
83 return ext
84
85
86 def parse_version(version_string):
87 """
88 Parses version string, discards last identifier (NR/alpha/beta) and returns an integer for comparison.
89
90 Args:
91 version_string (str): The string to be parsed.
92
93 Returns:
94 int: The version number.
95 """
96 version_string_split = version_string.split(".")
97 if len(version_string_split) > 3:
98 del version_string_split[-1]
99 return int("".join(version_string_split))
100
101
102 def version_check(version_from_config, version_to_check):
103 """
104 This function checks if the version of the config file is compatible with the version of the code.
105
106 Args:
107 version_from_config (str): The version of the config file.
108 version_to_check (str): The version of the code or model to check.
109
110 Returns:
111 bool: If the version of the config file is compatible with the version of the code.
112 """
113 version_to_check_int = parse_version(version_to_check)
114 min_ver = parse_version(version_from_config["minimum"])
115 max_ver = parse_version(version_from_config["maximum"])
116 if (min_ver > version_to_check_int) or (max_ver < version_to_check_int):
117 sys.exit(
118 "Incompatible version of GaNDLF detected ("
119 + str(version_to_check_int)
120 + ")"
121 )
122
123 return True
124
125
126 def checkPatchDimensions(patch_size, numlay):
127 """
128 This function checks the divisibility of a numpy array or integer for architectural integrity
129
130 Args:
131 patch_size (numpy.array): The patch size for checking.
132 number (int, optional): The number to check divisibility for. Defaults to 16.
133
134 Returns:
135 int: Largest multiple of 2 (less than or equal to numlay) that each element of patch size is divisible by to yield int >= 2
136 """
137 if isinstance(patch_size, int):
138 patch_size_to_check = np.array(patch_size)
139 else:
140 patch_size_to_check = patch_size
141 # for 2D, don't check divisibility of last dimension
142 if patch_size_to_check[-1] == 1:
143 patch_size_to_check = patch_size_to_check[:-1]
144
145 if all(
146 [x >= 2 ** (numlay + 1) and x % 2**numlay == 0 for x in patch_size_to_check]
147 ):
148 return numlay
149 else:
150 # base2 = np.floor(np.log2(patch_size_to_check))
151 base2 = np.array([getBase2(x) for x in patch_size_to_check])
152 remain = patch_size_to_check / 2**base2 # check that at least 1
153
154 layers = np.where(remain == 1, base2 - 1, base2)
155 return int(np.min(layers))
156
157
158 def getBase2(num):
159 # helper for checkPatchDimensions (returns the largest multiple of 2 that num is evenly divisible by)
160 base = 0
161 while num % 2 == 0:
162 num = num / 2
163 base = base + 1
164 return base
165
```
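As a quick illustration of how two of the helpers above behave, here is a hypothetical usage sketch. It assumes a working GaNDLF installation with NumPy available; the version strings and patch sizes are made-up examples, not values used anywhere in the repository.
```python
# Hypothetical usage of helpers from GANDLF/utils/generic.py (illustrative only).
import numpy as np

from GANDLF.utils.generic import checkPatchDimensions, parse_version

# parse_version drops a trailing dev/alpha/beta identifier and joins what is left:
assert parse_version("0.0.14") == 14         # ["0", "0", "14"] -> int("0014")
assert parse_version("0.0.14.dev0") == 14    # the 4th component is discarded

# checkPatchDimensions returns the deepest layer count (at most numlay) that the
# patch size supports, i.e. every dimension keeps dividing cleanly by powers of 2.
assert checkPatchDimensions(np.array([128, 128, 32]), 4) == 4
assert checkPatchDimensions(np.array([128, 128, 24]), 4) == 3  # 24 only divides cleanly by 2**3
```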
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 """The setup script."""
4
5
6 import os
7 from setuptools import setup, find_packages
8 from setuptools.command.install import install
9 from setuptools.command.develop import develop
10 from setuptools.command.egg_info import egg_info
11
12 with open("README.md") as readme_file:
13 readme = readme_file.read()
14
15
16 def git_submodule_update():
17 ## submodule update
18 os.system("git submodule update --init --recursive")
19
20
21 class CustomInstallCommand(install):
22 def run(self):
23 install.run(self)
24 git_submodule_update()
25
26
27 class CustomDevelopCommand(develop):
28 def run(self):
29 develop.run(self)
30 git_submodule_update()
31
32
33 class CustomEggInfoCommand(egg_info):
34 def run(self):
35 egg_info.run(self)
36 git_submodule_update()
37
38
39 # read version.py
40 import sys, re
41
42 try:
43 filepath = "GANDLF/version.py"
44 version_file = open(filepath)
45 (__version__,) = re.findall('__version__ = "(.*)"', version_file.read())
46
47 except Exception as error:
48 __version__ = "0.0.1"
49 sys.stderr.write("Warning: Could not open '%s' due %s\n" % (filepath, error))
50
51 requirements = [
52 "black",
53 "numpy==1.22.0",
54 "scipy",
55 "SimpleITK!=2.0.*",
56 "torchvision",
57 "tqdm",
58 "torchio==0.18.57",
59 "pandas",
60 "pylint",
61 "scikit-learn>=0.23.2",
62 "scikit-image>=0.19.1",
63 'pickle5>=0.0.11; python_version < "3.8.0"',
64 "setuptools",
65 "seaborn",
66 "pyyaml",
67 "tiffslide",
68 "matplotlib",
69 "requests>=2.25.0",
70 "pyvips==2.2.1",
71 "pytest",
72 "coverage",
73 "pytest-cov",
74 "psutil",
75 "medcam",
76 "opencv-python",
77 "torchmetrics==0.5.1", # newer versions have changed api for f1 invocation
78 "OpenPatchMiner==0.1.8",
79 "zarr==2.10.3",
80 "pydicom",
81 "onnx",
82 "torchinfo==1.7.0",
83 ]
84
85 # pytorch doesn't have LTS support on OSX - https://github.com/CBICA/GaNDLF/issues/389
86 if sys.platform == "darwin":
87 requirements.append("torch==1.9.0")
88 else:
89 requirements.append("torch==1.8.2")
90
91 setup(
92 name="GANDLF",
93 version=__version__,
94 author="Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun Güley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos", # alphabetical order
95 author_email="[email protected]",
96 python_requires=">=3.7",
97 packages=find_packages(),
98 cmdclass={ # this ensures git_submodule_update is called during install
99 "install": CustomInstallCommand,
100 "develop": CustomDevelopCommand,
101 "egg_info": CustomEggInfoCommand,
102 },
103 scripts=[
104 "gandlf_run",
105 "gandlf_constructCSV",
106 "gandlf_collectStats",
107 "gandlf_patchMiner",
108 "gandlf_preprocess",
109 "gandlf_anonymizer",
110 "gandlf_verifyInstall",
111 ],
112 classifiers=[
113 "Development Status :: 3 - Alpha",
114 "Intended Audience :: Science/Research",
115 "License :: OSI Approved :: BSD License",
116 "Natural Language :: English",
117 "Operating System :: OS Independent",
118 "Programming Language :: Python :: 3.7",
119 "Programming Language :: Python :: 3.8",
120 "Programming Language :: Python :: 3.9",
121 "Topic :: Scientific/Engineering :: Medical Science Apps",
122 ],
123 description=(
124 "PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging."
125 ),
126 install_requires=requirements,
127 license="BSD-3-Clause License",
128 long_description=readme,
129 long_description_content_type="text/markdown",
130 include_package_data=True,
131 keywords="semantic, segmentation, regression, classification, data-augmentation, medical-imaging",
132 zip_safe=False,
133 )
134
135 ## windows vips installation
136 if os.name == "nt": # proceed for windows
137 from pathlib import Path
138
139 # download and extract if main dll is absent
140 if not Path("./vips/vips-dev-8.10/bin/libvips-42.dll").exists():
141 print("Downloading and extracting VIPS for Windows")
142 url = "https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip"
143 zip_to_extract = "./vips.zip"
144 import urllib.request, zipfile
145
146 urllib.request.urlretrieve(url, zip_to_extract)
147 z = zipfile.ZipFile(zip_to_extract)
148 z.extractall("./vips")
149 z.close()
150 os.remove(zip_to_extract)
151
```
Path: `GANDLF/utils/__init__.py`
Content:
```
1 import os
2
3 # hides torchio citation request, see https://github.com/fepegar/torchio/issues/235
4 os.environ["TORCHIO_HIDE_CITATION_PROMPT"] = "1"
5
6 from .imaging import (
7 resize_image,
8 resample_image,
9 perform_sanity_check_on_subject,
10 write_training_patches,
11 )
12
13 from .tensor import (
14 one_hot,
15 reverse_one_hot,
16 send_model_to_device,
17 get_class_imbalance_weights,
18 get_class_imbalance_weights_segmentation,
19 get_class_imbalance_weights_classification,
20 get_linear_interpolation_mode,
21 print_model_summary,
22 )
23
24 from .write_parse import (
25 writeTrainingCSV,
26 parseTrainingCSV,
27 get_dataframe,
28 )
29
30 from .parameter_processing import (
31 populate_header_in_parameters,
32 find_problem_type,
33 populate_channel_keys_in_params,
34 )
35
36 from .generic import (
37 fix_paths,
38 get_date_time,
39 get_unique_timestamp,
40 get_filename_extension_sanitized,
41 version_check,
42 )
43
44 from .modelio import (
45 best_model_path_end,
46 load_model,
47 load_ov_model,
48 save_model,
49 )
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/GANDLF/utils/__init__.py b/GANDLF/utils/__init__.py
--- a/GANDLF/utils/__init__.py
+++ b/GANDLF/utils/__init__.py
@@ -34,7 +34,6 @@
)
from .generic import (
- fix_paths,
get_date_time,
get_unique_timestamp,
get_filename_extension_sanitized,
diff --git a/GANDLF/utils/generic.py b/GANDLF/utils/generic.py
--- a/GANDLF/utils/generic.py
+++ b/GANDLF/utils/generic.py
@@ -34,18 +34,6 @@
return True
-def fix_paths(cwd):
- """
- This function takes the current working directory of the script (which is required for VIPS) and sets up all the paths correctly
-
- Args:
- cwd (str): The current working directory.
- """
- if os.name == "nt": # proceed for windows
- vipshome = os.path.join(cwd, "vips/vips-dev-8.10/bin")
- os.environ["PATH"] = vipshome + ";" + os.environ["PATH"]
-
-
def get_date_time():
"""
Get a well-parsed date string
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -67,7 +67,6 @@
"tiffslide",
"matplotlib",
"requests>=2.25.0",
- "pyvips==2.2.1",
"pytest",
"coverage",
"pytest-cov",
@@ -131,20 +130,3 @@
keywords="semantic, segmentation, regression, classification, data-augmentation, medical-imaging",
zip_safe=False,
)
-
-## windows vips installation
-if os.name == "nt": # proceed for windows
- from pathlib import Path
-
- # download and extract if main dll is absent
- if not Path("./vips/vips-dev-8.10/bin/libvips-42.dll").exists():
- print("Downloading and extracting VIPS for Windows")
- url = "https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip"
- zip_to_extract = "./vips.zip"
- import urllib.request, zipfile
-
- urllib.request.urlretrieve(url, zip_to_extract)
- z = zipfile.ZipFile(zip_to_extract)
- z.extractall("./vips")
- z.close()
- os.remove(zip_to_extract)
| {"golden_diff": "diff --git a/GANDLF/utils/__init__.py b/GANDLF/utils/__init__.py\n--- a/GANDLF/utils/__init__.py\n+++ b/GANDLF/utils/__init__.py\n@@ -34,7 +34,6 @@\n )\n \n from .generic import (\n- fix_paths,\n get_date_time,\n get_unique_timestamp,\n get_filename_extension_sanitized,\ndiff --git a/GANDLF/utils/generic.py b/GANDLF/utils/generic.py\n--- a/GANDLF/utils/generic.py\n+++ b/GANDLF/utils/generic.py\n@@ -34,18 +34,6 @@\n return True\n \n \n-def fix_paths(cwd):\n- \"\"\"\n- This function takes the current working directory of the script (which is required for VIPS) and sets up all the paths correctly\n-\n- Args:\n- cwd (str): The current working directory.\n- \"\"\"\n- if os.name == \"nt\": # proceed for windows\n- vipshome = os.path.join(cwd, \"vips/vips-dev-8.10/bin\")\n- os.environ[\"PATH\"] = vipshome + \";\" + os.environ[\"PATH\"]\n-\n-\n def get_date_time():\n \"\"\"\n Get a well-parsed date string\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -67,7 +67,6 @@\n \"tiffslide\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n- \"pyvips==2.2.1\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n@@ -131,20 +130,3 @@\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging\",\n zip_safe=False,\n )\n-\n-## windows vips installation\n-if os.name == \"nt\": # proceed for windows\n- from pathlib import Path\n-\n- # download and extract if main dll is absent\n- if not Path(\"./vips/vips-dev-8.10/bin/libvips-42.dll\").exists():\n- print(\"Downloading and extracting VIPS for Windows\")\n- url = \"https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip\"\n- zip_to_extract = \"./vips.zip\"\n- import urllib.request, zipfile\n-\n- urllib.request.urlretrieve(url, zip_to_extract)\n- z = zipfile.ZipFile(zip_to_extract)\n- z.extractall(\"./vips\")\n- z.close()\n- os.remove(zip_to_extract)\n", "issue": "Vips installation is a pain and should be made optional, or use something else entirely\n**Is your feature request related to a problem? Please describe.**\r\nVIPS (libvips/pyvips) is a major pain to install and use, since it does not install all dependencies from pypi.\r\n\r\n**Describe the solution you'd like**\r\nCurrently, we are only using VIPS for a resize operation in the unit tests, and this should be either made optional or changed to use another library.\r\n\r\n**Describe alternatives you've considered**\r\nNothing else, since VIPS is just a pain.\r\n\r\n**Additional context**\r\nN.A.\r\n\n", "before_files": [{"content": "import os, datetime, sys\nimport numpy as np\n\n\ndef checkPatchDivisibility(patch_size, number=16):\n \"\"\"\n This function checks the divisibility of a numpy array or integer for architectural integrity\n\n Args:\n patch_size (numpy.array): The patch size for checking.\n number (int, optional): The number to check divisibility for. 
Defaults to 16.\n\n Returns:\n bool: If all elements of array are divisible or not, after taking 2D patches into account.\n \"\"\"\n if isinstance(patch_size, int):\n patch_size_to_check = np.array(patch_size)\n else:\n patch_size_to_check = patch_size\n # for 2D, don't check divisibility of last dimension\n if patch_size_to_check[-1] == 1:\n patch_size_to_check = patch_size_to_check[:-1]\n # for 2D, don't check divisibility of first dimension\n elif patch_size_to_check[0] == 1:\n patch_size_to_check = patch_size_to_check[1:]\n if np.count_nonzero(np.remainder(patch_size_to_check, number)) > 0:\n return False\n\n # adding check to address https://github.com/CBICA/GaNDLF/issues/53\n # there is quite possibly a better way to do this\n unique = np.unique(patch_size_to_check)\n if (unique.shape[0] == 1) and (unique[0] <= number):\n return False\n return True\n\n\ndef fix_paths(cwd):\n \"\"\"\n This function takes the current working directory of the script (which is required for VIPS) and sets up all the paths correctly\n\n Args:\n cwd (str): The current working directory.\n \"\"\"\n if os.name == \"nt\": # proceed for windows\n vipshome = os.path.join(cwd, \"vips/vips-dev-8.10/bin\")\n os.environ[\"PATH\"] = vipshome + \";\" + os.environ[\"PATH\"]\n\n\ndef get_date_time():\n \"\"\"\n Get a well-parsed date string\n\n Returns:\n str: The date in format YYYY/MM/DD::HH:MM:SS\n \"\"\"\n now = datetime.datetime.now().strftime(\"%Y/%m/%d::%H:%M:%S\")\n return now\n\n\ndef get_unique_timestamp():\n \"\"\"\n Get a well-parsed timestamp string to be used for unique filenames\n\n Returns:\n str: The date in format YYYYMMDD_HHMMSS\n \"\"\"\n now = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n return now\n\n\ndef get_filename_extension_sanitized(filename):\n \"\"\"\n This function returns the extension of the filename with leading and trailing characters removed.\n Args:\n filename (str): The filename to be processed.\n Returns:\n str: The filename with extension removed.\n \"\"\"\n _, ext = os.path.splitext(filename)\n # if .gz or .nii file is detected, always return .nii.gz\n if (ext == \".gz\") or (ext == \".nii\"):\n ext = \".nii.gz\"\n return ext\n\n\ndef parse_version(version_string):\n \"\"\"\n Parses version string, discards last identifier (NR/alpha/beta) and returns an integer for comparison.\n\n Args:\n version_string (str): The string to be parsed.\n\n Returns:\n int: The version number.\n \"\"\"\n version_string_split = version_string.split(\".\")\n if len(version_string_split) > 3:\n del version_string_split[-1]\n return int(\"\".join(version_string_split))\n\n\ndef version_check(version_from_config, version_to_check):\n \"\"\"\n This function checks if the version of the config file is compatible with the version of the code.\n\n Args:\n version_from_config (str): The version of the config file.\n version_to_check (str): The version of the code or model to check.\n\n Returns:\n bool: If the version of the config file is compatible with the version of the code.\n \"\"\"\n version_to_check_int = parse_version(version_to_check)\n min_ver = parse_version(version_from_config[\"minimum\"])\n max_ver = parse_version(version_from_config[\"maximum\"])\n if (min_ver > version_to_check_int) or (max_ver < version_to_check_int):\n sys.exit(\n \"Incompatible version of GaNDLF detected (\"\n + str(version_to_check_int)\n + \")\"\n )\n\n return True\n\n\ndef checkPatchDimensions(patch_size, numlay):\n \"\"\"\n This function checks the divisibility of a numpy array or integer for architectural 
integrity\n\n Args:\n patch_size (numpy.array): The patch size for checking.\n number (int, optional): The number to check divisibility for. Defaults to 16.\n\n Returns:\n int: Largest multiple of 2 (less than or equal to numlay) that each element of patch size is divisible by to yield int >= 2\n \"\"\"\n if isinstance(patch_size, int):\n patch_size_to_check = np.array(patch_size)\n else:\n patch_size_to_check = patch_size\n # for 2D, don't check divisibility of last dimension\n if patch_size_to_check[-1] == 1:\n patch_size_to_check = patch_size_to_check[:-1]\n\n if all(\n [x >= 2 ** (numlay + 1) and x % 2**numlay == 0 for x in patch_size_to_check]\n ):\n return numlay\n else:\n # base2 = np.floor(np.log2(patch_size_to_check))\n base2 = np.array([getBase2(x) for x in patch_size_to_check])\n remain = patch_size_to_check / 2**base2 # check that at least 1\n\n layers = np.where(remain == 1, base2 - 1, base2)\n return int(np.min(layers))\n\n\ndef getBase2(num):\n # helper for checkPatchDimensions (returns the largest multiple of 2 that num is evenly divisible by)\n base = 0\n while num % 2 == 0:\n num = num / 2\n base = base + 1\n return base\n", "path": "GANDLF/utils/generic.py"}, {"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\nwith open(\"README.md\") as readme_file:\n readme = readme_file.read()\n\n\ndef git_submodule_update():\n ## submodule update\n os.system(\"git submodule update --init --recursive\")\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n git_submodule_update()\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n git_submodule_update()\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n git_submodule_update()\n\n\n# read version.py\nimport sys, re\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (filepath, error))\n\nrequirements = [\n \"black\",\n \"numpy==1.22.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.57\",\n \"pandas\",\n \"pylint\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n 'pickle5>=0.0.11; python_version < \"3.8.0\"',\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n \"pyvips==2.2.1\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==0.5.1\", # newer versions have changed api for f1 invocation\n \"OpenPatchMiner==0.1.8\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n]\n\n# pytorch doesn't have LTS support on OSX - https://github.com/CBICA/GaNDLF/issues/389\nif sys.platform == \"darwin\":\n requirements.append(\"torch==1.9.0\")\nelse:\n requirements.append(\"torch==1.8.2\")\n\nsetup(\n name=\"GANDLF\",\n version=__version__,\n author=\"Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun G\u00fcley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos\", # alphabetical order\n 
author_email=\"[email protected]\",\n python_requires=\">=3.7\",\n packages=find_packages(),\n cmdclass={ # this ensures git_submodule_update is called during install\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"BSD-3-Clause License\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging\",\n zip_safe=False,\n)\n\n## windows vips installation\nif os.name == \"nt\": # proceed for windows\n from pathlib import Path\n\n # download and extract if main dll is absent\n if not Path(\"./vips/vips-dev-8.10/bin/libvips-42.dll\").exists():\n print(\"Downloading and extracting VIPS for Windows\")\n url = \"https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip\"\n zip_to_extract = \"./vips.zip\"\n import urllib.request, zipfile\n\n urllib.request.urlretrieve(url, zip_to_extract)\n z = zipfile.ZipFile(zip_to_extract)\n z.extractall(\"./vips\")\n z.close()\n os.remove(zip_to_extract)\n", "path": "setup.py"}, {"content": "import os\n\n# hides torchio citation request, see https://github.com/fepegar/torchio/issues/235\nos.environ[\"TORCHIO_HIDE_CITATION_PROMPT\"] = \"1\"\n\nfrom .imaging import (\n resize_image,\n resample_image,\n perform_sanity_check_on_subject,\n write_training_patches,\n)\n\nfrom .tensor import (\n one_hot,\n reverse_one_hot,\n send_model_to_device,\n get_class_imbalance_weights,\n get_class_imbalance_weights_segmentation,\n get_class_imbalance_weights_classification,\n get_linear_interpolation_mode,\n print_model_summary,\n)\n\nfrom .write_parse import (\n writeTrainingCSV,\n parseTrainingCSV,\n get_dataframe,\n)\n\nfrom .parameter_processing import (\n populate_header_in_parameters,\n find_problem_type,\n populate_channel_keys_in_params,\n)\n\nfrom .generic import (\n fix_paths,\n get_date_time,\n get_unique_timestamp,\n get_filename_extension_sanitized,\n version_check,\n)\n\nfrom .modelio import (\n best_model_path_end,\n load_model,\n load_ov_model,\n save_model,\n)\n", "path": "GANDLF/utils/__init__.py"}], "after_files": [{"content": "import os, datetime, sys\nimport numpy as np\n\n\ndef checkPatchDivisibility(patch_size, number=16):\n \"\"\"\n This function checks the divisibility of a numpy array or integer for architectural integrity\n\n Args:\n patch_size (numpy.array): The patch size for checking.\n number (int, optional): The number to check divisibility for. 
Defaults to 16.\n\n Returns:\n bool: If all elements of array are divisible or not, after taking 2D patches into account.\n \"\"\"\n if isinstance(patch_size, int):\n patch_size_to_check = np.array(patch_size)\n else:\n patch_size_to_check = patch_size\n # for 2D, don't check divisibility of last dimension\n if patch_size_to_check[-1] == 1:\n patch_size_to_check = patch_size_to_check[:-1]\n # for 2D, don't check divisibility of first dimension\n elif patch_size_to_check[0] == 1:\n patch_size_to_check = patch_size_to_check[1:]\n if np.count_nonzero(np.remainder(patch_size_to_check, number)) > 0:\n return False\n\n # adding check to address https://github.com/CBICA/GaNDLF/issues/53\n # there is quite possibly a better way to do this\n unique = np.unique(patch_size_to_check)\n if (unique.shape[0] == 1) and (unique[0] <= number):\n return False\n return True\n\n\ndef get_date_time():\n \"\"\"\n Get a well-parsed date string\n\n Returns:\n str: The date in format YYYY/MM/DD::HH:MM:SS\n \"\"\"\n now = datetime.datetime.now().strftime(\"%Y/%m/%d::%H:%M:%S\")\n return now\n\n\ndef get_unique_timestamp():\n \"\"\"\n Get a well-parsed timestamp string to be used for unique filenames\n\n Returns:\n str: The date in format YYYYMMDD_HHMMSS\n \"\"\"\n now = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n return now\n\n\ndef get_filename_extension_sanitized(filename):\n \"\"\"\n This function returns the extension of the filename with leading and trailing characters removed.\n Args:\n filename (str): The filename to be processed.\n Returns:\n str: The filename with extension removed.\n \"\"\"\n _, ext = os.path.splitext(filename)\n # if .gz or .nii file is detected, always return .nii.gz\n if (ext == \".gz\") or (ext == \".nii\"):\n ext = \".nii.gz\"\n return ext\n\n\ndef parse_version(version_string):\n \"\"\"\n Parses version string, discards last identifier (NR/alpha/beta) and returns an integer for comparison.\n\n Args:\n version_string (str): The string to be parsed.\n\n Returns:\n int: The version number.\n \"\"\"\n version_string_split = version_string.split(\".\")\n if len(version_string_split) > 3:\n del version_string_split[-1]\n return int(\"\".join(version_string_split))\n\n\ndef version_check(version_from_config, version_to_check):\n \"\"\"\n This function checks if the version of the config file is compatible with the version of the code.\n\n Args:\n version_from_config (str): The version of the config file.\n version_to_check (str): The version of the code or model to check.\n\n Returns:\n bool: If the version of the config file is compatible with the version of the code.\n \"\"\"\n version_to_check_int = parse_version(version_to_check)\n min_ver = parse_version(version_from_config[\"minimum\"])\n max_ver = parse_version(version_from_config[\"maximum\"])\n if (min_ver > version_to_check_int) or (max_ver < version_to_check_int):\n sys.exit(\n \"Incompatible version of GaNDLF detected (\"\n + str(version_to_check_int)\n + \")\"\n )\n\n return True\n\n\ndef checkPatchDimensions(patch_size, numlay):\n \"\"\"\n This function checks the divisibility of a numpy array or integer for architectural integrity\n\n Args:\n patch_size (numpy.array): The patch size for checking.\n number (int, optional): The number to check divisibility for. 
Defaults to 16.\n\n Returns:\n int: Largest multiple of 2 (less than or equal to numlay) that each element of patch size is divisible by to yield int >= 2\n \"\"\"\n if isinstance(patch_size, int):\n patch_size_to_check = np.array(patch_size)\n else:\n patch_size_to_check = patch_size\n # for 2D, don't check divisibility of last dimension\n if patch_size_to_check[-1] == 1:\n patch_size_to_check = patch_size_to_check[:-1]\n\n if all(\n [x >= 2 ** (numlay + 1) and x % 2**numlay == 0 for x in patch_size_to_check]\n ):\n return numlay\n else:\n # base2 = np.floor(np.log2(patch_size_to_check))\n base2 = np.array([getBase2(x) for x in patch_size_to_check])\n remain = patch_size_to_check / 2**base2 # check that at least 1\n\n layers = np.where(remain == 1, base2 - 1, base2)\n return int(np.min(layers))\n\n\ndef getBase2(num):\n # helper for checkPatchDimensions (returns the largest multiple of 2 that num is evenly divisible by)\n base = 0\n while num % 2 == 0:\n num = num / 2\n base = base + 1\n return base\n", "path": "GANDLF/utils/generic.py"}, {"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\nwith open(\"README.md\") as readme_file:\n readme = readme_file.read()\n\n\ndef git_submodule_update():\n ## submodule update\n os.system(\"git submodule update --init --recursive\")\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n git_submodule_update()\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n git_submodule_update()\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n git_submodule_update()\n\n\n# read version.py\nimport sys, re\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (filepath, error))\n\nrequirements = [\n \"black\",\n \"numpy==1.22.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.57\",\n \"pandas\",\n \"pylint\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n 'pickle5>=0.0.11; python_version < \"3.8.0\"',\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==0.5.1\", # newer versions have changed api for f1 invocation\n \"OpenPatchMiner==0.1.8\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n]\n\n# pytorch doesn't have LTS support on OSX - https://github.com/CBICA/GaNDLF/issues/389\nif sys.platform == \"darwin\":\n requirements.append(\"torch==1.9.0\")\nelse:\n requirements.append(\"torch==1.8.2\")\n\nsetup(\n name=\"GANDLF\",\n version=__version__,\n author=\"Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun G\u00fcley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos\", # alphabetical order\n author_email=\"[email protected]\",\n python_requires=\">=3.7\",\n packages=find_packages(),\n cmdclass={ # this ensures git_submodule_update is called during install\n 
\"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"BSD-3-Clause License\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging\",\n zip_safe=False,\n)\n", "path": "setup.py"}, {"content": "import os\n\n# hides torchio citation request, see https://github.com/fepegar/torchio/issues/235\nos.environ[\"TORCHIO_HIDE_CITATION_PROMPT\"] = \"1\"\n\nfrom .imaging import (\n resize_image,\n resample_image,\n perform_sanity_check_on_subject,\n write_training_patches,\n)\n\nfrom .tensor import (\n one_hot,\n reverse_one_hot,\n send_model_to_device,\n get_class_imbalance_weights,\n get_class_imbalance_weights_segmentation,\n get_class_imbalance_weights_classification,\n get_linear_interpolation_mode,\n print_model_summary,\n)\n\nfrom .write_parse import (\n writeTrainingCSV,\n parseTrainingCSV,\n get_dataframe,\n)\n\nfrom .parameter_processing import (\n populate_header_in_parameters,\n find_problem_type,\n populate_channel_keys_in_params,\n)\n\nfrom .generic import (\n get_date_time,\n get_unique_timestamp,\n get_filename_extension_sanitized,\n version_check,\n)\n\nfrom .modelio import (\n best_model_path_end,\n load_model,\n load_ov_model,\n save_model,\n)\n", "path": "GANDLF/utils/__init__.py"}]} | 4,039 | 582 |
gh_patches_debug_12165 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-4599 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cannot download Daum tvpot url
cannot download video from Daum tvpot, which is a Korean internet portal service
http://tvpot.daum.net/v/vfb45BtrWBWWHKEkOwXEghB
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `youtube_dl/extractor/daum.py`
Content:
```
1 # encoding: utf-8
2
3 from __future__ import unicode_literals
4
5 import re
6
7 from .common import InfoExtractor
8 from ..compat import (
9 compat_urllib_parse,
10 )
11
12
13 class DaumIE(InfoExtractor):
14 _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:v/|.*?clipid=)(?P<id>[^?#&]+)'
15 IE_NAME = 'daum.net'
16
17 _TESTS = [{
18 'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',
19 'info_dict': {
20 'id': '52554690',
21 'ext': 'mp4',
22 'title': 'DOTA 2GETHER 시즌2 6회 - 2부',
23 'description': 'DOTA 2GETHER 시즌2 6회 - 2부',
24 'upload_date': '20130831',
25 'duration': 3868,
26 },
27 }, {
28 'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',
29 'only_matching': True,
30 }, {
31 'url': 'http://tvpot.daum.net/v/07dXWRka62Y%24',
32 'only_matching': True,
33 }]
34
35 def _real_extract(self, url):
36 mobj = re.match(self._VALID_URL, url)
37 video_id = mobj.group('id')
38 canonical_url = 'http://tvpot.daum.net/v/%s' % video_id
39 webpage = self._download_webpage(canonical_url, video_id)
40 full_id = self._search_regex(
41 r'<iframe src="http://videofarm.daum.net/controller/video/viewer/Video.html\?.*?vid=(.+?)[&"]',
42 webpage, 'full id')
43 query = compat_urllib_parse.urlencode({'vid': full_id})
44 info = self._download_xml(
45 'http://tvpot.daum.net/clip/ClipInfoXml.do?' + query, video_id,
46 'Downloading video info')
47 urls = self._download_xml(
48 'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query,
49 video_id, 'Downloading video formats info')
50
51 formats = []
52 for format_el in urls.findall('result/output_list/output_list'):
53 profile = format_el.attrib['profile']
54 format_query = compat_urllib_parse.urlencode({
55 'vid': full_id,
56 'profile': profile,
57 })
58 url_doc = self._download_xml(
59 'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' + format_query,
60 video_id, note='Downloading video data for %s format' % profile)
61 format_url = url_doc.find('result/url').text
62 formats.append({
63 'url': format_url,
64 'format_id': profile,
65 })
66
67 return {
68 'id': video_id,
69 'title': info.find('TITLE').text,
70 'formats': formats,
71 'thumbnail': self._og_search_thumbnail(webpage),
72 'description': info.find('CONTENTS').text,
73 'duration': int(info.find('DURATION').text),
74 'upload_date': info.find('REGDTTM').text[:8],
75 }
76
```
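For reference, a quick hypothetical check (not part of the repository files): the URL reported in the issue does match `_VALID_URL` from the extractor above, which suggests the failure happens later, when the watch page is scraped for the full video id, rather than at URL dispatch.
```python
# Illustrative only: _VALID_URL is copied verbatim from DaumIE above.
import re

_VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:v/|.*?clipid=)(?P<id>[^?#&]+)'

match = re.match(_VALID_URL, 'http://tvpot.daum.net/v/vfb45BtrWBWWHKEkOwXEghB')
assert match is not None
print(match.group('id'))  # -> vfb45BtrWBWWHKEkOwXEghB
```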
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/youtube_dl/extractor/daum.py b/youtube_dl/extractor/daum.py
--- a/youtube_dl/extractor/daum.py
+++ b/youtube_dl/extractor/daum.py
@@ -38,7 +38,7 @@
canonical_url = 'http://tvpot.daum.net/v/%s' % video_id
webpage = self._download_webpage(canonical_url, video_id)
full_id = self._search_regex(
- r'<iframe src="http://videofarm.daum.net/controller/video/viewer/Video.html\?.*?vid=(.+?)[&"]',
+ r'src=["\']http://videofarm.daum.net/controller/video/viewer/Video.html\?.*?vid=(.+?)[&"\']',
webpage, 'full id')
query = compat_urllib_parse.urlencode({'vid': full_id})
info = self._download_xml(
| {"golden_diff": "diff --git a/youtube_dl/extractor/daum.py b/youtube_dl/extractor/daum.py\n--- a/youtube_dl/extractor/daum.py\n+++ b/youtube_dl/extractor/daum.py\n@@ -38,7 +38,7 @@\n canonical_url = 'http://tvpot.daum.net/v/%s' % video_id\n webpage = self._download_webpage(canonical_url, video_id)\n full_id = self._search_regex(\n- r'<iframe src=\"http://videofarm.daum.net/controller/video/viewer/Video.html\\?.*?vid=(.+?)[&\"]',\n+ r'src=[\"\\']http://videofarm.daum.net/controller/video/viewer/Video.html\\?.*?vid=(.+?)[&\"\\']',\n webpage, 'full id')\n query = compat_urllib_parse.urlencode({'vid': full_id})\n info = self._download_xml(\n", "issue": "cannot download Daum tvpot url\ncannot download video from Daum tvpot which is Korean internet portal service\nhttp://tvpot.daum.net/v/vfb45BtrWBWWHKEkOwXEghB\n\n", "before_files": [{"content": "# encoding: utf-8\n\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..compat import (\n compat_urllib_parse,\n)\n\n\nclass DaumIE(InfoExtractor):\n _VALID_URL = r'https?://(?:m\\.)?tvpot\\.daum\\.net/(?:v/|.*?clipid=)(?P<id>[^?#&]+)'\n IE_NAME = 'daum.net'\n\n _TESTS = [{\n 'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',\n 'info_dict': {\n 'id': '52554690',\n 'ext': 'mp4',\n 'title': 'DOTA 2GETHER \uc2dc\uc98c2 6\ud68c - 2\ubd80',\n 'description': 'DOTA 2GETHER \uc2dc\uc98c2 6\ud68c - 2\ubd80',\n 'upload_date': '20130831',\n 'duration': 3868,\n },\n }, {\n 'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',\n 'only_matching': True,\n }, {\n 'url': 'http://tvpot.daum.net/v/07dXWRka62Y%24',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n video_id = mobj.group('id')\n canonical_url = 'http://tvpot.daum.net/v/%s' % video_id\n webpage = self._download_webpage(canonical_url, video_id)\n full_id = self._search_regex(\n r'<iframe src=\"http://videofarm.daum.net/controller/video/viewer/Video.html\\?.*?vid=(.+?)[&\"]',\n webpage, 'full id')\n query = compat_urllib_parse.urlencode({'vid': full_id})\n info = self._download_xml(\n 'http://tvpot.daum.net/clip/ClipInfoXml.do?' + query, video_id,\n 'Downloading video info')\n urls = self._download_xml(\n 'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query,\n video_id, 'Downloading video formats info')\n\n formats = []\n for format_el in urls.findall('result/output_list/output_list'):\n profile = format_el.attrib['profile']\n format_query = compat_urllib_parse.urlencode({\n 'vid': full_id,\n 'profile': profile,\n })\n url_doc = self._download_xml(\n 'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' 
+ format_query,\n video_id, note='Downloading video data for %s format' % profile)\n format_url = url_doc.find('result/url').text\n formats.append({\n 'url': format_url,\n 'format_id': profile,\n })\n\n return {\n 'id': video_id,\n 'title': info.find('TITLE').text,\n 'formats': formats,\n 'thumbnail': self._og_search_thumbnail(webpage),\n 'description': info.find('CONTENTS').text,\n 'duration': int(info.find('DURATION').text),\n 'upload_date': info.find('REGDTTM').text[:8],\n }\n", "path": "youtube_dl/extractor/daum.py"}], "after_files": [{"content": "# encoding: utf-8\n\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..compat import (\n compat_urllib_parse,\n)\n\n\nclass DaumIE(InfoExtractor):\n _VALID_URL = r'https?://(?:m\\.)?tvpot\\.daum\\.net/(?:v/|.*?clipid=)(?P<id>[^?#&]+)'\n IE_NAME = 'daum.net'\n\n _TESTS = [{\n 'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',\n 'info_dict': {\n 'id': '52554690',\n 'ext': 'mp4',\n 'title': 'DOTA 2GETHER \uc2dc\uc98c2 6\ud68c - 2\ubd80',\n 'description': 'DOTA 2GETHER \uc2dc\uc98c2 6\ud68c - 2\ubd80',\n 'upload_date': '20130831',\n 'duration': 3868,\n },\n }, {\n 'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',\n 'only_matching': True,\n }, {\n 'url': 'http://tvpot.daum.net/v/07dXWRka62Y%24',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n video_id = mobj.group('id')\n canonical_url = 'http://tvpot.daum.net/v/%s' % video_id\n webpage = self._download_webpage(canonical_url, video_id)\n full_id = self._search_regex(\n r'src=[\"\\']http://videofarm.daum.net/controller/video/viewer/Video.html\\?.*?vid=(.+?)[&\"\\']',\n webpage, 'full id')\n query = compat_urllib_parse.urlencode({'vid': full_id})\n info = self._download_xml(\n 'http://tvpot.daum.net/clip/ClipInfoXml.do?' + query, video_id,\n 'Downloading video info')\n urls = self._download_xml(\n 'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query,\n video_id, 'Downloading video formats info')\n\n formats = []\n for format_el in urls.findall('result/output_list/output_list'):\n profile = format_el.attrib['profile']\n format_query = compat_urllib_parse.urlencode({\n 'vid': full_id,\n 'profile': profile,\n })\n url_doc = self._download_xml(\n 'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' + format_query,\n video_id, note='Downloading video data for %s format' % profile)\n format_url = url_doc.find('result/url').text\n formats.append({\n 'url': format_url,\n 'format_id': profile,\n })\n\n return {\n 'id': video_id,\n 'title': info.find('TITLE').text,\n 'formats': formats,\n 'thumbnail': self._og_search_thumbnail(webpage),\n 'description': info.find('CONTENTS').text,\n 'duration': int(info.find('DURATION').text),\n 'upload_date': info.find('REGDTTM').text[:8],\n }\n", "path": "youtube_dl/extractor/daum.py"}]} | 1,217 | 209 |
gh_patches_debug_27366 | rasdani/github-patches | git_diff | meltano__meltano-6964 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
feature: `--cwd` CLI option
### Feature scope
CLI (options, error messages, logging, etc.)
### Description
As seen in some other CLIs, it'd be nice to have a `--cwd` option which takes a path to a directory as an argument, and runs the program as if it were invoked from that directory. This could be implemented by calling [`os.chdir`](https://docs.python.org/3/library/os.html#os.chdir) right after the CLI options are parsed if the `--cwd` option is provided.
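A rough standalone sketch of that idea follows. It is illustrative only — a bare Click command rather than Meltano's actual CLI, and it assumes Click 8 (for `path_type`); the option help text and behaviour are placeholders.

```python
# Minimal sketch (not Meltano code): accept --cwd, chdir before doing any work.
from __future__ import annotations

import os
from pathlib import Path

import click


@click.command()
@click.option(
    "--cwd",
    type=click.Path(exists=True, file_okay=False, resolve_path=True, path_type=Path),
    help="Run as if the program had been started in this directory.",
)
def main(cwd: Path | None):
    if cwd:
        os.chdir(cwd)  # subsequent relative paths resolve against --cwd
    click.echo(f"working directory: {os.getcwd()}")


if __name__ == "__main__":
    main()
```

In Meltano itself the option would presumably be added to the existing top-level group options in `src/meltano/cli/cli.py` shown below.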
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/meltano/cli/cli.py`
Content:
```
1 """Definition of the top-level Click group for the Meltano CLI."""
2
3 from __future__ import annotations
4
5 import logging
6 import sys
7 from typing import NoReturn
8
9 import click
10
11 import meltano
12 from meltano.cli.utils import InstrumentedGroup
13 from meltano.core.behavior.versioned import IncompatibleVersionError
14 from meltano.core.error import MeltanoConfigurationError
15 from meltano.core.logging import LEVELS, setup_logging
16 from meltano.core.project import Project, ProjectNotFound
17 from meltano.core.project_settings_service import ProjectSettingsService
18 from meltano.core.tracking import CliContext, Tracker
19 from meltano.core.utils import get_no_color_flag
20
21 logger = logging.getLogger(__name__)
22
23
24 class NoWindowsGlobbingGroup(InstrumentedGroup):
25 """A instrumented Click group that does not perform glob expansion on Windows.
26
27 This restores the behaviour of Click's globbing to how it was before v8.
28 Click (as of version 8.1.3) ignores quotes around an asterisk, which makes
29 it behave differently than most shells that support globbing, and make some
30 typical Meltano commands fail, e.g. `meltano select tap-gitlab tags "*"`.
31 """
32
33 def main(self, *args, **kwargs) -> NoReturn:
34 """Invoke the Click CLI with Windows globbing disabled.
35
36 Args:
37 args: Positional arguments for the Click group.
38 kwargs: Keyword arguments for the Click group.
39 """
40 return super().main(*args, windows_expand_args=False, **kwargs)
41
42
43 @click.group(
44 cls=NoWindowsGlobbingGroup, invoke_without_command=True, no_args_is_help=True
45 )
46 @click.option("--log-level", type=click.Choice(LEVELS.keys()))
47 @click.option(
48 "--log-config", type=str, help="Path to a python logging yaml config file."
49 )
50 @click.option("-v", "--verbose", count=True, help="Not used.")
51 @click.option(
52 "--environment",
53 envvar="MELTANO_ENVIRONMENT",
54 help="Meltano environment name.",
55 )
56 @click.option(
57 "--no-environment", is_flag=True, default=False, help="Don't use any environment."
58 )
59 @click.version_option(version=meltano.__version__, prog_name="meltano")
60 @click.pass_context
61 def cli( # noqa: WPS231
62 ctx: click.Context,
63 log_level: str,
64 log_config: str,
65 verbose: int,
66 environment: str,
67 no_environment: bool,
68 ): # noqa: WPS231
69 """
70 ELT for the DataOps era.
71
72 \b\nRead more at https://docs.meltano.com/reference/command-line-interface
73 """
74 ctx.ensure_object(dict)
75
76 if log_level:
77 ProjectSettingsService.config_override["cli.log_level"] = log_level
78
79 if log_config:
80 ProjectSettingsService.config_override["cli.log_config"] = log_config
81
82 ctx.obj["verbosity"] = verbose
83
84 no_color = get_no_color_flag()
85 if no_color:
86 ctx.color = False
87
88 try: # noqa: WPS229
89 project = Project.find()
90 setup_logging(project)
91 project_setting_service = ProjectSettingsService(project)
92
93 readonly = project_setting_service.get("project_readonly")
94 if readonly:
95 project.readonly = True
96 if project.readonly:
97 logger.debug("Project is read-only.")
98
99 # detect active environment
100 selected_environment = None
101 is_default_environment = False
102 if no_environment or (environment and environment.lower() == "null"):
103 logger.info("No environment is active")
104 elif environment:
105 selected_environment = environment
106 elif project_setting_service.get("default_environment"):
107 selected_environment = project_setting_service.get("default_environment")
108 is_default_environment = True
109 ctx.obj["selected_environment"] = selected_environment
110 ctx.obj["is_default_environment"] = is_default_environment
111 ctx.obj["project"] = project
112 ctx.obj["tracker"] = Tracker(project)
113 ctx.obj["tracker"].add_contexts(
114 CliContext.from_click_context(ctx)
115 ) # backfill the `cli` CliContext
116 except ProjectNotFound:
117 ctx.obj["project"] = None
118 except IncompatibleVersionError:
119 click.secho(
120 "This Meltano project is incompatible with this version of `meltano`.",
121 fg="yellow",
122 )
123 click.echo(
124 "For more details, visit https://docs.meltano.com/guide/installation#upgrading-meltano-version"
125 )
126 sys.exit(3)
127
128
129 def activate_environment(
130 ctx: click.Context, project: Project, required: bool = False
131 ) -> None:
132 """Activate the selected environment.
133
134 The selected environment is whatever was selected with the `--environment`
135 option, or the default environment (set in `meltano.yml`) otherwise.
136
137 Args:
138 ctx: The Click context, used to determine the selected environment.
139 project: The project for which the environment will be activated.
140 """
141 if ctx.obj["selected_environment"]:
142 project.activate_environment(ctx.obj["selected_environment"])
143 elif required:
144 raise MeltanoConfigurationError(
145 reason="A Meltano environment must be specified",
146 instruction="Set the `default_environment` option in "
147 "`meltano.yml`, or the `--environment` CLI option",
148 )
149
150
151 def activate_explicitly_provided_environment(
152 ctx: click.Context, project: Project
153 ) -> None:
154 """Activate the selected environment if it has been explicitly set.
155
156 Some commands (e.g. `config`, `job`, etc.) do not respect the configured
157 `default_environment`, and will only run with an environment active if it
158 has been explicitly set (e.g. with the `--environment` CLI option).
159
160 Args:
161 ctx: The Click context, used to determine the selected environment.
162 project: The project for which the environment will be activated.
163 """
164 if ctx.obj["is_default_environment"]:
165 logger.info(
166 f"The default environment {ctx.obj['selected_environment']!r} will "
167 f"be ignored for `meltano {ctx.command.name}`. To configure a specific "
168 "environment, please use the option `--environment=<environment name>`."
169 )
170 project.deactivate_environment()
171 else:
172 activate_environment(ctx, project)
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/meltano/cli/cli.py b/src/meltano/cli/cli.py
--- a/src/meltano/cli/cli.py
+++ b/src/meltano/cli/cli.py
@@ -3,7 +3,9 @@
from __future__ import annotations
import logging
+import os
import sys
+from pathlib import Path
from typing import NoReturn
import click
@@ -56,6 +58,11 @@
@click.option(
"--no-environment", is_flag=True, default=False, help="Don't use any environment."
)
[email protected](
+ "--cwd",
+ type=click.Path(exists=True, file_okay=False, resolve_path=True, path_type=Path),
+ help="Run Meltano as if it had been started in the specified directory.",
+)
@click.version_option(version=meltano.__version__, prog_name="meltano")
@click.pass_context
def cli( # noqa: WPS231
@@ -65,6 +72,7 @@
verbose: int,
environment: str,
no_environment: bool,
+ cwd: Path | None,
): # noqa: WPS231
"""
ELT for the DataOps era.
@@ -85,6 +93,12 @@
if no_color:
ctx.color = False
+ if cwd:
+ try:
+ os.chdir(cwd)
+ except OSError as ex:
+ raise Exception(f"Unable to run Meltano from {cwd!r}") from ex
+
try: # noqa: WPS229
project = Project.find()
setup_logging(project)
| {"golden_diff": "diff --git a/src/meltano/cli/cli.py b/src/meltano/cli/cli.py\n--- a/src/meltano/cli/cli.py\n+++ b/src/meltano/cli/cli.py\n@@ -3,7 +3,9 @@\n from __future__ import annotations\n \n import logging\n+import os\n import sys\n+from pathlib import Path\n from typing import NoReturn\n \n import click\n@@ -56,6 +58,11 @@\n @click.option(\n \"--no-environment\", is_flag=True, default=False, help=\"Don't use any environment.\"\n )\[email protected](\n+ \"--cwd\",\n+ type=click.Path(exists=True, file_okay=False, resolve_path=True, path_type=Path),\n+ help=\"Run Meltano as if it had been started in the specified directory.\",\n+)\n @click.version_option(version=meltano.__version__, prog_name=\"meltano\")\n @click.pass_context\n def cli( # noqa: WPS231\n@@ -65,6 +72,7 @@\n verbose: int,\n environment: str,\n no_environment: bool,\n+ cwd: Path | None,\n ): # noqa: WPS231\n \"\"\"\n ELT for the DataOps era.\n@@ -85,6 +93,12 @@\n if no_color:\n ctx.color = False\n \n+ if cwd:\n+ try:\n+ os.chdir(cwd)\n+ except OSError as ex:\n+ raise Exception(f\"Unable to run Meltano from {cwd!r}\") from ex\n+\n try: # noqa: WPS229\n project = Project.find()\n setup_logging(project)\n", "issue": "feature: `--cwd` CLI option\n### Feature scope\n\nCLI (options, error messages, logging, etc.)\n\n### Description\n\nAs seen in some other CLIs, it'd be nice to have a `--cwd` option which takes a path to a directory as an argument, and runs the program as-if it were invoked from that directory. This could be implemented by calling [`os.chdir`](https://docs.python.org/3/library/os.html#os.chdir) right after the CLI options are parsed if the `--cwd` option is provided.\n", "before_files": [{"content": "\"\"\"Definition of the top-level Click group for the Meltano CLI.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport sys\nfrom typing import NoReturn\n\nimport click\n\nimport meltano\nfrom meltano.cli.utils import InstrumentedGroup\nfrom meltano.core.behavior.versioned import IncompatibleVersionError\nfrom meltano.core.error import MeltanoConfigurationError\nfrom meltano.core.logging import LEVELS, setup_logging\nfrom meltano.core.project import Project, ProjectNotFound\nfrom meltano.core.project_settings_service import ProjectSettingsService\nfrom meltano.core.tracking import CliContext, Tracker\nfrom meltano.core.utils import get_no_color_flag\n\nlogger = logging.getLogger(__name__)\n\n\nclass NoWindowsGlobbingGroup(InstrumentedGroup):\n \"\"\"A instrumented Click group that does not perform glob expansion on Windows.\n\n This restores the behaviour of Click's globbing to how it was before v8.\n Click (as of version 8.1.3) ignores quotes around an asterisk, which makes\n it behave differently than most shells that support globbing, and make some\n typical Meltano commands fail, e.g. 
`meltano select tap-gitlab tags \"*\"`.\n \"\"\"\n\n def main(self, *args, **kwargs) -> NoReturn:\n \"\"\"Invoke the Click CLI with Windows globbing disabled.\n\n Args:\n args: Positional arguments for the Click group.\n kwargs: Keyword arguments for the Click group.\n \"\"\"\n return super().main(*args, windows_expand_args=False, **kwargs)\n\n\[email protected](\n cls=NoWindowsGlobbingGroup, invoke_without_command=True, no_args_is_help=True\n)\[email protected](\"--log-level\", type=click.Choice(LEVELS.keys()))\[email protected](\n \"--log-config\", type=str, help=\"Path to a python logging yaml config file.\"\n)\[email protected](\"-v\", \"--verbose\", count=True, help=\"Not used.\")\[email protected](\n \"--environment\",\n envvar=\"MELTANO_ENVIRONMENT\",\n help=\"Meltano environment name.\",\n)\[email protected](\n \"--no-environment\", is_flag=True, default=False, help=\"Don't use any environment.\"\n)\[email protected]_option(version=meltano.__version__, prog_name=\"meltano\")\[email protected]_context\ndef cli( # noqa: WPS231\n ctx: click.Context,\n log_level: str,\n log_config: str,\n verbose: int,\n environment: str,\n no_environment: bool,\n): # noqa: WPS231\n \"\"\"\n ELT for the DataOps era.\n\n \\b\\nRead more at https://docs.meltano.com/reference/command-line-interface\n \"\"\"\n ctx.ensure_object(dict)\n\n if log_level:\n ProjectSettingsService.config_override[\"cli.log_level\"] = log_level\n\n if log_config:\n ProjectSettingsService.config_override[\"cli.log_config\"] = log_config\n\n ctx.obj[\"verbosity\"] = verbose\n\n no_color = get_no_color_flag()\n if no_color:\n ctx.color = False\n\n try: # noqa: WPS229\n project = Project.find()\n setup_logging(project)\n project_setting_service = ProjectSettingsService(project)\n\n readonly = project_setting_service.get(\"project_readonly\")\n if readonly:\n project.readonly = True\n if project.readonly:\n logger.debug(\"Project is read-only.\")\n\n # detect active environment\n selected_environment = None\n is_default_environment = False\n if no_environment or (environment and environment.lower() == \"null\"):\n logger.info(\"No environment is active\")\n elif environment:\n selected_environment = environment\n elif project_setting_service.get(\"default_environment\"):\n selected_environment = project_setting_service.get(\"default_environment\")\n is_default_environment = True\n ctx.obj[\"selected_environment\"] = selected_environment\n ctx.obj[\"is_default_environment\"] = is_default_environment\n ctx.obj[\"project\"] = project\n ctx.obj[\"tracker\"] = Tracker(project)\n ctx.obj[\"tracker\"].add_contexts(\n CliContext.from_click_context(ctx)\n ) # backfill the `cli` CliContext\n except ProjectNotFound:\n ctx.obj[\"project\"] = None\n except IncompatibleVersionError:\n click.secho(\n \"This Meltano project is incompatible with this version of `meltano`.\",\n fg=\"yellow\",\n )\n click.echo(\n \"For more details, visit https://docs.meltano.com/guide/installation#upgrading-meltano-version\"\n )\n sys.exit(3)\n\n\ndef activate_environment(\n ctx: click.Context, project: Project, required: bool = False\n) -> None:\n \"\"\"Activate the selected environment.\n\n The selected environment is whatever was selected with the `--environment`\n option, or the default environment (set in `meltano.yml`) otherwise.\n\n Args:\n ctx: The Click context, used to determine the selected environment.\n project: The project for which the environment will be activated.\n \"\"\"\n if ctx.obj[\"selected_environment\"]:\n 
project.activate_environment(ctx.obj[\"selected_environment\"])\n elif required:\n raise MeltanoConfigurationError(\n reason=\"A Meltano environment must be specified\",\n instruction=\"Set the `default_environment` option in \"\n \"`meltano.yml`, or the `--environment` CLI option\",\n )\n\n\ndef activate_explicitly_provided_environment(\n ctx: click.Context, project: Project\n) -> None:\n \"\"\"Activate the selected environment if it has been explicitly set.\n\n Some commands (e.g. `config`, `job`, etc.) do not respect the configured\n `default_environment`, and will only run with an environment active if it\n has been explicitly set (e.g. with the `--environment` CLI option).\n\n Args:\n ctx: The Click context, used to determine the selected environment.\n project: The project for which the environment will be activated.\n \"\"\"\n if ctx.obj[\"is_default_environment\"]:\n logger.info(\n f\"The default environment {ctx.obj['selected_environment']!r} will \"\n f\"be ignored for `meltano {ctx.command.name}`. To configure a specific \"\n \"environment, please use the option `--environment=<environment name>`.\"\n )\n project.deactivate_environment()\n else:\n activate_environment(ctx, project)\n", "path": "src/meltano/cli/cli.py"}], "after_files": [{"content": "\"\"\"Definition of the top-level Click group for the Meltano CLI.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport os\nimport sys\nfrom pathlib import Path\nfrom typing import NoReturn\n\nimport click\n\nimport meltano\nfrom meltano.cli.utils import InstrumentedGroup\nfrom meltano.core.behavior.versioned import IncompatibleVersionError\nfrom meltano.core.error import MeltanoConfigurationError\nfrom meltano.core.logging import LEVELS, setup_logging\nfrom meltano.core.project import Project, ProjectNotFound\nfrom meltano.core.project_settings_service import ProjectSettingsService\nfrom meltano.core.tracking import CliContext, Tracker\nfrom meltano.core.utils import get_no_color_flag\n\nlogger = logging.getLogger(__name__)\n\n\nclass NoWindowsGlobbingGroup(InstrumentedGroup):\n \"\"\"A instrumented Click group that does not perform glob expansion on Windows.\n\n This restores the behaviour of Click's globbing to how it was before v8.\n Click (as of version 8.1.3) ignores quotes around an asterisk, which makes\n it behave differently than most shells that support globbing, and make some\n typical Meltano commands fail, e.g. 
`meltano select tap-gitlab tags \"*\"`.\n \"\"\"\n\n def main(self, *args, **kwargs) -> NoReturn:\n \"\"\"Invoke the Click CLI with Windows globbing disabled.\n\n Args:\n args: Positional arguments for the Click group.\n kwargs: Keyword arguments for the Click group.\n \"\"\"\n return super().main(*args, windows_expand_args=False, **kwargs)\n\n\[email protected](\n cls=NoWindowsGlobbingGroup, invoke_without_command=True, no_args_is_help=True\n)\[email protected](\"--log-level\", type=click.Choice(LEVELS.keys()))\[email protected](\n \"--log-config\", type=str, help=\"Path to a python logging yaml config file.\"\n)\[email protected](\"-v\", \"--verbose\", count=True, help=\"Not used.\")\[email protected](\n \"--environment\",\n envvar=\"MELTANO_ENVIRONMENT\",\n help=\"Meltano environment name.\",\n)\[email protected](\n \"--no-environment\", is_flag=True, default=False, help=\"Don't use any environment.\"\n)\[email protected](\n \"--cwd\",\n type=click.Path(exists=True, file_okay=False, resolve_path=True, path_type=Path),\n help=\"Run Meltano as if it had been started in the specified directory.\",\n)\[email protected]_option(version=meltano.__version__, prog_name=\"meltano\")\[email protected]_context\ndef cli( # noqa: WPS231\n ctx: click.Context,\n log_level: str,\n log_config: str,\n verbose: int,\n environment: str,\n no_environment: bool,\n cwd: Path | None,\n): # noqa: WPS231\n \"\"\"\n ELT for the DataOps era.\n\n \\b\\nRead more at https://docs.meltano.com/reference/command-line-interface\n \"\"\"\n ctx.ensure_object(dict)\n\n if log_level:\n ProjectSettingsService.config_override[\"cli.log_level\"] = log_level\n\n if log_config:\n ProjectSettingsService.config_override[\"cli.log_config\"] = log_config\n\n ctx.obj[\"verbosity\"] = verbose\n\n no_color = get_no_color_flag()\n if no_color:\n ctx.color = False\n\n if cwd:\n try:\n os.chdir(cwd)\n except OSError as ex:\n raise Exception(f\"Unable to run Meltano from {cwd!r}\") from ex\n\n try: # noqa: WPS229\n project = Project.find()\n setup_logging(project)\n project_setting_service = ProjectSettingsService(project)\n\n readonly = project_setting_service.get(\"project_readonly\")\n if readonly:\n project.readonly = True\n if project.readonly:\n logger.debug(\"Project is read-only.\")\n\n # detect active environment\n selected_environment = None\n is_default_environment = False\n if no_environment or (environment and environment.lower() == \"null\"):\n logger.info(\"No environment is active\")\n elif environment:\n selected_environment = environment\n elif project_setting_service.get(\"default_environment\"):\n selected_environment = project_setting_service.get(\"default_environment\")\n is_default_environment = True\n ctx.obj[\"selected_environment\"] = selected_environment\n ctx.obj[\"is_default_environment\"] = is_default_environment\n ctx.obj[\"project\"] = project\n ctx.obj[\"tracker\"] = Tracker(project)\n ctx.obj[\"tracker\"].add_contexts(\n CliContext.from_click_context(ctx)\n ) # backfill the `cli` CliContext\n except ProjectNotFound:\n ctx.obj[\"project\"] = None\n except IncompatibleVersionError:\n click.secho(\n \"This Meltano project is incompatible with this version of `meltano`.\",\n fg=\"yellow\",\n )\n click.echo(\n \"For more details, visit https://docs.meltano.com/guide/installation#upgrading-meltano-version\"\n )\n sys.exit(3)\n\n\ndef activate_environment(\n ctx: click.Context, project: Project, required: bool = False\n) -> None:\n \"\"\"Activate the selected environment.\n\n The selected environment is whatever 
was selected with the `--environment`\n option, or the default environment (set in `meltano.yml`) otherwise.\n\n Args:\n ctx: The Click context, used to determine the selected environment.\n project: The project for which the environment will be activated.\n \"\"\"\n if ctx.obj[\"selected_environment\"]:\n project.activate_environment(ctx.obj[\"selected_environment\"])\n elif required:\n raise MeltanoConfigurationError(\n reason=\"A Meltano environment must be specified\",\n instruction=\"Set the `default_environment` option in \"\n \"`meltano.yml`, or the `--environment` CLI option\",\n )\n\n\ndef activate_explicitly_provided_environment(\n ctx: click.Context, project: Project\n) -> None:\n \"\"\"Activate the selected environment if it has been explicitly set.\n\n Some commands (e.g. `config`, `job`, etc.) do not respect the configured\n `default_environment`, and will only run with an environment active if it\n has been explicitly set (e.g. with the `--environment` CLI option).\n\n Args:\n ctx: The Click context, used to determine the selected environment.\n project: The project for which the environment will be activated.\n \"\"\"\n if ctx.obj[\"is_default_environment\"]:\n logger.info(\n f\"The default environment {ctx.obj['selected_environment']!r} will \"\n f\"be ignored for `meltano {ctx.command.name}`. To configure a specific \"\n \"environment, please use the option `--environment=<environment name>`.\"\n )\n project.deactivate_environment()\n else:\n activate_environment(ctx, project)\n", "path": "src/meltano/cli/cli.py"}]} | 2,152 | 363 |
gh_patches_debug_28355 | rasdani/github-patches | git_diff | pymeasure__pymeasure-982 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Find USB device from hardware information
The COM port of virtual Com ports via USB may change from time to time.
I propose a simple helper method, which retrieves the COM port of a USB virtual port according to hardware information (vendor id, product id, serial number).
This method allows to specify the hardware information and then to connect always to the correct device, independent of the COM port number assigned at that time.
Rough idea:
~~~python
from serial.tools import list_ports
def findPort(vid: int, pid: int, sn: str) -> str:
"""Find the port name ('COM5') of the first device with the given USB information.
:param int vid: Vendor ID.
:param int pid: Product ID.
:param str sn: Serial number.
:return: Port as a string as `device` attribute returned from `list_ports.comports`.
"""
for port in sorted(list_ports.comports()):
if port.vid == vid and port.pid == pid and port.serial_number == str(sn):
return port.device
raise AttributeError("No device found for the given data.")
~~~
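For example (values purely illustrative): `findPort(vid=0x0403, pid=0x6001, sn="A6003X9k")` would return whatever port name the device currently has, e.g. `'COM5'` on Windows or `'/dev/ttyUSB0'` on Linux, which can then be passed on to the instrument/adapter as usual.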
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pymeasure/instruments/__init__.py`
Content:
```
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2023 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25 from ..errors import RangeError, RangeException
26 from .channel import Channel
27 from .instrument import Instrument
28 from .resources import list_resources
29 from .validators import discreteTruncate
30
31 from . import activetechnologies
32 from . import advantest
33 from . import agilent
34 from . import aja
35 from . import ametek
36 from . import ami
37 from . import anaheimautomation
38 from . import anapico
39 from . import andeenhagerling
40 from . import anritsu
41 from . import attocube
42 from . import bkprecision
43 from . import danfysik
44 from . import deltaelektronika
45 from . import edwards
46 from . import eurotest
47 from . import fluke
48 from . import fwbell
49 from . import hcp
50 from . import heidenhain
51 from . import hp
52 from . import ipgphotonics
53 from . import keithley
54 from . import keysight
55 from . import lakeshore
56 from . import lecroy
57 from . import mksinst
58 from . import newport
59 from . import ni
60 from . import novanta
61 from . import oxfordinstruments
62 from . import parker
63 from . import pendulum
64 from . import razorbill
65 from . import rohdeschwarz
66 from . import siglenttechnologies
67 from . import signalrecovery
68 from . import srs
69 from . import tcpowerconversion
70 from . import tektronix
71 from . import teledyne
72 from . import temptronic
73 from . import texio
74 from . import thermotron
75 from . import thorlabs
76 from . import thyracont
77 from . import toptica
78 from . import velleman
79 from . import yokogawa
80
```
Path: `pymeasure/instruments/resources.py`
Content:
```
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2023 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25 import pyvisa
26
27
28 def list_resources():
29 """
30 Prints the available resources, and returns a list of VISA resource names
31
32 .. code-block:: python
33
34 resources = list_resources()
35 #prints (e.g.)
36 #0 : GPIB0::22::INSTR : Agilent Technologies,34410A,******
37 #1 : GPIB0::26::INSTR : Keithley Instruments Inc., Model 2612, *****
38 dmm = Agilent34410(resources[0])
39
40 """
41 rm = pyvisa.ResourceManager()
42 instrs = rm.list_resources()
43 for n, instr in enumerate(instrs):
44 # trying to catch errors in comunication
45 try:
46 res = rm.open_resource(instr)
47 # try to avoid errors from *idn?
48 try:
49 # noinspection PyUnresolvedReferences
50 idn = res.query('*idn?')[:-1]
51 except pyvisa.Error:
52 idn = "Not known"
53 finally:
54 res.close()
55 print(n, ":", instr, ":", idn)
56 except pyvisa.VisaIOError as e:
57 print(n, ":", instr, ":", "Visa IO Error: check connections")
58 print(e)
59 rm.close()
60 return instrs
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pymeasure/instruments/__init__.py b/pymeasure/instruments/__init__.py
--- a/pymeasure/instruments/__init__.py
+++ b/pymeasure/instruments/__init__.py
@@ -25,7 +25,7 @@
from ..errors import RangeError, RangeException
from .channel import Channel
from .instrument import Instrument
-from .resources import list_resources
+from .resources import find_serial_port, list_resources
from .validators import discreteTruncate
from . import activetechnologies
diff --git a/pymeasure/instruments/resources.py b/pymeasure/instruments/resources.py
--- a/pymeasure/instruments/resources.py
+++ b/pymeasure/instruments/resources.py
@@ -23,6 +23,7 @@
#
import pyvisa
+from serial.tools import list_ports
def list_resources():
@@ -58,3 +59,28 @@
print(e)
rm.close()
return instrs
+
+
+def find_serial_port(vendor_id=None, product_id=None, serial_number=None):
+ """Find the VISA port name of the first serial device with the given USB information.
+
+ Use `None` as a value if you do not want to check for that parameter.
+
+ .. code-block:: python
+
+ resource_name = find_serial_port(vendor_id=1256, serial_number="SN12345")
+ dmm = Agilent34410(resource_name)
+
+ :param int vid: Vendor ID.
+ :param int pid: Product ID.
+ :param str sn: Serial number.
+ :return str: Port as a VISA string for a serial device (e.g. "ASRL5" or "ASRL/dev/ttyACM5").
+ """
+ for port in sorted(list_ports.comports()):
+ if ((vendor_id is None or port.vid == vendor_id)
+ and (product_id is None or port.pid == product_id)
+ and (serial_number is None or port.serial_number == str(serial_number))):
+ # remove "COM" from windows serial port names.
+ port_name = port.device.replace("COM", "")
+ return "ASRL" + port_name
+ raise AttributeError("No device found for the given data.")
| {"golden_diff": "diff --git a/pymeasure/instruments/__init__.py b/pymeasure/instruments/__init__.py\n--- a/pymeasure/instruments/__init__.py\n+++ b/pymeasure/instruments/__init__.py\n@@ -25,7 +25,7 @@\n from ..errors import RangeError, RangeException\n from .channel import Channel\n from .instrument import Instrument\n-from .resources import list_resources\n+from .resources import find_serial_port, list_resources\n from .validators import discreteTruncate\n \n from . import activetechnologies\ndiff --git a/pymeasure/instruments/resources.py b/pymeasure/instruments/resources.py\n--- a/pymeasure/instruments/resources.py\n+++ b/pymeasure/instruments/resources.py\n@@ -23,6 +23,7 @@\n #\n \n import pyvisa\n+from serial.tools import list_ports\n \n \n def list_resources():\n@@ -58,3 +59,28 @@\n print(e)\n rm.close()\n return instrs\n+\n+\n+def find_serial_port(vendor_id=None, product_id=None, serial_number=None):\n+ \"\"\"Find the VISA port name of the first serial device with the given USB information.\n+\n+ Use `None` as a value if you do not want to check for that parameter.\n+\n+ .. code-block:: python\n+\n+ resource_name = find_serial_port(vendor_id=1256, serial_number=\"SN12345\")\n+ dmm = Agilent34410(resource_name)\n+\n+ :param int vid: Vendor ID.\n+ :param int pid: Product ID.\n+ :param str sn: Serial number.\n+ :return str: Port as a VISA string for a serial device (e.g. \"ASRL5\" or \"ASRL/dev/ttyACM5\").\n+ \"\"\"\n+ for port in sorted(list_ports.comports()):\n+ if ((vendor_id is None or port.vid == vendor_id)\n+ and (product_id is None or port.pid == product_id)\n+ and (serial_number is None or port.serial_number == str(serial_number))):\n+ # remove \"COM\" from windows serial port names.\n+ port_name = port.device.replace(\"COM\", \"\")\n+ return \"ASRL\" + port_name\n+ raise AttributeError(\"No device found for the given data.\")\n", "issue": "Find USB device from hardware information\nThe COM port of virtual Com ports via USB may change from time to time.\r\nI propose a simple helper method, which retrieves the COM port of a USB virtual port according to hardware information (vendor id, product id, serial number).\r\nThis method allows to specify the hardware information and then to connect always to the correct device, independent of the COM port number assigned at that time.\r\n\r\nRough idea:\r\n\r\n~~~python\r\nfrom serial.tools import list_ports\r\n\r\ndef findPort(vid: int, pid: int, sn: str) -> str:\r\n \"\"\"Find the port name ('COM5') of the first device with the given USB information.\r\n\r\n :param int vid: Vendor ID.\r\n :param int pid: Product ID.\r\n :param str sn: Serial number.\r\n :return: Port as a string as `device` attribute returned from `list_ports.comports`.\r\n \"\"\"\r\n for port in sorted(list_ports.comports()):\r\n if port.vid == vid and port.pid == pid and port.serial_number == str(sn):\r\n return port.device\r\n raise AttributeError(\"No device found for the given data.\")\r\n~~~\n", "before_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2023 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following 
conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nfrom ..errors import RangeError, RangeException\nfrom .channel import Channel\nfrom .instrument import Instrument\nfrom .resources import list_resources\nfrom .validators import discreteTruncate\n\nfrom . import activetechnologies\nfrom . import advantest\nfrom . import agilent\nfrom . import aja\nfrom . import ametek\nfrom . import ami\nfrom . import anaheimautomation\nfrom . import anapico\nfrom . import andeenhagerling\nfrom . import anritsu\nfrom . import attocube\nfrom . import bkprecision\nfrom . import danfysik\nfrom . import deltaelektronika\nfrom . import edwards\nfrom . import eurotest\nfrom . import fluke\nfrom . import fwbell\nfrom . import hcp\nfrom . import heidenhain\nfrom . import hp\nfrom . import ipgphotonics\nfrom . import keithley\nfrom . import keysight\nfrom . import lakeshore\nfrom . import lecroy\nfrom . import mksinst\nfrom . import newport\nfrom . import ni\nfrom . import novanta\nfrom . import oxfordinstruments\nfrom . import parker\nfrom . import pendulum\nfrom . import razorbill\nfrom . import rohdeschwarz\nfrom . import siglenttechnologies\nfrom . import signalrecovery\nfrom . import srs\nfrom . import tcpowerconversion\nfrom . import tektronix\nfrom . import teledyne\nfrom . import temptronic\nfrom . import texio\nfrom . import thermotron\nfrom . import thorlabs\nfrom . import thyracont\nfrom . import toptica\nfrom . import velleman\nfrom . import yokogawa\n", "path": "pymeasure/instruments/__init__.py"}, {"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2023 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport pyvisa\n\n\ndef list_resources():\n \"\"\"\n Prints the available resources, and returns a list of VISA resource names\n\n .. 
code-block:: python\n\n resources = list_resources()\n #prints (e.g.)\n #0 : GPIB0::22::INSTR : Agilent Technologies,34410A,******\n #1 : GPIB0::26::INSTR : Keithley Instruments Inc., Model 2612, *****\n dmm = Agilent34410(resources[0])\n\n \"\"\"\n rm = pyvisa.ResourceManager()\n instrs = rm.list_resources()\n for n, instr in enumerate(instrs):\n # trying to catch errors in comunication\n try:\n res = rm.open_resource(instr)\n # try to avoid errors from *idn?\n try:\n # noinspection PyUnresolvedReferences\n idn = res.query('*idn?')[:-1]\n except pyvisa.Error:\n idn = \"Not known\"\n finally:\n res.close()\n print(n, \":\", instr, \":\", idn)\n except pyvisa.VisaIOError as e:\n print(n, \":\", instr, \":\", \"Visa IO Error: check connections\")\n print(e)\n rm.close()\n return instrs\n", "path": "pymeasure/instruments/resources.py"}], "after_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2023 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nfrom ..errors import RangeError, RangeException\nfrom .channel import Channel\nfrom .instrument import Instrument\nfrom .resources import find_serial_port, list_resources\nfrom .validators import discreteTruncate\n\nfrom . import activetechnologies\nfrom . import advantest\nfrom . import agilent\nfrom . import aja\nfrom . import ametek\nfrom . import ami\nfrom . import anaheimautomation\nfrom . import anapico\nfrom . import andeenhagerling\nfrom . import anritsu\nfrom . import attocube\nfrom . import bkprecision\nfrom . import danfysik\nfrom . import deltaelektronika\nfrom . import edwards\nfrom . import eurotest\nfrom . import fluke\nfrom . import fwbell\nfrom . import hcp\nfrom . import heidenhain\nfrom . import hp\nfrom . import ipgphotonics\nfrom . import keithley\nfrom . import keysight\nfrom . import lakeshore\nfrom . import lecroy\nfrom . import mksinst\nfrom . import newport\nfrom . import ni\nfrom . import novanta\nfrom . import oxfordinstruments\nfrom . import parker\nfrom . import pendulum\nfrom . import razorbill\nfrom . import rohdeschwarz\nfrom . import siglenttechnologies\nfrom . import signalrecovery\nfrom . import srs\nfrom . import tcpowerconversion\nfrom . import tektronix\nfrom . import teledyne\nfrom . import temptronic\nfrom . import texio\nfrom . import thermotron\nfrom . import thorlabs\nfrom . import thyracont\nfrom . import toptica\nfrom . import velleman\nfrom . 
import yokogawa\n", "path": "pymeasure/instruments/__init__.py"}, {"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2023 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport pyvisa\nfrom serial.tools import list_ports\n\n\ndef list_resources():\n \"\"\"\n Prints the available resources, and returns a list of VISA resource names\n\n .. code-block:: python\n\n resources = list_resources()\n #prints (e.g.)\n #0 : GPIB0::22::INSTR : Agilent Technologies,34410A,******\n #1 : GPIB0::26::INSTR : Keithley Instruments Inc., Model 2612, *****\n dmm = Agilent34410(resources[0])\n\n \"\"\"\n rm = pyvisa.ResourceManager()\n instrs = rm.list_resources()\n for n, instr in enumerate(instrs):\n # trying to catch errors in comunication\n try:\n res = rm.open_resource(instr)\n # try to avoid errors from *idn?\n try:\n # noinspection PyUnresolvedReferences\n idn = res.query('*idn?')[:-1]\n except pyvisa.Error:\n idn = \"Not known\"\n finally:\n res.close()\n print(n, \":\", instr, \":\", idn)\n except pyvisa.VisaIOError as e:\n print(n, \":\", instr, \":\", \"Visa IO Error: check connections\")\n print(e)\n rm.close()\n return instrs\n\n\ndef find_serial_port(vendor_id=None, product_id=None, serial_number=None):\n \"\"\"Find the VISA port name of the first serial device with the given USB information.\n\n Use `None` as a value if you do not want to check for that parameter.\n\n .. code-block:: python\n\n resource_name = find_serial_port(vendor_id=1256, serial_number=\"SN12345\")\n dmm = Agilent34410(resource_name)\n\n :param int vid: Vendor ID.\n :param int pid: Product ID.\n :param str sn: Serial number.\n :return str: Port as a VISA string for a serial device (e.g. \"ASRL5\" or \"ASRL/dev/ttyACM5\").\n \"\"\"\n for port in sorted(list_ports.comports()):\n if ((vendor_id is None or port.vid == vendor_id)\n and (product_id is None or port.pid == product_id)\n and (serial_number is None or port.serial_number == str(serial_number))):\n # remove \"COM\" from windows serial port names.\n port_name = port.device.replace(\"COM\", \"\")\n return \"ASRL\" + port_name\n raise AttributeError(\"No device found for the given data.\")\n", "path": "pymeasure/instruments/resources.py"}]} | 1,944 | 504 |
gh_patches_debug_7980 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-508 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add bwctest.sh in OpenSearch and one plugin
- [x] OpenSearch Core - https://github.com/opensearch-project/OpenSearch/pull/1150
- [x] Anomaly Detection Plugin - https://github.com/opensearch-project/anomaly-detection/pull/189
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bundle-workflow/src/paths/script_finder.py`
Content:
```
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 import os
8
9
10 class ScriptFinder:
11 class ScriptNotFoundError(Exception):
12 def __init__(self, kind, paths):
13 self.kind = kind
14 self.paths = paths
15 super().__init__(f"Could not find {kind} script. Looked in {paths}.")
16
17 component_scripts_path = os.path.realpath(
18 os.path.join(
19 os.path.dirname(os.path.abspath(__file__)), "../../scripts/components"
20 )
21 )
22
23 default_scripts_path = os.path.realpath(
24 os.path.join(
25 os.path.dirname(os.path.abspath(__file__)), "../../scripts/default"
26 )
27 )
28
29 """
30 ScriptFinder is a helper that abstracts away the details of where to look for build, test and install scripts.
31
32 For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository,
33 it will look in the following locations, in order:
34 * Root of the Git repository
35 * /scripts/<script-name> in the Git repository
36 * <component_scripts_path>/<component_name>/<script-name>
37 * <default_scripts_path>/<script-name>
38
39 For install.sh scripts, given a component name, it will look in the following locations, in order:
40 * <component_scripts_path>/<component_name>/<script-name>
41 * <default_scripts_path>/<script-name>
42 """
43
44 @classmethod
45 def __find_script(cls, name, paths):
46 script = next(filter(lambda path: os.path.exists(path), paths), None)
47 if script is None:
48 raise ScriptFinder.ScriptNotFoundError(name, paths)
49 return script
50
51 @classmethod
52 def find_build_script(cls, component_name, git_dir):
53 paths = [
54 os.path.realpath(os.path.join(git_dir, "build.sh")),
55 os.path.realpath(os.path.join(git_dir, "scripts/build.sh")),
56 os.path.realpath(
57 os.path.join(cls.component_scripts_path, component_name, "build.sh")
58 ),
59 os.path.realpath(os.path.join(cls.default_scripts_path, "build.sh")),
60 ]
61
62 return cls.__find_script("build.sh", paths)
63
64 @classmethod
65 def find_integ_test_script(cls, component_name, git_dir):
66 paths = [
67 # TODO: Uncomment this after the integtest.sh tool is removed from plugin repos. See issue #497
68 # os.path.realpath(os.path.join(git_dir, "integtest.sh")),
69 # os.path.realpath(os.path.join(git_dir, "scripts/integtest.sh")),
70 os.path.realpath(
71 os.path.join(cls.component_scripts_path, component_name, "integtest.sh")
72 ),
73 os.path.realpath(os.path.join(cls.default_scripts_path, "integtest.sh")),
74 ]
75
76 return cls.__find_script("integtest.sh", paths)
77
78 @classmethod
79 def find_install_script(cls, component_name):
80 paths = [
81 os.path.realpath(
82 os.path.join(cls.component_scripts_path, component_name, "install.sh")
83 ),
84 os.path.realpath(os.path.join(cls.default_scripts_path, "install.sh")),
85 ]
86
87 return cls.__find_script("install.sh", paths)
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bundle-workflow/src/paths/script_finder.py b/bundle-workflow/src/paths/script_finder.py
--- a/bundle-workflow/src/paths/script_finder.py
+++ b/bundle-workflow/src/paths/script_finder.py
@@ -85,3 +85,16 @@
]
return cls.__find_script("install.sh", paths)
+
+ @classmethod
+ def find_bwc_test_script(cls, component_name, git_dir):
+ paths = [
+ os.path.realpath(os.path.join(git_dir, "bwctest.sh")),
+ os.path.realpath(os.path.join(git_dir, "scripts/bwctest.sh")),
+ os.path.realpath(
+ os.path.join(cls.component_scripts_path, component_name, "bwctest.sh")
+ ),
+ os.path.realpath(os.path.join(cls.default_scripts_path, "bwctest.sh")),
+ ]
+
+ return cls.__find_script("bwctest.sh", paths)
| {"golden_diff": "diff --git a/bundle-workflow/src/paths/script_finder.py b/bundle-workflow/src/paths/script_finder.py\n--- a/bundle-workflow/src/paths/script_finder.py\n+++ b/bundle-workflow/src/paths/script_finder.py\n@@ -85,3 +85,16 @@\n ]\n \n return cls.__find_script(\"install.sh\", paths)\n+\n+ @classmethod\n+ def find_bwc_test_script(cls, component_name, git_dir):\n+ paths = [\n+ os.path.realpath(os.path.join(git_dir, \"bwctest.sh\")),\n+ os.path.realpath(os.path.join(git_dir, \"scripts/bwctest.sh\")),\n+ os.path.realpath(\n+ os.path.join(cls.component_scripts_path, component_name, \"bwctest.sh\")\n+ ),\n+ os.path.realpath(os.path.join(cls.default_scripts_path, \"bwctest.sh\")),\n+ ]\n+\n+ return cls.__find_script(\"bwctest.sh\", paths)\n", "issue": "Add bwctest.sh in OpenSearch and one plugin\n- [x] OpenSearch Core - https://github.com/opensearch-project/OpenSearch/pull/1150\r\n- [x] Anomaly Detection Plugin - https://github.com/opensearch-project/anomaly-detection/pull/189\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\n\n\nclass ScriptFinder:\n class ScriptNotFoundError(Exception):\n def __init__(self, kind, paths):\n self.kind = kind\n self.paths = paths\n super().__init__(f\"Could not find {kind} script. Looked in {paths}.\")\n\n component_scripts_path = os.path.realpath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../scripts/components\"\n )\n )\n\n default_scripts_path = os.path.realpath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../scripts/default\"\n )\n )\n\n \"\"\"\n ScriptFinder is a helper that abstracts away the details of where to look for build, test and install scripts.\n\n For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository,\n it will look in the following locations, in order:\n * Root of the Git repository\n * /scripts/<script-name> in the Git repository\n * <component_scripts_path>/<component_name>/<script-name>\n * <default_scripts_path>/<script-name>\n\n For install.sh scripts, given a component name, it will look in the following locations, in order:\n * <component_scripts_path>/<component_name>/<script-name>\n * <default_scripts_path>/<script-name>\n \"\"\"\n\n @classmethod\n def __find_script(cls, name, paths):\n script = next(filter(lambda path: os.path.exists(path), paths), None)\n if script is None:\n raise ScriptFinder.ScriptNotFoundError(name, paths)\n return script\n\n @classmethod\n def find_build_script(cls, component_name, git_dir):\n paths = [\n os.path.realpath(os.path.join(git_dir, \"build.sh\")),\n os.path.realpath(os.path.join(git_dir, \"scripts/build.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"build.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"build.sh\")),\n ]\n\n return cls.__find_script(\"build.sh\", paths)\n\n @classmethod\n def find_integ_test_script(cls, component_name, git_dir):\n paths = [\n # TODO: Uncomment this after the integtest.sh tool is removed from plugin repos. 
See issue #497\n # os.path.realpath(os.path.join(git_dir, \"integtest.sh\")),\n # os.path.realpath(os.path.join(git_dir, \"scripts/integtest.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"integtest.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"integtest.sh\")),\n ]\n\n return cls.__find_script(\"integtest.sh\", paths)\n\n @classmethod\n def find_install_script(cls, component_name):\n paths = [\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"install.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"install.sh\")),\n ]\n\n return cls.__find_script(\"install.sh\", paths)\n", "path": "bundle-workflow/src/paths/script_finder.py"}], "after_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\n\n\nclass ScriptFinder:\n class ScriptNotFoundError(Exception):\n def __init__(self, kind, paths):\n self.kind = kind\n self.paths = paths\n super().__init__(f\"Could not find {kind} script. Looked in {paths}.\")\n\n component_scripts_path = os.path.realpath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../scripts/components\"\n )\n )\n\n default_scripts_path = os.path.realpath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../scripts/default\"\n )\n )\n\n \"\"\"\n ScriptFinder is a helper that abstracts away the details of where to look for build, test and install scripts.\n\n For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository,\n it will look in the following locations, in order:\n * Root of the Git repository\n * /scripts/<script-name> in the Git repository\n * <component_scripts_path>/<component_name>/<script-name>\n * <default_scripts_path>/<script-name>\n\n For install.sh scripts, given a component name, it will look in the following locations, in order:\n * <component_scripts_path>/<component_name>/<script-name>\n * <default_scripts_path>/<script-name>\n \"\"\"\n\n @classmethod\n def __find_script(cls, name, paths):\n script = next(filter(lambda path: os.path.exists(path), paths), None)\n if script is None:\n raise ScriptFinder.ScriptNotFoundError(name, paths)\n return script\n\n @classmethod\n def find_build_script(cls, component_name, git_dir):\n paths = [\n os.path.realpath(os.path.join(git_dir, \"build.sh\")),\n os.path.realpath(os.path.join(git_dir, \"scripts/build.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"build.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"build.sh\")),\n ]\n\n return cls.__find_script(\"build.sh\", paths)\n\n @classmethod\n def find_integ_test_script(cls, component_name, git_dir):\n paths = [\n # TODO: Uncomment this after the integtest.sh tool is removed from plugin repos. 
See issue #497\n # os.path.realpath(os.path.join(git_dir, \"integtest.sh\")),\n # os.path.realpath(os.path.join(git_dir, \"scripts/integtest.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"integtest.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"integtest.sh\")),\n ]\n\n return cls.__find_script(\"integtest.sh\", paths)\n\n @classmethod\n def find_install_script(cls, component_name):\n paths = [\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"install.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"install.sh\")),\n ]\n\n return cls.__find_script(\"install.sh\", paths)\n\n @classmethod\n def find_bwc_test_script(cls, component_name, git_dir):\n paths = [\n os.path.realpath(os.path.join(git_dir, \"bwctest.sh\")),\n os.path.realpath(os.path.join(git_dir, \"scripts/bwctest.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"bwctest.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"bwctest.sh\")),\n ]\n\n return cls.__find_script(\"bwctest.sh\", paths)\n", "path": "bundle-workflow/src/paths/script_finder.py"}]} | 1,200 | 206 |
gh_patches_debug_18280 | rasdani/github-patches | git_diff | mesonbuild__meson-3503 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
meson color output is broken with conemu (windows)
I currently use conemu with the quake-style dropdown menu, and while trying to run meson under windows, I get the following output

I think that on windows, it might be best to use the [colorama](https://pypi.org/project/colorama/) library as a conditional dependency on windows. This can be done inside of the `setup.py` with the `install_requires` looking like:
```python
install_requires =
colorama;sys_platform == 'win32'
```
Alternatively, simply importing colorama causes it to "do the right thing" on non-windows platforms. This also means you can get color support for pre-Windows 10 versions (Windows 10 added VT100 escape sequence support, but it requires an explicit opt in via a win32 function call)
I'm unsure if either of these options are valid, or even wanted.
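If pulling in colorama is not wanted, the Windows 10 opt-in mentioned above is only a few lines of `ctypes`; a rough, untested sketch (the helper name and placement are illustrative, the constants come from the Win32 console API):

```python
import ctypes
from ctypes import wintypes

ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004  # Win32 console output mode flag
STD_OUTPUT_HANDLE = -11

def enable_vt100() -> bool:
    """Try to turn on VT100/ANSI escape handling for stdout on Windows 10+."""
    kernel32 = ctypes.windll.kernel32
    handle = kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
    mode = wintypes.DWORD()
    if not kernel32.GetConsoleMode(handle, ctypes.byref(mode)):
        return False  # stdout is not a console (redirected to a file or pipe)
    return bool(kernel32.SetConsoleMode(handle, mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING))
```

Older consoles (or ConEmu setups where this call fails) would still need either colorama or an `ANSICON`-style fallback check.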
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mesonbuild/mlog.py`
Content:
```
1 # Copyright 2013-2014 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import sys, os, platform, io
16 from contextlib import contextmanager
17
18 """This is (mostly) a standalone module used to write logging
19 information about Meson runs. Some output goes to screen,
20 some to logging dir and some goes to both."""
21
22 if platform.system().lower() == 'windows':
23 colorize_console = os.isatty(sys.stdout.fileno()) and os.environ.get('ANSICON')
24 else:
25 colorize_console = os.isatty(sys.stdout.fileno()) and os.environ.get('TERM') != 'dumb'
26 log_dir = None
27 log_file = None
28 log_fname = 'meson-log.txt'
29 log_depth = 0
30
31 def initialize(logdir):
32 global log_dir, log_file
33 log_dir = logdir
34 log_file = open(os.path.join(logdir, log_fname), 'w', encoding='utf8')
35
36 def shutdown():
37 global log_file
38 if log_file is not None:
39 exception_around_goer = log_file
40 log_file = None
41 exception_around_goer.close()
42
43 class AnsiDecorator:
44 plain_code = "\033[0m"
45
46 def __init__(self, text, code):
47 self.text = text
48 self.code = code
49
50 def get_text(self, with_codes):
51 if with_codes:
52 return self.code + self.text + AnsiDecorator.plain_code
53 return self.text
54
55 def bold(text):
56 return AnsiDecorator(text, "\033[1m")
57
58 def red(text):
59 return AnsiDecorator(text, "\033[1;31m")
60
61 def green(text):
62 return AnsiDecorator(text, "\033[1;32m")
63
64 def yellow(text):
65 return AnsiDecorator(text, "\033[1;33m")
66
67 def cyan(text):
68 return AnsiDecorator(text, "\033[1;36m")
69
70 def process_markup(args, keep):
71 arr = []
72 for arg in args:
73 if isinstance(arg, str):
74 arr.append(arg)
75 elif isinstance(arg, AnsiDecorator):
76 arr.append(arg.get_text(keep))
77 else:
78 arr.append(str(arg))
79 return arr
80
81 def force_print(*args, **kwargs):
82 iostr = io.StringIO()
83 kwargs['file'] = iostr
84 print(*args, **kwargs)
85
86 raw = iostr.getvalue()
87 if log_depth > 0:
88 prepend = '|' * log_depth
89 raw = prepend + raw.replace('\n', '\n' + prepend, raw.count('\n') - 1)
90
91 # _Something_ is going to get printed.
92 try:
93 print(raw, end='')
94 except UnicodeEncodeError:
95 cleaned = raw.encode('ascii', 'replace').decode('ascii')
96 print(cleaned, end='')
97
98 def debug(*args, **kwargs):
99 arr = process_markup(args, False)
100 if log_file is not None:
101 print(*arr, file=log_file, **kwargs) # Log file never gets ANSI codes.
102 log_file.flush()
103
104 def log(*args, **kwargs):
105 arr = process_markup(args, False)
106 if log_file is not None:
107 print(*arr, file=log_file, **kwargs) # Log file never gets ANSI codes.
108 log_file.flush()
109 if colorize_console:
110 arr = process_markup(args, True)
111 force_print(*arr, **kwargs)
112
113 def _log_error(severity, *args, **kwargs):
114 from . import environment
115 if severity == 'warning':
116 args = (yellow('WARNING:'),) + args
117 elif severity == 'error':
118 args = (red('ERROR:'),) + args
119 else:
120 assert False, 'Invalid severity ' + severity
121
122 location = kwargs.pop('location', None)
123 if location is not None:
124 location_str = '{}:{}:'.format(os.path.join(location.subdir,
125 environment.build_filename),
126 location.lineno)
127 args = (location_str,) + args
128
129 log(*args, **kwargs)
130
131 def error(*args, **kwargs):
132 return _log_error('error', *args, **kwargs)
133
134 def warning(*args, **kwargs):
135 return _log_error('warning', *args, **kwargs)
136
137 def exception(e):
138 log()
139 if hasattr(e, 'file') and hasattr(e, 'lineno') and hasattr(e, 'colno'):
140 log('%s:%d:%d:' % (e.file, e.lineno, e.colno), red('ERROR: '), e)
141 else:
142 log(red('ERROR:'), e)
143
144 # Format a list for logging purposes as a string. It separates
145 # all but the last item with commas, and the last with 'and'.
146 def format_list(list):
147 l = len(list)
148 if l > 2:
149 return ' and '.join([', '.join(list[:-1]), list[-1]])
150 elif l == 2:
151 return ' and '.join(list)
152 elif l == 1:
153 return list[0]
154 else:
155 return ''
156
157 @contextmanager
158 def nested():
159 global log_depth
160 log_depth += 1
161 try:
162 yield
163 finally:
164 log_depth -= 1
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mesonbuild/mlog.py b/mesonbuild/mlog.py
--- a/mesonbuild/mlog.py
+++ b/mesonbuild/mlog.py
@@ -19,8 +19,22 @@
information about Meson runs. Some output goes to screen,
some to logging dir and some goes to both."""
+def _windows_ansi():
+ from ctypes import windll, byref
+ from ctypes.wintypes import DWORD
+
+ kernel = windll.kernel32
+ stdout = kernel.GetStdHandle(-11)
+ mode = DWORD()
+ if not kernel.GetConsoleMode(stdout, byref(mode)):
+ return False
+ # ENABLE_VIRTUAL_TERMINAL_PROCESSING == 0x4
+ # If the call to enable VT processing fails (returns 0), we fallback to
+ # original behavior
+ return kernel.SetConsoleMode(stdout, mode.value | 0x4) or os.environ.get('ANSICON')
+
if platform.system().lower() == 'windows':
- colorize_console = os.isatty(sys.stdout.fileno()) and os.environ.get('ANSICON')
+ colorize_console = os.isatty(sys.stdout.fileno()) and _windows_ansi()
else:
colorize_console = os.isatty(sys.stdout.fileno()) and os.environ.get('TERM') != 'dumb'
log_dir = None
| {"golden_diff": "diff --git a/mesonbuild/mlog.py b/mesonbuild/mlog.py\n--- a/mesonbuild/mlog.py\n+++ b/mesonbuild/mlog.py\n@@ -19,8 +19,22 @@\n information about Meson runs. Some output goes to screen,\n some to logging dir and some goes to both.\"\"\"\n \n+def _windows_ansi():\n+ from ctypes import windll, byref\n+ from ctypes.wintypes import DWORD\n+\n+ kernel = windll.kernel32\n+ stdout = kernel.GetStdHandle(-11)\n+ mode = DWORD()\n+ if not kernel.GetConsoleMode(stdout, byref(mode)):\n+ return False\n+ # ENABLE_VIRTUAL_TERMINAL_PROCESSING == 0x4\n+ # If the call to enable VT processing fails (returns 0), we fallback to\n+ # original behavior\n+ return kernel.SetConsoleMode(stdout, mode.value | 0x4) or os.environ.get('ANSICON')\n+\n if platform.system().lower() == 'windows':\n- colorize_console = os.isatty(sys.stdout.fileno()) and os.environ.get('ANSICON')\n+ colorize_console = os.isatty(sys.stdout.fileno()) and _windows_ansi()\n else:\n colorize_console = os.isatty(sys.stdout.fileno()) and os.environ.get('TERM') != 'dumb'\n log_dir = None\n", "issue": "meson color output is broken with conemu (windows)\nI currently use conemu with the quake-style dropdown menu, and while trying to run meson under windows, I get the following output\r\n\r\n\r\n\r\nI think that on windows, it might be best to use the [colorama](https://pypi.org/project/colorama/) library as a conditional dependency on windows. This can be done inside of the `setup.py` with the `install_requires` looking like:\r\n\r\n```python\r\ninstall_requires =\r\n colorama;sys_platform == 'win32'\r\n```\r\n\r\nAlternatively, simply importing colorama causes it to \"do the right thing\" on non-windows platforms. This also means you can get color support for pre-Windows 10 versions (Windows 10 added VT100 escape sequence support, but it requires an explicit opt in via a win32 function call)\r\n\r\nI'm unsure if either of these options are valid, or even wanted.\nmeson color output is broken with conemu (windows)\nI currently use conemu with the quake-style dropdown menu, and while trying to run meson under windows, I get the following output\r\n\r\n\r\n\r\nI think that on windows, it might be best to use the [colorama](https://pypi.org/project/colorama/) library as a conditional dependency on windows. This can be done inside of the `setup.py` with the `install_requires` looking like:\r\n\r\n```python\r\ninstall_requires =\r\n colorama;sys_platform == 'win32'\r\n```\r\n\r\nAlternatively, simply importing colorama causes it to \"do the right thing\" on non-windows platforms. 
This also means you can get color support for pre-Windows 10 versions (Windows 10 added VT100 escape sequence support, but it requires an explicit opt in via a win32 function call)\r\n\r\nI'm unsure if either of these options are valid, or even wanted.\n", "before_files": [{"content": "# Copyright 2013-2014 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys, os, platform, io\nfrom contextlib import contextmanager\n\n\"\"\"This is (mostly) a standalone module used to write logging\ninformation about Meson runs. Some output goes to screen,\nsome to logging dir and some goes to both.\"\"\"\n\nif platform.system().lower() == 'windows':\n colorize_console = os.isatty(sys.stdout.fileno()) and os.environ.get('ANSICON')\nelse:\n colorize_console = os.isatty(sys.stdout.fileno()) and os.environ.get('TERM') != 'dumb'\nlog_dir = None\nlog_file = None\nlog_fname = 'meson-log.txt'\nlog_depth = 0\n\ndef initialize(logdir):\n global log_dir, log_file\n log_dir = logdir\n log_file = open(os.path.join(logdir, log_fname), 'w', encoding='utf8')\n\ndef shutdown():\n global log_file\n if log_file is not None:\n exception_around_goer = log_file\n log_file = None\n exception_around_goer.close()\n\nclass AnsiDecorator:\n plain_code = \"\\033[0m\"\n\n def __init__(self, text, code):\n self.text = text\n self.code = code\n\n def get_text(self, with_codes):\n if with_codes:\n return self.code + self.text + AnsiDecorator.plain_code\n return self.text\n\ndef bold(text):\n return AnsiDecorator(text, \"\\033[1m\")\n\ndef red(text):\n return AnsiDecorator(text, \"\\033[1;31m\")\n\ndef green(text):\n return AnsiDecorator(text, \"\\033[1;32m\")\n\ndef yellow(text):\n return AnsiDecorator(text, \"\\033[1;33m\")\n\ndef cyan(text):\n return AnsiDecorator(text, \"\\033[1;36m\")\n\ndef process_markup(args, keep):\n arr = []\n for arg in args:\n if isinstance(arg, str):\n arr.append(arg)\n elif isinstance(arg, AnsiDecorator):\n arr.append(arg.get_text(keep))\n else:\n arr.append(str(arg))\n return arr\n\ndef force_print(*args, **kwargs):\n iostr = io.StringIO()\n kwargs['file'] = iostr\n print(*args, **kwargs)\n\n raw = iostr.getvalue()\n if log_depth > 0:\n prepend = '|' * log_depth\n raw = prepend + raw.replace('\\n', '\\n' + prepend, raw.count('\\n') - 1)\n\n # _Something_ is going to get printed.\n try:\n print(raw, end='')\n except UnicodeEncodeError:\n cleaned = raw.encode('ascii', 'replace').decode('ascii')\n print(cleaned, end='')\n\ndef debug(*args, **kwargs):\n arr = process_markup(args, False)\n if log_file is not None:\n print(*arr, file=log_file, **kwargs) # Log file never gets ANSI codes.\n log_file.flush()\n\ndef log(*args, **kwargs):\n arr = process_markup(args, False)\n if log_file is not None:\n print(*arr, file=log_file, **kwargs) # Log file never gets ANSI codes.\n log_file.flush()\n if colorize_console:\n arr = process_markup(args, True)\n force_print(*arr, **kwargs)\n\ndef _log_error(severity, *args, **kwargs):\n from . 
import environment\n if severity == 'warning':\n args = (yellow('WARNING:'),) + args\n elif severity == 'error':\n args = (red('ERROR:'),) + args\n else:\n assert False, 'Invalid severity ' + severity\n\n location = kwargs.pop('location', None)\n if location is not None:\n location_str = '{}:{}:'.format(os.path.join(location.subdir,\n environment.build_filename),\n location.lineno)\n args = (location_str,) + args\n\n log(*args, **kwargs)\n\ndef error(*args, **kwargs):\n return _log_error('error', *args, **kwargs)\n\ndef warning(*args, **kwargs):\n return _log_error('warning', *args, **kwargs)\n\ndef exception(e):\n log()\n if hasattr(e, 'file') and hasattr(e, 'lineno') and hasattr(e, 'colno'):\n log('%s:%d:%d:' % (e.file, e.lineno, e.colno), red('ERROR: '), e)\n else:\n log(red('ERROR:'), e)\n\n# Format a list for logging purposes as a string. It separates\n# all but the last item with commas, and the last with 'and'.\ndef format_list(list):\n l = len(list)\n if l > 2:\n return ' and '.join([', '.join(list[:-1]), list[-1]])\n elif l == 2:\n return ' and '.join(list)\n elif l == 1:\n return list[0]\n else:\n return ''\n\n@contextmanager\ndef nested():\n global log_depth\n log_depth += 1\n try:\n yield\n finally:\n log_depth -= 1\n", "path": "mesonbuild/mlog.py"}], "after_files": [{"content": "# Copyright 2013-2014 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys, os, platform, io\nfrom contextlib import contextmanager\n\n\"\"\"This is (mostly) a standalone module used to write logging\ninformation about Meson runs. 
Some output goes to screen,\nsome to logging dir and some goes to both.\"\"\"\n\ndef _windows_ansi():\n from ctypes import windll, byref\n from ctypes.wintypes import DWORD\n\n kernel = windll.kernel32\n stdout = kernel.GetStdHandle(-11)\n mode = DWORD()\n if not kernel.GetConsoleMode(stdout, byref(mode)):\n return False\n # ENABLE_VIRTUAL_TERMINAL_PROCESSING == 0x4\n # If the call to enable VT processing fails (returns 0), we fallback to\n # original behavior\n return kernel.SetConsoleMode(stdout, mode.value | 0x4) or os.environ.get('ANSICON')\n\nif platform.system().lower() == 'windows':\n colorize_console = os.isatty(sys.stdout.fileno()) and _windows_ansi()\nelse:\n colorize_console = os.isatty(sys.stdout.fileno()) and os.environ.get('TERM') != 'dumb'\nlog_dir = None\nlog_file = None\nlog_fname = 'meson-log.txt'\nlog_depth = 0\n\ndef initialize(logdir):\n global log_dir, log_file\n log_dir = logdir\n log_file = open(os.path.join(logdir, log_fname), 'w', encoding='utf8')\n\ndef shutdown():\n global log_file\n if log_file is not None:\n exception_around_goer = log_file\n log_file = None\n exception_around_goer.close()\n\nclass AnsiDecorator:\n plain_code = \"\\033[0m\"\n\n def __init__(self, text, code):\n self.text = text\n self.code = code\n\n def get_text(self, with_codes):\n if with_codes:\n return self.code + self.text + AnsiDecorator.plain_code\n return self.text\n\ndef bold(text):\n return AnsiDecorator(text, \"\\033[1m\")\n\ndef red(text):\n return AnsiDecorator(text, \"\\033[1;31m\")\n\ndef green(text):\n return AnsiDecorator(text, \"\\033[1;32m\")\n\ndef yellow(text):\n return AnsiDecorator(text, \"\\033[1;33m\")\n\ndef cyan(text):\n return AnsiDecorator(text, \"\\033[1;36m\")\n\ndef process_markup(args, keep):\n arr = []\n for arg in args:\n if isinstance(arg, str):\n arr.append(arg)\n elif isinstance(arg, AnsiDecorator):\n arr.append(arg.get_text(keep))\n else:\n arr.append(str(arg))\n return arr\n\ndef force_print(*args, **kwargs):\n iostr = io.StringIO()\n kwargs['file'] = iostr\n print(*args, **kwargs)\n\n raw = iostr.getvalue()\n if log_depth > 0:\n prepend = '|' * log_depth\n raw = prepend + raw.replace('\\n', '\\n' + prepend, raw.count('\\n') - 1)\n\n # _Something_ is going to get printed.\n try:\n print(raw, end='')\n except UnicodeEncodeError:\n cleaned = raw.encode('ascii', 'replace').decode('ascii')\n print(cleaned, end='')\n\ndef debug(*args, **kwargs):\n arr = process_markup(args, False)\n if log_file is not None:\n print(*arr, file=log_file, **kwargs) # Log file never gets ANSI codes.\n log_file.flush()\n\ndef log(*args, **kwargs):\n arr = process_markup(args, False)\n if log_file is not None:\n print(*arr, file=log_file, **kwargs) # Log file never gets ANSI codes.\n log_file.flush()\n if colorize_console:\n arr = process_markup(args, True)\n force_print(*arr, **kwargs)\n\ndef _log_error(severity, *args, **kwargs):\n from . 
import environment\n if severity == 'warning':\n args = (yellow('WARNING:'),) + args\n elif severity == 'error':\n args = (red('ERROR:'),) + args\n else:\n assert False, 'Invalid severity ' + severity\n\n location = kwargs.pop('location', None)\n if location is not None:\n location_str = '{}:{}:'.format(os.path.join(location.subdir,\n environment.build_filename),\n location.lineno)\n args = (location_str,) + args\n\n log(*args, **kwargs)\n\ndef error(*args, **kwargs):\n return _log_error('error', *args, **kwargs)\n\ndef warning(*args, **kwargs):\n return _log_error('warning', *args, **kwargs)\n\ndef exception(e):\n log()\n if hasattr(e, 'file') and hasattr(e, 'lineno') and hasattr(e, 'colno'):\n log('%s:%d:%d:' % (e.file, e.lineno, e.colno), red('ERROR: '), e)\n else:\n log(red('ERROR:'), e)\n\n# Format a list for logging purposes as a string. It separates\n# all but the last item with commas, and the last with 'and'.\ndef format_list(list):\n l = len(list)\n if l > 2:\n return ' and '.join([', '.join(list[:-1]), list[-1]])\n elif l == 2:\n return ' and '.join(list)\n elif l == 1:\n return list[0]\n else:\n return ''\n\n@contextmanager\ndef nested():\n global log_depth\n log_depth += 1\n try:\n yield\n finally:\n log_depth -= 1\n", "path": "mesonbuild/mlog.py"}]} | 2,454 | 299 |
gh_patches_debug_43281 | rasdani/github-patches | git_diff | ipython__ipython-7140 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Inconsistent behavior between FileContentsManager and Contents Manager API docs
Discrepancies with [IPEP-27](https://github.com/ipython/ipython/wiki/IPEP-27:-Contents-Service#contents-api) noted while working on a custom subclass of `ContentsManager`:
- Where IPEP-27 lists `'modified'` as an expected dictionary key, `FileContentsManager` uses `'last_modified'` instead.
- IPEP-27 specifies that all three of `'contents'`, `'format'`, and `'mimetype'` will be null if `contents` is null. In many (possibly all?) cases however, `'mimetype'` is non-null when `'contents'` is null.
Cf. https://github.com/ipython/ipython/pull/7140.
--- END ISSUE ---
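For context before the code listings, here is a minimal sketch of the model dictionary the issue above is describing. The key names follow the issue text and IPEP-27; the concrete values and the notebook payload are illustrative assumptions, not taken from the report.

```python
# Illustrative sketch only: key names follow the issue / IPEP-27, values are made up.
model_with_content = {
    "name": "Example.ipynb",
    "path": "folder/Example.ipynb",
    "type": "notebook",
    "writable": True,
    "created": "2014-12-08T12:00:00+00:00",        # timestamps shown as ISO strings for brevity
    "last_modified": "2014-12-08T12:30:00+00:00",  # FileContentsManager's key; IPEP-27 calls it 'modified'
    "mimetype": None,                              # null for notebook models
    "content": {"cells": [], "metadata": {}, "nbformat": 4, "nbformat_minor": 0},
    "format": "json",                              # non-null whenever 'content' is non-null
}

# Per IPEP-27, when content is not requested, 'content', 'format', and 'mimetype'
# should all be null together; the issue reports that 'mimetype' frequently is not.
model_without_content = dict(model_with_content, content=None, format=None, mimetype=None)
```

The accepted patch later in this record adds a `validate_model` helper that checks these keys and their null-consistency before a response is returned.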
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/html/services/contents/handlers.py`
Content:
```
1 """Tornado handlers for the contents web service."""
2
3 # Copyright (c) IPython Development Team.
4 # Distributed under the terms of the Modified BSD License.
5
6 import json
7
8 from tornado import web
9
10 from IPython.html.utils import url_path_join, url_escape
11 from IPython.utils.jsonutil import date_default
12
13 from IPython.html.base.handlers import (
14 IPythonHandler, json_errors, path_regex,
15 )
16
17
18 def sort_key(model):
19 """key function for case-insensitive sort by name and type"""
20 iname = model['name'].lower()
21 type_key = {
22 'directory' : '0',
23 'notebook' : '1',
24 'file' : '2',
25 }.get(model['type'], '9')
26 return u'%s%s' % (type_key, iname)
27
28 class ContentsHandler(IPythonHandler):
29
30 SUPPORTED_METHODS = (u'GET', u'PUT', u'PATCH', u'POST', u'DELETE')
31
32 def location_url(self, path):
33 """Return the full URL location of a file.
34
35 Parameters
36 ----------
37 path : unicode
38 The API path of the file, such as "foo/bar.txt".
39 """
40 return url_escape(url_path_join(
41 self.base_url, 'api', 'contents', path
42 ))
43
44 def _finish_model(self, model, location=True):
45 """Finish a JSON request with a model, setting relevant headers, etc."""
46 if location:
47 location = self.location_url(model['path'])
48 self.set_header('Location', location)
49 self.set_header('Last-Modified', model['last_modified'])
50 self.set_header('Content-Type', 'application/json')
51 self.finish(json.dumps(model, default=date_default))
52
53 @web.authenticated
54 @json_errors
55 def get(self, path=''):
56 """Return a model for a file or directory.
57
58 A directory model contains a list of models (without content)
59 of the files and directories it contains.
60 """
61 path = path or ''
62 type_ = self.get_query_argument('type', default=None)
63 if type_ not in {None, 'directory', 'file', 'notebook'}:
64 raise web.HTTPError(400, u'Type %r is invalid' % type_)
65
66 format = self.get_query_argument('format', default=None)
67 if format not in {None, 'text', 'base64'}:
68 raise web.HTTPError(400, u'Format %r is invalid' % format)
69
70 model = self.contents_manager.get(path=path, type_=type_, format=format)
71 if model['type'] == 'directory':
72 # group listing by type, then by name (case-insensitive)
73 # FIXME: sorting should be done in the frontends
74 model['content'].sort(key=sort_key)
75 self._finish_model(model, location=False)
76
77 @web.authenticated
78 @json_errors
79 def patch(self, path=''):
80 """PATCH renames a file or directory without re-uploading content."""
81 cm = self.contents_manager
82 model = self.get_json_body()
83 if model is None:
84 raise web.HTTPError(400, u'JSON body missing')
85 model = cm.update(model, path)
86 self._finish_model(model)
87
88 def _copy(self, copy_from, copy_to=None):
89 """Copy a file, optionally specifying a target directory."""
90 self.log.info(u"Copying {copy_from} to {copy_to}".format(
91 copy_from=copy_from,
92 copy_to=copy_to or '',
93 ))
94 model = self.contents_manager.copy(copy_from, copy_to)
95 self.set_status(201)
96 self._finish_model(model)
97
98 def _upload(self, model, path):
99 """Handle upload of a new file to path"""
100 self.log.info(u"Uploading file to %s", path)
101 model = self.contents_manager.new(model, path)
102 self.set_status(201)
103 self._finish_model(model)
104
105 def _new_untitled(self, path, type='', ext=''):
106 """Create a new, empty untitled entity"""
107 self.log.info(u"Creating new %s in %s", type or 'file', path)
108 model = self.contents_manager.new_untitled(path=path, type=type, ext=ext)
109 self.set_status(201)
110 self._finish_model(model)
111
112 def _save(self, model, path):
113 """Save an existing file."""
114 self.log.info(u"Saving file at %s", path)
115 model = self.contents_manager.save(model, path)
116 self._finish_model(model)
117
118 @web.authenticated
119 @json_errors
120 def post(self, path=''):
121 """Create a new file in the specified path.
122
123 POST creates new files. The server always decides on the name.
124
125 POST /api/contents/path
126 New untitled, empty file or directory.
127 POST /api/contents/path
128 with body {"copy_from" : "/path/to/OtherNotebook.ipynb"}
129 New copy of OtherNotebook in path
130 """
131
132 cm = self.contents_manager
133
134 if cm.file_exists(path):
135 raise web.HTTPError(400, "Cannot POST to files, use PUT instead.")
136
137 if not cm.dir_exists(path):
138 raise web.HTTPError(404, "No such directory: %s" % path)
139
140 model = self.get_json_body()
141
142 if model is not None:
143 copy_from = model.get('copy_from')
144 ext = model.get('ext', '')
145 type = model.get('type', '')
146 if copy_from:
147 self._copy(copy_from, path)
148 else:
149 self._new_untitled(path, type=type, ext=ext)
150 else:
151 self._new_untitled(path)
152
153 @web.authenticated
154 @json_errors
155 def put(self, path=''):
156 """Saves the file in the location specified by name and path.
157
158 PUT is very similar to POST, but the requester specifies the name,
159 whereas with POST, the server picks the name.
160
161 PUT /api/contents/path/Name.ipynb
162 Save notebook at ``path/Name.ipynb``. Notebook structure is specified
163 in `content` key of JSON request body. If content is not specified,
164 create a new empty notebook.
165 """
166 model = self.get_json_body()
167 if model:
168 if model.get('copy_from'):
169 raise web.HTTPError(400, "Cannot copy with PUT, only POST")
170 if self.contents_manager.file_exists(path):
171 self._save(model, path)
172 else:
173 self._upload(model, path)
174 else:
175 self._new_untitled(path)
176
177 @web.authenticated
178 @json_errors
179 def delete(self, path=''):
180 """delete a file in the given path"""
181 cm = self.contents_manager
182 self.log.warn('delete %s', path)
183 cm.delete(path)
184 self.set_status(204)
185 self.finish()
186
187
188 class CheckpointsHandler(IPythonHandler):
189
190 SUPPORTED_METHODS = ('GET', 'POST')
191
192 @web.authenticated
193 @json_errors
194 def get(self, path=''):
195 """get lists checkpoints for a file"""
196 cm = self.contents_manager
197 checkpoints = cm.list_checkpoints(path)
198 data = json.dumps(checkpoints, default=date_default)
199 self.finish(data)
200
201 @web.authenticated
202 @json_errors
203 def post(self, path=''):
204 """post creates a new checkpoint"""
205 cm = self.contents_manager
206 checkpoint = cm.create_checkpoint(path)
207 data = json.dumps(checkpoint, default=date_default)
208 location = url_path_join(self.base_url, 'api/contents',
209 path, 'checkpoints', checkpoint['id'])
210 self.set_header('Location', url_escape(location))
211 self.set_status(201)
212 self.finish(data)
213
214
215 class ModifyCheckpointsHandler(IPythonHandler):
216
217 SUPPORTED_METHODS = ('POST', 'DELETE')
218
219 @web.authenticated
220 @json_errors
221 def post(self, path, checkpoint_id):
222 """post restores a file from a checkpoint"""
223 cm = self.contents_manager
224 cm.restore_checkpoint(checkpoint_id, path)
225 self.set_status(204)
226 self.finish()
227
228 @web.authenticated
229 @json_errors
230 def delete(self, path, checkpoint_id):
231 """delete clears a checkpoint for a given file"""
232 cm = self.contents_manager
233 cm.delete_checkpoint(checkpoint_id, path)
234 self.set_status(204)
235 self.finish()
236
237
238 class NotebooksRedirectHandler(IPythonHandler):
239 """Redirect /api/notebooks to /api/contents"""
240 SUPPORTED_METHODS = ('GET', 'PUT', 'PATCH', 'POST', 'DELETE')
241
242 def get(self, path):
243 self.log.warn("/api/notebooks is deprecated, use /api/contents")
244 self.redirect(url_path_join(
245 self.base_url,
246 'api/contents',
247 path
248 ))
249
250 put = patch = post = delete = get
251
252
253 #-----------------------------------------------------------------------------
254 # URL to handler mappings
255 #-----------------------------------------------------------------------------
256
257
258 _checkpoint_id_regex = r"(?P<checkpoint_id>[\w-]+)"
259
260 default_handlers = [
261 (r"/api/contents%s/checkpoints" % path_regex, CheckpointsHandler),
262 (r"/api/contents%s/checkpoints/%s" % (path_regex, _checkpoint_id_regex),
263 ModifyCheckpointsHandler),
264 (r"/api/contents%s" % path_regex, ContentsHandler),
265 (r"/api/notebooks/?(.*)", NotebooksRedirectHandler),
266 ]
267
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/html/services/contents/handlers.py b/IPython/html/services/contents/handlers.py
--- a/IPython/html/services/contents/handlers.py
+++ b/IPython/html/services/contents/handlers.py
@@ -25,6 +25,56 @@
}.get(model['type'], '9')
return u'%s%s' % (type_key, iname)
+
+def validate_model(model, expect_content):
+ """
+ Validate a model returned by a ContentsManager method.
+
+ If expect_content is True, then we expect non-null entries for 'content'
+ and 'format'.
+ """
+ required_keys = {
+ "name",
+ "path",
+ "type",
+ "writable",
+ "created",
+ "last_modified",
+ "mimetype",
+ "content",
+ "format",
+ }
+ missing = required_keys - set(model.keys())
+ if missing:
+ raise web.HTTPError(
+ 500,
+ u"Missing Model Keys: {missing}".format(missing=missing),
+ )
+
+ maybe_none_keys = ['content', 'format']
+ if model['type'] == 'file':
+ # mimetype should be populated only for file models
+ maybe_none_keys.append('mimetype')
+ if expect_content:
+ errors = [key for key in maybe_none_keys if model[key] is None]
+ if errors:
+ raise web.HTTPError(
+ 500,
+ u"Keys unexpectedly None: {keys}".format(keys=errors),
+ )
+ else:
+ errors = {
+ key: model[key]
+ for key in maybe_none_keys
+ if model[key] is not None
+ }
+ if errors:
+ raise web.HTTPError(
+ 500,
+ u"Keys unexpectedly not None: {keys}".format(keys=errors),
+ )
+
+
class ContentsHandler(IPythonHandler):
SUPPORTED_METHODS = (u'GET', u'PUT', u'PATCH', u'POST', u'DELETE')
@@ -72,6 +122,7 @@
# group listing by type, then by name (case-insensitive)
# FIXME: sorting should be done in the frontends
model['content'].sort(key=sort_key)
+ validate_model(model, expect_content=True)
self._finish_model(model, location=False)
@web.authenticated
@@ -83,6 +134,7 @@
if model is None:
raise web.HTTPError(400, u'JSON body missing')
model = cm.update(model, path)
+ validate_model(model, expect_content=False)
self._finish_model(model)
def _copy(self, copy_from, copy_to=None):
@@ -93,6 +145,7 @@
))
model = self.contents_manager.copy(copy_from, copy_to)
self.set_status(201)
+ validate_model(model, expect_content=False)
self._finish_model(model)
def _upload(self, model, path):
@@ -100,6 +153,7 @@
self.log.info(u"Uploading file to %s", path)
model = self.contents_manager.new(model, path)
self.set_status(201)
+ validate_model(model, expect_content=False)
self._finish_model(model)
def _new_untitled(self, path, type='', ext=''):
@@ -107,12 +161,14 @@
self.log.info(u"Creating new %s in %s", type or 'file', path)
model = self.contents_manager.new_untitled(path=path, type=type, ext=ext)
self.set_status(201)
+ validate_model(model, expect_content=False)
self._finish_model(model)
def _save(self, model, path):
"""Save an existing file."""
self.log.info(u"Saving file at %s", path)
model = self.contents_manager.save(model, path)
+ validate_model(model, expect_content=False)
self._finish_model(model)
@web.authenticated
| {"golden_diff": "diff --git a/IPython/html/services/contents/handlers.py b/IPython/html/services/contents/handlers.py\n--- a/IPython/html/services/contents/handlers.py\n+++ b/IPython/html/services/contents/handlers.py\n@@ -25,6 +25,56 @@\n }.get(model['type'], '9')\n return u'%s%s' % (type_key, iname)\n \n+\n+def validate_model(model, expect_content):\n+ \"\"\"\n+ Validate a model returned by a ContentsManager method.\n+\n+ If expect_content is True, then we expect non-null entries for 'content'\n+ and 'format'.\n+ \"\"\"\n+ required_keys = {\n+ \"name\",\n+ \"path\",\n+ \"type\",\n+ \"writable\",\n+ \"created\",\n+ \"last_modified\",\n+ \"mimetype\",\n+ \"content\",\n+ \"format\",\n+ }\n+ missing = required_keys - set(model.keys())\n+ if missing:\n+ raise web.HTTPError(\n+ 500,\n+ u\"Missing Model Keys: {missing}\".format(missing=missing),\n+ )\n+\n+ maybe_none_keys = ['content', 'format']\n+ if model['type'] == 'file':\n+ # mimetype should be populated only for file models\n+ maybe_none_keys.append('mimetype')\n+ if expect_content:\n+ errors = [key for key in maybe_none_keys if model[key] is None]\n+ if errors:\n+ raise web.HTTPError(\n+ 500,\n+ u\"Keys unexpectedly None: {keys}\".format(keys=errors),\n+ )\n+ else:\n+ errors = {\n+ key: model[key]\n+ for key in maybe_none_keys\n+ if model[key] is not None\n+ }\n+ if errors:\n+ raise web.HTTPError(\n+ 500,\n+ u\"Keys unexpectedly not None: {keys}\".format(keys=errors),\n+ )\n+\n+\n class ContentsHandler(IPythonHandler):\n \n SUPPORTED_METHODS = (u'GET', u'PUT', u'PATCH', u'POST', u'DELETE')\n@@ -72,6 +122,7 @@\n # group listing by type, then by name (case-insensitive)\n # FIXME: sorting should be done in the frontends\n model['content'].sort(key=sort_key)\n+ validate_model(model, expect_content=True)\n self._finish_model(model, location=False)\n \n @web.authenticated\n@@ -83,6 +134,7 @@\n if model is None:\n raise web.HTTPError(400, u'JSON body missing')\n model = cm.update(model, path)\n+ validate_model(model, expect_content=False)\n self._finish_model(model)\n \n def _copy(self, copy_from, copy_to=None):\n@@ -93,6 +145,7 @@\n ))\n model = self.contents_manager.copy(copy_from, copy_to)\n self.set_status(201)\n+ validate_model(model, expect_content=False)\n self._finish_model(model)\n \n def _upload(self, model, path):\n@@ -100,6 +153,7 @@\n self.log.info(u\"Uploading file to %s\", path)\n model = self.contents_manager.new(model, path)\n self.set_status(201)\n+ validate_model(model, expect_content=False)\n self._finish_model(model)\n \n def _new_untitled(self, path, type='', ext=''):\n@@ -107,12 +161,14 @@\n self.log.info(u\"Creating new %s in %s\", type or 'file', path)\n model = self.contents_manager.new_untitled(path=path, type=type, ext=ext)\n self.set_status(201)\n+ validate_model(model, expect_content=False)\n self._finish_model(model)\n \n def _save(self, model, path):\n \"\"\"Save an existing file.\"\"\"\n self.log.info(u\"Saving file at %s\", path)\n model = self.contents_manager.save(model, path)\n+ validate_model(model, expect_content=False)\n self._finish_model(model)\n \n @web.authenticated\n", "issue": "Inconsistent behavior between FileContentsManager and Contents Manager API docs\nDiscrepancies with [IPEP-27](https://github.com/ipython/ipython/wiki/IPEP-27:-Contents-Service#contents-api) noted while working on a custom subclass of `ContentsManager`:\n- Where IPEP-27 lists `'modified'` as an expected dictionary key, `FileContentsManager` uses `'last_modified'` instead.\n- IPEP-27 specifies that all three of `'contents'`, 
`'format'`, and `'mimetype'` will be null if `contents` is null. In many (possibly all?) cases however, `'mimetype'` is non-null when `'contents` is null.\n\nCf. https://github.com/ipython/ipython/pull/7140.\n\n", "before_files": [{"content": "\"\"\"Tornado handlers for the contents web service.\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport json\n\nfrom tornado import web\n\nfrom IPython.html.utils import url_path_join, url_escape\nfrom IPython.utils.jsonutil import date_default\n\nfrom IPython.html.base.handlers import (\n IPythonHandler, json_errors, path_regex,\n)\n\n\ndef sort_key(model):\n \"\"\"key function for case-insensitive sort by name and type\"\"\"\n iname = model['name'].lower()\n type_key = {\n 'directory' : '0',\n 'notebook' : '1',\n 'file' : '2',\n }.get(model['type'], '9')\n return u'%s%s' % (type_key, iname)\n\nclass ContentsHandler(IPythonHandler):\n\n SUPPORTED_METHODS = (u'GET', u'PUT', u'PATCH', u'POST', u'DELETE')\n\n def location_url(self, path):\n \"\"\"Return the full URL location of a file.\n\n Parameters\n ----------\n path : unicode\n The API path of the file, such as \"foo/bar.txt\".\n \"\"\"\n return url_escape(url_path_join(\n self.base_url, 'api', 'contents', path\n ))\n\n def _finish_model(self, model, location=True):\n \"\"\"Finish a JSON request with a model, setting relevant headers, etc.\"\"\"\n if location:\n location = self.location_url(model['path'])\n self.set_header('Location', location)\n self.set_header('Last-Modified', model['last_modified'])\n self.set_header('Content-Type', 'application/json')\n self.finish(json.dumps(model, default=date_default))\n\n @web.authenticated\n @json_errors\n def get(self, path=''):\n \"\"\"Return a model for a file or directory.\n\n A directory model contains a list of models (without content)\n of the files and directories it contains.\n \"\"\"\n path = path or ''\n type_ = self.get_query_argument('type', default=None)\n if type_ not in {None, 'directory', 'file', 'notebook'}:\n raise web.HTTPError(400, u'Type %r is invalid' % type_)\n\n format = self.get_query_argument('format', default=None)\n if format not in {None, 'text', 'base64'}:\n raise web.HTTPError(400, u'Format %r is invalid' % format)\n\n model = self.contents_manager.get(path=path, type_=type_, format=format)\n if model['type'] == 'directory':\n # group listing by type, then by name (case-insensitive)\n # FIXME: sorting should be done in the frontends\n model['content'].sort(key=sort_key)\n self._finish_model(model, location=False)\n\n @web.authenticated\n @json_errors\n def patch(self, path=''):\n \"\"\"PATCH renames a file or directory without re-uploading content.\"\"\"\n cm = self.contents_manager\n model = self.get_json_body()\n if model is None:\n raise web.HTTPError(400, u'JSON body missing')\n model = cm.update(model, path)\n self._finish_model(model)\n\n def _copy(self, copy_from, copy_to=None):\n \"\"\"Copy a file, optionally specifying a target directory.\"\"\"\n self.log.info(u\"Copying {copy_from} to {copy_to}\".format(\n copy_from=copy_from,\n copy_to=copy_to or '',\n ))\n model = self.contents_manager.copy(copy_from, copy_to)\n self.set_status(201)\n self._finish_model(model)\n\n def _upload(self, model, path):\n \"\"\"Handle upload of a new file to path\"\"\"\n self.log.info(u\"Uploading file to %s\", path)\n model = self.contents_manager.new(model, path)\n self.set_status(201)\n self._finish_model(model)\n \n def _new_untitled(self, path, type='', ext=''):\n 
\"\"\"Create a new, empty untitled entity\"\"\"\n self.log.info(u\"Creating new %s in %s\", type or 'file', path)\n model = self.contents_manager.new_untitled(path=path, type=type, ext=ext)\n self.set_status(201)\n self._finish_model(model)\n\n def _save(self, model, path):\n \"\"\"Save an existing file.\"\"\"\n self.log.info(u\"Saving file at %s\", path)\n model = self.contents_manager.save(model, path)\n self._finish_model(model)\n\n @web.authenticated\n @json_errors\n def post(self, path=''):\n \"\"\"Create a new file in the specified path.\n\n POST creates new files. The server always decides on the name.\n\n POST /api/contents/path\n New untitled, empty file or directory.\n POST /api/contents/path\n with body {\"copy_from\" : \"/path/to/OtherNotebook.ipynb\"}\n New copy of OtherNotebook in path\n \"\"\"\n\n cm = self.contents_manager\n\n if cm.file_exists(path):\n raise web.HTTPError(400, \"Cannot POST to files, use PUT instead.\")\n\n if not cm.dir_exists(path):\n raise web.HTTPError(404, \"No such directory: %s\" % path)\n\n model = self.get_json_body()\n\n if model is not None:\n copy_from = model.get('copy_from')\n ext = model.get('ext', '')\n type = model.get('type', '')\n if copy_from:\n self._copy(copy_from, path)\n else:\n self._new_untitled(path, type=type, ext=ext)\n else:\n self._new_untitled(path)\n\n @web.authenticated\n @json_errors\n def put(self, path=''):\n \"\"\"Saves the file in the location specified by name and path.\n\n PUT is very similar to POST, but the requester specifies the name,\n whereas with POST, the server picks the name.\n\n PUT /api/contents/path/Name.ipynb\n Save notebook at ``path/Name.ipynb``. Notebook structure is specified\n in `content` key of JSON request body. If content is not specified,\n create a new empty notebook.\n \"\"\"\n model = self.get_json_body()\n if model:\n if model.get('copy_from'):\n raise web.HTTPError(400, \"Cannot copy with PUT, only POST\")\n if self.contents_manager.file_exists(path):\n self._save(model, path)\n else:\n self._upload(model, path)\n else:\n self._new_untitled(path)\n\n @web.authenticated\n @json_errors\n def delete(self, path=''):\n \"\"\"delete a file in the given path\"\"\"\n cm = self.contents_manager\n self.log.warn('delete %s', path)\n cm.delete(path)\n self.set_status(204)\n self.finish()\n\n\nclass CheckpointsHandler(IPythonHandler):\n\n SUPPORTED_METHODS = ('GET', 'POST')\n\n @web.authenticated\n @json_errors\n def get(self, path=''):\n \"\"\"get lists checkpoints for a file\"\"\"\n cm = self.contents_manager\n checkpoints = cm.list_checkpoints(path)\n data = json.dumps(checkpoints, default=date_default)\n self.finish(data)\n\n @web.authenticated\n @json_errors\n def post(self, path=''):\n \"\"\"post creates a new checkpoint\"\"\"\n cm = self.contents_manager\n checkpoint = cm.create_checkpoint(path)\n data = json.dumps(checkpoint, default=date_default)\n location = url_path_join(self.base_url, 'api/contents',\n path, 'checkpoints', checkpoint['id'])\n self.set_header('Location', url_escape(location))\n self.set_status(201)\n self.finish(data)\n\n\nclass ModifyCheckpointsHandler(IPythonHandler):\n\n SUPPORTED_METHODS = ('POST', 'DELETE')\n\n @web.authenticated\n @json_errors\n def post(self, path, checkpoint_id):\n \"\"\"post restores a file from a checkpoint\"\"\"\n cm = self.contents_manager\n cm.restore_checkpoint(checkpoint_id, path)\n self.set_status(204)\n self.finish()\n\n @web.authenticated\n @json_errors\n def delete(self, path, checkpoint_id):\n \"\"\"delete clears a checkpoint for a 
given file\"\"\"\n cm = self.contents_manager\n cm.delete_checkpoint(checkpoint_id, path)\n self.set_status(204)\n self.finish()\n\n\nclass NotebooksRedirectHandler(IPythonHandler):\n \"\"\"Redirect /api/notebooks to /api/contents\"\"\"\n SUPPORTED_METHODS = ('GET', 'PUT', 'PATCH', 'POST', 'DELETE')\n\n def get(self, path):\n self.log.warn(\"/api/notebooks is deprecated, use /api/contents\")\n self.redirect(url_path_join(\n self.base_url,\n 'api/contents',\n path\n ))\n\n put = patch = post = delete = get\n\n\n#-----------------------------------------------------------------------------\n# URL to handler mappings\n#-----------------------------------------------------------------------------\n\n\n_checkpoint_id_regex = r\"(?P<checkpoint_id>[\\w-]+)\"\n\ndefault_handlers = [\n (r\"/api/contents%s/checkpoints\" % path_regex, CheckpointsHandler),\n (r\"/api/contents%s/checkpoints/%s\" % (path_regex, _checkpoint_id_regex),\n ModifyCheckpointsHandler),\n (r\"/api/contents%s\" % path_regex, ContentsHandler),\n (r\"/api/notebooks/?(.*)\", NotebooksRedirectHandler),\n]\n", "path": "IPython/html/services/contents/handlers.py"}], "after_files": [{"content": "\"\"\"Tornado handlers for the contents web service.\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport json\n\nfrom tornado import web\n\nfrom IPython.html.utils import url_path_join, url_escape\nfrom IPython.utils.jsonutil import date_default\n\nfrom IPython.html.base.handlers import (\n IPythonHandler, json_errors, path_regex,\n)\n\n\ndef sort_key(model):\n \"\"\"key function for case-insensitive sort by name and type\"\"\"\n iname = model['name'].lower()\n type_key = {\n 'directory' : '0',\n 'notebook' : '1',\n 'file' : '2',\n }.get(model['type'], '9')\n return u'%s%s' % (type_key, iname)\n\n\ndef validate_model(model, expect_content):\n \"\"\"\n Validate a model returned by a ContentsManager method.\n\n If expect_content is True, then we expect non-null entries for 'content'\n and 'format'.\n \"\"\"\n required_keys = {\n \"name\",\n \"path\",\n \"type\",\n \"writable\",\n \"created\",\n \"last_modified\",\n \"mimetype\",\n \"content\",\n \"format\",\n }\n missing = required_keys - set(model.keys())\n if missing:\n raise web.HTTPError(\n 500,\n u\"Missing Model Keys: {missing}\".format(missing=missing),\n )\n\n maybe_none_keys = ['content', 'format']\n if model['type'] == 'file':\n # mimetype should be populated only for file models\n maybe_none_keys.append('mimetype')\n if expect_content:\n errors = [key for key in maybe_none_keys if model[key] is None]\n if errors:\n raise web.HTTPError(\n 500,\n u\"Keys unexpectedly None: {keys}\".format(keys=errors),\n )\n else:\n errors = {\n key: model[key]\n for key in maybe_none_keys\n if model[key] is not None\n }\n if errors:\n raise web.HTTPError(\n 500,\n u\"Keys unexpectedly not None: {keys}\".format(keys=errors),\n )\n\n\nclass ContentsHandler(IPythonHandler):\n\n SUPPORTED_METHODS = (u'GET', u'PUT', u'PATCH', u'POST', u'DELETE')\n\n def location_url(self, path):\n \"\"\"Return the full URL location of a file.\n\n Parameters\n ----------\n path : unicode\n The API path of the file, such as \"foo/bar.txt\".\n \"\"\"\n return url_escape(url_path_join(\n self.base_url, 'api', 'contents', path\n ))\n\n def _finish_model(self, model, location=True):\n \"\"\"Finish a JSON request with a model, setting relevant headers, etc.\"\"\"\n if location:\n location = self.location_url(model['path'])\n self.set_header('Location', location)\n 
self.set_header('Last-Modified', model['last_modified'])\n self.set_header('Content-Type', 'application/json')\n self.finish(json.dumps(model, default=date_default))\n\n @web.authenticated\n @json_errors\n def get(self, path=''):\n \"\"\"Return a model for a file or directory.\n\n A directory model contains a list of models (without content)\n of the files and directories it contains.\n \"\"\"\n path = path or ''\n type_ = self.get_query_argument('type', default=None)\n if type_ not in {None, 'directory', 'file', 'notebook'}:\n raise web.HTTPError(400, u'Type %r is invalid' % type_)\n\n format = self.get_query_argument('format', default=None)\n if format not in {None, 'text', 'base64'}:\n raise web.HTTPError(400, u'Format %r is invalid' % format)\n\n model = self.contents_manager.get(path=path, type_=type_, format=format)\n if model['type'] == 'directory':\n # group listing by type, then by name (case-insensitive)\n # FIXME: sorting should be done in the frontends\n model['content'].sort(key=sort_key)\n validate_model(model, expect_content=True)\n self._finish_model(model, location=False)\n\n @web.authenticated\n @json_errors\n def patch(self, path=''):\n \"\"\"PATCH renames a file or directory without re-uploading content.\"\"\"\n cm = self.contents_manager\n model = self.get_json_body()\n if model is None:\n raise web.HTTPError(400, u'JSON body missing')\n model = cm.update(model, path)\n validate_model(model, expect_content=False)\n self._finish_model(model)\n\n def _copy(self, copy_from, copy_to=None):\n \"\"\"Copy a file, optionally specifying a target directory.\"\"\"\n self.log.info(u\"Copying {copy_from} to {copy_to}\".format(\n copy_from=copy_from,\n copy_to=copy_to or '',\n ))\n model = self.contents_manager.copy(copy_from, copy_to)\n self.set_status(201)\n validate_model(model, expect_content=False)\n self._finish_model(model)\n\n def _upload(self, model, path):\n \"\"\"Handle upload of a new file to path\"\"\"\n self.log.info(u\"Uploading file to %s\", path)\n model = self.contents_manager.new(model, path)\n self.set_status(201)\n validate_model(model, expect_content=False)\n self._finish_model(model)\n \n def _new_untitled(self, path, type='', ext=''):\n \"\"\"Create a new, empty untitled entity\"\"\"\n self.log.info(u\"Creating new %s in %s\", type or 'file', path)\n model = self.contents_manager.new_untitled(path=path, type=type, ext=ext)\n self.set_status(201)\n validate_model(model, expect_content=False)\n self._finish_model(model)\n\n def _save(self, model, path):\n \"\"\"Save an existing file.\"\"\"\n self.log.info(u\"Saving file at %s\", path)\n model = self.contents_manager.save(model, path)\n validate_model(model, expect_content=False)\n self._finish_model(model)\n\n @web.authenticated\n @json_errors\n def post(self, path=''):\n \"\"\"Create a new file in the specified path.\n\n POST creates new files. 
The server always decides on the name.\n\n POST /api/contents/path\n New untitled, empty file or directory.\n POST /api/contents/path\n with body {\"copy_from\" : \"/path/to/OtherNotebook.ipynb\"}\n New copy of OtherNotebook in path\n \"\"\"\n\n cm = self.contents_manager\n\n if cm.file_exists(path):\n raise web.HTTPError(400, \"Cannot POST to files, use PUT instead.\")\n\n if not cm.dir_exists(path):\n raise web.HTTPError(404, \"No such directory: %s\" % path)\n\n model = self.get_json_body()\n\n if model is not None:\n copy_from = model.get('copy_from')\n ext = model.get('ext', '')\n type = model.get('type', '')\n if copy_from:\n self._copy(copy_from, path)\n else:\n self._new_untitled(path, type=type, ext=ext)\n else:\n self._new_untitled(path)\n\n @web.authenticated\n @json_errors\n def put(self, path=''):\n \"\"\"Saves the file in the location specified by name and path.\n\n PUT is very similar to POST, but the requester specifies the name,\n whereas with POST, the server picks the name.\n\n PUT /api/contents/path/Name.ipynb\n Save notebook at ``path/Name.ipynb``. Notebook structure is specified\n in `content` key of JSON request body. If content is not specified,\n create a new empty notebook.\n \"\"\"\n model = self.get_json_body()\n if model:\n if model.get('copy_from'):\n raise web.HTTPError(400, \"Cannot copy with PUT, only POST\")\n if self.contents_manager.file_exists(path):\n self._save(model, path)\n else:\n self._upload(model, path)\n else:\n self._new_untitled(path)\n\n @web.authenticated\n @json_errors\n def delete(self, path=''):\n \"\"\"delete a file in the given path\"\"\"\n cm = self.contents_manager\n self.log.warn('delete %s', path)\n cm.delete(path)\n self.set_status(204)\n self.finish()\n\n\nclass CheckpointsHandler(IPythonHandler):\n\n SUPPORTED_METHODS = ('GET', 'POST')\n\n @web.authenticated\n @json_errors\n def get(self, path=''):\n \"\"\"get lists checkpoints for a file\"\"\"\n cm = self.contents_manager\n checkpoints = cm.list_checkpoints(path)\n data = json.dumps(checkpoints, default=date_default)\n self.finish(data)\n\n @web.authenticated\n @json_errors\n def post(self, path=''):\n \"\"\"post creates a new checkpoint\"\"\"\n cm = self.contents_manager\n checkpoint = cm.create_checkpoint(path)\n data = json.dumps(checkpoint, default=date_default)\n location = url_path_join(self.base_url, 'api/contents',\n path, 'checkpoints', checkpoint['id'])\n self.set_header('Location', url_escape(location))\n self.set_status(201)\n self.finish(data)\n\n\nclass ModifyCheckpointsHandler(IPythonHandler):\n\n SUPPORTED_METHODS = ('POST', 'DELETE')\n\n @web.authenticated\n @json_errors\n def post(self, path, checkpoint_id):\n \"\"\"post restores a file from a checkpoint\"\"\"\n cm = self.contents_manager\n cm.restore_checkpoint(checkpoint_id, path)\n self.set_status(204)\n self.finish()\n\n @web.authenticated\n @json_errors\n def delete(self, path, checkpoint_id):\n \"\"\"delete clears a checkpoint for a given file\"\"\"\n cm = self.contents_manager\n cm.delete_checkpoint(checkpoint_id, path)\n self.set_status(204)\n self.finish()\n\n\nclass NotebooksRedirectHandler(IPythonHandler):\n \"\"\"Redirect /api/notebooks to /api/contents\"\"\"\n SUPPORTED_METHODS = ('GET', 'PUT', 'PATCH', 'POST', 'DELETE')\n\n def get(self, path):\n self.log.warn(\"/api/notebooks is deprecated, use /api/contents\")\n self.redirect(url_path_join(\n self.base_url,\n 'api/contents',\n path\n ))\n\n put = patch = post = delete = 
get\n\n\n#-----------------------------------------------------------------------------\n# URL to handler mappings\n#-----------------------------------------------------------------------------\n\n\n_checkpoint_id_regex = r\"(?P<checkpoint_id>[\\w-]+)\"\n\ndefault_handlers = [\n (r\"/api/contents%s/checkpoints\" % path_regex, CheckpointsHandler),\n (r\"/api/contents%s/checkpoints/%s\" % (path_regex, _checkpoint_id_regex),\n ModifyCheckpointsHandler),\n (r\"/api/contents%s\" % path_regex, ContentsHandler),\n (r\"/api/notebooks/?(.*)\", NotebooksRedirectHandler),\n]\n", "path": "IPython/html/services/contents/handlers.py"}]} | 3,217 | 927 |
gh_patches_debug_21626 | rasdani/github-patches | git_diff | learningequality__kolibri-2033 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
My Channels not listing the new channels after import
## Summary
After importing a channel, whether from local drive or Internet, it does not appear under `My Channels` until page reload.
## System information
- Version: `0.5.0-beta9`
- Operating system: Windows 7 & Ubuntu 16.10
- Browser: Chrome

On [version 0.4.0](https://github.com/learningequality/kolibri/issues/1640#issuecomment-313949731) at least there was a green notification, but now it's missing too:

State is correctly reported after deleting the channel, green notification included.

--- END ISSUE ---
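Editorial sketch, not part of the original report: the accepted patch later in this record stops clearing finished jobs on every list call and prunes stale entries from a module-level dict by collecting the keys first and popping afterwards. The snippet below illustrates that pruning pattern with made-up job IDs.

```python
# Made-up job IDs; this mirrors the "collect keys first, then pop" pattern used in the patch,
# rather than mutating the dict while iterating over it.
id_tasktype = {"job-1": "remoteimport", "job-2": "localimport", "job-3": "localexport"}
active_ids = ["job-1", "job-3"]

keys_to_pop = [key for key in id_tasktype if key not in active_ids]
for key in keys_to_pop:
    id_tasktype.pop(key)

print(id_tasktype)  # {'job-1': 'remoteimport', 'job-3': 'localexport'}
```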
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/tasks/api.py`
Content:
```
1 import logging as logger
2 import os
3
4 import requests
5 from barbequeue.common.classes import State
6 from barbequeue.exceptions import UserCancelledError
7 from django.apps.registry import AppRegistryNotReady
8 from django.core.management import call_command
9 from django.db import connections
10 from django.http import Http404
11 from django.utils.translation import ugettext as _
12 from kolibri.content.models import ChannelMetadataCache
13 from kolibri.content.utils.channels import get_mounted_drives_with_channel_info
14 from kolibri.content.utils.paths import get_content_database_file_path, get_content_database_file_url
15 from rest_framework import serializers, viewsets
16 from rest_framework.decorators import list_route
17 from rest_framework.response import Response
18
19 from .client import get_client
20 from .permissions import IsDeviceOwnerOnly
21
22 try:
23 from django.apps import apps
24
25 apps.check_apps_ready()
26 except AppRegistryNotReady:
27 import django
28
29 django.setup()
30
31
32 logging = logger.getLogger(__name__)
33
34 REMOTE_IMPORT = 'remoteimport'
35 LOCAL_IMPORT = 'localimport'
36 LOCAL_EXPORT = 'localexport'
37
38 id_tasktype = {}
39
40 class TasksViewSet(viewsets.ViewSet):
41 permission_classes = (IsDeviceOwnerOnly,)
42
43 def list(self, request):
44 jobs_response = [_job_to_response(j) for j in get_client().all_jobs()]
45 ids = [job["id"] for job in jobs_response]
46 # Clean up old job tasktypes
47 for key in id_tasktype.keys():
48 if key not in ids:
49 id_tasktype.pop(key)
50 get_client().clear(force=False)
51 return Response(jobs_response)
52
53 def create(self, request):
54 # unimplemented. Call out to the task-specific APIs for now.
55 pass
56
57 def retrieve(self, request, pk=None):
58 task = _job_to_response(get_client().status(pk))
59 return Response(task)
60
61 def destroy(self, request, pk=None):
62 # unimplemented for now.
63 pass
64
65 @list_route(methods=['post'])
66 def startremoteimport(self, request):
67 '''Download a channel's database from the main curation server, and then
68 download its content.
69
70 '''
71
72 if "channel_id" not in request.data:
73 raise serializers.ValidationError(
74 "The 'channel_id' field is required.")
75
76 channel_id = request.data['channel_id']
77
78 # ensure the requested channel_id can be found on the central server, otherwise error
79 status = requests.head(
80 get_content_database_file_url(channel_id)).status_code
81 if status == 404:
82 raise Http404(
83 _("The requested channel does not exist on the content server")
84 )
85
86 task_id = get_client().schedule(
87 _networkimport, channel_id, track_progress=True, cancellable=True)
88
89 id_tasktype[task_id] = REMOTE_IMPORT
90
91 # attempt to get the created Task, otherwise return pending status
92 resp = _job_to_response(get_client().status(task_id))
93
94 return Response(resp)
95
96 @list_route(methods=['post'])
97 def startlocalimport(self, request):
98 """
99 Import a channel from a local drive, and copy content to the local machine.
100 """
101 # Importing django/running setup because Windows...
102
103 if "drive_id" not in request.data:
104 raise serializers.ValidationError(
105 "The 'drive_id' field is required.")
106
107 task_id = get_client().schedule(
108 _localimport, request.data['drive_id'], track_progress=True, cancellable=True)
109
110 id_tasktype[task_id] = LOCAL_IMPORT
111
112 # attempt to get the created Task, otherwise return pending status
113 resp = _job_to_response(get_client().status(task_id))
114
115 return Response(resp)
116
117 @list_route(methods=['post'])
118 def startlocalexport(self, request):
119 '''
120 Export a channel to a local drive, and copy content to the drive.
121
122 '''
123
124 if "drive_id" not in request.data:
125 raise serializers.ValidationError(
126 "The 'drive_id' field is required.")
127
128 task_id = get_client().schedule(
129 _localexport, request.data['drive_id'], track_progress=True, cancellable=True)
130
131 id_tasktype[task_id] = LOCAL_EXPORT
132
133 # attempt to get the created Task, otherwise return pending status
134 resp = _job_to_response(get_client().status(task_id))
135
136 return Response(resp)
137
138 @list_route(methods=['post'])
139 def canceltask(self, request):
140 '''
141 Cancel a task with its task id given in the task_id parameter.
142 '''
143
144 if 'task_id' not in request.data:
145 raise serializers.ValidationError(
146 "The 'task_id' field is required.")
147
148 get_client().cancel(request.data['task_id'])
149 get_client().clear(force=True)
150 return Response({})
151
152 @list_route(methods=['post'])
153 def cleartasks(self, request):
154 '''
155 Cancels all running tasks.
156 '''
157
158 get_client().clear(force=True)
159 return Response({})
160
161 @list_route(methods=['get'])
162 def localdrive(self, request):
163 drives = get_mounted_drives_with_channel_info()
164
165 # make sure everything is a dict, before converting to JSON
166 assert isinstance(drives, dict)
167 out = [mountdata._asdict() for mountdata in drives.values()]
168
169 return Response(out)
170
171
172 def _networkimport(channel_id, update_progress=None, check_for_cancel=None):
173 call_command(
174 "importchannel",
175 "network",
176 channel_id,
177 update_progress=update_progress,
178 check_for_cancel=check_for_cancel)
179 try:
180 call_command(
181 "importcontent",
182 "network",
183 channel_id,
184 update_progress=update_progress,
185 check_for_cancel=check_for_cancel)
186 except UserCancelledError:
187 connections.close_all() # close all DB connections (FIX for #1818)
188 try:
189 os.remove(get_content_database_file_path(channel_id))
190 except OSError:
191 pass
192 ChannelMetadataCache.objects.filter(id=channel_id).delete()
193 raise
194 connections.close_all() # close all DB connections (FIX for #1818)
195
196 def _localimport(drive_id, update_progress=None, check_for_cancel=None):
197 drives = get_mounted_drives_with_channel_info()
198 drive = drives[drive_id]
199 # copy channel's db file then copy all the content files from storage dir
200 try:
201 for channel in drive.metadata["channels"]:
202 call_command(
203 "importchannel",
204 "local",
205 channel["id"],
206 drive.datafolder,
207 update_progress=update_progress,
208 check_for_cancel=check_for_cancel)
209 call_command(
210 "importcontent",
211 "local",
212 channel["id"],
213 drive.datafolder,
214 update_progress=update_progress,
215 check_for_cancel=check_for_cancel)
216 except UserCancelledError:
217 connections.close_all() # close all DB connections (FIX for #1818)
218 for channel in drive.metadata["channels"]:
219 channel_id = channel["id"]
220 try:
221 os.remove(get_content_database_file_path(channel_id))
222 except OSError:
223 pass
224 ChannelMetadataCache.objects.filter(id=channel_id).delete()
225 connections.close_all() # close all DB connections (FIX for #1818)s
226 raise
227 connections.close_all() # close all DB connections (FIX for #1818)
228
229
230 def _localexport(drive_id, update_progress=None, check_for_cancel=None):
231 drives = get_mounted_drives_with_channel_info()
232 drive = drives[drive_id]
233 for channel in ChannelMetadataCache.objects.all():
234 call_command(
235 "exportchannel",
236 channel.id,
237 drive.datafolder,
238 update_progress=update_progress,
239 check_for_cancel=check_for_cancel)
240 try:
241 call_command(
242 "exportcontent",
243 channel.id,
244 drive.datafolder,
245 update_progress=update_progress,
246 check_for_cancel=check_for_cancel)
247 except UserCancelledError:
248 try:
249 os.remove(get_content_database_file_path(channel.id, datafolder=drive.datafolder))
250 except OSError:
251 pass
252 connections.close_all() # close all DB connections (FIX for #1818)
253 raise
254 connections.close_all() # close all DB connections (FIX for #1818)
255
256
257 def _job_to_response(job):
258 if not job:
259 return {
260 "type": None,
261 "status": State.SCHEDULED,
262 "percentage": 0,
263 "progress": [],
264 "id": None,
265 }
266 else:
267 return {
268 "type": id_tasktype.get(job.job_id),
269 "status": job.state,
270 "exception": str(job.exception),
271 "traceback": str(job.traceback),
272 "percentage": job.percentage_progress,
273 "id": job.job_id,
274 }
275
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/tasks/api.py b/kolibri/tasks/api.py
--- a/kolibri/tasks/api.py
+++ b/kolibri/tasks/api.py
@@ -44,10 +44,10 @@
jobs_response = [_job_to_response(j) for j in get_client().all_jobs()]
ids = [job["id"] for job in jobs_response]
# Clean up old job tasktypes
- for key in id_tasktype.keys():
- if key not in ids:
- id_tasktype.pop(key)
- get_client().clear(force=False)
+ keys_to_pop = [key for key in id_tasktype.keys() if key not in ids]
+ for key in keys_to_pop:
+ id_tasktype.pop(key)
+
return Response(jobs_response)
def create(self, request):
@@ -64,9 +64,9 @@
@list_route(methods=['post'])
def startremoteimport(self, request):
- '''Download a channel's database from the main curation server, and then
+ '''
+ Download a channel's database from the main curation server, and then
download its content.
-
'''
if "channel_id" not in request.data:
| {"golden_diff": "diff --git a/kolibri/tasks/api.py b/kolibri/tasks/api.py\n--- a/kolibri/tasks/api.py\n+++ b/kolibri/tasks/api.py\n@@ -44,10 +44,10 @@\n jobs_response = [_job_to_response(j) for j in get_client().all_jobs()]\n ids = [job[\"id\"] for job in jobs_response]\n # Clean up old job tasktypes\n- for key in id_tasktype.keys():\n- if key not in ids:\n- id_tasktype.pop(key)\n- get_client().clear(force=False)\n+ keys_to_pop = [key for key in id_tasktype.keys() if key not in ids]\n+ for key in keys_to_pop:\n+ id_tasktype.pop(key)\n+\n return Response(jobs_response)\n \n def create(self, request):\n@@ -64,9 +64,9 @@\n \n @list_route(methods=['post'])\n def startremoteimport(self, request):\n- '''Download a channel's database from the main curation server, and then\n+ '''\n+ Download a channel's database from the main curation server, and then\n download its content.\n-\n '''\n \n if \"channel_id\" not in request.data:\n", "issue": "My Channels not listing the new channels after import\n## Summary\r\n\r\nAfter importing a channel, whether from local drive or Internet, it does not appear under `My Channels` until page reload. \r\n\r\n## System information\r\n\r\n - Version: `0.5.0-beta9`\r\n - Operating system: Windows 7 & Ubuntu 16.10\r\n - Browser: Chrome\r\n\r\n\r\n\r\n\r\nOn [version 0.4.0](https://github.com/learningequality/kolibri/issues/1640#issuecomment-313949731) at least there was a green notification, but now it's missing too:\r\n\r\n\r\n\r\nState is correctly reported after deleting the channel, green notification included.\r\n\r\n\n", "before_files": [{"content": "import logging as logger\nimport os\n\nimport requests\nfrom barbequeue.common.classes import State\nfrom barbequeue.exceptions import UserCancelledError\nfrom django.apps.registry import AppRegistryNotReady\nfrom django.core.management import call_command\nfrom django.db import connections\nfrom django.http import Http404\nfrom django.utils.translation import ugettext as _\nfrom kolibri.content.models import ChannelMetadataCache\nfrom kolibri.content.utils.channels import get_mounted_drives_with_channel_info\nfrom kolibri.content.utils.paths import get_content_database_file_path, get_content_database_file_url\nfrom rest_framework import serializers, viewsets\nfrom rest_framework.decorators import list_route\nfrom rest_framework.response import Response\n\nfrom .client import get_client\nfrom .permissions import IsDeviceOwnerOnly\n\ntry:\n from django.apps import apps\n\n apps.check_apps_ready()\nexcept AppRegistryNotReady:\n import django\n\n django.setup()\n\n\nlogging = logger.getLogger(__name__)\n\nREMOTE_IMPORT = 'remoteimport'\nLOCAL_IMPORT = 'localimport'\nLOCAL_EXPORT = 'localexport'\n\nid_tasktype = {}\n\nclass TasksViewSet(viewsets.ViewSet):\n permission_classes = (IsDeviceOwnerOnly,)\n\n def list(self, request):\n jobs_response = [_job_to_response(j) for j in get_client().all_jobs()]\n ids = [job[\"id\"] for job in jobs_response]\n # Clean up old job tasktypes\n for key in id_tasktype.keys():\n if key not in ids:\n id_tasktype.pop(key)\n get_client().clear(force=False)\n return Response(jobs_response)\n\n def create(self, request):\n # unimplemented. 
Call out to the task-specific APIs for now.\n pass\n\n def retrieve(self, request, pk=None):\n task = _job_to_response(get_client().status(pk))\n return Response(task)\n\n def destroy(self, request, pk=None):\n # unimplemented for now.\n pass\n\n @list_route(methods=['post'])\n def startremoteimport(self, request):\n '''Download a channel's database from the main curation server, and then\n download its content.\n\n '''\n\n if \"channel_id\" not in request.data:\n raise serializers.ValidationError(\n \"The 'channel_id' field is required.\")\n\n channel_id = request.data['channel_id']\n\n # ensure the requested channel_id can be found on the central server, otherwise error\n status = requests.head(\n get_content_database_file_url(channel_id)).status_code\n if status == 404:\n raise Http404(\n _(\"The requested channel does not exist on the content server\")\n )\n\n task_id = get_client().schedule(\n _networkimport, channel_id, track_progress=True, cancellable=True)\n\n id_tasktype[task_id] = REMOTE_IMPORT\n\n # attempt to get the created Task, otherwise return pending status\n resp = _job_to_response(get_client().status(task_id))\n\n return Response(resp)\n\n @list_route(methods=['post'])\n def startlocalimport(self, request):\n \"\"\"\n Import a channel from a local drive, and copy content to the local machine.\n \"\"\"\n # Importing django/running setup because Windows...\n\n if \"drive_id\" not in request.data:\n raise serializers.ValidationError(\n \"The 'drive_id' field is required.\")\n\n task_id = get_client().schedule(\n _localimport, request.data['drive_id'], track_progress=True, cancellable=True)\n\n id_tasktype[task_id] = LOCAL_IMPORT\n\n # attempt to get the created Task, otherwise return pending status\n resp = _job_to_response(get_client().status(task_id))\n\n return Response(resp)\n\n @list_route(methods=['post'])\n def startlocalexport(self, request):\n '''\n Export a channel to a local drive, and copy content to the drive.\n\n '''\n\n if \"drive_id\" not in request.data:\n raise serializers.ValidationError(\n \"The 'drive_id' field is required.\")\n\n task_id = get_client().schedule(\n _localexport, request.data['drive_id'], track_progress=True, cancellable=True)\n\n id_tasktype[task_id] = LOCAL_EXPORT\n\n # attempt to get the created Task, otherwise return pending status\n resp = _job_to_response(get_client().status(task_id))\n\n return Response(resp)\n\n @list_route(methods=['post'])\n def canceltask(self, request):\n '''\n Cancel a task with its task id given in the task_id parameter.\n '''\n\n if 'task_id' not in request.data:\n raise serializers.ValidationError(\n \"The 'task_id' field is required.\")\n\n get_client().cancel(request.data['task_id'])\n get_client().clear(force=True)\n return Response({})\n\n @list_route(methods=['post'])\n def cleartasks(self, request):\n '''\n Cancels all running tasks.\n '''\n\n get_client().clear(force=True)\n return Response({})\n\n @list_route(methods=['get'])\n def localdrive(self, request):\n drives = get_mounted_drives_with_channel_info()\n\n # make sure everything is a dict, before converting to JSON\n assert isinstance(drives, dict)\n out = [mountdata._asdict() for mountdata in drives.values()]\n\n return Response(out)\n\n\ndef _networkimport(channel_id, update_progress=None, check_for_cancel=None):\n call_command(\n \"importchannel\",\n \"network\",\n channel_id,\n update_progress=update_progress,\n check_for_cancel=check_for_cancel)\n try:\n call_command(\n \"importcontent\",\n \"network\",\n channel_id,\n 
update_progress=update_progress,\n check_for_cancel=check_for_cancel)\n except UserCancelledError:\n connections.close_all() # close all DB connections (FIX for #1818)\n try:\n os.remove(get_content_database_file_path(channel_id))\n except OSError:\n pass\n ChannelMetadataCache.objects.filter(id=channel_id).delete()\n raise\n connections.close_all() # close all DB connections (FIX for #1818)\n\ndef _localimport(drive_id, update_progress=None, check_for_cancel=None):\n drives = get_mounted_drives_with_channel_info()\n drive = drives[drive_id]\n # copy channel's db file then copy all the content files from storage dir\n try:\n for channel in drive.metadata[\"channels\"]:\n call_command(\n \"importchannel\",\n \"local\",\n channel[\"id\"],\n drive.datafolder,\n update_progress=update_progress,\n check_for_cancel=check_for_cancel)\n call_command(\n \"importcontent\",\n \"local\",\n channel[\"id\"],\n drive.datafolder,\n update_progress=update_progress,\n check_for_cancel=check_for_cancel)\n except UserCancelledError:\n connections.close_all() # close all DB connections (FIX for #1818)\n for channel in drive.metadata[\"channels\"]:\n channel_id = channel[\"id\"]\n try:\n os.remove(get_content_database_file_path(channel_id))\n except OSError:\n pass\n ChannelMetadataCache.objects.filter(id=channel_id).delete()\n connections.close_all() # close all DB connections (FIX for #1818)s\n raise\n connections.close_all() # close all DB connections (FIX for #1818)\n\n\ndef _localexport(drive_id, update_progress=None, check_for_cancel=None):\n drives = get_mounted_drives_with_channel_info()\n drive = drives[drive_id]\n for channel in ChannelMetadataCache.objects.all():\n call_command(\n \"exportchannel\",\n channel.id,\n drive.datafolder,\n update_progress=update_progress,\n check_for_cancel=check_for_cancel)\n try:\n call_command(\n \"exportcontent\",\n channel.id,\n drive.datafolder,\n update_progress=update_progress,\n check_for_cancel=check_for_cancel)\n except UserCancelledError:\n try:\n os.remove(get_content_database_file_path(channel.id, datafolder=drive.datafolder))\n except OSError:\n pass\n connections.close_all() # close all DB connections (FIX for #1818)\n raise\n connections.close_all() # close all DB connections (FIX for #1818)\n\n\ndef _job_to_response(job):\n if not job:\n return {\n \"type\": None,\n \"status\": State.SCHEDULED,\n \"percentage\": 0,\n \"progress\": [],\n \"id\": None,\n }\n else:\n return {\n \"type\": id_tasktype.get(job.job_id),\n \"status\": job.state,\n \"exception\": str(job.exception),\n \"traceback\": str(job.traceback),\n \"percentage\": job.percentage_progress,\n \"id\": job.job_id,\n }\n", "path": "kolibri/tasks/api.py"}], "after_files": [{"content": "import logging as logger\nimport os\n\nimport requests\nfrom barbequeue.common.classes import State\nfrom barbequeue.exceptions import UserCancelledError\nfrom django.apps.registry import AppRegistryNotReady\nfrom django.core.management import call_command\nfrom django.db import connections\nfrom django.http import Http404\nfrom django.utils.translation import ugettext as _\nfrom kolibri.content.models import ChannelMetadataCache\nfrom kolibri.content.utils.channels import get_mounted_drives_with_channel_info\nfrom kolibri.content.utils.paths import get_content_database_file_path, get_content_database_file_url\nfrom rest_framework import serializers, viewsets\nfrom rest_framework.decorators import list_route\nfrom rest_framework.response import Response\n\nfrom .client import get_client\nfrom .permissions import 
IsDeviceOwnerOnly\n\ntry:\n from django.apps import apps\n\n apps.check_apps_ready()\nexcept AppRegistryNotReady:\n import django\n\n django.setup()\n\n\nlogging = logger.getLogger(__name__)\n\nREMOTE_IMPORT = 'remoteimport'\nLOCAL_IMPORT = 'localimport'\nLOCAL_EXPORT = 'localexport'\n\nid_tasktype = {}\n\nclass TasksViewSet(viewsets.ViewSet):\n permission_classes = (IsDeviceOwnerOnly,)\n\n def list(self, request):\n jobs_response = [_job_to_response(j) for j in get_client().all_jobs()]\n ids = [job[\"id\"] for job in jobs_response]\n # Clean up old job tasktypes\n keys_to_pop = [key for key in id_tasktype.keys() if key not in ids]\n for key in keys_to_pop:\n id_tasktype.pop(key)\n\n return Response(jobs_response)\n\n def create(self, request):\n # unimplemented. Call out to the task-specific APIs for now.\n pass\n\n def retrieve(self, request, pk=None):\n task = _job_to_response(get_client().status(pk))\n return Response(task)\n\n def destroy(self, request, pk=None):\n # unimplemented for now.\n pass\n\n @list_route(methods=['post'])\n def startremoteimport(self, request):\n '''\n Download a channel's database from the main curation server, and then\n download its content.\n '''\n\n if \"channel_id\" not in request.data:\n raise serializers.ValidationError(\n \"The 'channel_id' field is required.\")\n\n channel_id = request.data['channel_id']\n\n # ensure the requested channel_id can be found on the central server, otherwise error\n status = requests.head(\n get_content_database_file_url(channel_id)).status_code\n if status == 404:\n raise Http404(\n _(\"The requested channel does not exist on the content server\")\n )\n\n task_id = get_client().schedule(\n _networkimport, channel_id, track_progress=True, cancellable=True)\n\n id_tasktype[task_id] = REMOTE_IMPORT\n\n # attempt to get the created Task, otherwise return pending status\n resp = _job_to_response(get_client().status(task_id))\n\n return Response(resp)\n\n @list_route(methods=['post'])\n def startlocalimport(self, request):\n \"\"\"\n Import a channel from a local drive, and copy content to the local machine.\n \"\"\"\n # Importing django/running setup because Windows...\n\n if \"drive_id\" not in request.data:\n raise serializers.ValidationError(\n \"The 'drive_id' field is required.\")\n\n task_id = get_client().schedule(\n _localimport, request.data['drive_id'], track_progress=True, cancellable=True)\n\n id_tasktype[task_id] = LOCAL_IMPORT\n\n # attempt to get the created Task, otherwise return pending status\n resp = _job_to_response(get_client().status(task_id))\n\n return Response(resp)\n\n @list_route(methods=['post'])\n def startlocalexport(self, request):\n '''\n Export a channel to a local drive, and copy content to the drive.\n\n '''\n\n if \"drive_id\" not in request.data:\n raise serializers.ValidationError(\n \"The 'drive_id' field is required.\")\n\n task_id = get_client().schedule(\n _localexport, request.data['drive_id'], track_progress=True, cancellable=True)\n\n id_tasktype[task_id] = LOCAL_EXPORT\n\n # attempt to get the created Task, otherwise return pending status\n resp = _job_to_response(get_client().status(task_id))\n\n return Response(resp)\n\n @list_route(methods=['post'])\n def canceltask(self, request):\n '''\n Cancel a task with its task id given in the task_id parameter.\n '''\n\n if 'task_id' not in request.data:\n raise serializers.ValidationError(\n \"The 'task_id' field is required.\")\n\n get_client().cancel(request.data['task_id'])\n get_client().clear(force=True)\n return Response({})\n\n 
@list_route(methods=['post'])\n def cleartasks(self, request):\n '''\n Cancels all running tasks.\n '''\n\n get_client().clear(force=True)\n return Response({})\n\n @list_route(methods=['get'])\n def localdrive(self, request):\n drives = get_mounted_drives_with_channel_info()\n\n # make sure everything is a dict, before converting to JSON\n assert isinstance(drives, dict)\n out = [mountdata._asdict() for mountdata in drives.values()]\n\n return Response(out)\n\n\ndef _networkimport(channel_id, update_progress=None, check_for_cancel=None):\n call_command(\n \"importchannel\",\n \"network\",\n channel_id,\n update_progress=update_progress,\n check_for_cancel=check_for_cancel)\n try:\n call_command(\n \"importcontent\",\n \"network\",\n channel_id,\n update_progress=update_progress,\n check_for_cancel=check_for_cancel)\n except UserCancelledError:\n connections.close_all() # close all DB connections (FIX for #1818)\n try:\n os.remove(get_content_database_file_path(channel_id))\n except OSError:\n pass\n ChannelMetadataCache.objects.filter(id=channel_id).delete()\n raise\n connections.close_all() # close all DB connections (FIX for #1818)\n\ndef _localimport(drive_id, update_progress=None, check_for_cancel=None):\n drives = get_mounted_drives_with_channel_info()\n drive = drives[drive_id]\n # copy channel's db file then copy all the content files from storage dir\n try:\n for channel in drive.metadata[\"channels\"]:\n call_command(\n \"importchannel\",\n \"local\",\n channel[\"id\"],\n drive.datafolder,\n update_progress=update_progress,\n check_for_cancel=check_for_cancel)\n call_command(\n \"importcontent\",\n \"local\",\n channel[\"id\"],\n drive.datafolder,\n update_progress=update_progress,\n check_for_cancel=check_for_cancel)\n except UserCancelledError:\n connections.close_all() # close all DB connections (FIX for #1818)\n for channel in drive.metadata[\"channels\"]:\n channel_id = channel[\"id\"]\n try:\n os.remove(get_content_database_file_path(channel_id))\n except OSError:\n pass\n ChannelMetadataCache.objects.filter(id=channel_id).delete()\n connections.close_all() # close all DB connections (FIX for #1818)s\n raise\n connections.close_all() # close all DB connections (FIX for #1818)\n\n\ndef _localexport(drive_id, update_progress=None, check_for_cancel=None):\n drives = get_mounted_drives_with_channel_info()\n drive = drives[drive_id]\n for channel in ChannelMetadataCache.objects.all():\n call_command(\n \"exportchannel\",\n channel.id,\n drive.datafolder,\n update_progress=update_progress,\n check_for_cancel=check_for_cancel)\n try:\n call_command(\n \"exportcontent\",\n channel.id,\n drive.datafolder,\n update_progress=update_progress,\n check_for_cancel=check_for_cancel)\n except UserCancelledError:\n try:\n os.remove(get_content_database_file_path(channel.id, datafolder=drive.datafolder))\n except OSError:\n pass\n connections.close_all() # close all DB connections (FIX for #1818)\n raise\n connections.close_all() # close all DB connections (FIX for #1818)\n\n\ndef _job_to_response(job):\n if not job:\n return {\n \"type\": None,\n \"status\": State.SCHEDULED,\n \"percentage\": 0,\n \"progress\": [],\n \"id\": None,\n }\n else:\n return {\n \"type\": id_tasktype.get(job.job_id),\n \"status\": job.state,\n \"exception\": str(job.exception),\n \"traceback\": str(job.traceback),\n \"percentage\": job.percentage_progress,\n \"id\": job.job_id,\n }\n", "path": "kolibri/tasks/api.py"}]} | 3,244 | 271 |
gh_patches_debug_35516 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-3621 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG]: [chat] Unable to run train_prompts.sh by single card
### 🐛 Describe the bug
`prompt_sampler` and `pretrain_sampler` are never defined in `examples/train_prompts.py` when `dist.get_world_size() == 1`, so a single-card run fails with an undefined name.
### Environment
_No response_
--- END ISSUE ---
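In other words, the script assigns the samplers only inside the `dist.get_world_size() > 1` branch, so a single-card run reaches the `DataLoader` call with the name unbound. A minimal sketch of the guard this implies, reusing the names from `examples/train_prompts.py` (an illustration of the idea, not the exact patch):

```python
prompt_dataset = PromptDataset(tokenizer=tokenizer, data_path=args.prompt_path, max_datasets_size=16384)
if dist.is_initialized() and dist.get_world_size() > 1:
    prompt_sampler = DistributedSampler(prompt_dataset, shuffle=True, seed=42, drop_last=True)
else:
    prompt_sampler = None  # single card: fall back to the DataLoader's own shuffling

prompt_dataloader = DataLoader(prompt_dataset,
                               shuffle=(prompt_sampler is None),
                               sampler=prompt_sampler,
                               batch_size=args.train_batch_size)
# the same fallback is needed for pretrain_sampler / pretrain_dataloader
```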
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `applications/Chat/examples/train_prompts.py`
Content:
```
1 import argparse
2
3 import pandas as pd
4 import torch
5 import torch.distributed as dist
6 from coati.dataset import DataCollatorForSupervisedDataset, PromptDataset, SupervisedDataset
7 from coati.models.bloom import BLOOMRM, BLOOMActor, BLOOMCritic
8 from coati.models.gpt import GPTRM, GPTActor, GPTCritic
9 from coati.models.llama import LlamaActor, LlamaCritic, LlamaRM
10 from coati.models.opt import OPTRM, OPTActor, OPTCritic
11 from coati.models.roberta import RoBERTaRM, RoBERTaActor, RoBERTaCritic
12 from coati.trainer import PPOTrainer
13 from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy
14 from coati.utils import prepare_llama_tokenizer_and_embedding
15 from torch.optim import Adam
16 from torch.utils.data import DataLoader
17 from torch.utils.data.distributed import DistributedSampler
18 from transformers import AutoTokenizer, BloomTokenizerFast, GPT2Tokenizer, LlamaTokenizer, RobertaTokenizer
19
20 from colossalai.nn.optimizer import HybridAdam
21
22
23 def main(args):
24 # configure strategy
25 if args.strategy == 'naive':
26 strategy = NaiveStrategy()
27 elif args.strategy == 'ddp':
28 strategy = DDPStrategy()
29 elif args.strategy == 'colossalai_gemini':
30 strategy = ColossalAIStrategy(stage=3, placement_policy='cuda', initial_scale=2**5)
31 elif args.strategy == 'colossalai_zero2':
32 strategy = ColossalAIStrategy(stage=2, placement_policy='cuda')
33 else:
34 raise ValueError(f'Unsupported strategy "{args.strategy}"')
35
36 if args.rm_path is not None:
37 state_dict = torch.load(args.rm_path, map_location='cpu')
38
39 # configure model
40 if args.model == 'gpt2':
41 initial_model = GPTActor(pretrained=args.pretrain)
42 elif args.model == 'bloom':
43 initial_model = BLOOMActor(pretrained=args.pretrain)
44 elif args.model == 'opt':
45 initial_model = OPTActor(pretrained=args.pretrain)
46 elif args.model == 'llama':
47 initial_model = LlamaActor(pretrained=args.pretrain)
48 elif args.model == 'roberta':
49 initial_model = RoBERTaActor(pretrained=args.pretrain)
50 else:
51 raise ValueError(f'Unsupported actor model "{args.model}"')
52
53 if args.rm_model == None:
54 rm_model_name = args.model
55 else:
56 rm_model_name = args.rm_model
57
58 if rm_model_name == 'gpt2':
59 reward_model = GPTRM(pretrained=args.rm_pretrain)
60 elif rm_model_name == 'bloom':
61 reward_model = BLOOMRM(pretrained=args.rm_pretrain)
62 elif rm_model_name == 'opt':
63 reward_model = OPTRM(pretrained=args.rm_pretrain)
64 elif rm_model_name == 'llama':
65 reward_model = LlamaRM(pretrained=args.rm_pretrain)
66 elif rm_model_name == 'roberta':
67 reward_model = RoBERTaRM(pretrained=args.rm_pretrain)
68 else:
69 raise ValueError(f'Unsupported reward model "{rm_model_name}"')
70
71 if args.rm_path is not None:
72 reward_model.load_state_dict(state_dict)
73
74 if args.strategy != 'colossalai_gemini':
75 initial_model.to(torch.float16).to(torch.cuda.current_device())
76 reward_model.to(torch.float16).to(torch.cuda.current_device())
77
78 with strategy.model_init_context():
79 if args.model == 'gpt2':
80 actor = GPTActor(pretrained=args.pretrain, lora_rank=args.lora_rank)
81 elif args.model == 'bloom':
82 actor = BLOOMActor(pretrained=args.pretrain, lora_rank=args.lora_rank)
83 elif args.model == 'opt':
84 actor = OPTActor(pretrained=args.pretrain, lora_rank=args.lora_rank)
85 elif args.model == 'llama':
86 actor = LlamaActor(pretrained=args.pretrain, lora_rank=args.lora_rank)
87 elif args.model == 'roberta':
88 actor = RoBERTaActor(pretrained=args.pretrain, lora_rank=args.lora_rank)
89 else:
90 raise ValueError(f'Unsupported actor model "{args.model}"')
91
92 if rm_model_name == 'gpt2':
93 critic = GPTCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)
94 elif rm_model_name == 'bloom':
95 critic = BLOOMCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)
96 elif rm_model_name == 'opt':
97 critic = OPTCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)
98 elif rm_model_name == 'llama':
99 critic = LlamaCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)
100 elif rm_model_name == 'roberta':
101 critic = RoBERTaCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)
102 else:
103 raise ValueError(f'Unsupported reward model "{rm_model_name}"')
104
105 if args.rm_path is not None:
106 critic.load_state_dict(state_dict)
107 del state_dict
108
109 if args.strategy != 'colossalai_gemini':
110 critic.to(torch.float16).to(torch.cuda.current_device())
111 actor.to(torch.float16).to(torch.cuda.current_device())
112
113 # configure optimizer
114 if args.strategy.startswith('colossalai'):
115 actor_optim = HybridAdam(actor.parameters(), lr=1e-7)
116 critic_optim = HybridAdam(critic.parameters(), lr=1e-7)
117 else:
118 actor_optim = Adam(actor.parameters(), lr=1e-7)
119 critic_optim = Adam(critic.parameters(), lr=1e-7)
120
121 # configure tokenizer
122 if args.model == 'gpt2':
123 tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
124 elif args.model == 'bloom':
125 tokenizer = BloomTokenizerFast.from_pretrained('bigscience/bloom-560m')
126 elif args.model == 'opt':
127 tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
128 elif args.model == 'llama':
129 tokenizer = LlamaTokenizer.from_pretrained(args.pretrain)
130 tokenizer.eos_token = '<\s>'
131 elif args.model == 'roberta':
132 tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
133 else:
134 raise ValueError(f'Unsupported model "{args.model}"')
135
136 if args.model == 'llama':
137 tokenizer = prepare_llama_tokenizer_and_embedding(tokenizer, actor)
138 else:
139 tokenizer.pad_token = tokenizer.eos_token
140
141 data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
142
143 prompt_dataset = PromptDataset(tokenizer=tokenizer, data_path=args.prompt_path, max_datasets_size=16384)
144 if dist.is_initialized() and dist.get_world_size() > 1:
145 prompt_sampler = DistributedSampler(prompt_dataset, shuffle=True, seed=42, drop_last=True)
146 prompt_dataloader = DataLoader(prompt_dataset,
147 shuffle=(prompt_sampler is None),
148 sampler=prompt_sampler,
149 batch_size=args.train_batch_size)
150
151 pretrain_dataset = SupervisedDataset(tokenizer=tokenizer, data_path=args.pretrain_dataset, max_datasets_size=16384)
152 if dist.is_initialized() and dist.get_world_size() > 1:
153 pretrain_sampler = DistributedSampler(pretrain_dataset, shuffle=True, seed=42, drop_last=True)
154 pretrain_dataloader = DataLoader(pretrain_dataset,
155 shuffle=(pretrain_sampler is None),
156 sampler=pretrain_sampler,
157 batch_size=args.ptx_batch_size,
158 collate_fn=data_collator)
159
160 def tokenize_fn(texts):
161 # MUST padding to max length to ensure inputs of all ranks have the same length
162 # Different length may lead to hang when using gemini, as different generation steps
163 batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True)
164 return {k: v.to(torch.cuda.current_device()) for k, v in batch.items()}
165
166 (actor, actor_optim), (critic, critic_optim) = strategy.prepare((actor, actor_optim), (critic, critic_optim))
167
168 # configure trainer
169 trainer = PPOTrainer(
170 strategy,
171 actor,
172 critic,
173 reward_model,
174 initial_model,
175 actor_optim,
176 critic_optim,
177 kl_coef=args.kl_coef,
178 ptx_coef=args.ptx_coef,
179 max_epochs=args.max_epochs,
180 train_batch_size=args.train_batch_size,
181 experience_batch_size=args.experience_batch_size,
182 tokenizer=tokenize_fn,
183 max_length=128,
184 do_sample=True,
185 temperature=1.0,
186 top_k=50,
187 pad_token_id=tokenizer.pad_token_id,
188 eos_token_id=tokenizer.eos_token_id,
189 )
190
191 trainer.fit(prompt_dataloader=prompt_dataloader,
192 pretrain_dataloader=pretrain_dataloader,
193 num_episodes=args.num_episodes,
194 max_timesteps=args.max_timesteps,
195 update_timesteps=args.update_timesteps)
196
197 # save model checkpoint after fitting
198 trainer.save_model(args.save_path, only_rank0=True, tokenizer=tokenizer)
199 # save optimizer checkpoint on all ranks
200 if args.need_optim_ckpt:
201 strategy.save_optimizer(actor_optim,
202 'actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()),
203 only_rank0=False)
204
205
206 if __name__ == '__main__':
207 parser = argparse.ArgumentParser()
208 parser.add_argument('--prompt_path', type=str, default=None, help='path to the prompt dataset')
209 parser.add_argument('--pretrain_dataset', type=str, default=None, help='path to the pretrained dataset')
210 parser.add_argument('--strategy',
211 choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'],
212 default='naive',
213 help='strategy to use')
214 parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama', 'roberta'])
215 parser.add_argument('--pretrain', type=str, default=None)
216 parser.add_argument('--rm_model', default=None, choices=['gpt2', 'bloom', 'opt', 'llama', 'roberta'])
217 parser.add_argument('--rm_path', type=str, default=None)
218 parser.add_argument('--rm_pretrain', type=str, default=None)
219 parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts')
220 parser.add_argument('--need_optim_ckpt', type=bool, default=False)
221 parser.add_argument('--num_episodes', type=int, default=10)
222 parser.add_argument('--max_timesteps', type=int, default=10)
223 parser.add_argument('--update_timesteps', type=int, default=10)
224 parser.add_argument('--max_epochs', type=int, default=5)
225 parser.add_argument('--train_batch_size', type=int, default=8)
226 parser.add_argument('--ptx_batch_size', type=int, default=1)
227 parser.add_argument('--experience_batch_size', type=int, default=8)
228 parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank")
229 parser.add_argument('--kl_coef', type=float, default=0.1)
230 parser.add_argument('--ptx_coef', type=float, default=0.9)
231 args = parser.parse_args()
232 main(args)
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/applications/Chat/examples/train_prompts.py b/applications/Chat/examples/train_prompts.py
--- a/applications/Chat/examples/train_prompts.py
+++ b/applications/Chat/examples/train_prompts.py
@@ -8,7 +8,7 @@
from coati.models.gpt import GPTRM, GPTActor, GPTCritic
from coati.models.llama import LlamaActor, LlamaCritic, LlamaRM
from coati.models.opt import OPTRM, OPTActor, OPTCritic
-from coati.models.roberta import RoBERTaRM, RoBERTaActor, RoBERTaCritic
+from coati.models.roberta import RoBERTaActor, RoBERTaCritic, RoBERTaRM
from coati.trainer import PPOTrainer
from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy
from coati.utils import prepare_llama_tokenizer_and_embedding
@@ -143,6 +143,8 @@
prompt_dataset = PromptDataset(tokenizer=tokenizer, data_path=args.prompt_path, max_datasets_size=16384)
if dist.is_initialized() and dist.get_world_size() > 1:
prompt_sampler = DistributedSampler(prompt_dataset, shuffle=True, seed=42, drop_last=True)
+ else:
+ prompt_sampler = None
prompt_dataloader = DataLoader(prompt_dataset,
shuffle=(prompt_sampler is None),
sampler=prompt_sampler,
@@ -151,6 +153,8 @@
pretrain_dataset = SupervisedDataset(tokenizer=tokenizer, data_path=args.pretrain_dataset, max_datasets_size=16384)
if dist.is_initialized() and dist.get_world_size() > 1:
pretrain_sampler = DistributedSampler(pretrain_dataset, shuffle=True, seed=42, drop_last=True)
+ else:
+ pretrain_sampler = None
pretrain_dataloader = DataLoader(pretrain_dataset,
shuffle=(pretrain_sampler is None),
sampler=pretrain_sampler,
| {"golden_diff": "diff --git a/applications/Chat/examples/train_prompts.py b/applications/Chat/examples/train_prompts.py\n--- a/applications/Chat/examples/train_prompts.py\n+++ b/applications/Chat/examples/train_prompts.py\n@@ -8,7 +8,7 @@\n from coati.models.gpt import GPTRM, GPTActor, GPTCritic\n from coati.models.llama import LlamaActor, LlamaCritic, LlamaRM\n from coati.models.opt import OPTRM, OPTActor, OPTCritic\n-from coati.models.roberta import RoBERTaRM, RoBERTaActor, RoBERTaCritic\n+from coati.models.roberta import RoBERTaActor, RoBERTaCritic, RoBERTaRM\n from coati.trainer import PPOTrainer\n from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy\n from coati.utils import prepare_llama_tokenizer_and_embedding\n@@ -143,6 +143,8 @@\n prompt_dataset = PromptDataset(tokenizer=tokenizer, data_path=args.prompt_path, max_datasets_size=16384)\n if dist.is_initialized() and dist.get_world_size() > 1:\n prompt_sampler = DistributedSampler(prompt_dataset, shuffle=True, seed=42, drop_last=True)\n+ else:\n+ prompt_sampler = None\n prompt_dataloader = DataLoader(prompt_dataset,\n shuffle=(prompt_sampler is None),\n sampler=prompt_sampler,\n@@ -151,6 +153,8 @@\n pretrain_dataset = SupervisedDataset(tokenizer=tokenizer, data_path=args.pretrain_dataset, max_datasets_size=16384)\n if dist.is_initialized() and dist.get_world_size() > 1:\n pretrain_sampler = DistributedSampler(pretrain_dataset, shuffle=True, seed=42, drop_last=True)\n+ else:\n+ pretrain_sampler = None\n pretrain_dataloader = DataLoader(pretrain_dataset,\n shuffle=(pretrain_sampler is None),\n sampler=pretrain_sampler,\n", "issue": "[BUG]: [chat] Unable to run train_prompts.sh by single card\n### \ud83d\udc1b Describe the bug\n\nMissing definition for prompt_sampler and pretrain_sampler in `examples/train_prompts.py` when dist.get_world_size() == 1.\n\n### Environment\n\n_No response_\n", "before_files": [{"content": "import argparse\n\nimport pandas as pd\nimport torch\nimport torch.distributed as dist\nfrom coati.dataset import DataCollatorForSupervisedDataset, PromptDataset, SupervisedDataset\nfrom coati.models.bloom import BLOOMRM, BLOOMActor, BLOOMCritic\nfrom coati.models.gpt import GPTRM, GPTActor, GPTCritic\nfrom coati.models.llama import LlamaActor, LlamaCritic, LlamaRM\nfrom coati.models.opt import OPTRM, OPTActor, OPTCritic\nfrom coati.models.roberta import RoBERTaRM, RoBERTaActor, RoBERTaCritic\nfrom coati.trainer import PPOTrainer\nfrom coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy\nfrom coati.utils import prepare_llama_tokenizer_and_embedding\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\nfrom transformers import AutoTokenizer, BloomTokenizerFast, GPT2Tokenizer, LlamaTokenizer, RobertaTokenizer\n\nfrom colossalai.nn.optimizer import HybridAdam\n\n\ndef main(args):\n # configure strategy\n if args.strategy == 'naive':\n strategy = NaiveStrategy()\n elif args.strategy == 'ddp':\n strategy = DDPStrategy()\n elif args.strategy == 'colossalai_gemini':\n strategy = ColossalAIStrategy(stage=3, placement_policy='cuda', initial_scale=2**5)\n elif args.strategy == 'colossalai_zero2':\n strategy = ColossalAIStrategy(stage=2, placement_policy='cuda')\n else:\n raise ValueError(f'Unsupported strategy \"{args.strategy}\"')\n\n if args.rm_path is not None:\n state_dict = torch.load(args.rm_path, map_location='cpu')\n\n # configure model\n if args.model == 'gpt2':\n initial_model = 
GPTActor(pretrained=args.pretrain)\n elif args.model == 'bloom':\n initial_model = BLOOMActor(pretrained=args.pretrain)\n elif args.model == 'opt':\n initial_model = OPTActor(pretrained=args.pretrain)\n elif args.model == 'llama':\n initial_model = LlamaActor(pretrained=args.pretrain)\n elif args.model == 'roberta':\n initial_model = RoBERTaActor(pretrained=args.pretrain)\n else:\n raise ValueError(f'Unsupported actor model \"{args.model}\"')\n\n if args.rm_model == None:\n rm_model_name = args.model\n else:\n rm_model_name = args.rm_model\n\n if rm_model_name == 'gpt2':\n reward_model = GPTRM(pretrained=args.rm_pretrain)\n elif rm_model_name == 'bloom':\n reward_model = BLOOMRM(pretrained=args.rm_pretrain)\n elif rm_model_name == 'opt':\n reward_model = OPTRM(pretrained=args.rm_pretrain)\n elif rm_model_name == 'llama':\n reward_model = LlamaRM(pretrained=args.rm_pretrain)\n elif rm_model_name == 'roberta':\n reward_model = RoBERTaRM(pretrained=args.rm_pretrain)\n else:\n raise ValueError(f'Unsupported reward model \"{rm_model_name}\"')\n\n if args.rm_path is not None:\n reward_model.load_state_dict(state_dict)\n\n if args.strategy != 'colossalai_gemini':\n initial_model.to(torch.float16).to(torch.cuda.current_device())\n reward_model.to(torch.float16).to(torch.cuda.current_device())\n\n with strategy.model_init_context():\n if args.model == 'gpt2':\n actor = GPTActor(pretrained=args.pretrain, lora_rank=args.lora_rank)\n elif args.model == 'bloom':\n actor = BLOOMActor(pretrained=args.pretrain, lora_rank=args.lora_rank)\n elif args.model == 'opt':\n actor = OPTActor(pretrained=args.pretrain, lora_rank=args.lora_rank)\n elif args.model == 'llama':\n actor = LlamaActor(pretrained=args.pretrain, lora_rank=args.lora_rank)\n elif args.model == 'roberta':\n actor = RoBERTaActor(pretrained=args.pretrain, lora_rank=args.lora_rank)\n else:\n raise ValueError(f'Unsupported actor model \"{args.model}\"')\n\n if rm_model_name == 'gpt2':\n critic = GPTCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)\n elif rm_model_name == 'bloom':\n critic = BLOOMCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)\n elif rm_model_name == 'opt':\n critic = OPTCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)\n elif rm_model_name == 'llama':\n critic = LlamaCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)\n elif rm_model_name == 'roberta':\n critic = RoBERTaCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)\n else:\n raise ValueError(f'Unsupported reward model \"{rm_model_name}\"')\n\n if args.rm_path is not None:\n critic.load_state_dict(state_dict)\n del state_dict\n\n if args.strategy != 'colossalai_gemini':\n critic.to(torch.float16).to(torch.cuda.current_device())\n actor.to(torch.float16).to(torch.cuda.current_device())\n\n # configure optimizer\n if args.strategy.startswith('colossalai'):\n actor_optim = HybridAdam(actor.parameters(), lr=1e-7)\n critic_optim = HybridAdam(critic.parameters(), lr=1e-7)\n else:\n actor_optim = Adam(actor.parameters(), lr=1e-7)\n critic_optim = Adam(critic.parameters(), lr=1e-7)\n\n # configure tokenizer\n if args.model == 'gpt2':\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n elif args.model == 'bloom':\n tokenizer = BloomTokenizerFast.from_pretrained('bigscience/bloom-560m')\n elif args.model == 'opt':\n tokenizer = AutoTokenizer.from_pretrained(\"facebook/opt-350m\")\n elif args.model == 'llama':\n 
tokenizer = LlamaTokenizer.from_pretrained(args.pretrain)\n tokenizer.eos_token = '<\\s>'\n elif args.model == 'roberta':\n tokenizer = RobertaTokenizer.from_pretrained(\"roberta-base\")\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n\n if args.model == 'llama':\n tokenizer = prepare_llama_tokenizer_and_embedding(tokenizer, actor)\n else:\n tokenizer.pad_token = tokenizer.eos_token\n\n data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)\n\n prompt_dataset = PromptDataset(tokenizer=tokenizer, data_path=args.prompt_path, max_datasets_size=16384)\n if dist.is_initialized() and dist.get_world_size() > 1:\n prompt_sampler = DistributedSampler(prompt_dataset, shuffle=True, seed=42, drop_last=True)\n prompt_dataloader = DataLoader(prompt_dataset,\n shuffle=(prompt_sampler is None),\n sampler=prompt_sampler,\n batch_size=args.train_batch_size)\n\n pretrain_dataset = SupervisedDataset(tokenizer=tokenizer, data_path=args.pretrain_dataset, max_datasets_size=16384)\n if dist.is_initialized() and dist.get_world_size() > 1:\n pretrain_sampler = DistributedSampler(pretrain_dataset, shuffle=True, seed=42, drop_last=True)\n pretrain_dataloader = DataLoader(pretrain_dataset,\n shuffle=(pretrain_sampler is None),\n sampler=pretrain_sampler,\n batch_size=args.ptx_batch_size,\n collate_fn=data_collator)\n\n def tokenize_fn(texts):\n # MUST padding to max length to ensure inputs of all ranks have the same length\n # Different length may lead to hang when using gemini, as different generation steps\n batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True)\n return {k: v.to(torch.cuda.current_device()) for k, v in batch.items()}\n\n (actor, actor_optim), (critic, critic_optim) = strategy.prepare((actor, actor_optim), (critic, critic_optim))\n\n # configure trainer\n trainer = PPOTrainer(\n strategy,\n actor,\n critic,\n reward_model,\n initial_model,\n actor_optim,\n critic_optim,\n kl_coef=args.kl_coef,\n ptx_coef=args.ptx_coef,\n max_epochs=args.max_epochs,\n train_batch_size=args.train_batch_size,\n experience_batch_size=args.experience_batch_size,\n tokenizer=tokenize_fn,\n max_length=128,\n do_sample=True,\n temperature=1.0,\n top_k=50,\n pad_token_id=tokenizer.pad_token_id,\n eos_token_id=tokenizer.eos_token_id,\n )\n\n trainer.fit(prompt_dataloader=prompt_dataloader,\n pretrain_dataloader=pretrain_dataloader,\n num_episodes=args.num_episodes,\n max_timesteps=args.max_timesteps,\n update_timesteps=args.update_timesteps)\n\n # save model checkpoint after fitting\n trainer.save_model(args.save_path, only_rank0=True, tokenizer=tokenizer)\n # save optimizer checkpoint on all ranks\n if args.need_optim_ckpt:\n strategy.save_optimizer(actor_optim,\n 'actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()),\n only_rank0=False)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--prompt_path', type=str, default=None, help='path to the prompt dataset')\n parser.add_argument('--pretrain_dataset', type=str, default=None, help='path to the pretrained dataset')\n parser.add_argument('--strategy',\n choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'],\n default='naive',\n help='strategy to use')\n parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama', 'roberta'])\n parser.add_argument('--pretrain', type=str, default=None)\n parser.add_argument('--rm_model', default=None, choices=['gpt2', 'bloom', 'opt', 'llama', 'roberta'])\n 
parser.add_argument('--rm_path', type=str, default=None)\n parser.add_argument('--rm_pretrain', type=str, default=None)\n parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts')\n parser.add_argument('--need_optim_ckpt', type=bool, default=False)\n parser.add_argument('--num_episodes', type=int, default=10)\n parser.add_argument('--max_timesteps', type=int, default=10)\n parser.add_argument('--update_timesteps', type=int, default=10)\n parser.add_argument('--max_epochs', type=int, default=5)\n parser.add_argument('--train_batch_size', type=int, default=8)\n parser.add_argument('--ptx_batch_size', type=int, default=1)\n parser.add_argument('--experience_batch_size', type=int, default=8)\n parser.add_argument('--lora_rank', type=int, default=0, help=\"low-rank adaptation matrices rank\")\n parser.add_argument('--kl_coef', type=float, default=0.1)\n parser.add_argument('--ptx_coef', type=float, default=0.9)\n args = parser.parse_args()\n main(args)\n", "path": "applications/Chat/examples/train_prompts.py"}], "after_files": [{"content": "import argparse\n\nimport pandas as pd\nimport torch\nimport torch.distributed as dist\nfrom coati.dataset import DataCollatorForSupervisedDataset, PromptDataset, SupervisedDataset\nfrom coati.models.bloom import BLOOMRM, BLOOMActor, BLOOMCritic\nfrom coati.models.gpt import GPTRM, GPTActor, GPTCritic\nfrom coati.models.llama import LlamaActor, LlamaCritic, LlamaRM\nfrom coati.models.opt import OPTRM, OPTActor, OPTCritic\nfrom coati.models.roberta import RoBERTaActor, RoBERTaCritic, RoBERTaRM\nfrom coati.trainer import PPOTrainer\nfrom coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy\nfrom coati.utils import prepare_llama_tokenizer_and_embedding\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\nfrom transformers import AutoTokenizer, BloomTokenizerFast, GPT2Tokenizer, LlamaTokenizer, RobertaTokenizer\n\nfrom colossalai.nn.optimizer import HybridAdam\n\n\ndef main(args):\n # configure strategy\n if args.strategy == 'naive':\n strategy = NaiveStrategy()\n elif args.strategy == 'ddp':\n strategy = DDPStrategy()\n elif args.strategy == 'colossalai_gemini':\n strategy = ColossalAIStrategy(stage=3, placement_policy='cuda', initial_scale=2**5)\n elif args.strategy == 'colossalai_zero2':\n strategy = ColossalAIStrategy(stage=2, placement_policy='cuda')\n else:\n raise ValueError(f'Unsupported strategy \"{args.strategy}\"')\n\n if args.rm_path is not None:\n state_dict = torch.load(args.rm_path, map_location='cpu')\n\n # configure model\n if args.model == 'gpt2':\n initial_model = GPTActor(pretrained=args.pretrain)\n elif args.model == 'bloom':\n initial_model = BLOOMActor(pretrained=args.pretrain)\n elif args.model == 'opt':\n initial_model = OPTActor(pretrained=args.pretrain)\n elif args.model == 'llama':\n initial_model = LlamaActor(pretrained=args.pretrain)\n elif args.model == 'roberta':\n initial_model = RoBERTaActor(pretrained=args.pretrain)\n else:\n raise ValueError(f'Unsupported actor model \"{args.model}\"')\n\n if args.rm_model == None:\n rm_model_name = args.model\n else:\n rm_model_name = args.rm_model\n\n if rm_model_name == 'gpt2':\n reward_model = GPTRM(pretrained=args.rm_pretrain)\n elif rm_model_name == 'bloom':\n reward_model = BLOOMRM(pretrained=args.rm_pretrain)\n elif rm_model_name == 'opt':\n reward_model = OPTRM(pretrained=args.rm_pretrain)\n elif rm_model_name == 'llama':\n reward_model = 
LlamaRM(pretrained=args.rm_pretrain)\n elif rm_model_name == 'roberta':\n reward_model = RoBERTaRM(pretrained=args.rm_pretrain)\n else:\n raise ValueError(f'Unsupported reward model \"{rm_model_name}\"')\n\n if args.rm_path is not None:\n reward_model.load_state_dict(state_dict)\n\n if args.strategy != 'colossalai_gemini':\n initial_model.to(torch.float16).to(torch.cuda.current_device())\n reward_model.to(torch.float16).to(torch.cuda.current_device())\n\n with strategy.model_init_context():\n if args.model == 'gpt2':\n actor = GPTActor(pretrained=args.pretrain, lora_rank=args.lora_rank)\n elif args.model == 'bloom':\n actor = BLOOMActor(pretrained=args.pretrain, lora_rank=args.lora_rank)\n elif args.model == 'opt':\n actor = OPTActor(pretrained=args.pretrain, lora_rank=args.lora_rank)\n elif args.model == 'llama':\n actor = LlamaActor(pretrained=args.pretrain, lora_rank=args.lora_rank)\n elif args.model == 'roberta':\n actor = RoBERTaActor(pretrained=args.pretrain, lora_rank=args.lora_rank)\n else:\n raise ValueError(f'Unsupported actor model \"{args.model}\"')\n\n if rm_model_name == 'gpt2':\n critic = GPTCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)\n elif rm_model_name == 'bloom':\n critic = BLOOMCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)\n elif rm_model_name == 'opt':\n critic = OPTCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)\n elif rm_model_name == 'llama':\n critic = LlamaCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)\n elif rm_model_name == 'roberta':\n critic = RoBERTaCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)\n else:\n raise ValueError(f'Unsupported reward model \"{rm_model_name}\"')\n\n if args.rm_path is not None:\n critic.load_state_dict(state_dict)\n del state_dict\n\n if args.strategy != 'colossalai_gemini':\n critic.to(torch.float16).to(torch.cuda.current_device())\n actor.to(torch.float16).to(torch.cuda.current_device())\n\n # configure optimizer\n if args.strategy.startswith('colossalai'):\n actor_optim = HybridAdam(actor.parameters(), lr=1e-7)\n critic_optim = HybridAdam(critic.parameters(), lr=1e-7)\n else:\n actor_optim = Adam(actor.parameters(), lr=1e-7)\n critic_optim = Adam(critic.parameters(), lr=1e-7)\n\n # configure tokenizer\n if args.model == 'gpt2':\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n elif args.model == 'bloom':\n tokenizer = BloomTokenizerFast.from_pretrained('bigscience/bloom-560m')\n elif args.model == 'opt':\n tokenizer = AutoTokenizer.from_pretrained(\"facebook/opt-350m\")\n elif args.model == 'llama':\n tokenizer = LlamaTokenizer.from_pretrained(args.pretrain)\n tokenizer.eos_token = '<\\s>'\n elif args.model == 'roberta':\n tokenizer = RobertaTokenizer.from_pretrained(\"roberta-base\")\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n\n if args.model == 'llama':\n tokenizer = prepare_llama_tokenizer_and_embedding(tokenizer, actor)\n else:\n tokenizer.pad_token = tokenizer.eos_token\n\n data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)\n\n prompt_dataset = PromptDataset(tokenizer=tokenizer, data_path=args.prompt_path, max_datasets_size=16384)\n if dist.is_initialized() and dist.get_world_size() > 1:\n prompt_sampler = DistributedSampler(prompt_dataset, shuffle=True, seed=42, drop_last=True)\n else:\n prompt_sampler = None\n prompt_dataloader = DataLoader(prompt_dataset,\n shuffle=(prompt_sampler is None),\n 
sampler=prompt_sampler,\n batch_size=args.train_batch_size)\n\n pretrain_dataset = SupervisedDataset(tokenizer=tokenizer, data_path=args.pretrain_dataset, max_datasets_size=16384)\n if dist.is_initialized() and dist.get_world_size() > 1:\n pretrain_sampler = DistributedSampler(pretrain_dataset, shuffle=True, seed=42, drop_last=True)\n else:\n pretrain_sampler = None\n pretrain_dataloader = DataLoader(pretrain_dataset,\n shuffle=(pretrain_sampler is None),\n sampler=pretrain_sampler,\n batch_size=args.ptx_batch_size,\n collate_fn=data_collator)\n\n def tokenize_fn(texts):\n # MUST padding to max length to ensure inputs of all ranks have the same length\n # Different length may lead to hang when using gemini, as different generation steps\n batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True)\n return {k: v.to(torch.cuda.current_device()) for k, v in batch.items()}\n\n (actor, actor_optim), (critic, critic_optim) = strategy.prepare((actor, actor_optim), (critic, critic_optim))\n\n # configure trainer\n trainer = PPOTrainer(\n strategy,\n actor,\n critic,\n reward_model,\n initial_model,\n actor_optim,\n critic_optim,\n kl_coef=args.kl_coef,\n ptx_coef=args.ptx_coef,\n max_epochs=args.max_epochs,\n train_batch_size=args.train_batch_size,\n experience_batch_size=args.experience_batch_size,\n tokenizer=tokenize_fn,\n max_length=128,\n do_sample=True,\n temperature=1.0,\n top_k=50,\n pad_token_id=tokenizer.pad_token_id,\n eos_token_id=tokenizer.eos_token_id,\n )\n\n trainer.fit(prompt_dataloader=prompt_dataloader,\n pretrain_dataloader=pretrain_dataloader,\n num_episodes=args.num_episodes,\n max_timesteps=args.max_timesteps,\n update_timesteps=args.update_timesteps)\n\n # save model checkpoint after fitting\n trainer.save_model(args.save_path, only_rank0=True, tokenizer=tokenizer)\n # save optimizer checkpoint on all ranks\n if args.need_optim_ckpt:\n strategy.save_optimizer(actor_optim,\n 'actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()),\n only_rank0=False)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--prompt_path', type=str, default=None, help='path to the prompt dataset')\n parser.add_argument('--pretrain_dataset', type=str, default=None, help='path to the pretrained dataset')\n parser.add_argument('--strategy',\n choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'],\n default='naive',\n help='strategy to use')\n parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama', 'roberta'])\n parser.add_argument('--pretrain', type=str, default=None)\n parser.add_argument('--rm_model', default=None, choices=['gpt2', 'bloom', 'opt', 'llama', 'roberta'])\n parser.add_argument('--rm_path', type=str, default=None)\n parser.add_argument('--rm_pretrain', type=str, default=None)\n parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts')\n parser.add_argument('--need_optim_ckpt', type=bool, default=False)\n parser.add_argument('--num_episodes', type=int, default=10)\n parser.add_argument('--max_timesteps', type=int, default=10)\n parser.add_argument('--update_timesteps', type=int, default=10)\n parser.add_argument('--max_epochs', type=int, default=5)\n parser.add_argument('--train_batch_size', type=int, default=8)\n parser.add_argument('--ptx_batch_size', type=int, default=1)\n parser.add_argument('--experience_batch_size', type=int, default=8)\n parser.add_argument('--lora_rank', type=int, default=0, help=\"low-rank adaptation 
matrices rank\")\n parser.add_argument('--kl_coef', type=float, default=0.1)\n parser.add_argument('--ptx_coef', type=float, default=0.9)\n args = parser.parse_args()\n main(args)\n", "path": "applications/Chat/examples/train_prompts.py"}]} | 3,452 | 445 |
gh_patches_debug_24583 | rasdani/github-patches | git_diff | fal-ai__dbt-fal-490 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stop trying to run sources in dbt runner
<!-- *** Make sure you have searched for an existing bug report for this issue *** -->
**Describe the bug**
fal takes sources as valid nodes to run in dbt and tries to run them.
It mostly ends up in a no-op operation, but if the source shares a name with a model it can run the wrong model.
**Expected behavior**
fal should not try to run sources in dbt.
**Screenshots**

--- END ISSUE ---
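Put differently, whatever the selectors resolve to should be restricted to nodes fal can actually hand to dbt (models and scripts), so that source nodes never reach the runner. A rough sketch of that filter, reusing names from `src/fal/cli/selectors.py` and assuming `NodeGraph` exposes a `node_lookup` mapping of runnable nodes (an illustration, not the exact patch):

```python
ids_to_execute = [i for i in ids_to_execute if i not in ids_to_exclude]
# drop anything that is not a known runnable node (e.g. dbt sources)
ids_to_execute = [i for i in ids_to_execute if i in nodeGraph.node_lookup]
return cls(list(set(ids_to_execute)), fal_dbt.project_name)
```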
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/fal/cli/selectors.py`
Content:
```
1 import itertools
2 import re
3 from dataclasses import dataclass
4 from typing import List, Optional, Union, Iterator
5 from fal.node_graph import NodeGraph
6 from faldbt.project import CompileArgs, FalDbt
7 from dbt.task.compile import CompileTask
8 from enum import Enum
9 from functools import reduce
10 import networkx as nx
11 from dbt.logger import GLOBAL_LOGGER as logger
12
13
14 class ExecutionPlan:
15 """
16 Represents a fal flow excution
17 """
18
19 before_scripts: List[str]
20 dbt_models: List[str]
21 after_scripts: List[str]
22 project_name: str
23
24 def __init__(self, unique_ids: List[str], project_name):
25 self.before_scripts = []
26 self.dbt_models = []
27 self.after_scripts = []
28 self.project_name = project_name
29 for id in unique_ids:
30 if _is_before_script(id):
31 self.before_scripts.append(id)
32 elif _is_after_script(id):
33 self.after_scripts.append(id)
34 else:
35 self.dbt_models.append(id)
36
37 @property
38 def nodes(self) -> List[str]:
39 return self.before_scripts + self.after_scripts + self.dbt_models
40
41 @classmethod
42 def create_plan_from_graph(cls, parsed, nodeGraph: NodeGraph, fal_dbt: FalDbt):
43 """
44 Creates and ExecutionPlan from the cli arguments
45 """
46 unique_ids = list(nodeGraph.graph.nodes.keys())
47 ids_to_execute = []
48
49 ids_to_exclude = []
50
51 if parsed.select:
52 ids_to_execute = _filter_node_ids(
53 unique_ids, fal_dbt, list(parsed.select), nodeGraph
54 )
55
56 else:
57 ids_to_execute.extend(unique_ids)
58
59 if "exclude" in parsed and parsed.exclude:
60 ids_to_exclude = _filter_node_ids(
61 unique_ids, fal_dbt, list(parsed.exclude), nodeGraph
62 )
63
64 ids_to_execute = [i for i in ids_to_execute if i not in ids_to_exclude]
65 return cls(list(set(ids_to_execute)), fal_dbt.project_name)
66
67
68 @dataclass
69 class SelectionUnion:
70 components: List[str]
71
72
73 @dataclass
74 class SelectionIntersection:
75 components: List[str]
76
77
78 def parse_union(
79 components: List[str],
80 ) -> SelectionUnion:
81 # Based on the original implemention at dbt-core.
82
83 # turn ['a b', 'c'] -> ['a', 'b', 'c']
84 raw_specs = itertools.chain.from_iterable(r.split(OP_SET_UNION) for r in components)
85 union_components = []
86
87 # ['a', 'b', 'c,d'] -> union('a', 'b', intersection('c', 'd'))
88 for raw_spec in raw_specs:
89 union_components.append(
90 SelectionIntersection(
91 raw_spec.split(OP_SET_INTERSECTION),
92 )
93 )
94 return SelectionUnion(
95 union_components,
96 )
97
98
99 def _filter_node_ids(
100 unique_ids: List[str],
101 fal_dbt: FalDbt,
102 selectors: List[str],
103 nodeGraph: NodeGraph,
104 ) -> List[str]:
105 """Filter list of unique_ids according to a selector."""
106 output = set()
107
108 union = parse_union(selectors)
109 for intersection in union.components:
110 try:
111 plan_outputs = [
112 set(SelectorPlan(selector, unique_ids, fal_dbt).execute(nodeGraph))
113 for selector in intersection.components
114 if selector
115 ]
116 except nx.NetworkXError:
117 # When the user selects a non-existent node, don't fail immediately
118 # but rather just continue processing the rest of selectors.
119 plan_outputs = []
120
121 if plan_outputs:
122 output |= set.intersection(*plan_outputs)
123
124 return list(output)
125
126
127 def _get_children_with_parents(node_id: str, nodeGraph: NodeGraph) -> List[str]:
128 children = nodeGraph.get_descendants(node_id)
129 output = reduce(lambda l, ch: l + nodeGraph.get_ancestors(ch), children, children)
130
131 output = list(set(output))
132
133 return output
134
135
136 def _expand_script(script_name: str, unique_ids: List[str]) -> List[str]:
137 """
138 Expands the selected script name to unique id format.
139 for example [scripta.py] to [script.modelB.AFTER.scripta.py, script.modelA.BEFORE.scripta.py]
140 """
141
142 def contains_script_name(id: str):
143 return script_name in id
144
145 return list(filter(contains_script_name, unique_ids))
146
147
148 class SelectType(Enum):
149 MODEL = 1
150 SCRIPT = 2
151 COMPLEX = 3
152
153
154 @dataclass(init=False)
155 class SelectorPlan:
156 """
157 Represents a single selector, for example in the command
158
159 fal flow run --select script.py+
160
161 script.py+ is the SelectorPlan with needs_children attribute set to true
162 """
163
164 unique_ids: List[str]
165 children: bool
166 children_levels: Optional[int]
167 children_with_parents: bool
168 parents: bool
169 parents_levels: Optional[int]
170 type: SelectType
171 raw: str
172
173 def __init__(self, selector: str, unique_ids: List[str], fal_dbt: FalDbt):
174 self.raw = selector
175 self.children_with_parents = OP_CHILDREN_WITH_PARENTS.match(selector)
176 selector = OP_CHILDREN_WITH_PARENTS.rest(selector)
177
178 self.parents = OP_PARENTS.match(selector)
179 self.parents_levels = OP_PARENTS.depth(selector)
180 selector = OP_PARENTS.rest(selector)
181
182 self.children = OP_CHILDREN.match(selector)
183 self.children_levels = OP_CHILDREN.depth(selector)
184 selector = OP_CHILDREN.rest(selector)
185
186 self.type = _to_select_type(selector)
187
188 if self.type == SelectType.MODEL:
189 self.unique_ids = [f"model.{fal_dbt.project_name}.{selector}"]
190 elif self.type == SelectType.SCRIPT:
191 self.unique_ids = _expand_script(selector, unique_ids)
192 elif self.type == SelectType.COMPLEX:
193 self.unique_ids = unique_ids_from_complex_selector(selector, fal_dbt)
194
195 def __post_init__(self):
196 if self.children and self.children_with_parents:
197 raise RuntimeError(
198 f'Invalid node spec {self.raw} - "@" prefix and "+" suffix are incompatible'
199 )
200
201 def execute(self, nodeGraph: NodeGraph) -> Iterator[str]:
202 for id in self.unique_ids:
203 yield id
204
205 if self.children:
206 if self.children_levels is None:
207 children = nodeGraph.get_descendants(id)
208 else:
209 children = nodeGraph.get_successors(id, self.children_levels)
210 yield from children
211
212 if self.parents:
213 if self.parents_levels is None:
214 parents = nodeGraph.get_ancestors(id)
215 else:
216 parents = nodeGraph.get_predecessors(id, self.parents_levels)
217 yield from parents
218
219 if self.children_with_parents:
220 ids = _get_children_with_parents(id, nodeGraph)
221 yield from ids
222
223
224 def unique_ids_from_complex_selector(select, fal_dbt: FalDbt) -> List[str]:
225 args = CompileArgs(None, [select], [select], tuple(), fal_dbt._state, None)
226 compile_task = CompileTask(args, fal_dbt._config)
227 compile_task._runtime_initialize()
228 spec = compile_task.get_selection_spec()
229 graph = compile_task.get_node_selector().get_graph_queue(spec)
230 return list(graph.queued)
231
232
233 def _to_select_type(selector: str) -> SelectType:
234 if ":" in selector:
235 return SelectType.COMPLEX
236 else:
237 if _is_script_node(selector):
238 return SelectType.SCRIPT
239 else:
240 return SelectType.MODEL
241
242
243 def _is_script_node(node_name: str) -> bool:
244 return node_name.endswith(".py") or node_name.endswith(".ipynb")
245
246
247 class SelectorGraphOp:
248 _regex: re.Pattern
249
250 def __init__(self, regex: re.Pattern):
251 self._regex = regex
252 assert (
253 "rest" in regex.groupindex
254 ), 'rest must be in regex. Use `re.compile("something(?P<rest>.*)")`'
255
256 def _select(self, selector: str, group: Union[str, int]) -> Optional[str]:
257 match = self._regex.match(selector)
258 if match:
259 return match.group(group)
260
261 def match(self, selector: str) -> bool:
262 return self._select(selector, 0) is not None
263
264 def rest(self, selector: str) -> str:
265 rest = self._select(selector, "rest")
266 if rest is not None:
267 return rest
268 return selector
269
270
271 class SelectorGraphOpDepth(SelectorGraphOp):
272 def depth(self, selector: str) -> Optional[int]:
273 depth = self._select(selector, "depth")
274 if depth:
275 return int(depth)
276
277
278 # Graph operators from their regex Patterns
279 OP_CHILDREN_WITH_PARENTS = SelectorGraphOp(re.compile("^\\@(?P<rest>.*)"))
280 OP_PARENTS = SelectorGraphOpDepth(re.compile("^(?P<depth>\\d*)\\+(?P<rest>.*)"))
281 OP_CHILDREN = SelectorGraphOpDepth(re.compile("(?P<rest>.*)\\+(?P<depth>\\d*)$"))
282
283 # Logic based set operators
284 OP_SET_UNION = " "
285 OP_SET_INTERSECTION = ","
286
287 IS_BEFORE_SCRIPT_REGEX = re.compile("^script\\..+\\.BEFORE\\..+\\.(py|ipynb)$")
288 IS_AFTER_SCRIPT_REGEX = re.compile("^script\\..+\\.AFTER\\..+\\.(py|ipynb)")
289
290
291 def _is_before_script(id: str) -> bool:
292 return bool(IS_BEFORE_SCRIPT_REGEX.match(id))
293
294
295 def _is_after_script(id: str) -> bool:
296 return bool(IS_AFTER_SCRIPT_REGEX.match(id))
297
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/fal/cli/selectors.py b/src/fal/cli/selectors.py
--- a/src/fal/cli/selectors.py
+++ b/src/fal/cli/selectors.py
@@ -44,24 +44,23 @@
Creates and ExecutionPlan from the cli arguments
"""
unique_ids = list(nodeGraph.graph.nodes.keys())
- ids_to_execute = []
-
- ids_to_exclude = []
+ ids_to_execute = unique_ids
if parsed.select:
ids_to_execute = _filter_node_ids(
unique_ids, fal_dbt, list(parsed.select), nodeGraph
)
- else:
- ids_to_execute.extend(unique_ids)
-
+ ids_to_exclude = []
if "exclude" in parsed and parsed.exclude:
ids_to_exclude = _filter_node_ids(
unique_ids, fal_dbt, list(parsed.exclude), nodeGraph
)
ids_to_execute = [i for i in ids_to_execute if i not in ids_to_exclude]
+
+ # Remove non-model nodes (sources, maybe more?) by making sure they are in the node_lookup dict
+ ids_to_execute = [i for i in ids_to_execute if i in nodeGraph.node_lookup]
return cls(list(set(ids_to_execute)), fal_dbt.project_name)
| {"golden_diff": "diff --git a/src/fal/cli/selectors.py b/src/fal/cli/selectors.py\n--- a/src/fal/cli/selectors.py\n+++ b/src/fal/cli/selectors.py\n@@ -44,24 +44,23 @@\n Creates and ExecutionPlan from the cli arguments\n \"\"\"\n unique_ids = list(nodeGraph.graph.nodes.keys())\n- ids_to_execute = []\n-\n- ids_to_exclude = []\n \n+ ids_to_execute = unique_ids\n if parsed.select:\n ids_to_execute = _filter_node_ids(\n unique_ids, fal_dbt, list(parsed.select), nodeGraph\n )\n \n- else:\n- ids_to_execute.extend(unique_ids)\n-\n+ ids_to_exclude = []\n if \"exclude\" in parsed and parsed.exclude:\n ids_to_exclude = _filter_node_ids(\n unique_ids, fal_dbt, list(parsed.exclude), nodeGraph\n )\n \n ids_to_execute = [i for i in ids_to_execute if i not in ids_to_exclude]\n+\n+ # Remove non-model nodes (sources, maybe more?) by making sure they are in the node_lookup dict\n+ ids_to_execute = [i for i in ids_to_execute if i in nodeGraph.node_lookup]\n return cls(list(set(ids_to_execute)), fal_dbt.project_name)\n", "issue": "Stop trying to run sources in dbt runner\n<!-- *** Make sure you have searched for an existing bug report for this issue *** -->\r\n\r\n**Describe the bug**\r\nfal takes sources as valid nodes to run in dbt and tries to run them.\r\nIt mostly ends up in a no-op operation, but if the source shares name with a model it can run the wrong model.\r\n\r\n**Expected behavior**\r\nfal should not try to run sources in dbt.\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import itertools\nimport re\nfrom dataclasses import dataclass\nfrom typing import List, Optional, Union, Iterator\nfrom fal.node_graph import NodeGraph\nfrom faldbt.project import CompileArgs, FalDbt\nfrom dbt.task.compile import CompileTask\nfrom enum import Enum\nfrom functools import reduce\nimport networkx as nx\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\n\nclass ExecutionPlan:\n \"\"\"\n Represents a fal flow excution\n \"\"\"\n\n before_scripts: List[str]\n dbt_models: List[str]\n after_scripts: List[str]\n project_name: str\n\n def __init__(self, unique_ids: List[str], project_name):\n self.before_scripts = []\n self.dbt_models = []\n self.after_scripts = []\n self.project_name = project_name\n for id in unique_ids:\n if _is_before_script(id):\n self.before_scripts.append(id)\n elif _is_after_script(id):\n self.after_scripts.append(id)\n else:\n self.dbt_models.append(id)\n\n @property\n def nodes(self) -> List[str]:\n return self.before_scripts + self.after_scripts + self.dbt_models\n\n @classmethod\n def create_plan_from_graph(cls, parsed, nodeGraph: NodeGraph, fal_dbt: FalDbt):\n \"\"\"\n Creates and ExecutionPlan from the cli arguments\n \"\"\"\n unique_ids = list(nodeGraph.graph.nodes.keys())\n ids_to_execute = []\n\n ids_to_exclude = []\n\n if parsed.select:\n ids_to_execute = _filter_node_ids(\n unique_ids, fal_dbt, list(parsed.select), nodeGraph\n )\n\n else:\n ids_to_execute.extend(unique_ids)\n\n if \"exclude\" in parsed and parsed.exclude:\n ids_to_exclude = _filter_node_ids(\n unique_ids, fal_dbt, list(parsed.exclude), nodeGraph\n )\n\n ids_to_execute = [i for i in ids_to_execute if i not in ids_to_exclude]\n return cls(list(set(ids_to_execute)), fal_dbt.project_name)\n\n\n@dataclass\nclass SelectionUnion:\n components: List[str]\n\n\n@dataclass\nclass SelectionIntersection:\n components: List[str]\n\n\ndef parse_union(\n components: List[str],\n) -> SelectionUnion:\n # Based on the original implemention at dbt-core.\n\n # turn ['a b', 'c'] -> ['a', 'b', 'c']\n raw_specs = 
itertools.chain.from_iterable(r.split(OP_SET_UNION) for r in components)\n union_components = []\n\n # ['a', 'b', 'c,d'] -> union('a', 'b', intersection('c', 'd'))\n for raw_spec in raw_specs:\n union_components.append(\n SelectionIntersection(\n raw_spec.split(OP_SET_INTERSECTION),\n )\n )\n return SelectionUnion(\n union_components,\n )\n\n\ndef _filter_node_ids(\n unique_ids: List[str],\n fal_dbt: FalDbt,\n selectors: List[str],\n nodeGraph: NodeGraph,\n) -> List[str]:\n \"\"\"Filter list of unique_ids according to a selector.\"\"\"\n output = set()\n\n union = parse_union(selectors)\n for intersection in union.components:\n try:\n plan_outputs = [\n set(SelectorPlan(selector, unique_ids, fal_dbt).execute(nodeGraph))\n for selector in intersection.components\n if selector\n ]\n except nx.NetworkXError:\n # When the user selects a non-existent node, don't fail immediately\n # but rather just continue processing the rest of selectors.\n plan_outputs = []\n\n if plan_outputs:\n output |= set.intersection(*plan_outputs)\n\n return list(output)\n\n\ndef _get_children_with_parents(node_id: str, nodeGraph: NodeGraph) -> List[str]:\n children = nodeGraph.get_descendants(node_id)\n output = reduce(lambda l, ch: l + nodeGraph.get_ancestors(ch), children, children)\n\n output = list(set(output))\n\n return output\n\n\ndef _expand_script(script_name: str, unique_ids: List[str]) -> List[str]:\n \"\"\"\n Expands the selected script name to unique id format.\n for example [scripta.py] to [script.modelB.AFTER.scripta.py, script.modelA.BEFORE.scripta.py]\n \"\"\"\n\n def contains_script_name(id: str):\n return script_name in id\n\n return list(filter(contains_script_name, unique_ids))\n\n\nclass SelectType(Enum):\n MODEL = 1\n SCRIPT = 2\n COMPLEX = 3\n\n\n@dataclass(init=False)\nclass SelectorPlan:\n \"\"\"\n Represents a single selector, for example in the command\n\n fal flow run --select script.py+\n\n script.py+ is the SelectorPlan with needs_children attribute set to true\n \"\"\"\n\n unique_ids: List[str]\n children: bool\n children_levels: Optional[int]\n children_with_parents: bool\n parents: bool\n parents_levels: Optional[int]\n type: SelectType\n raw: str\n\n def __init__(self, selector: str, unique_ids: List[str], fal_dbt: FalDbt):\n self.raw = selector\n self.children_with_parents = OP_CHILDREN_WITH_PARENTS.match(selector)\n selector = OP_CHILDREN_WITH_PARENTS.rest(selector)\n\n self.parents = OP_PARENTS.match(selector)\n self.parents_levels = OP_PARENTS.depth(selector)\n selector = OP_PARENTS.rest(selector)\n\n self.children = OP_CHILDREN.match(selector)\n self.children_levels = OP_CHILDREN.depth(selector)\n selector = OP_CHILDREN.rest(selector)\n\n self.type = _to_select_type(selector)\n\n if self.type == SelectType.MODEL:\n self.unique_ids = [f\"model.{fal_dbt.project_name}.{selector}\"]\n elif self.type == SelectType.SCRIPT:\n self.unique_ids = _expand_script(selector, unique_ids)\n elif self.type == SelectType.COMPLEX:\n self.unique_ids = unique_ids_from_complex_selector(selector, fal_dbt)\n\n def __post_init__(self):\n if self.children and self.children_with_parents:\n raise RuntimeError(\n f'Invalid node spec {self.raw} - \"@\" prefix and \"+\" suffix are incompatible'\n )\n\n def execute(self, nodeGraph: NodeGraph) -> Iterator[str]:\n for id in self.unique_ids:\n yield id\n\n if self.children:\n if self.children_levels is None:\n children = nodeGraph.get_descendants(id)\n else:\n children = nodeGraph.get_successors(id, self.children_levels)\n yield from children\n\n if 
self.parents:\n if self.parents_levels is None:\n parents = nodeGraph.get_ancestors(id)\n else:\n parents = nodeGraph.get_predecessors(id, self.parents_levels)\n yield from parents\n\n if self.children_with_parents:\n ids = _get_children_with_parents(id, nodeGraph)\n yield from ids\n\n\ndef unique_ids_from_complex_selector(select, fal_dbt: FalDbt) -> List[str]:\n args = CompileArgs(None, [select], [select], tuple(), fal_dbt._state, None)\n compile_task = CompileTask(args, fal_dbt._config)\n compile_task._runtime_initialize()\n spec = compile_task.get_selection_spec()\n graph = compile_task.get_node_selector().get_graph_queue(spec)\n return list(graph.queued)\n\n\ndef _to_select_type(selector: str) -> SelectType:\n if \":\" in selector:\n return SelectType.COMPLEX\n else:\n if _is_script_node(selector):\n return SelectType.SCRIPT\n else:\n return SelectType.MODEL\n\n\ndef _is_script_node(node_name: str) -> bool:\n return node_name.endswith(\".py\") or node_name.endswith(\".ipynb\")\n\n\nclass SelectorGraphOp:\n _regex: re.Pattern\n\n def __init__(self, regex: re.Pattern):\n self._regex = regex\n assert (\n \"rest\" in regex.groupindex\n ), 'rest must be in regex. Use `re.compile(\"something(?P<rest>.*)\")`'\n\n def _select(self, selector: str, group: Union[str, int]) -> Optional[str]:\n match = self._regex.match(selector)\n if match:\n return match.group(group)\n\n def match(self, selector: str) -> bool:\n return self._select(selector, 0) is not None\n\n def rest(self, selector: str) -> str:\n rest = self._select(selector, \"rest\")\n if rest is not None:\n return rest\n return selector\n\n\nclass SelectorGraphOpDepth(SelectorGraphOp):\n def depth(self, selector: str) -> Optional[int]:\n depth = self._select(selector, \"depth\")\n if depth:\n return int(depth)\n\n\n# Graph operators from their regex Patterns\nOP_CHILDREN_WITH_PARENTS = SelectorGraphOp(re.compile(\"^\\\\@(?P<rest>.*)\"))\nOP_PARENTS = SelectorGraphOpDepth(re.compile(\"^(?P<depth>\\\\d*)\\\\+(?P<rest>.*)\"))\nOP_CHILDREN = SelectorGraphOpDepth(re.compile(\"(?P<rest>.*)\\\\+(?P<depth>\\\\d*)$\"))\n\n# Logic based set operators\nOP_SET_UNION = \" \"\nOP_SET_INTERSECTION = \",\"\n\nIS_BEFORE_SCRIPT_REGEX = re.compile(\"^script\\\\..+\\\\.BEFORE\\\\..+\\\\.(py|ipynb)$\")\nIS_AFTER_SCRIPT_REGEX = re.compile(\"^script\\\\..+\\\\.AFTER\\\\..+\\\\.(py|ipynb)\")\n\n\ndef _is_before_script(id: str) -> bool:\n return bool(IS_BEFORE_SCRIPT_REGEX.match(id))\n\n\ndef _is_after_script(id: str) -> bool:\n return bool(IS_AFTER_SCRIPT_REGEX.match(id))\n", "path": "src/fal/cli/selectors.py"}], "after_files": [{"content": "import itertools\nimport re\nfrom dataclasses import dataclass\nfrom typing import List, Optional, Union, Iterator\nfrom fal.node_graph import NodeGraph\nfrom faldbt.project import CompileArgs, FalDbt\nfrom dbt.task.compile import CompileTask\nfrom enum import Enum\nfrom functools import reduce\nimport networkx as nx\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\n\nclass ExecutionPlan:\n \"\"\"\n Represents a fal flow excution\n \"\"\"\n\n before_scripts: List[str]\n dbt_models: List[str]\n after_scripts: List[str]\n project_name: str\n\n def __init__(self, unique_ids: List[str], project_name):\n self.before_scripts = []\n self.dbt_models = []\n self.after_scripts = []\n self.project_name = project_name\n for id in unique_ids:\n if _is_before_script(id):\n self.before_scripts.append(id)\n elif _is_after_script(id):\n self.after_scripts.append(id)\n else:\n self.dbt_models.append(id)\n\n @property\n def nodes(self) -> 
List[str]:\n return self.before_scripts + self.after_scripts + self.dbt_models\n\n @classmethod\n def create_plan_from_graph(cls, parsed, nodeGraph: NodeGraph, fal_dbt: FalDbt):\n \"\"\"\n Creates and ExecutionPlan from the cli arguments\n \"\"\"\n unique_ids = list(nodeGraph.graph.nodes.keys())\n\n ids_to_execute = unique_ids\n if parsed.select:\n ids_to_execute = _filter_node_ids(\n unique_ids, fal_dbt, list(parsed.select), nodeGraph\n )\n\n ids_to_exclude = []\n if \"exclude\" in parsed and parsed.exclude:\n ids_to_exclude = _filter_node_ids(\n unique_ids, fal_dbt, list(parsed.exclude), nodeGraph\n )\n\n ids_to_execute = [i for i in ids_to_execute if i not in ids_to_exclude]\n\n # Remove non-model nodes (sources, maybe more?) by making sure they are in the node_lookup dict\n ids_to_execute = [i for i in ids_to_execute if i in nodeGraph.node_lookup]\n return cls(list(set(ids_to_execute)), fal_dbt.project_name)\n\n\n@dataclass\nclass SelectionUnion:\n components: List[str]\n\n\n@dataclass\nclass SelectionIntersection:\n components: List[str]\n\n\ndef parse_union(\n components: List[str],\n) -> SelectionUnion:\n # Based on the original implemention at dbt-core.\n\n # turn ['a b', 'c'] -> ['a', 'b', 'c']\n raw_specs = itertools.chain.from_iterable(r.split(OP_SET_UNION) for r in components)\n union_components = []\n\n # ['a', 'b', 'c,d'] -> union('a', 'b', intersection('c', 'd'))\n for raw_spec in raw_specs:\n union_components.append(\n SelectionIntersection(\n raw_spec.split(OP_SET_INTERSECTION),\n )\n )\n return SelectionUnion(\n union_components,\n )\n\n\ndef _filter_node_ids(\n unique_ids: List[str],\n fal_dbt: FalDbt,\n selectors: List[str],\n nodeGraph: NodeGraph,\n) -> List[str]:\n \"\"\"Filter list of unique_ids according to a selector.\"\"\"\n output = set()\n\n union = parse_union(selectors)\n for intersection in union.components:\n try:\n plan_outputs = [\n set(SelectorPlan(selector, unique_ids, fal_dbt).execute(nodeGraph))\n for selector in intersection.components\n if selector\n ]\n except nx.NetworkXError:\n # When the user selects a non-existent node, don't fail immediately\n # but rather just continue processing the rest of selectors.\n plan_outputs = []\n\n if plan_outputs:\n output |= set.intersection(*plan_outputs)\n\n return list(output)\n\n\ndef _get_children_with_parents(node_id: str, nodeGraph: NodeGraph) -> List[str]:\n children = nodeGraph.get_descendants(node_id)\n output = reduce(lambda l, ch: l + nodeGraph.get_ancestors(ch), children, children)\n\n output = list(set(output))\n\n return output\n\n\ndef _expand_script(script_name: str, unique_ids: List[str]) -> List[str]:\n \"\"\"\n Expands the selected script name to unique id format.\n for example [scripta.py] to [script.modelB.AFTER.scripta.py, script.modelA.BEFORE.scripta.py]\n \"\"\"\n\n def contains_script_name(id: str):\n return script_name in id\n\n return list(filter(contains_script_name, unique_ids))\n\n\nclass SelectType(Enum):\n MODEL = 1\n SCRIPT = 2\n COMPLEX = 3\n\n\n@dataclass(init=False)\nclass SelectorPlan:\n \"\"\"\n Represents a single selector, for example in the command\n\n fal flow run --select script.py+\n\n script.py+ is the SelectorPlan with needs_children attribute set to true\n \"\"\"\n\n unique_ids: List[str]\n children: bool\n children_levels: Optional[int]\n children_with_parents: bool\n parents: bool\n parents_levels: Optional[int]\n type: SelectType\n raw: str\n\n def __init__(self, selector: str, unique_ids: List[str], fal_dbt: FalDbt):\n self.raw = selector\n 
self.children_with_parents = OP_CHILDREN_WITH_PARENTS.match(selector)\n selector = OP_CHILDREN_WITH_PARENTS.rest(selector)\n\n self.parents = OP_PARENTS.match(selector)\n self.parents_levels = OP_PARENTS.depth(selector)\n selector = OP_PARENTS.rest(selector)\n\n self.children = OP_CHILDREN.match(selector)\n self.children_levels = OP_CHILDREN.depth(selector)\n selector = OP_CHILDREN.rest(selector)\n\n self.type = _to_select_type(selector)\n\n if self.type == SelectType.MODEL:\n self.unique_ids = [f\"model.{fal_dbt.project_name}.{selector}\"]\n elif self.type == SelectType.SCRIPT:\n self.unique_ids = _expand_script(selector, unique_ids)\n elif self.type == SelectType.COMPLEX:\n self.unique_ids = unique_ids_from_complex_selector(selector, fal_dbt)\n\n def __post_init__(self):\n if self.children and self.children_with_parents:\n raise RuntimeError(\n f'Invalid node spec {self.raw} - \"@\" prefix and \"+\" suffix are incompatible'\n )\n\n def execute(self, nodeGraph: NodeGraph) -> Iterator[str]:\n for id in self.unique_ids:\n yield id\n\n if self.children:\n if self.children_levels is None:\n children = nodeGraph.get_descendants(id)\n else:\n children = nodeGraph.get_successors(id, self.children_levels)\n yield from children\n\n if self.parents:\n if self.parents_levels is None:\n parents = nodeGraph.get_ancestors(id)\n else:\n parents = nodeGraph.get_predecessors(id, self.parents_levels)\n yield from parents\n\n if self.children_with_parents:\n ids = _get_children_with_parents(id, nodeGraph)\n yield from ids\n\n\ndef unique_ids_from_complex_selector(select, fal_dbt: FalDbt) -> List[str]:\n args = CompileArgs(None, [select], [select], tuple(), fal_dbt._state, None)\n compile_task = CompileTask(args, fal_dbt._config)\n compile_task._runtime_initialize()\n spec = compile_task.get_selection_spec()\n graph = compile_task.get_node_selector().get_graph_queue(spec)\n return list(graph.queued)\n\n\ndef _to_select_type(selector: str) -> SelectType:\n if \":\" in selector:\n return SelectType.COMPLEX\n else:\n if _is_script_node(selector):\n return SelectType.SCRIPT\n else:\n return SelectType.MODEL\n\n\ndef _is_script_node(node_name: str) -> bool:\n return node_name.endswith(\".py\") or node_name.endswith(\".ipynb\")\n\n\nclass SelectorGraphOp:\n _regex: re.Pattern\n\n def __init__(self, regex: re.Pattern):\n self._regex = regex\n assert (\n \"rest\" in regex.groupindex\n ), 'rest must be in regex. 
Use `re.compile(\"something(?P<rest>.*)\")`'\n\n def _select(self, selector: str, group: Union[str, int]) -> Optional[str]:\n match = self._regex.match(selector)\n if match:\n return match.group(group)\n\n def match(self, selector: str) -> bool:\n return self._select(selector, 0) is not None\n\n def rest(self, selector: str) -> str:\n rest = self._select(selector, \"rest\")\n if rest is not None:\n return rest\n return selector\n\n\nclass SelectorGraphOpDepth(SelectorGraphOp):\n def depth(self, selector: str) -> Optional[int]:\n depth = self._select(selector, \"depth\")\n if depth:\n return int(depth)\n\n\n# Graph operators from their regex Patterns\nOP_CHILDREN_WITH_PARENTS = SelectorGraphOp(re.compile(\"^\\\\@(?P<rest>.*)\"))\nOP_PARENTS = SelectorGraphOpDepth(re.compile(\"^(?P<depth>\\\\d*)\\\\+(?P<rest>.*)\"))\nOP_CHILDREN = SelectorGraphOpDepth(re.compile(\"(?P<rest>.*)\\\\+(?P<depth>\\\\d*)$\"))\n\n# Logic based set operators\nOP_SET_UNION = \" \"\nOP_SET_INTERSECTION = \",\"\n\nIS_BEFORE_SCRIPT_REGEX = re.compile(\"^script\\\\..+\\\\.BEFORE\\\\..+\\\\.(py|ipynb)$\")\nIS_AFTER_SCRIPT_REGEX = re.compile(\"^script\\\\..+\\\\.AFTER\\\\..+\\\\.(py|ipynb)\")\n\n\ndef _is_before_script(id: str) -> bool:\n return bool(IS_BEFORE_SCRIPT_REGEX.match(id))\n\n\ndef _is_after_script(id: str) -> bool:\n return bool(IS_AFTER_SCRIPT_REGEX.match(id))\n", "path": "src/fal/cli/selectors.py"}]} | 3,310 | 278 |
gh_patches_debug_23762 | rasdani/github-patches | git_diff | sktime__sktime-2726 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] PolynomialTrendForecaster does not work with period index of frequency 30T
**Describe the bug**
PolynomialTrendForecaster does not work with a period index of frequency 30T (30 mins) due to incorrect internal X values
**To Reproduce**
```python
import numpy as np
import pandas as pd
from sktime.forecasting.trend import PolynomialTrendForecaster
np.random.seed(5005)
index = pd.date_range(
start="2020-10-01 00:00:00", end="2020-10-01 23:30:00", freq="30T"
)
data = pd.DataFrame(
{
"index": index,
"target": np.random.randint(0, 255, index.shape[0], dtype=int),
}
).set_index("index")
data.index = data.index.to_period()
forecaster = PolynomialTrendForecaster(degree=1)
forecaster.fit(data)
```
```python-traceback
File "c:\Users\Nikhil\OneDrive\my_libraries\my_python_libraries\pycaret\pycaret_dev\time_series_debug.py", line 41, in <module>
forecaster.fit(data)
File "C:\Users\Nikhil\.conda\envs\pycaret_dev_sktime_0p11_3\lib\site-packages\sktime\forecasting\base\_base.py", line 265, in fit
self._fit(y=y_inner, X=X_inner, fh=fh)
File "C:\Users\Nikhil\.conda\envs\pycaret_dev_sktime_0p11_3\lib\site-packages\sktime\forecasting\trend.py", line 181, in _fit
self.regressor_.fit(X, y)
File "C:\Users\Nikhil\.conda\envs\pycaret_dev_sktime_0p11_3\lib\site-packages\sklearn\pipeline.py", line 382, in fit
self._final_estimator.fit(Xt, y, **fit_params_last_step)
File "C:\Users\Nikhil\.conda\envs\pycaret_dev_sktime_0p11_3\lib\site-packages\sklearn\linear_model\_base.py", line 684, in fit
X, y = self._validate_data(
File "C:\Users\Nikhil\.conda\envs\pycaret_dev_sktime_0p11_3\lib\site-packages\sklearn\base.py", line 596, in _validate_data
X, y = check_X_y(X, y, **check_params)
File "C:\Users\Nikhil\.conda\envs\pycaret_dev_sktime_0p11_3\lib\site-packages\sklearn\utils\validation.py", line 1092, in check_X_y
check_consistent_length(X, y)
File "C:\Users\Nikhil\.conda\envs\pycaret_dev_sktime_0p11_3\lib\site-packages\sklearn\utils\validation.py", line 387, in check_consistent_length
raise ValueError(
ValueError: Found input variables with inconsistent numbers of samples: [1411, 48]
```
**Expected behavior**
Expected the fit to succeed, since this appears to have been fixed before (previously reported in https://github.com/alan-turing-institute/sktime/issues/1876)
**Additional context**
Originally reported here: https://github.com/pycaret/pycaret/issues/2607
**Versions**
<details>
System:
python: 3.8.13 (default, Mar 28 2022, 06:59:08) [MSC v.1916 64 bit (AMD64)]
executable: C:\Users\Nikhil\.conda\envs\pycaret_dev_sktime_0p11_3\python.exe
machine: Windows-10-10.0.19044-SP0
Python dependencies:
pip: 21.2.2
setuptools: 61.2.0
sklearn: 1.1.1
sktime: 0.11.4
statsmodels: 0.13.2
numpy: 1.21.6
scipy: 1.8.1
pandas: 1.4.2
matplotlib: 3.5.2
joblib: 1.1.0
numba: 0.55.2
pmdarima: 1.8.5
tsfresh: None
</details>
<!-- Thanks for contributing! -->
--- END ISSUE ---
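Before looking at the files, a minimal sketch — not part of the original report — of why a multiplied frequency such as `30T` trips up the integer coercion. It assumes the affected pandas versions express a period difference in single-minute offsets, so the offset's `.n` is 30 times the number of index steps (consistent with the 1411-vs-48 mismatch in the traceback):
```python
import pandas as pd

# Hypothetical illustration of the suspected root cause; the exact offset
# representation depends on the installed pandas version.
index = pd.period_range("2020-10-01 00:00", "2020-10-01 23:30", freq="30T")
duration = index[-1] - index[0]   # e.g. <1410 * Minutes>: minutes, not 30-minute steps
count = 30                        # the multiplier parsed from the "30T" frequency string
# The buggy coercion used duration.n directly (e.g. 1410); dividing by the
# parsed count recovers the expected 47 steps between first and last period.
print(duration.n, duration.n // count)
```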
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sktime/utils/datetime.py`
Content:
```
1 #!/usr/bin/env python3 -u
2 # -*- coding: utf-8 -*-
3 """Time format related utilities."""
4
5 __author__ = ["mloning", "xiaobenbenecho", "khrapovs"]
6 __all__ = []
7
8 import re
9 from typing import Tuple
10
11 import numpy as np
12 import pandas as pd
13
14 from sktime.utils.validation.series import check_time_index, is_integer_index
15
16
17 def _coerce_duration_to_int(duration, freq=None):
18 """Coerce durations into integer representations for a given unit of duration.
19
20 Parameters
21 ----------
22 duration : pd.DateOffset, pd.Timedelta, pd.TimedeltaIndex, pd.Index, int
23 Duration type or collection of duration types
24 freq : str
25 Frequency of the above duration type.
26
27 Returns
28 -------
29 ret : int
30 Duration in integer values for given unit
31 """
32 if isinstance(duration, int):
33 return duration
34 elif isinstance(duration, pd.tseries.offsets.DateOffset):
35 return duration.n
36 elif isinstance(duration, pd.Index) and isinstance(
37 duration[0], pd.tseries.offsets.BaseOffset
38 ):
39 count = _get_intervals_count_and_unit(freq)[0]
40 return pd.Index([d.n / count for d in duration], dtype=int)
41 elif isinstance(duration, (pd.Timedelta, pd.TimedeltaIndex)):
42 count, unit = _get_intervals_count_and_unit(freq)
43 # integer conversion only works reliably with non-ambiguous units (
44 # e.g. days, seconds but not months, years)
45 try:
46 if isinstance(duration, pd.Timedelta):
47 return int(duration / pd.Timedelta(count, unit))
48 if isinstance(duration, pd.TimedeltaIndex):
49 return (duration / pd.Timedelta(count, unit)).astype(int)
50 except ValueError:
51 raise ValueError(
52 "Index type not supported. Please consider using pd.PeriodIndex."
53 )
54 else:
55 raise TypeError("`duration` type not understood.")
56
57
58 def _get_intervals_count_and_unit(freq: str) -> Tuple[int, str]:
59 """Extract interval count and unit from frequency string.
60
61 Supports eg: W, 3W, W-SUN, BQS, (B)Q(S)-MAR patterns, from which we
62 extract the count and the unit. See
63 https://pandas.pydata.org/docs/user_guide/timeseries.html#timeseries-offset-aliases
64 """
65 if freq is None:
66 raise ValueError("frequency is missing")
67 m = re.match(r"(?P<count>\d*)(?P<unit>[a-zA-Z]+)$", freq)
68 if not m:
69 raise ValueError(f"pandas frequency {freq} not understood.")
70 count, unit = m.groups()
71 count = 1 if not count else int(count)
72 return count, unit
73
74
75 def _get_freq(x):
76 """Get unit for conversion of time deltas to integer."""
77 if hasattr(x, "freqstr"):
78 if x.freqstr is None:
79 return None
80 elif "-" in x.freqstr:
81 return x.freqstr.split("-")[0]
82 else:
83 return x.freqstr
84 else:
85 return None
86
87
88 def _shift(x, by=1):
89 """Shift time point `x` by a step (`by`) given frequency of `x`.
90
91 Parameters
92 ----------
93 x : pd.Period, pd.Timestamp, int
94 Time point
95 by : int
96
97 Returns
98 -------
99 ret : pd.Period, pd.Timestamp, int
100 Shifted time point
101 """
102 assert isinstance(x, (pd.Period, pd.Timestamp, int, np.integer)), type(x)
103 assert isinstance(by, (int, np.integer)) or is_integer_index(by), type(by)
104 if isinstance(x, pd.Timestamp):
105 if not hasattr(x, "freq") or x.freq is None:
106 raise ValueError("No `freq` information available")
107 by *= x.freq
108 return x + by
109
110
111 def _get_duration(x, y=None, coerce_to_int=False, unit=None):
112 """Compute duration between the time indices.
113
114 Parameters
115 ----------
116 x : pd.Index, pd.Timestamp, pd.Period, int
117 y : pd.Timestamp, pd.Period, int, optional (default=None)
118 coerce_to_int : bool
119 If True, duration is returned as integer value for given unit
120 unit : str
121 Time unit
122
123 Returns
124 -------
125 ret : duration type
126 Duration
127 """
128 if y is None:
129 x = check_time_index(x)
130 duration = x[-1] - x[0]
131 else:
132 assert isinstance(x, (int, np.integer, pd.Period, pd.Timestamp))
133 # check types allowing (np.integer, int) combinations to pass
134 assert type(x) is type(y) or (
135 isinstance(x, (np.integer, int)) and isinstance(x, (np.integer, int))
136 )
137 duration = x - y
138
139 # coerce to integer result for given time unit
140 if coerce_to_int and isinstance(
141 x, (pd.PeriodIndex, pd.DatetimeIndex, pd.Period, pd.Timestamp)
142 ):
143 if unit is None:
144 # try to get the unit from the data if not given
145 unit = _get_freq(x)
146 duration = _coerce_duration_to_int(duration, freq=unit)
147 return duration
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sktime/utils/datetime.py b/sktime/utils/datetime.py
--- a/sktime/utils/datetime.py
+++ b/sktime/utils/datetime.py
@@ -6,7 +6,7 @@
__all__ = []
import re
-from typing import Tuple
+from typing import Tuple, Union
import numpy as np
import pandas as pd
@@ -14,7 +14,10 @@
from sktime.utils.validation.series import check_time_index, is_integer_index
-def _coerce_duration_to_int(duration, freq=None):
+def _coerce_duration_to_int(
+ duration: Union[int, pd.Timedelta, pd.tseries.offsets.BaseOffset, pd.Index],
+ freq: str = None,
+) -> Union[int, pd.Index]:
"""Coerce durations into integer representations for a given unit of duration.
Parameters
@@ -31,8 +34,8 @@
"""
if isinstance(duration, int):
return duration
- elif isinstance(duration, pd.tseries.offsets.DateOffset):
- return duration.n
+ elif isinstance(duration, pd.tseries.offsets.BaseOffset):
+ return int(duration.n / _get_intervals_count_and_unit(freq)[0])
elif isinstance(duration, pd.Index) and isinstance(
duration[0], pd.tseries.offsets.BaseOffset
):
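As a rough sanity check of the patched behaviour, the reproduction from the issue should now fit cleanly. This is only a sketch assuming the patch above is applied; the predicted values are illustrative:
```python
import numpy as np
import pandas as pd
from sktime.forecasting.trend import PolynomialTrendForecaster

np.random.seed(5005)
index = pd.period_range("2020-10-01 00:00", "2020-10-01 23:30", freq="30T")
data = pd.DataFrame({"target": np.random.randint(0, 255, len(index))}, index=index)

forecaster = PolynomialTrendForecaster(degree=1)
forecaster.fit(data)                     # previously raised the 1411-vs-48 ValueError
print(forecaster.predict(fh=[1, 2, 3]))  # three 30-minute steps ahead
```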
| {"golden_diff": "diff --git a/sktime/utils/datetime.py b/sktime/utils/datetime.py\n--- a/sktime/utils/datetime.py\n+++ b/sktime/utils/datetime.py\n@@ -6,7 +6,7 @@\n __all__ = []\n \n import re\n-from typing import Tuple\n+from typing import Tuple, Union\n \n import numpy as np\n import pandas as pd\n@@ -14,7 +14,10 @@\n from sktime.utils.validation.series import check_time_index, is_integer_index\n \n \n-def _coerce_duration_to_int(duration, freq=None):\n+def _coerce_duration_to_int(\n+ duration: Union[int, pd.Timedelta, pd.tseries.offsets.BaseOffset, pd.Index],\n+ freq: str = None,\n+) -> Union[int, pd.Index]:\n \"\"\"Coerce durations into integer representations for a given unit of duration.\n \n Parameters\n@@ -31,8 +34,8 @@\n \"\"\"\n if isinstance(duration, int):\n return duration\n- elif isinstance(duration, pd.tseries.offsets.DateOffset):\n- return duration.n\n+ elif isinstance(duration, pd.tseries.offsets.BaseOffset):\n+ return int(duration.n / _get_intervals_count_and_unit(freq)[0])\n elif isinstance(duration, pd.Index) and isinstance(\n duration[0], pd.tseries.offsets.BaseOffset\n ):\n", "issue": "[BUG] PolynomialTrendForecaster does not work with period index of frequency 30T\n**Describe the bug**\r\nPolynomialTrendForecaster does not work with period index of frequency 30T (30 mnins) due to incorrect X values \r\n\r\n\r\n**To Reproduce**\r\n\r\n```python\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sktime.forecasting.trend import PolynomialTrendForecaster\r\n\r\n\r\nnp.random.seed(5005)\r\nindex = pd.date_range(\r\n start=\"2020-10-01 00:00:00\", end=\"2020-10-01 23:30:00\", freq=\"30T\"\r\n)\r\ndata = pd.DataFrame(\r\n {\r\n \"index\": index,\r\n \"target\": np.random.randint(0, 255, index.shape[0], dtype=int),\r\n }\r\n).set_index(\"index\")\r\ndata.index = data.index.to_period()\r\n\r\nforecaster = PolynomialTrendForecaster(degree=1)\r\nforecaster.fit(data)\r\n```\r\n\r\n```python-traceback\r\n File \"c:\\Users\\Nikhil\\OneDrive\\my_libraries\\my_python_libraries\\pycaret\\pycaret_dev\\time_series_debug.py\", line 41, in <module>\r\n forecaster.fit(data)\r\n File \"C:\\Users\\Nikhil\\.conda\\envs\\pycaret_dev_sktime_0p11_3\\lib\\site-packages\\sktime\\forecasting\\base\\_base.py\", line 265, in fit\r\n self._fit(y=y_inner, X=X_inner, fh=fh)\r\n File \"C:\\Users\\Nikhil\\.conda\\envs\\pycaret_dev_sktime_0p11_3\\lib\\site-packages\\sktime\\forecasting\\trend.py\", line 181, in _fit\r\n self.regressor_.fit(X, y)\r\n File \"C:\\Users\\Nikhil\\.conda\\envs\\pycaret_dev_sktime_0p11_3\\lib\\site-packages\\sklearn\\pipeline.py\", line 382, in fit\r\n self._final_estimator.fit(Xt, y, **fit_params_last_step)\r\n File \"C:\\Users\\Nikhil\\.conda\\envs\\pycaret_dev_sktime_0p11_3\\lib\\site-packages\\sklearn\\linear_model\\_base.py\", line 684, in fit\r\n X, y = self._validate_data(\r\n File \"C:\\Users\\Nikhil\\.conda\\envs\\pycaret_dev_sktime_0p11_3\\lib\\site-packages\\sklearn\\base.py\", line 596, in _validate_data\r\n X, y = check_X_y(X, y, **check_params)\r\n File \"C:\\Users\\Nikhil\\.conda\\envs\\pycaret_dev_sktime_0p11_3\\lib\\site-packages\\sklearn\\utils\\validation.py\", line 1092, in check_X_y\r\n check_consistent_length(X, y)\r\n File \"C:\\Users\\Nikhil\\.conda\\envs\\pycaret_dev_sktime_0p11_3\\lib\\site-packages\\sklearn\\utils\\validation.py\", line 387, in check_consistent_length\r\n raise ValueError(\r\nValueError: Found input variables with inconsistent numbers of samples: [1411, 48]\r\n```\r\n\r\n**Expected behavior**\r\nExpected to fit correctly as it 
should have probably been fixed (previously reported https://github.com/alan-turing-institute/sktime/issues/1876)\r\n\r\n**Additional context**\r\nOriginally reported here: https://github.com/pycaret/pycaret/issues/2607\r\n\r\n**Versions**\r\n<details>\r\n\r\nSystem:\r\n python: 3.8.13 (default, Mar 28 2022, 06:59:08) [MSC v.1916 64 bit (AMD64)]\r\nexecutable: C:\\Users\\Nikhil\\.conda\\envs\\pycaret_dev_sktime_0p11_3\\python.exe\r\n machine: Windows-10-10.0.19044-SP0\r\n\r\nPython dependencies:\r\n pip: 21.2.2\r\n setuptools: 61.2.0\r\n sklearn: 1.1.1\r\n sktime: 0.11.4\r\n statsmodels: 0.13.2\r\n numpy: 1.21.6\r\n scipy: 1.8.1\r\n pandas: 1.4.2\r\n matplotlib: 3.5.2\r\n joblib: 1.1.0\r\n numba: 0.55.2\r\n pmdarima: 1.8.5\r\n tsfresh: None\r\n\r\n</details>\r\n\r\n<!-- Thanks for contributing! -->\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n\"\"\"Time format related utilities.\"\"\"\n\n__author__ = [\"mloning\", \"xiaobenbenecho\", \"khrapovs\"]\n__all__ = []\n\nimport re\nfrom typing import Tuple\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.utils.validation.series import check_time_index, is_integer_index\n\n\ndef _coerce_duration_to_int(duration, freq=None):\n \"\"\"Coerce durations into integer representations for a given unit of duration.\n\n Parameters\n ----------\n duration : pd.DateOffset, pd.Timedelta, pd.TimedeltaIndex, pd.Index, int\n Duration type or collection of duration types\n freq : str\n Frequency of the above duration type.\n\n Returns\n -------\n ret : int\n Duration in integer values for given unit\n \"\"\"\n if isinstance(duration, int):\n return duration\n elif isinstance(duration, pd.tseries.offsets.DateOffset):\n return duration.n\n elif isinstance(duration, pd.Index) and isinstance(\n duration[0], pd.tseries.offsets.BaseOffset\n ):\n count = _get_intervals_count_and_unit(freq)[0]\n return pd.Index([d.n / count for d in duration], dtype=int)\n elif isinstance(duration, (pd.Timedelta, pd.TimedeltaIndex)):\n count, unit = _get_intervals_count_and_unit(freq)\n # integer conversion only works reliably with non-ambiguous units (\n # e.g. days, seconds but not months, years)\n try:\n if isinstance(duration, pd.Timedelta):\n return int(duration / pd.Timedelta(count, unit))\n if isinstance(duration, pd.TimedeltaIndex):\n return (duration / pd.Timedelta(count, unit)).astype(int)\n except ValueError:\n raise ValueError(\n \"Index type not supported. Please consider using pd.PeriodIndex.\"\n )\n else:\n raise TypeError(\"`duration` type not understood.\")\n\n\ndef _get_intervals_count_and_unit(freq: str) -> Tuple[int, str]:\n \"\"\"Extract interval count and unit from frequency string.\n\n Supports eg: W, 3W, W-SUN, BQS, (B)Q(S)-MAR patterns, from which we\n extract the count and the unit. 
See\n https://pandas.pydata.org/docs/user_guide/timeseries.html#timeseries-offset-aliases\n \"\"\"\n if freq is None:\n raise ValueError(\"frequency is missing\")\n m = re.match(r\"(?P<count>\\d*)(?P<unit>[a-zA-Z]+)$\", freq)\n if not m:\n raise ValueError(f\"pandas frequency {freq} not understood.\")\n count, unit = m.groups()\n count = 1 if not count else int(count)\n return count, unit\n\n\ndef _get_freq(x):\n \"\"\"Get unit for conversion of time deltas to integer.\"\"\"\n if hasattr(x, \"freqstr\"):\n if x.freqstr is None:\n return None\n elif \"-\" in x.freqstr:\n return x.freqstr.split(\"-\")[0]\n else:\n return x.freqstr\n else:\n return None\n\n\ndef _shift(x, by=1):\n \"\"\"Shift time point `x` by a step (`by`) given frequency of `x`.\n\n Parameters\n ----------\n x : pd.Period, pd.Timestamp, int\n Time point\n by : int\n\n Returns\n -------\n ret : pd.Period, pd.Timestamp, int\n Shifted time point\n \"\"\"\n assert isinstance(x, (pd.Period, pd.Timestamp, int, np.integer)), type(x)\n assert isinstance(by, (int, np.integer)) or is_integer_index(by), type(by)\n if isinstance(x, pd.Timestamp):\n if not hasattr(x, \"freq\") or x.freq is None:\n raise ValueError(\"No `freq` information available\")\n by *= x.freq\n return x + by\n\n\ndef _get_duration(x, y=None, coerce_to_int=False, unit=None):\n \"\"\"Compute duration between the time indices.\n\n Parameters\n ----------\n x : pd.Index, pd.Timestamp, pd.Period, int\n y : pd.Timestamp, pd.Period, int, optional (default=None)\n coerce_to_int : bool\n If True, duration is returned as integer value for given unit\n unit : str\n Time unit\n\n Returns\n -------\n ret : duration type\n Duration\n \"\"\"\n if y is None:\n x = check_time_index(x)\n duration = x[-1] - x[0]\n else:\n assert isinstance(x, (int, np.integer, pd.Period, pd.Timestamp))\n # check types allowing (np.integer, int) combinations to pass\n assert type(x) is type(y) or (\n isinstance(x, (np.integer, int)) and isinstance(x, (np.integer, int))\n )\n duration = x - y\n\n # coerce to integer result for given time unit\n if coerce_to_int and isinstance(\n x, (pd.PeriodIndex, pd.DatetimeIndex, pd.Period, pd.Timestamp)\n ):\n if unit is None:\n # try to get the unit from the data if not given\n unit = _get_freq(x)\n duration = _coerce_duration_to_int(duration, freq=unit)\n return duration\n", "path": "sktime/utils/datetime.py"}], "after_files": [{"content": "#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n\"\"\"Time format related utilities.\"\"\"\n\n__author__ = [\"mloning\", \"xiaobenbenecho\", \"khrapovs\"]\n__all__ = []\n\nimport re\nfrom typing import Tuple, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.utils.validation.series import check_time_index, is_integer_index\n\n\ndef _coerce_duration_to_int(\n duration: Union[int, pd.Timedelta, pd.tseries.offsets.BaseOffset, pd.Index],\n freq: str = None,\n) -> Union[int, pd.Index]:\n \"\"\"Coerce durations into integer representations for a given unit of duration.\n\n Parameters\n ----------\n duration : pd.DateOffset, pd.Timedelta, pd.TimedeltaIndex, pd.Index, int\n Duration type or collection of duration types\n freq : str\n Frequency of the above duration type.\n\n Returns\n -------\n ret : int\n Duration in integer values for given unit\n \"\"\"\n if isinstance(duration, int):\n return duration\n elif isinstance(duration, pd.tseries.offsets.BaseOffset):\n return int(duration.n / _get_intervals_count_and_unit(freq)[0])\n elif isinstance(duration, pd.Index) and isinstance(\n duration[0], 
pd.tseries.offsets.BaseOffset\n ):\n count = _get_intervals_count_and_unit(freq)[0]\n return pd.Index([d.n / count for d in duration], dtype=int)\n elif isinstance(duration, (pd.Timedelta, pd.TimedeltaIndex)):\n count, unit = _get_intervals_count_and_unit(freq)\n # integer conversion only works reliably with non-ambiguous units (\n # e.g. days, seconds but not months, years)\n try:\n if isinstance(duration, pd.Timedelta):\n return int(duration / pd.Timedelta(count, unit))\n if isinstance(duration, pd.TimedeltaIndex):\n return (duration / pd.Timedelta(count, unit)).astype(int)\n except ValueError:\n raise ValueError(\n \"Index type not supported. Please consider using pd.PeriodIndex.\"\n )\n else:\n raise TypeError(\"`duration` type not understood.\")\n\n\ndef _get_intervals_count_and_unit(freq: str) -> Tuple[int, str]:\n \"\"\"Extract interval count and unit from frequency string.\n\n Supports eg: W, 3W, W-SUN, BQS, (B)Q(S)-MAR patterns, from which we\n extract the count and the unit. See\n https://pandas.pydata.org/docs/user_guide/timeseries.html#timeseries-offset-aliases\n \"\"\"\n if freq is None:\n raise ValueError(\"frequency is missing\")\n m = re.match(r\"(?P<count>\\d*)(?P<unit>[a-zA-Z]+)$\", freq)\n if not m:\n raise ValueError(f\"pandas frequency {freq} not understood.\")\n count, unit = m.groups()\n count = 1 if not count else int(count)\n return count, unit\n\n\ndef _get_freq(x):\n \"\"\"Get unit for conversion of time deltas to integer.\"\"\"\n if hasattr(x, \"freqstr\"):\n if x.freqstr is None:\n return None\n elif \"-\" in x.freqstr:\n return x.freqstr.split(\"-\")[0]\n else:\n return x.freqstr\n else:\n return None\n\n\ndef _shift(x, by=1):\n \"\"\"Shift time point `x` by a step (`by`) given frequency of `x`.\n\n Parameters\n ----------\n x : pd.Period, pd.Timestamp, int\n Time point\n by : int\n\n Returns\n -------\n ret : pd.Period, pd.Timestamp, int\n Shifted time point\n \"\"\"\n assert isinstance(x, (pd.Period, pd.Timestamp, int, np.integer)), type(x)\n assert isinstance(by, (int, np.integer)) or is_integer_index(by), type(by)\n if isinstance(x, pd.Timestamp):\n if not hasattr(x, \"freq\") or x.freq is None:\n raise ValueError(\"No `freq` information available\")\n by *= x.freq\n return x + by\n\n\ndef _get_duration(x, y=None, coerce_to_int=False, unit=None):\n \"\"\"Compute duration between the time indices.\n\n Parameters\n ----------\n x : pd.Index, pd.Timestamp, pd.Period, int\n y : pd.Timestamp, pd.Period, int, optional (default=None)\n coerce_to_int : bool\n If True, duration is returned as integer value for given unit\n unit : str\n Time unit\n\n Returns\n -------\n ret : duration type\n Duration\n \"\"\"\n if y is None:\n x = check_time_index(x)\n duration = x[-1] - x[0]\n else:\n assert isinstance(x, (int, np.integer, pd.Period, pd.Timestamp))\n # check types allowing (np.integer, int) combinations to pass\n assert type(x) is type(y) or (\n isinstance(x, (np.integer, int)) and isinstance(x, (np.integer, int))\n )\n duration = x - y\n\n # coerce to integer result for given time unit\n if coerce_to_int and isinstance(\n x, (pd.PeriodIndex, pd.DatetimeIndex, pd.Period, pd.Timestamp)\n ):\n if unit is None:\n # try to get the unit from the data if not given\n unit = _get_freq(x)\n duration = _coerce_duration_to_int(duration, freq=unit)\n return duration\n", "path": "sktime/utils/datetime.py"}]} | 2,832 | 292 |
gh_patches_debug_13817 | rasdani/github-patches | git_diff | sunpy__sunpy-4183 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Example of reading data/header with io.fits.read is broken
In the docs, we have [the following example of how to read the data and header directly from a file using `sunpy.io.fits.read`](https://docs.sunpy.org/en/stable/code_ref/map.html#fixing-map-metadata),
```python
[data,header] = sunpy.io.fits.read(filename)
```
There are a few problems here.
1. If you have 2 HDUs, this returns the first to `data` and the second to `header`, obviously not the intended behavior.
2. You can use the `hdus` kwarg to select only a specific HDU, but it turns out this is broken: the rest of the function expects an `HDUList`, and after we slice it we don't cast it back to an `HDUList`, so everything after this explodes in fun and unexpected ways.
3. Even accounting for the first two problems, this function returns a list of (data, header) pairs, not a data list and a header list, so you get a `ValueError` when trying to unpack it into a tuple as above. After fixing items 1 and 2, this example could be amended to,
```python
data, header = sunpy.io.fits.read(filename)[0]
```
--- END ISSUE ---
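To make item 2 of the report concrete, here is a small sketch, separate from the issue above, of why slicing breaks: a list comprehension over an `HDUList` yields a plain `list`, which lacks the `verify` method the rest of the reader calls. The file name is a placeholder.
```python
from astropy.io import fits

# "example.fits" is a stand-in for any FITS file with at least two HDUs.
with fits.open("example.fits") as hdulist:
    subset = [hdulist[i] for i in (0, 1)]  # plain list, not an HDUList
    # subset.verify("silentfix+warn")      # AttributeError: 'list' has no attribute 'verify'
    subset = fits.HDUList(subset)          # rebuilding restores HDUList behaviour
    subset.verify("silentfix+warn")
```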
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/io/fits.py`
Content:
```
1 """
2 This module provides a FITS file reader.
3
4 Notes
5 -----
6
7 1. FITS files allow comments to be attached to every value in the header.
8 This is implemented in this module as a KEYCOMMENTS dictionary in the
9 sunpy header. To add a comment to the file on write, add a comment to this
10 dictionary with the same name as a key in the header (upcased).
11
12 2. Due to the way `~astropy.io.fits` works with images, the header dictionary may
13 differ depending on whether is accessed before or after the fits[0].data
14 is requested. If the header is read before the data then the original
15 header will be returned. If the header is read after the data has been
16 accessed then the data will have been scaled and a modified header
17 reflecting these changes will be returned: BITPIX may differ and
18 BSCALE and B_ZERO may be dropped in the modified version.
19
20 3. The verify('silentfix+warn') call attempts to handle violations of the FITS
21 standard. For example, ``nan`` values will be converted to "nan" strings.
22 Attempting to cast a `astropy.io.fits.Header` to a dictionary while it contains
23 invalid header tags will result in an error so verifying it early on
24 makes the header easier to work with later.
25 """
26 import os
27 import re
28 import sys
29 import warnings
30 import traceback
31 import collections
32
33 from astropy.io import fits
34
35 from sunpy.io.header import FileHeader
36 from sunpy.util.exceptions import SunpyUserWarning
37
38 __all__ = ['header_to_fits', 'read', 'get_header', 'write', 'extract_waveunit']
39
40 HDPair = collections.namedtuple('HDPair', ['data', 'header'])
41
42
43 def read(filepath, hdus=None, memmap=None, **kwargs):
44 """
45 Read a fits file.
46
47 Parameters
48 ----------
49 filepath : `str`
50 The fits file to be read.
51 hdu: `int` or iterable
52 The HDU indexes to read from the file.
53
54 Returns
55 -------
56 pairs : `list`
57 A list of (data, header) tuples
58
59 Notes
60 -----
61 This routine reads all the HDU's in a fits file and returns a list of the
62 data and a FileHeader instance for each one.
63
64 Also all comments in the original file are concatenated into a single
65 "comment" key in the returned FileHeader.
66 """
67 with fits.open(filepath, ignore_blank=True, memmap=memmap) as hdulist:
68 if hdus is not None:
69 if isinstance(hdus, int):
70 hdulist = hdulist[hdus]
71 elif isinstance(hdus, collections.Iterable):
72 hdulist = [hdulist[i] for i in hdus]
73
74 hdulist.verify('silentfix+warn')
75
76 headers = get_header(hdulist)
77 pairs = []
78
79 for i, (hdu, header) in enumerate(zip(hdulist, headers)):
80 try:
81 pairs.append(HDPair(hdu.data, header))
82 except (KeyError, ValueError) as e:
83 message = f"Error when reading HDU {i}. Skipping.\n"
84 for line in traceback.format_tb(sys.exc_info()[2]):
85 message += line
86 message += '\n'
87 message += repr(e)
88 warnings.warn(message, SunpyUserWarning)
89
90 return pairs
91
92
93 def get_header(afile):
94 """
95 Read a fits file and return just the headers for all HDU's. In each header,
96 the key WAVEUNIT denotes the wavelength unit which is used to describe the
97 value of the key "WAVELNTH".
98
99 Parameters
100 ----------
101 afile : `str` or `astropy.io.fits.HDUList`
102 The file to be read, or HDUList to process.
103
104 Returns
105 -------
106 headers : `list`
107 A list of `sunpy.io.helper.FileHeader` headers.
108 """
109 if isinstance(afile, fits.HDUList):
110 hdulist = afile
111 close = False
112 else:
113 hdulist = fits.open(afile, ignore_blank=True)
114 hdulist.verify('silentfix')
115 close = True
116
117 try:
118 headers = []
119 for hdu in hdulist:
120 try:
121 comment = "".join(hdu.header['COMMENT']).strip()
122 except KeyError:
123 comment = ""
124 try:
125 history = "".join(hdu.header['HISTORY']).strip()
126 except KeyError:
127 history = ""
128
129 header = FileHeader(hdu.header)
130 header['COMMENT'] = comment
131 header['HISTORY'] = history
132
133 # Strip out KEYCOMMENTS to a dict, the hard way
134 keydict = {}
135 for card in hdu.header.cards:
136 if card.comment != '':
137 keydict.update({card.keyword: card.comment})
138 header['KEYCOMMENTS'] = keydict
139 header['WAVEUNIT'] = extract_waveunit(header)
140
141 headers.append(header)
142 finally:
143 if close:
144 hdulist.close()
145 return headers
146
147
148 def write(fname, data, header, hdu_type=None, **kwargs):
149 """
150 Take a data header pair and write a FITS file.
151
152 Parameters
153 ----------
154 fname : `str`
155 File name, with extension.
156 data : `numpy.ndarray`
157 n-dimensional data array.
158 header : `dict`
159 A header dictionary.
160 hdu_type: `None` or `~astropy.io.fits.CompImageHDU`
161 `None` will return a normal FITS files.
162 `~astropy.io.fits.CompImageHDU` will rice compress the FITS file.
163 kwargs :
164 Additional keyword arguments are given to
165 `~astropy.io.fits.HDUList.writeto`.
166 """
167 # Copy header so the one in memory is left alone while changing it for
168 # write.
169 header = header.copy()
170
171 fits_header = header_to_fits(header)
172
173 if isinstance(fname, str):
174 fname = os.path.expanduser(fname)
175
176 fitskwargs = {'output_verify': 'fix'}
177 fitskwargs.update(kwargs)
178
179 if not hdu_type:
180 hdu_type = fits.PrimaryHDU
181
182 hdu = hdu_type(data=data, header=fits_header)
183
184 if not isinstance(hdu, fits.PrimaryHDU):
185 hdul = fits.HDUList([fits.PrimaryHDU(), hdu])
186 else:
187 hdul = fits.HDUList([hdu])
188
189 hdul.writeto(fname, **fitskwargs)
190
191
192 def header_to_fits(header):
193 """
194 Convert a header dict to a `~astropy.fits.Header`.
195 """
196 # Copy the header to avoid modifying it in place
197 header = header.copy()
198 # The comments need to be added to the header separately from the normal
199 # kwargs. Find and deal with them:
200 fits_header = fits.Header()
201 # Check Header
202 key_comments = header.pop('KEYCOMMENTS', False)
203
204 for k, v in header.items():
205 # Drop any keys that have non-ascii characters
206 if not fits.Card._ascii_text_re.match(str(v)):
207 warnings.warn(f'The meta key {k} is not valid ascii, dropping from the FITS header',
208 SunpyUserWarning)
209 continue
210 # Drop any keys which are too long to save into FITS
211 if len(k) > 8:
212 warnings.warn(f"The meta key {k} is too long, dropping from the FITS header "
213 "(maximum allowed key length is 8 characters).",
214 SunpyUserWarning)
215 continue
216
217 if k.upper() in ('COMMENT', 'HV_COMMENT'):
218 comments = str(v).split('\n')
219 for com in comments:
220 fits_header.add_comment(com)
221 elif k.upper() == 'HISTORY':
222 hists = str(v).split('\n')
223 for hist in hists:
224 fits_header.add_history(hist)
225 elif isinstance(v, fits.header._HeaderCommentaryCards):
226 if k != '':
227 fits_header.append(fits.Card(k, str(v).split('\n')))
228 else:
229 # For some horrific reason, we save a list to the wavelnth key in
230 # sources/rhessi.py. This is the least invasive fix for that stupidity.
231 if isinstance(v, list):
232 v = str(v)
233 fits_header.append(fits.Card(k, v))
234
235 if isinstance(key_comments, dict):
236 for k, v in key_comments.items():
237 # Check that the Card for the comment exists before trying to write to it.
238 if k in fits_header:
239 fits_header.comments[k] = v
240 elif key_comments:
241
242 raise TypeError("KEYCOMMENTS must be a dictionary")
243
244 return fits_header
245
246
247 def extract_waveunit(header):
248 """
249 Attempt to read the wavelength unit from a given FITS header.
250
251 Parameters
252 ----------
253 header : `sunpy.io.header.FileHeader`
254 One `~sunpy.io.header.FileHeader` instance which was created by
255 reading a FITS file. For example, `sunpy.io.fits.get_header` returns a list of
256 such instances.
257
258 Returns
259 -------
260 waveunit : `str`
261 The wavelength unit that could be found or ``None`` otherwise.
262
263 Examples
264 --------
265 The goal of this function is to return a string that can be used in
266 conjunction with the astropy.units module so that the return value can be
267 directly passed to `astropy.units.Unit`.
268
269 >>> import astropy.units
270 >>> header = {'WAVEUNIT': 'Angstrom', 'KEYCOMMENTS': {}}
271 >>> waveunit = extract_waveunit(header)
272 >>> if waveunit is not None:
273 ... unit = astropy.units.Unit(waveunit)
274 """
275 # algorithm: try the following procedures in the following order and return
276 # as soon as a waveunit could be detected
277 # 1. read header('WAVEUNIT'). If None, go to step 2.
278 # 1.1 -9 -> 'nm'
279 # 1.2 -10 -> 'angstrom'
280 # 1.3 0 -> go to step 2
281 # 1.4 if neither of the above, return the value itself in lowercase
282 # 2. parse waveunit_comment
283 # 2.1 'in meters' -> 'm'
284 # 3. parse wavelnth_comment
285 # 3.1 "[$UNIT] ..." -> $UNIT
286 # 3.2 "Observed wavelength ($UNIT)" -> $UNIT
287 def parse_waveunit_comment(waveunit_comment):
288 if waveunit_comment == 'in meters':
289 return 'm'
290
291 waveunit_comment = header['KEYCOMMENTS'].get('WAVEUNIT')
292 wavelnth_comment = header['KEYCOMMENTS'].get('WAVELNTH')
293 waveunit = header.get('WAVEUNIT')
294 if waveunit is not None:
295 metre_submultiples = {
296 0: parse_waveunit_comment(waveunit_comment),
297 -1: 'dm',
298 -2: 'cm',
299 -3: 'mm',
300 -6: 'um',
301 -9: 'nm',
302 -10: 'angstrom',
303 -12: 'pm',
304 -15: 'fm',
305 -18: 'am',
306 -21: 'zm',
307 -24: 'ym'}
308 waveunit = metre_submultiples.get(waveunit, str(waveunit).lower())
309 elif waveunit_comment is not None:
310 waveunit = parse_waveunit_comment(waveunit_comment)
311 elif wavelnth_comment is not None:
312 # supported formats (where $UNIT is the unit like "nm" or "Angstrom"):
313 # "Observed wavelength ($UNIT)"
314 # "[$UNIT] ..."
315 parentheses_pattern = r'Observed wavelength \((\w+?)\)$'
316 brackets_pattern = r'^\[(\w+?)\]'
317 for pattern in [parentheses_pattern, brackets_pattern]:
318 m = re.search(pattern, wavelnth_comment)
319 if m is not None:
320 waveunit = m.group(1)
321 break
322 if waveunit == '':
323 return None # To fix problems associated with HMI FITS.
324 return waveunit
325
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sunpy/io/fits.py b/sunpy/io/fits.py
--- a/sunpy/io/fits.py
+++ b/sunpy/io/fits.py
@@ -48,7 +48,7 @@
----------
filepath : `str`
The fits file to be read.
- hdu: `int` or iterable
+ hdus: `int` or iterable
The HDU indexes to read from the file.
Returns
@@ -71,7 +71,9 @@
elif isinstance(hdus, collections.Iterable):
hdulist = [hdulist[i] for i in hdus]
- hdulist.verify('silentfix+warn')
+ hdulist = fits.hdu.HDUList(hdulist)
+ for h in hdulist:
+ h.verify('silentfix+warn')
headers = get_header(hdulist)
pairs = []
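With the sliced HDUs wrapped back into an `HDUList`, selecting extensions via the `hdus` keyword works again; a usage sketch with a placeholder file name:
```python
import sunpy.io.fits

# Returns a list of (data, header) named tuples, one per requested HDU.
pairs = sunpy.io.fits.read("example.fits", hdus=[0, 1])
data, header = pairs[0]
```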
| {"golden_diff": "diff --git a/sunpy/io/fits.py b/sunpy/io/fits.py\n--- a/sunpy/io/fits.py\n+++ b/sunpy/io/fits.py\n@@ -48,7 +48,7 @@\n ----------\n filepath : `str`\n The fits file to be read.\n- hdu: `int` or iterable\n+ hdus: `int` or iterable\n The HDU indexes to read from the file.\n \n Returns\n@@ -71,7 +71,9 @@\n elif isinstance(hdus, collections.Iterable):\n hdulist = [hdulist[i] for i in hdus]\n \n- hdulist.verify('silentfix+warn')\n+ hdulist = fits.hdu.HDUList(hdulist)\n+ for h in hdulist:\n+ h.verify('silentfix+warn')\n \n headers = get_header(hdulist)\n pairs = []\n", "issue": "Example of reading data/header with io.fits.read is broken\nIn the docs, we have [the following example of how to read the data and header directly from a file using `sunpy.io.fits.read`](https://docs.sunpy.org/en/stable/code_ref/map.html#fixing-map-metadata),\r\n\r\n```python\r\n[data,header] = sunpy.io.fits.read(filename)\r\n```\r\n\r\nThere are a few problems here.\r\n\r\n1. If you have 2 HDUs, this returns the first to `data` and the second to `header`, obviously not the intended behavior.\r\n2. You can use the `hdus` kwarg to select only a specific HDU but it turns out this is broken because the rest of the function expects an `HDUList` and after we slice it, we don't cast it back to an `HDUList` so everything after this explodes in fun and unexpected ways.\r\n3. Even accounting for the first two problems, this function returns a list of data,header pairs, not a list of data and headers so you get a `ValueError` when trying to return this to a tuple as above. Fixing items 1 and 2, this example could be amended to,\r\n\r\n```python\r\ndata, header = sunpy.io.fits.read(filename)[0]\r\n```\n", "before_files": [{"content": "\"\"\"\nThis module provides a FITS file reader.\n\nNotes\n-----\n\n1. FITS files allow comments to be attached to every value in the header.\n This is implemented in this module as a KEYCOMMENTS dictionary in the\n sunpy header. To add a comment to the file on write, add a comment to this\n dictionary with the same name as a key in the header (upcased).\n\n2. Due to the way `~astropy.io.fits` works with images, the header dictionary may\n differ depending on whether is accessed before or after the fits[0].data\n is requested. If the header is read before the data then the original\n header will be returned. If the header is read after the data has been\n accessed then the data will have been scaled and a modified header\n reflecting these changes will be returned: BITPIX may differ and\n BSCALE and B_ZERO may be dropped in the modified version.\n\n3. The verify('silentfix+warn') call attempts to handle violations of the FITS\n standard. 
For example, ``nan`` values will be converted to \"nan\" strings.\n Attempting to cast a `astropy.io.fits.Header` to a dictionary while it contains\n invalid header tags will result in an error so verifying it early on\n makes the header easier to work with later.\n\"\"\"\nimport os\nimport re\nimport sys\nimport warnings\nimport traceback\nimport collections\n\nfrom astropy.io import fits\n\nfrom sunpy.io.header import FileHeader\nfrom sunpy.util.exceptions import SunpyUserWarning\n\n__all__ = ['header_to_fits', 'read', 'get_header', 'write', 'extract_waveunit']\n\nHDPair = collections.namedtuple('HDPair', ['data', 'header'])\n\n\ndef read(filepath, hdus=None, memmap=None, **kwargs):\n \"\"\"\n Read a fits file.\n\n Parameters\n ----------\n filepath : `str`\n The fits file to be read.\n hdu: `int` or iterable\n The HDU indexes to read from the file.\n\n Returns\n -------\n pairs : `list`\n A list of (data, header) tuples\n\n Notes\n -----\n This routine reads all the HDU's in a fits file and returns a list of the\n data and a FileHeader instance for each one.\n\n Also all comments in the original file are concatenated into a single\n \"comment\" key in the returned FileHeader.\n \"\"\"\n with fits.open(filepath, ignore_blank=True, memmap=memmap) as hdulist:\n if hdus is not None:\n if isinstance(hdus, int):\n hdulist = hdulist[hdus]\n elif isinstance(hdus, collections.Iterable):\n hdulist = [hdulist[i] for i in hdus]\n\n hdulist.verify('silentfix+warn')\n\n headers = get_header(hdulist)\n pairs = []\n\n for i, (hdu, header) in enumerate(zip(hdulist, headers)):\n try:\n pairs.append(HDPair(hdu.data, header))\n except (KeyError, ValueError) as e:\n message = f\"Error when reading HDU {i}. Skipping.\\n\"\n for line in traceback.format_tb(sys.exc_info()[2]):\n message += line\n message += '\\n'\n message += repr(e)\n warnings.warn(message, SunpyUserWarning)\n\n return pairs\n\n\ndef get_header(afile):\n \"\"\"\n Read a fits file and return just the headers for all HDU's. 
In each header,\n the key WAVEUNIT denotes the wavelength unit which is used to describe the\n value of the key \"WAVELNTH\".\n\n Parameters\n ----------\n afile : `str` or `astropy.io.fits.HDUList`\n The file to be read, or HDUList to process.\n\n Returns\n -------\n headers : `list`\n A list of `sunpy.io.helper.FileHeader` headers.\n \"\"\"\n if isinstance(afile, fits.HDUList):\n hdulist = afile\n close = False\n else:\n hdulist = fits.open(afile, ignore_blank=True)\n hdulist.verify('silentfix')\n close = True\n\n try:\n headers = []\n for hdu in hdulist:\n try:\n comment = \"\".join(hdu.header['COMMENT']).strip()\n except KeyError:\n comment = \"\"\n try:\n history = \"\".join(hdu.header['HISTORY']).strip()\n except KeyError:\n history = \"\"\n\n header = FileHeader(hdu.header)\n header['COMMENT'] = comment\n header['HISTORY'] = history\n\n # Strip out KEYCOMMENTS to a dict, the hard way\n keydict = {}\n for card in hdu.header.cards:\n if card.comment != '':\n keydict.update({card.keyword: card.comment})\n header['KEYCOMMENTS'] = keydict\n header['WAVEUNIT'] = extract_waveunit(header)\n\n headers.append(header)\n finally:\n if close:\n hdulist.close()\n return headers\n\n\ndef write(fname, data, header, hdu_type=None, **kwargs):\n \"\"\"\n Take a data header pair and write a FITS file.\n\n Parameters\n ----------\n fname : `str`\n File name, with extension.\n data : `numpy.ndarray`\n n-dimensional data array.\n header : `dict`\n A header dictionary.\n hdu_type: `None` or `~astropy.io.fits.CompImageHDU`\n `None` will return a normal FITS files.\n `~astropy.io.fits.CompImageHDU` will rice compress the FITS file.\n kwargs :\n Additional keyword arguments are given to\n `~astropy.io.fits.HDUList.writeto`.\n \"\"\"\n # Copy header so the one in memory is left alone while changing it for\n # write.\n header = header.copy()\n\n fits_header = header_to_fits(header)\n\n if isinstance(fname, str):\n fname = os.path.expanduser(fname)\n\n fitskwargs = {'output_verify': 'fix'}\n fitskwargs.update(kwargs)\n\n if not hdu_type:\n hdu_type = fits.PrimaryHDU\n\n hdu = hdu_type(data=data, header=fits_header)\n\n if not isinstance(hdu, fits.PrimaryHDU):\n hdul = fits.HDUList([fits.PrimaryHDU(), hdu])\n else:\n hdul = fits.HDUList([hdu])\n\n hdul.writeto(fname, **fitskwargs)\n\n\ndef header_to_fits(header):\n \"\"\"\n Convert a header dict to a `~astropy.fits.Header`.\n \"\"\"\n # Copy the header to avoid modifying it in place\n header = header.copy()\n # The comments need to be added to the header separately from the normal\n # kwargs. 
Find and deal with them:\n fits_header = fits.Header()\n # Check Header\n key_comments = header.pop('KEYCOMMENTS', False)\n\n for k, v in header.items():\n # Drop any keys that have non-ascii characters\n if not fits.Card._ascii_text_re.match(str(v)):\n warnings.warn(f'The meta key {k} is not valid ascii, dropping from the FITS header',\n SunpyUserWarning)\n continue\n # Drop any keys which are too long to save into FITS\n if len(k) > 8:\n warnings.warn(f\"The meta key {k} is too long, dropping from the FITS header \"\n \"(maximum allowed key length is 8 characters).\",\n SunpyUserWarning)\n continue\n\n if k.upper() in ('COMMENT', 'HV_COMMENT'):\n comments = str(v).split('\\n')\n for com in comments:\n fits_header.add_comment(com)\n elif k.upper() == 'HISTORY':\n hists = str(v).split('\\n')\n for hist in hists:\n fits_header.add_history(hist)\n elif isinstance(v, fits.header._HeaderCommentaryCards):\n if k != '':\n fits_header.append(fits.Card(k, str(v).split('\\n')))\n else:\n # For some horrific reason, we save a list to the wavelnth key in\n # sources/rhessi.py. This is the least invasive fix for that stupidity.\n if isinstance(v, list):\n v = str(v)\n fits_header.append(fits.Card(k, v))\n\n if isinstance(key_comments, dict):\n for k, v in key_comments.items():\n # Check that the Card for the comment exists before trying to write to it.\n if k in fits_header:\n fits_header.comments[k] = v\n elif key_comments:\n\n raise TypeError(\"KEYCOMMENTS must be a dictionary\")\n\n return fits_header\n\n\ndef extract_waveunit(header):\n \"\"\"\n Attempt to read the wavelength unit from a given FITS header.\n\n Parameters\n ----------\n header : `sunpy.io.header.FileHeader`\n One `~sunpy.io.header.FileHeader` instance which was created by\n reading a FITS file. For example, `sunpy.io.fits.get_header` returns a list of\n such instances.\n\n Returns\n -------\n waveunit : `str`\n The wavelength unit that could be found or ``None`` otherwise.\n\n Examples\n --------\n The goal of this function is to return a string that can be used in\n conjunction with the astropy.units module so that the return value can be\n directly passed to `astropy.units.Unit`.\n\n >>> import astropy.units\n >>> header = {'WAVEUNIT': 'Angstrom', 'KEYCOMMENTS': {}}\n >>> waveunit = extract_waveunit(header)\n >>> if waveunit is not None:\n ... unit = astropy.units.Unit(waveunit)\n \"\"\"\n # algorithm: try the following procedures in the following order and return\n # as soon as a waveunit could be detected\n # 1. read header('WAVEUNIT'). If None, go to step 2.\n # 1.1 -9 -> 'nm'\n # 1.2 -10 -> 'angstrom'\n # 1.3 0 -> go to step 2\n # 1.4 if neither of the above, return the value itself in lowercase\n # 2. parse waveunit_comment\n # 2.1 'in meters' -> 'm'\n # 3. 
parse wavelnth_comment\n # 3.1 \"[$UNIT] ...\" -> $UNIT\n # 3.2 \"Observed wavelength ($UNIT)\" -> $UNIT\n def parse_waveunit_comment(waveunit_comment):\n if waveunit_comment == 'in meters':\n return 'm'\n\n waveunit_comment = header['KEYCOMMENTS'].get('WAVEUNIT')\n wavelnth_comment = header['KEYCOMMENTS'].get('WAVELNTH')\n waveunit = header.get('WAVEUNIT')\n if waveunit is not None:\n metre_submultiples = {\n 0: parse_waveunit_comment(waveunit_comment),\n -1: 'dm',\n -2: 'cm',\n -3: 'mm',\n -6: 'um',\n -9: 'nm',\n -10: 'angstrom',\n -12: 'pm',\n -15: 'fm',\n -18: 'am',\n -21: 'zm',\n -24: 'ym'}\n waveunit = metre_submultiples.get(waveunit, str(waveunit).lower())\n elif waveunit_comment is not None:\n waveunit = parse_waveunit_comment(waveunit_comment)\n elif wavelnth_comment is not None:\n # supported formats (where $UNIT is the unit like \"nm\" or \"Angstrom\"):\n # \"Observed wavelength ($UNIT)\"\n # \"[$UNIT] ...\"\n parentheses_pattern = r'Observed wavelength \\((\\w+?)\\)$'\n brackets_pattern = r'^\\[(\\w+?)\\]'\n for pattern in [parentheses_pattern, brackets_pattern]:\n m = re.search(pattern, wavelnth_comment)\n if m is not None:\n waveunit = m.group(1)\n break\n if waveunit == '':\n return None # To fix problems associated with HMI FITS.\n return waveunit\n", "path": "sunpy/io/fits.py"}], "after_files": [{"content": "\"\"\"\nThis module provides a FITS file reader.\n\nNotes\n-----\n\n1. FITS files allow comments to be attached to every value in the header.\n This is implemented in this module as a KEYCOMMENTS dictionary in the\n sunpy header. To add a comment to the file on write, add a comment to this\n dictionary with the same name as a key in the header (upcased).\n\n2. Due to the way `~astropy.io.fits` works with images, the header dictionary may\n differ depending on whether is accessed before or after the fits[0].data\n is requested. If the header is read before the data then the original\n header will be returned. If the header is read after the data has been\n accessed then the data will have been scaled and a modified header\n reflecting these changes will be returned: BITPIX may differ and\n BSCALE and B_ZERO may be dropped in the modified version.\n\n3. The verify('silentfix+warn') call attempts to handle violations of the FITS\n standard. 
For example, ``nan`` values will be converted to \"nan\" strings.\n Attempting to cast a `astropy.io.fits.Header` to a dictionary while it contains\n invalid header tags will result in an error so verifying it early on\n makes the header easier to work with later.\n\"\"\"\nimport os\nimport re\nimport sys\nimport warnings\nimport traceback\nimport collections\n\nfrom astropy.io import fits\n\nfrom sunpy.io.header import FileHeader\nfrom sunpy.util.exceptions import SunpyUserWarning\n\n__all__ = ['header_to_fits', 'read', 'get_header', 'write', 'extract_waveunit']\n\nHDPair = collections.namedtuple('HDPair', ['data', 'header'])\n\n\ndef read(filepath, hdus=None, memmap=None, **kwargs):\n \"\"\"\n Read a fits file.\n\n Parameters\n ----------\n filepath : `str`\n The fits file to be read.\n hdus: `int` or iterable\n The HDU indexes to read from the file.\n\n Returns\n -------\n pairs : `list`\n A list of (data, header) tuples\n\n Notes\n -----\n This routine reads all the HDU's in a fits file and returns a list of the\n data and a FileHeader instance for each one.\n\n Also all comments in the original file are concatenated into a single\n \"comment\" key in the returned FileHeader.\n \"\"\"\n with fits.open(filepath, ignore_blank=True, memmap=memmap) as hdulist:\n if hdus is not None:\n if isinstance(hdus, int):\n hdulist = hdulist[hdus]\n elif isinstance(hdus, collections.Iterable):\n hdulist = [hdulist[i] for i in hdus]\n\n hdulist = fits.hdu.HDUList(hdulist)\n for h in hdulist:\n h.verify('silentfix+warn')\n\n headers = get_header(hdulist)\n pairs = []\n\n for i, (hdu, header) in enumerate(zip(hdulist, headers)):\n try:\n pairs.append(HDPair(hdu.data, header))\n except (KeyError, ValueError) as e:\n message = f\"Error when reading HDU {i}. Skipping.\\n\"\n for line in traceback.format_tb(sys.exc_info()[2]):\n message += line\n message += '\\n'\n message += repr(e)\n warnings.warn(message, SunpyUserWarning)\n\n return pairs\n\n\ndef get_header(afile):\n \"\"\"\n Read a fits file and return just the headers for all HDU's. 
In each header,\n the key WAVEUNIT denotes the wavelength unit which is used to describe the\n value of the key \"WAVELNTH\".\n\n Parameters\n ----------\n afile : `str` or `astropy.io.fits.HDUList`\n The file to be read, or HDUList to process.\n\n Returns\n -------\n headers : `list`\n A list of `sunpy.io.helper.FileHeader` headers.\n \"\"\"\n if isinstance(afile, fits.HDUList):\n hdulist = afile\n close = False\n else:\n hdulist = fits.open(afile, ignore_blank=True)\n hdulist.verify('silentfix')\n close = True\n\n try:\n headers = []\n for hdu in hdulist:\n try:\n comment = \"\".join(hdu.header['COMMENT']).strip()\n except KeyError:\n comment = \"\"\n try:\n history = \"\".join(hdu.header['HISTORY']).strip()\n except KeyError:\n history = \"\"\n\n header = FileHeader(hdu.header)\n header['COMMENT'] = comment\n header['HISTORY'] = history\n\n # Strip out KEYCOMMENTS to a dict, the hard way\n keydict = {}\n for card in hdu.header.cards:\n if card.comment != '':\n keydict.update({card.keyword: card.comment})\n header['KEYCOMMENTS'] = keydict\n header['WAVEUNIT'] = extract_waveunit(header)\n\n headers.append(header)\n finally:\n if close:\n hdulist.close()\n return headers\n\n\ndef write(fname, data, header, hdu_type=None, **kwargs):\n \"\"\"\n Take a data header pair and write a FITS file.\n\n Parameters\n ----------\n fname : `str`\n File name, with extension.\n data : `numpy.ndarray`\n n-dimensional data array.\n header : `dict`\n A header dictionary.\n hdu_type: `None` or `~astropy.io.fits.CompImageHDU`\n `None` will return a normal FITS files.\n `~astropy.io.fits.CompImageHDU` will rice compress the FITS file.\n kwargs :\n Additional keyword arguments are given to\n `~astropy.io.fits.HDUList.writeto`.\n \"\"\"\n # Copy header so the one in memory is left alone while changing it for\n # write.\n header = header.copy()\n\n fits_header = header_to_fits(header)\n\n if isinstance(fname, str):\n fname = os.path.expanduser(fname)\n\n fitskwargs = {'output_verify': 'fix'}\n fitskwargs.update(kwargs)\n\n if not hdu_type:\n hdu_type = fits.PrimaryHDU\n\n hdu = hdu_type(data=data, header=fits_header)\n\n if not isinstance(hdu, fits.PrimaryHDU):\n hdul = fits.HDUList([fits.PrimaryHDU(), hdu])\n else:\n hdul = fits.HDUList([hdu])\n\n hdul.writeto(fname, **fitskwargs)\n\n\ndef header_to_fits(header):\n \"\"\"\n Convert a header dict to a `~astropy.fits.Header`.\n \"\"\"\n # Copy the header to avoid modifying it in place\n header = header.copy()\n # The comments need to be added to the header separately from the normal\n # kwargs. 
Find and deal with them:\n fits_header = fits.Header()\n # Check Header\n key_comments = header.pop('KEYCOMMENTS', False)\n\n for k, v in header.items():\n # Drop any keys that have non-ascii characters\n if not fits.Card._ascii_text_re.match(str(v)):\n warnings.warn(f'The meta key {k} is not valid ascii, dropping from the FITS header',\n SunpyUserWarning)\n continue\n # Drop any keys which are too long to save into FITS\n if len(k) > 8:\n warnings.warn(f\"The meta key {k} is too long, dropping from the FITS header \"\n \"(maximum allowed key length is 8 characters).\",\n SunpyUserWarning)\n continue\n\n if k.upper() in ('COMMENT', 'HV_COMMENT'):\n comments = str(v).split('\\n')\n for com in comments:\n fits_header.add_comment(com)\n elif k.upper() == 'HISTORY':\n hists = str(v).split('\\n')\n for hist in hists:\n fits_header.add_history(hist)\n elif isinstance(v, fits.header._HeaderCommentaryCards):\n if k != '':\n fits_header.append(fits.Card(k, str(v).split('\\n')))\n else:\n # For some horrific reason, we save a list to the wavelnth key in\n # sources/rhessi.py. This is the least invasive fix for that stupidity.\n if isinstance(v, list):\n v = str(v)\n fits_header.append(fits.Card(k, v))\n\n if isinstance(key_comments, dict):\n for k, v in key_comments.items():\n # Check that the Card for the comment exists before trying to write to it.\n if k in fits_header:\n fits_header.comments[k] = v\n elif key_comments:\n\n raise TypeError(\"KEYCOMMENTS must be a dictionary\")\n\n return fits_header\n\n\ndef extract_waveunit(header):\n \"\"\"\n Attempt to read the wavelength unit from a given FITS header.\n\n Parameters\n ----------\n header : `sunpy.io.header.FileHeader`\n One `~sunpy.io.header.FileHeader` instance which was created by\n reading a FITS file. For example, `sunpy.io.fits.get_header` returns a list of\n such instances.\n\n Returns\n -------\n waveunit : `str`\n The wavelength unit that could be found or ``None`` otherwise.\n\n Examples\n --------\n The goal of this function is to return a string that can be used in\n conjunction with the astropy.units module so that the return value can be\n directly passed to `astropy.units.Unit`.\n\n >>> import astropy.units\n >>> header = {'WAVEUNIT': 'Angstrom', 'KEYCOMMENTS': {}}\n >>> waveunit = extract_waveunit(header)\n >>> if waveunit is not None:\n ... unit = astropy.units.Unit(waveunit)\n \"\"\"\n # algorithm: try the following procedures in the following order and return\n # as soon as a waveunit could be detected\n # 1. read header('WAVEUNIT'). If None, go to step 2.\n # 1.1 -9 -> 'nm'\n # 1.2 -10 -> 'angstrom'\n # 1.3 0 -> go to step 2\n # 1.4 if neither of the above, return the value itself in lowercase\n # 2. parse waveunit_comment\n # 2.1 'in meters' -> 'm'\n # 3. 
parse wavelnth_comment\n # 3.1 \"[$UNIT] ...\" -> $UNIT\n # 3.2 \"Observed wavelength ($UNIT)\" -> $UNIT\n def parse_waveunit_comment(waveunit_comment):\n if waveunit_comment == 'in meters':\n return 'm'\n\n waveunit_comment = header['KEYCOMMENTS'].get('WAVEUNIT')\n wavelnth_comment = header['KEYCOMMENTS'].get('WAVELNTH')\n waveunit = header.get('WAVEUNIT')\n if waveunit is not None:\n metre_submultiples = {\n 0: parse_waveunit_comment(waveunit_comment),\n -1: 'dm',\n -2: 'cm',\n -3: 'mm',\n -6: 'um',\n -9: 'nm',\n -10: 'angstrom',\n -12: 'pm',\n -15: 'fm',\n -18: 'am',\n -21: 'zm',\n -24: 'ym'}\n waveunit = metre_submultiples.get(waveunit, str(waveunit).lower())\n elif waveunit_comment is not None:\n waveunit = parse_waveunit_comment(waveunit_comment)\n elif wavelnth_comment is not None:\n # supported formats (where $UNIT is the unit like \"nm\" or \"Angstrom\"):\n # \"Observed wavelength ($UNIT)\"\n # \"[$UNIT] ...\"\n parentheses_pattern = r'Observed wavelength \\((\\w+?)\\)$'\n brackets_pattern = r'^\\[(\\w+?)\\]'\n for pattern in [parentheses_pattern, brackets_pattern]:\n m = re.search(pattern, wavelnth_comment)\n if m is not None:\n waveunit = m.group(1)\n break\n if waveunit == '':\n return None # To fix problems associated with HMI FITS.\n return waveunit\n", "path": "sunpy/io/fits.py"}]} | 4,064 | 202 |
gh_patches_debug_645 | rasdani/github-patches | git_diff | zulip__zulip-12366 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scrollbar drag can result in unintended click actions
Split off from #11792:
> * on the settings pages, if you click on the scrollbar, drag it down, and then release your click when the mouse is outside the settings modal (e.g. below it or to the right), it closes the settings modal. I don't know if this is an existing thing or a regression, but I ran into it a bunch of times when testing even after knowing the behavior.
This was not a regression from perfect-scrollbar, but I fixed it in Grsmto/simplebar#312 and Grsmto/simplebar#317. Just waiting for the fixes to be included in a new upstream release.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `version.py`
Content:
```
1 ZULIP_VERSION = "2.0.3+git"
2 LATEST_MAJOR_VERSION = "2.0"
3 LATEST_RELEASE_VERSION = "2.0.3"
4 LATEST_RELEASE_ANNOUNCEMENT = "https://blog.zulip.org/2019/03/01/zulip-2-0-released/"
5
6 # Bump the minor PROVISION_VERSION to indicate that folks should provision
7 # only when going from an old version of the code to a newer version. Bump
8 # the major version to indicate that folks should provision in both
9 # directions.
10
11 # Typically, adding a dependency only requires a minor version bump, and
12 # removing a dependency requires a major version bump.
13
14 PROVISION_VERSION = '32.0'
15
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -11,4 +11,4 @@
# Typically, adding a dependency only requires a minor version bump, and
# removing a dependency requires a major version bump.
-PROVISION_VERSION = '32.0'
+PROVISION_VERSION = '32.1'
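Editor's note: the repository-side change is only the provision-version bump because the scrollbar fix itself ships through the upgraded simplebar dependency mentioned in the issue. A minimal sketch of how the convention stated in `version.py` applies here (illustrative only; the dependency update itself is assumed, not shown in the files above):

```python
# Sketch: the convention described in version.py applied to this change.
# Adding or updating a dependency -> bump only the minor part of
# PROVISION_VERSION, so existing checkouts re-provision after pulling.
PROVISION_VERSION = '32.1'  # was '32.0'
```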
| {"golden_diff": "diff --git a/version.py b/version.py\n--- a/version.py\n+++ b/version.py\n@@ -11,4 +11,4 @@\n # Typically, adding a dependency only requires a minor version bump, and\n # removing a dependency requires a major version bump.\n \n-PROVISION_VERSION = '32.0'\n+PROVISION_VERSION = '32.1'\n", "issue": "Scrollbar drag can result in unintended click actions\nSplit off from #11792:\r\n\r\n> * on the settings pages, if you click on the scrollbar, drag it down, and then release your click when the mouse is outside the settings modal (e.g. below it or to the right), it closes the settings modal. I don't know if this is an existing thing or a regression, but I ran into it a bunch of times when testing even after knowing the behavior.\r\n\r\nThis was not a regression from perfect-scrollbar, but I fixed it in Grsmto/simplebar#312 and Grsmto/simplebar#317. Just waiting for the fixes to be included in a new upstream release.\n", "before_files": [{"content": "ZULIP_VERSION = \"2.0.3+git\"\nLATEST_MAJOR_VERSION = \"2.0\"\nLATEST_RELEASE_VERSION = \"2.0.3\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.org/2019/03/01/zulip-2-0-released/\"\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically, adding a dependency only requires a minor version bump, and\n# removing a dependency requires a major version bump.\n\nPROVISION_VERSION = '32.0'\n", "path": "version.py"}], "after_files": [{"content": "ZULIP_VERSION = \"2.0.3+git\"\nLATEST_MAJOR_VERSION = \"2.0\"\nLATEST_RELEASE_VERSION = \"2.0.3\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.org/2019/03/01/zulip-2-0-released/\"\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically, adding a dependency only requires a minor version bump, and\n# removing a dependency requires a major version bump.\n\nPROVISION_VERSION = '32.1'\n", "path": "version.py"}]} | 584 | 80 |
gh_patches_debug_13680 | rasdani/github-patches | git_diff | sanic-org__sanic-2919 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
(websockets 12.0) DeprecationWarning: websockets.connection was renamed to websockets.protocol and Connection was renamed to Protocol
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
The DeprecationWarning is thrown here:
https://github.com/sanic-org/sanic/blob/82bf46bea3b55e6b1371107cccda280add8e70eb/sanic/server/protocols/websocket_protocol.py#L4-L9
With websockets 12 the try block still runs successfully (emitting the DeprecationWarning), so the except block never gets a chance to run.
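For illustration (this snippet is editorial, not part of the reporter's submission), a version-robust import would try the new module paths first, so websockets >= 11 imports cleanly and older releases fall back to the legacy names:

```python
try:  # websockets >= 11.0: new module/class names, no DeprecationWarning
    from websockets.protocol import State
    from websockets.server import ServerProtocol
except ImportError:  # websockets < 11.0: legacy names
    from websockets.connection import State
    from websockets.server import ServerConnection as ServerProtocol
```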
### Code snippet
_No response_
### Expected Behavior
The catch block is being run instead.
### How do you run Sanic?
Sanic CLI
### Operating System
MacOS
### Sanic Version
Sanic 23.6.0; Routing 23.6.0
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sanic/server/protocols/websocket_protocol.py`
Content:
```
1 from typing import Optional, Sequence, cast
2
3
4 try: # websockets < 11.0
5 from websockets.connection import State
6 from websockets.server import ServerConnection as ServerProtocol
7 except ImportError: # websockets >= 11.0
8 from websockets.protocol import State # type: ignore
9 from websockets.server import ServerProtocol # type: ignore
10
11 from websockets import http11
12 from websockets.datastructures import Headers as WSHeaders
13 from websockets.typing import Subprotocol
14
15 from sanic.exceptions import SanicException
16 from sanic.log import logger
17 from sanic.request import Request
18 from sanic.server import HttpProtocol
19
20 from ..websockets.impl import WebsocketImplProtocol
21
22
23 OPEN = State.OPEN
24 CLOSING = State.CLOSING
25 CLOSED = State.CLOSED
26
27
28 class WebSocketProtocol(HttpProtocol):
29 __slots__ = (
30 "websocket",
31 "websocket_timeout",
32 "websocket_max_size",
33 "websocket_ping_interval",
34 "websocket_ping_timeout",
35 )
36
37 def __init__(
38 self,
39 *args,
40 websocket_timeout: float = 10.0,
41 websocket_max_size: Optional[int] = None,
42 websocket_ping_interval: Optional[float] = 20.0,
43 websocket_ping_timeout: Optional[float] = 20.0,
44 **kwargs,
45 ):
46 super().__init__(*args, **kwargs)
47 self.websocket: Optional[WebsocketImplProtocol] = None
48 self.websocket_timeout = websocket_timeout
49 self.websocket_max_size = websocket_max_size
50 self.websocket_ping_interval = websocket_ping_interval
51 self.websocket_ping_timeout = websocket_ping_timeout
52
53 def connection_lost(self, exc):
54 if self.websocket is not None:
55 self.websocket.connection_lost(exc)
56 super().connection_lost(exc)
57
58 def data_received(self, data):
59 if self.websocket is not None:
60 self.websocket.data_received(data)
61 else:
62 # Pass it to HttpProtocol handler first
63 # That will (hopefully) upgrade it to a websocket.
64 super().data_received(data)
65
66 def eof_received(self) -> Optional[bool]:
67 if self.websocket is not None:
68 return self.websocket.eof_received()
69 else:
70 return False
71
72 def close(self, timeout: Optional[float] = None):
73 # Called by HttpProtocol at the end of connection_task
74 # If we've upgraded to websocket, we do our own closing
75 if self.websocket is not None:
76 # Note, we don't want to use websocket.close()
77 # That is used for user's application code to send a
78 # websocket close packet. This is different.
79 self.websocket.end_connection(1001)
80 else:
81 super().close()
82
83 def close_if_idle(self):
84 # Called by Sanic Server when shutting down
85 # If we've upgraded to websocket, shut it down
86 if self.websocket is not None:
87 if self.websocket.ws_proto.state in (CLOSING, CLOSED):
88 return True
89 elif self.websocket.loop is not None:
90 self.websocket.loop.create_task(self.websocket.close(1001))
91 else:
92 self.websocket.end_connection(1001)
93 else:
94 return super().close_if_idle()
95
96 @staticmethod
97 def sanic_request_to_ws_request(request: Request):
98 return http11.Request(
99 path=request.path,
100 headers=WSHeaders(request.headers),
101 )
102
103 async def websocket_handshake(
104 self, request, subprotocols: Optional[Sequence[str]] = None
105 ):
106 # let the websockets package do the handshake with the client
107 try:
108 if subprotocols is not None:
109 # subprotocols can be a set or frozenset,
110 # but ServerProtocol needs a list
111 subprotocols = cast(
112 Optional[Sequence[Subprotocol]],
113 list(
114 [
115 Subprotocol(subprotocol)
116 for subprotocol in subprotocols
117 ]
118 ),
119 )
120 ws_proto = ServerProtocol(
121 max_size=self.websocket_max_size,
122 subprotocols=subprotocols,
123 state=OPEN,
124 logger=logger,
125 )
126 resp = ws_proto.accept(self.sanic_request_to_ws_request(request))
127 except Exception:
128 msg = (
129 "Failed to open a WebSocket connection.\n"
130 "See server log for more information.\n"
131 )
132 raise SanicException(msg, status_code=500)
133 if 100 <= resp.status_code <= 299:
134 first_line = (
135 f"HTTP/1.1 {resp.status_code} {resp.reason_phrase}\r\n"
136 ).encode()
137 rbody = bytearray(first_line)
138 rbody += (
139 "".join([f"{k}: {v}\r\n" for k, v in resp.headers.items()])
140 ).encode()
141 rbody += b"\r\n"
142 if resp.body is not None:
143 rbody += resp.body
144 rbody += b"\r\n\r\n"
145 await super().send(rbody)
146 else:
147 raise SanicException(resp.body, resp.status_code)
148 self.websocket = WebsocketImplProtocol(
149 ws_proto,
150 ping_interval=self.websocket_ping_interval,
151 ping_timeout=self.websocket_ping_timeout,
152 close_timeout=self.websocket_timeout,
153 )
154 loop = (
155 request.transport.loop
156 if hasattr(request, "transport")
157 and hasattr(request.transport, "loop")
158 else None
159 )
160 await self.websocket.connection_made(self, loop=loop)
161 return self.websocket
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sanic/server/protocols/websocket_protocol.py b/sanic/server/protocols/websocket_protocol.py
--- a/sanic/server/protocols/websocket_protocol.py
+++ b/sanic/server/protocols/websocket_protocol.py
@@ -1,12 +1,12 @@
from typing import Optional, Sequence, cast
-try: # websockets < 11.0
- from websockets.connection import State
- from websockets.server import ServerConnection as ServerProtocol
-except ImportError: # websockets >= 11.0
+try: # websockets >= 11.0
from websockets.protocol import State # type: ignore
from websockets.server import ServerProtocol # type: ignore
+except ImportError: # websockets < 11.0
+ from websockets.connection import State
+ from websockets.server import ServerConnection as ServerProtocol
from websockets import http11
from websockets.datastructures import Headers as WSHeaders
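Editor's note: a rough way to confirm the reordering removes the warning (hypothetical check, not from the repository; it assumes the warning text quoted in the issue title and guards only that specific message):

```python
import warnings

# Turn just the websockets rename warning into an error, then import the module;
# with the reordered try/except the import should complete without raising.
warnings.filterwarnings(
    "error",
    message="websockets.connection was renamed",
    category=DeprecationWarning,
)
from sanic.server.protocols.websocket_protocol import WebSocketProtocol  # noqa: E402,F401
```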
| {"golden_diff": "diff --git a/sanic/server/protocols/websocket_protocol.py b/sanic/server/protocols/websocket_protocol.py\n--- a/sanic/server/protocols/websocket_protocol.py\n+++ b/sanic/server/protocols/websocket_protocol.py\n@@ -1,12 +1,12 @@\n from typing import Optional, Sequence, cast\n \n \n-try: # websockets < 11.0\n- from websockets.connection import State\n- from websockets.server import ServerConnection as ServerProtocol\n-except ImportError: # websockets >= 11.0\n+try: # websockets >= 11.0\n from websockets.protocol import State # type: ignore\n from websockets.server import ServerProtocol # type: ignore\n+except ImportError: # websockets < 11.0\n+ from websockets.connection import State\n+ from websockets.server import ServerConnection as ServerProtocol\n \n from websockets import http11\n from websockets.datastructures import Headers as WSHeaders\n", "issue": "(websockets 12.0) DeprecationWarning: websockets.connection was renamed to websockets.protocol and Connection was renamed to Protocol\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Describe the bug\n\nThe DeprecationWarning is thrown here:\r\n\r\nhttps://github.com/sanic-org/sanic/blob/82bf46bea3b55e6b1371107cccda280add8e70eb/sanic/server/protocols/websocket_protocol.py#L4-L9\r\n\r\nWith websockets 12 the try block would run successfully with the warning while the catch block does not have chance to be run.\n\n### Code snippet\n\n_No response_\n\n### Expected Behavior\n\nThe catch block is being run instead.\n\n### How do you run Sanic?\n\nSanic CLI\n\n### Operating System\n\nMacOS\n\n### Sanic Version\n\nSanic 23.6.0; Routing 23.6.0\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "from typing import Optional, Sequence, cast\n\n\ntry: # websockets < 11.0\n from websockets.connection import State\n from websockets.server import ServerConnection as ServerProtocol\nexcept ImportError: # websockets >= 11.0\n from websockets.protocol import State # type: ignore\n from websockets.server import ServerProtocol # type: ignore\n\nfrom websockets import http11\nfrom websockets.datastructures import Headers as WSHeaders\nfrom websockets.typing import Subprotocol\n\nfrom sanic.exceptions import SanicException\nfrom sanic.log import logger\nfrom sanic.request import Request\nfrom sanic.server import HttpProtocol\n\nfrom ..websockets.impl import WebsocketImplProtocol\n\n\nOPEN = State.OPEN\nCLOSING = State.CLOSING\nCLOSED = State.CLOSED\n\n\nclass WebSocketProtocol(HttpProtocol):\n __slots__ = (\n \"websocket\",\n \"websocket_timeout\",\n \"websocket_max_size\",\n \"websocket_ping_interval\",\n \"websocket_ping_timeout\",\n )\n\n def __init__(\n self,\n *args,\n websocket_timeout: float = 10.0,\n websocket_max_size: Optional[int] = None,\n websocket_ping_interval: Optional[float] = 20.0,\n websocket_ping_timeout: Optional[float] = 20.0,\n **kwargs,\n ):\n super().__init__(*args, **kwargs)\n self.websocket: Optional[WebsocketImplProtocol] = None\n self.websocket_timeout = websocket_timeout\n self.websocket_max_size = websocket_max_size\n self.websocket_ping_interval = websocket_ping_interval\n self.websocket_ping_timeout = websocket_ping_timeout\n\n def connection_lost(self, exc):\n if self.websocket is not None:\n self.websocket.connection_lost(exc)\n super().connection_lost(exc)\n\n def data_received(self, data):\n if self.websocket is not None:\n self.websocket.data_received(data)\n else:\n # Pass it to HttpProtocol handler first\n # That will (hopefully) 
upgrade it to a websocket.\n super().data_received(data)\n\n def eof_received(self) -> Optional[bool]:\n if self.websocket is not None:\n return self.websocket.eof_received()\n else:\n return False\n\n def close(self, timeout: Optional[float] = None):\n # Called by HttpProtocol at the end of connection_task\n # If we've upgraded to websocket, we do our own closing\n if self.websocket is not None:\n # Note, we don't want to use websocket.close()\n # That is used for user's application code to send a\n # websocket close packet. This is different.\n self.websocket.end_connection(1001)\n else:\n super().close()\n\n def close_if_idle(self):\n # Called by Sanic Server when shutting down\n # If we've upgraded to websocket, shut it down\n if self.websocket is not None:\n if self.websocket.ws_proto.state in (CLOSING, CLOSED):\n return True\n elif self.websocket.loop is not None:\n self.websocket.loop.create_task(self.websocket.close(1001))\n else:\n self.websocket.end_connection(1001)\n else:\n return super().close_if_idle()\n\n @staticmethod\n def sanic_request_to_ws_request(request: Request):\n return http11.Request(\n path=request.path,\n headers=WSHeaders(request.headers),\n )\n\n async def websocket_handshake(\n self, request, subprotocols: Optional[Sequence[str]] = None\n ):\n # let the websockets package do the handshake with the client\n try:\n if subprotocols is not None:\n # subprotocols can be a set or frozenset,\n # but ServerProtocol needs a list\n subprotocols = cast(\n Optional[Sequence[Subprotocol]],\n list(\n [\n Subprotocol(subprotocol)\n for subprotocol in subprotocols\n ]\n ),\n )\n ws_proto = ServerProtocol(\n max_size=self.websocket_max_size,\n subprotocols=subprotocols,\n state=OPEN,\n logger=logger,\n )\n resp = ws_proto.accept(self.sanic_request_to_ws_request(request))\n except Exception:\n msg = (\n \"Failed to open a WebSocket connection.\\n\"\n \"See server log for more information.\\n\"\n )\n raise SanicException(msg, status_code=500)\n if 100 <= resp.status_code <= 299:\n first_line = (\n f\"HTTP/1.1 {resp.status_code} {resp.reason_phrase}\\r\\n\"\n ).encode()\n rbody = bytearray(first_line)\n rbody += (\n \"\".join([f\"{k}: {v}\\r\\n\" for k, v in resp.headers.items()])\n ).encode()\n rbody += b\"\\r\\n\"\n if resp.body is not None:\n rbody += resp.body\n rbody += b\"\\r\\n\\r\\n\"\n await super().send(rbody)\n else:\n raise SanicException(resp.body, resp.status_code)\n self.websocket = WebsocketImplProtocol(\n ws_proto,\n ping_interval=self.websocket_ping_interval,\n ping_timeout=self.websocket_ping_timeout,\n close_timeout=self.websocket_timeout,\n )\n loop = (\n request.transport.loop\n if hasattr(request, \"transport\")\n and hasattr(request.transport, \"loop\")\n else None\n )\n await self.websocket.connection_made(self, loop=loop)\n return self.websocket\n", "path": "sanic/server/protocols/websocket_protocol.py"}], "after_files": [{"content": "from typing import Optional, Sequence, cast\n\n\ntry: # websockets >= 11.0\n from websockets.protocol import State # type: ignore\n from websockets.server import ServerProtocol # type: ignore\nexcept ImportError: # websockets < 11.0\n from websockets.connection import State\n from websockets.server import ServerConnection as ServerProtocol\n\nfrom websockets import http11\nfrom websockets.datastructures import Headers as WSHeaders\nfrom websockets.typing import Subprotocol\n\nfrom sanic.exceptions import SanicException\nfrom sanic.log import logger\nfrom sanic.request import Request\nfrom sanic.server import 
HttpProtocol\n\nfrom ..websockets.impl import WebsocketImplProtocol\n\n\nOPEN = State.OPEN\nCLOSING = State.CLOSING\nCLOSED = State.CLOSED\n\n\nclass WebSocketProtocol(HttpProtocol):\n __slots__ = (\n \"websocket\",\n \"websocket_timeout\",\n \"websocket_max_size\",\n \"websocket_ping_interval\",\n \"websocket_ping_timeout\",\n )\n\n def __init__(\n self,\n *args,\n websocket_timeout: float = 10.0,\n websocket_max_size: Optional[int] = None,\n websocket_ping_interval: Optional[float] = 20.0,\n websocket_ping_timeout: Optional[float] = 20.0,\n **kwargs,\n ):\n super().__init__(*args, **kwargs)\n self.websocket: Optional[WebsocketImplProtocol] = None\n self.websocket_timeout = websocket_timeout\n self.websocket_max_size = websocket_max_size\n self.websocket_ping_interval = websocket_ping_interval\n self.websocket_ping_timeout = websocket_ping_timeout\n\n def connection_lost(self, exc):\n if self.websocket is not None:\n self.websocket.connection_lost(exc)\n super().connection_lost(exc)\n\n def data_received(self, data):\n if self.websocket is not None:\n self.websocket.data_received(data)\n else:\n # Pass it to HttpProtocol handler first\n # That will (hopefully) upgrade it to a websocket.\n super().data_received(data)\n\n def eof_received(self) -> Optional[bool]:\n if self.websocket is not None:\n return self.websocket.eof_received()\n else:\n return False\n\n def close(self, timeout: Optional[float] = None):\n # Called by HttpProtocol at the end of connection_task\n # If we've upgraded to websocket, we do our own closing\n if self.websocket is not None:\n # Note, we don't want to use websocket.close()\n # That is used for user's application code to send a\n # websocket close packet. This is different.\n self.websocket.end_connection(1001)\n else:\n super().close()\n\n def close_if_idle(self):\n # Called by Sanic Server when shutting down\n # If we've upgraded to websocket, shut it down\n if self.websocket is not None:\n if self.websocket.ws_proto.state in (CLOSING, CLOSED):\n return True\n elif self.websocket.loop is not None:\n self.websocket.loop.create_task(self.websocket.close(1001))\n else:\n self.websocket.end_connection(1001)\n else:\n return super().close_if_idle()\n\n @staticmethod\n def sanic_request_to_ws_request(request: Request):\n return http11.Request(\n path=request.path,\n headers=WSHeaders(request.headers),\n )\n\n async def websocket_handshake(\n self, request, subprotocols: Optional[Sequence[str]] = None\n ):\n # let the websockets package do the handshake with the client\n try:\n if subprotocols is not None:\n # subprotocols can be a set or frozenset,\n # but ServerProtocol needs a list\n subprotocols = cast(\n Optional[Sequence[Subprotocol]],\n list(\n [\n Subprotocol(subprotocol)\n for subprotocol in subprotocols\n ]\n ),\n )\n ws_proto = ServerProtocol(\n max_size=self.websocket_max_size,\n subprotocols=subprotocols,\n state=OPEN,\n logger=logger,\n )\n resp = ws_proto.accept(self.sanic_request_to_ws_request(request))\n except Exception:\n msg = (\n \"Failed to open a WebSocket connection.\\n\"\n \"See server log for more information.\\n\"\n )\n raise SanicException(msg, status_code=500)\n if 100 <= resp.status_code <= 299:\n first_line = (\n f\"HTTP/1.1 {resp.status_code} {resp.reason_phrase}\\r\\n\"\n ).encode()\n rbody = bytearray(first_line)\n rbody += (\n \"\".join([f\"{k}: {v}\\r\\n\" for k, v in resp.headers.items()])\n ).encode()\n rbody += b\"\\r\\n\"\n if resp.body is not None:\n rbody += resp.body\n rbody += b\"\\r\\n\\r\\n\"\n await super().send(rbody)\n 
else:\n raise SanicException(resp.body, resp.status_code)\n self.websocket = WebsocketImplProtocol(\n ws_proto,\n ping_interval=self.websocket_ping_interval,\n ping_timeout=self.websocket_ping_timeout,\n close_timeout=self.websocket_timeout,\n )\n loop = (\n request.transport.loop\n if hasattr(request, \"transport\")\n and hasattr(request.transport, \"loop\")\n else None\n )\n await self.websocket.connection_made(self, loop=loop)\n return self.websocket\n", "path": "sanic/server/protocols/websocket_protocol.py"}]} | 2,029 | 218 |