problem_id (string, 18-22 chars) | source (1 class) | task_type (1 class) | in_source_id (string, 13-58 chars) | prompt (string, 1.1k-25.4k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 582-39.1k chars) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---
gh_patches_debug_25581 | rasdani/github-patches | git_diff | benoitc__gunicorn-1578 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bad Request when sending utf-8 encoded http path under python3
This is the sent data:
```
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', 8080))
print(s.send('GET /à%20k HTTP/1.0\r\n\r\n'.encode('utf-8')))
print(s.recv(100000).decode('ascii'))
```
This is the response
```
HTTP/1.1 400 Bad Request
Connection: close
Content-Type: text/html
Content-Length: 181
<html>
<head>
<title>Bad Request</title>
</head>
<body>
<h1><p>Bad Request</p></h1>
Invalid HTTP Version 'Invalid HTTP Version: '%20k HTTP/1.0''
</body>
</html>
```
This is because the request line is first decoded as latin1 using _compat:bytes_to_str, which causes the "à" to be returned as "\xc3\xa0". The request line is then split using line.split(None, 2), which treats the \xa0 (non-breaking space) as whitespace and strips it, thus rendering the request line invalid.
A first attempt would be to use line.split(' ', 2) but then the split will no longer eat up all consecutive whitespaces and may introduce other bugs.
I'm not sure what would be the best solution here.
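For illustration (a minimal sketch, not part of the original report), the behaviour is easy to reproduce in a plain Python 3 shell; it also shows that splitting the raw bytes first keeps the request line intact, which is the approach the patch below takes:

```python
# Sketch of the failure mode described above (assumes Python 3).
raw = 'GET /à%20k HTTP/1.0'.encode('utf-8')   # b'GET /\xc3\xa0%20k HTTP/1.0'

as_str = raw.decode('latin-1')                # 'à' becomes the two characters '\xc3' and '\xa0'
print(as_str.split(None, 2))                  # str.split treats '\xa0' as whitespace, so the last
                                              # element becomes '%20k HTTP/1.0' -> invalid version
print(raw.split(None, 2))                     # bytes.split only splits on ASCII whitespace:
                                              # [b'GET', b'/\xc3\xa0%20k', b'HTTP/1.0']
```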
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gunicorn/http/message.py`
Content:
```
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 import re
7 import socket
8 from errno import ENOTCONN
9
10 from gunicorn._compat import bytes_to_str
11 from gunicorn.http.unreader import SocketUnreader
12 from gunicorn.http.body import ChunkedReader, LengthReader, EOFReader, Body
13 from gunicorn.http.errors import (InvalidHeader, InvalidHeaderName, NoMoreData,
14 InvalidRequestLine, InvalidRequestMethod, InvalidHTTPVersion,
15 LimitRequestLine, LimitRequestHeaders)
16 from gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest
17 from gunicorn.six import BytesIO
18 from gunicorn._compat import urlsplit
19
20 MAX_REQUEST_LINE = 8190
21 MAX_HEADERS = 32768
22 DEFAULT_MAX_HEADERFIELD_SIZE = 8190
23
24 HEADER_RE = re.compile("[\x00-\x1F\x7F()<>@,;:\[\]={} \t\\\\\"]")
25 METH_RE = re.compile(r"[A-Z0-9$-_.]{3,20}")
26 VERSION_RE = re.compile(r"HTTP/(\d+)\.(\d+)")
27
28
29 class Message(object):
30 def __init__(self, cfg, unreader):
31 self.cfg = cfg
32 self.unreader = unreader
33 self.version = None
34 self.headers = []
35 self.trailers = []
36 self.body = None
37
38 # set headers limits
39 self.limit_request_fields = cfg.limit_request_fields
40 if (self.limit_request_fields <= 0
41 or self.limit_request_fields > MAX_HEADERS):
42 self.limit_request_fields = MAX_HEADERS
43 self.limit_request_field_size = cfg.limit_request_field_size
44 if self.limit_request_field_size < 0:
45 self.limit_request_field_size = DEFAULT_MAX_HEADERFIELD_SIZE
46
47 # set max header buffer size
48 max_header_field_size = self.limit_request_field_size or DEFAULT_MAX_HEADERFIELD_SIZE
49 self.max_buffer_headers = self.limit_request_fields * \
50 (max_header_field_size + 2) + 4
51
52 unused = self.parse(self.unreader)
53 self.unreader.unread(unused)
54 self.set_body_reader()
55
56 def parse(self, unreader):
57 raise NotImplementedError()
58
59 def parse_headers(self, data):
60 headers = []
61
62 # Split lines on \r\n keeping the \r\n on each line
63 lines = [bytes_to_str(line) + "\r\n" for line in data.split(b"\r\n")]
64
65 # Parse headers into key/value pairs paying attention
66 # to continuation lines.
67 while lines:
68 if len(headers) >= self.limit_request_fields:
69 raise LimitRequestHeaders("limit request headers fields")
70
71 # Parse initial header name : value pair.
72 curr = lines.pop(0)
73 header_length = len(curr)
74 if curr.find(":") < 0:
75 raise InvalidHeader(curr.strip())
76 name, value = curr.split(":", 1)
77 name = name.rstrip(" \t").upper()
78 if HEADER_RE.search(name):
79 raise InvalidHeaderName(name)
80
81 name, value = name.strip(), [value.lstrip()]
82
83 # Consume value continuation lines
84 while lines and lines[0].startswith((" ", "\t")):
85 curr = lines.pop(0)
86 header_length += len(curr)
87 if header_length > self.limit_request_field_size > 0:
88 raise LimitRequestHeaders("limit request headers "
89 + "fields size")
90 value.append(curr)
91 value = ''.join(value).rstrip()
92
93 if header_length > self.limit_request_field_size > 0:
94 raise LimitRequestHeaders("limit request headers fields size")
95 headers.append((name, value))
96 return headers
97
98 def set_body_reader(self):
99 chunked = False
100 content_length = None
101 for (name, value) in self.headers:
102 if name == "CONTENT-LENGTH":
103 content_length = value
104 elif name == "TRANSFER-ENCODING":
105 chunked = value.lower() == "chunked"
106 elif name == "SEC-WEBSOCKET-KEY1":
107 content_length = 8
108
109 if chunked:
110 self.body = Body(ChunkedReader(self, self.unreader))
111 elif content_length is not None:
112 try:
113 content_length = int(content_length)
114 except ValueError:
115 raise InvalidHeader("CONTENT-LENGTH", req=self)
116
117 if content_length < 0:
118 raise InvalidHeader("CONTENT-LENGTH", req=self)
119
120 self.body = Body(LengthReader(self.unreader, content_length))
121 else:
122 self.body = Body(EOFReader(self.unreader))
123
124 def should_close(self):
125 for (h, v) in self.headers:
126 if h == "CONNECTION":
127 v = v.lower().strip()
128 if v == "close":
129 return True
130 elif v == "keep-alive":
131 return False
132 break
133 return self.version <= (1, 0)
134
135
136 class Request(Message):
137 def __init__(self, cfg, unreader, req_number=1):
138 self.method = None
139 self.uri = None
140 self.path = None
141 self.query = None
142 self.fragment = None
143
144 # get max request line size
145 self.limit_request_line = cfg.limit_request_line
146 if (self.limit_request_line < 0
147 or self.limit_request_line >= MAX_REQUEST_LINE):
148 self.limit_request_line = MAX_REQUEST_LINE
149
150 self.req_number = req_number
151 self.proxy_protocol_info = None
152 super(Request, self).__init__(cfg, unreader)
153
154 def get_data(self, unreader, buf, stop=False):
155 data = unreader.read()
156 if not data:
157 if stop:
158 raise StopIteration()
159 raise NoMoreData(buf.getvalue())
160 buf.write(data)
161
162 def parse(self, unreader):
163 buf = BytesIO()
164 self.get_data(unreader, buf, stop=True)
165
166 # get request line
167 line, rbuf = self.read_line(unreader, buf, self.limit_request_line)
168
169 # proxy protocol
170 if self.proxy_protocol(bytes_to_str(line)):
171 # get next request line
172 buf = BytesIO()
173 buf.write(rbuf)
174 line, rbuf = self.read_line(unreader, buf, self.limit_request_line)
175
176 self.parse_request_line(bytes_to_str(line))
177 buf = BytesIO()
178 buf.write(rbuf)
179
180 # Headers
181 data = buf.getvalue()
182 idx = data.find(b"\r\n\r\n")
183
184 done = data[:2] == b"\r\n"
185 while True:
186 idx = data.find(b"\r\n\r\n")
187 done = data[:2] == b"\r\n"
188
189 if idx < 0 and not done:
190 self.get_data(unreader, buf)
191 data = buf.getvalue()
192 if len(data) > self.max_buffer_headers:
193 raise LimitRequestHeaders("max buffer headers")
194 else:
195 break
196
197 if done:
198 self.unreader.unread(data[2:])
199 return b""
200
201 self.headers = self.parse_headers(data[:idx])
202
203 ret = data[idx + 4:]
204 buf = BytesIO()
205 return ret
206
207 def read_line(self, unreader, buf, limit=0):
208 data = buf.getvalue()
209
210 while True:
211 idx = data.find(b"\r\n")
212 if idx >= 0:
213 # check if the request line is too large
214 if idx > limit > 0:
215 raise LimitRequestLine(idx, limit)
216 break
217 elif len(data) - 2 > limit > 0:
218 raise LimitRequestLine(len(data), limit)
219 self.get_data(unreader, buf)
220 data = buf.getvalue()
221
222 return (data[:idx], # request line,
223 data[idx + 2:]) # residue in the buffer, skip \r\n
224
225 def proxy_protocol(self, line):
226 """\
227 Detect, check and parse proxy protocol.
228
229 :raises: ForbiddenProxyRequest, InvalidProxyLine.
230 :return: True for proxy protocol line else False
231 """
232 if not self.cfg.proxy_protocol:
233 return False
234
235 if self.req_number != 1:
236 return False
237
238 if not line.startswith("PROXY"):
239 return False
240
241 self.proxy_protocol_access_check()
242 self.parse_proxy_protocol(line)
243
244 return True
245
246 def proxy_protocol_access_check(self):
247 # check in allow list
248 if isinstance(self.unreader, SocketUnreader):
249 try:
250 remote_host = self.unreader.sock.getpeername()[0]
251 except socket.error as e:
252 if e.args[0] == ENOTCONN:
253 raise ForbiddenProxyRequest("UNKNOW")
254 raise
255 if ("*" not in self.cfg.proxy_allow_ips and
256 remote_host not in self.cfg.proxy_allow_ips):
257 raise ForbiddenProxyRequest(remote_host)
258
259 def parse_proxy_protocol(self, line):
260 bits = line.split()
261
262 if len(bits) != 6:
263 raise InvalidProxyLine(line)
264
265 # Extract data
266 proto = bits[1]
267 s_addr = bits[2]
268 d_addr = bits[3]
269
270 # Validation
271 if proto not in ["TCP4", "TCP6"]:
272 raise InvalidProxyLine("protocol '%s' not supported" % proto)
273 if proto == "TCP4":
274 try:
275 socket.inet_pton(socket.AF_INET, s_addr)
276 socket.inet_pton(socket.AF_INET, d_addr)
277 except socket.error:
278 raise InvalidProxyLine(line)
279 elif proto == "TCP6":
280 try:
281 socket.inet_pton(socket.AF_INET6, s_addr)
282 socket.inet_pton(socket.AF_INET6, d_addr)
283 except socket.error:
284 raise InvalidProxyLine(line)
285
286 try:
287 s_port = int(bits[4])
288 d_port = int(bits[5])
289 except ValueError:
290 raise InvalidProxyLine("invalid port %s" % line)
291
292 if not ((0 <= s_port <= 65535) and (0 <= d_port <= 65535)):
293 raise InvalidProxyLine("invalid port %s" % line)
294
295 # Set data
296 self.proxy_protocol_info = {
297 "proxy_protocol": proto,
298 "client_addr": s_addr,
299 "client_port": s_port,
300 "proxy_addr": d_addr,
301 "proxy_port": d_port
302 }
303
304 def parse_request_line(self, line):
305 bits = line.split(None, 2)
306 if len(bits) != 3:
307 raise InvalidRequestLine(line)
308
309 # Method
310 if not METH_RE.match(bits[0]):
311 raise InvalidRequestMethod(bits[0])
312 self.method = bits[0].upper()
313
314 # URI
315 # When the path starts with //, urlsplit considers it as a
316 # relative uri while the RDF says it shouldnt
317 # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
318 # considers it as an absolute url.
319 # fix issue #297
320 if bits[1].startswith("//"):
321 self.uri = bits[1][1:]
322 else:
323 self.uri = bits[1]
324
325 try:
326 parts = urlsplit(self.uri)
327 except ValueError:
328 raise InvalidRequestLine(line)
329 self.path = parts.path or ""
330 self.query = parts.query or ""
331 self.fragment = parts.fragment or ""
332
333 # Version
334 match = VERSION_RE.match(bits[2])
335 if match is None:
336 raise InvalidHTTPVersion(bits[2])
337 self.version = (int(match.group(1)), int(match.group(2)))
338
339 def set_body_reader(self):
340 super(Request, self).set_body_reader()
341 if isinstance(self.body.reader, EOFReader):
342 self.body = Body(LengthReader(self.unreader, 0))
343
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gunicorn/http/message.py b/gunicorn/http/message.py
--- a/gunicorn/http/message.py
+++ b/gunicorn/http/message.py
@@ -173,7 +173,7 @@
buf.write(rbuf)
line, rbuf = self.read_line(unreader, buf, self.limit_request_line)
- self.parse_request_line(bytes_to_str(line))
+ self.parse_request_line(line)
buf = BytesIO()
buf.write(rbuf)
@@ -301,10 +301,10 @@
"proxy_port": d_port
}
- def parse_request_line(self, line):
- bits = line.split(None, 2)
+ def parse_request_line(self, line_bytes):
+ bits = [bytes_to_str(bit) for bit in line_bytes.split(None, 2)]
if len(bits) != 3:
- raise InvalidRequestLine(line)
+ raise InvalidRequestLine(bytes_to_str(line_bytes))
# Method
if not METH_RE.match(bits[0]):
@@ -325,7 +325,7 @@
try:
parts = urlsplit(self.uri)
except ValueError:
- raise InvalidRequestLine(line)
+ raise InvalidRequestLine(bytes_to_str(line_bytes))
self.path = parts.path or ""
self.query = parts.query or ""
self.fragment = parts.fragment or ""
| {"golden_diff": "diff --git a/gunicorn/http/message.py b/gunicorn/http/message.py\n--- a/gunicorn/http/message.py\n+++ b/gunicorn/http/message.py\n@@ -173,7 +173,7 @@\n buf.write(rbuf)\n line, rbuf = self.read_line(unreader, buf, self.limit_request_line)\n \n- self.parse_request_line(bytes_to_str(line))\n+ self.parse_request_line(line)\n buf = BytesIO()\n buf.write(rbuf)\n \n@@ -301,10 +301,10 @@\n \"proxy_port\": d_port\n }\n \n- def parse_request_line(self, line):\n- bits = line.split(None, 2)\n+ def parse_request_line(self, line_bytes):\n+ bits = [bytes_to_str(bit) for bit in line_bytes.split(None, 2)]\n if len(bits) != 3:\n- raise InvalidRequestLine(line)\n+ raise InvalidRequestLine(bytes_to_str(line_bytes))\n \n # Method\n if not METH_RE.match(bits[0]):\n@@ -325,7 +325,7 @@\n try:\n parts = urlsplit(self.uri)\n except ValueError:\n- raise InvalidRequestLine(line)\n+ raise InvalidRequestLine(bytes_to_str(line_bytes))\n self.path = parts.path or \"\"\n self.query = parts.query or \"\"\n self.fragment = parts.fragment or \"\"\n", "issue": "Bad Request when sending utf-8 encoded http path under python3\nThis is the sent data:\r\n```\r\nimport socket\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ns.connect(('127.0.0.1', 8080))\r\nprint(s.send('GET /\u00e0%20k HTTP/1.0\\r\\n\\r\\n'.encode('utf-8')))\r\nprint(s.recv(100000).decode('ascii'))\r\n```\r\n\r\nThis is the response\r\n```\r\nHTTP/1.1 400 Bad Request\r\nConnection: close\r\nContent-Type: text/html\r\nContent-Length: 181\r\n\r\n<html>\r\n <head>\r\n <title>Bad Request</title>\r\n </head>\r\n <body>\r\n <h1><p>Bad Request</p></h1>\r\n Invalid HTTP Version 'Invalid HTTP Version: '%20k HTTP/1.0''\r\n </body>\r\n</html>\r\n```\r\n\r\nThis is because the request line is first decoded as latin1 using the _compat:bytes_to_str this causes the \"\u00e0\" to be returned as \"\\xc3\\xa0\", then the request line is split using line.split(None, 2) which will consider the \\xa0 (non breaking space) as whitespace and strip it, thus rendering the request line invalid.\r\n\r\nA first attempt would be to use line.split(' ', 2) but then the split will no longer eat up all consecutive whitespaces and may introduce other bugs.\r\n\r\nI'm not sure what would be the best solution here.\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport re\nimport socket\nfrom errno import ENOTCONN\n\nfrom gunicorn._compat import bytes_to_str\nfrom gunicorn.http.unreader import SocketUnreader\nfrom gunicorn.http.body import ChunkedReader, LengthReader, EOFReader, Body\nfrom gunicorn.http.errors import (InvalidHeader, InvalidHeaderName, NoMoreData,\n InvalidRequestLine, InvalidRequestMethod, InvalidHTTPVersion,\n LimitRequestLine, LimitRequestHeaders)\nfrom gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest\nfrom gunicorn.six import BytesIO\nfrom gunicorn._compat import urlsplit\n\nMAX_REQUEST_LINE = 8190\nMAX_HEADERS = 32768\nDEFAULT_MAX_HEADERFIELD_SIZE = 8190\n\nHEADER_RE = re.compile(\"[\\x00-\\x1F\\x7F()<>@,;:\\[\\]={} \\t\\\\\\\\\\\"]\")\nMETH_RE = re.compile(r\"[A-Z0-9$-_.]{3,20}\")\nVERSION_RE = re.compile(r\"HTTP/(\\d+)\\.(\\d+)\")\n\n\nclass Message(object):\n def __init__(self, cfg, unreader):\n self.cfg = cfg\n self.unreader = unreader\n self.version = None\n self.headers = []\n self.trailers = []\n self.body = None\n\n # set headers limits\n self.limit_request_fields = cfg.limit_request_fields\n if 
(self.limit_request_fields <= 0\n or self.limit_request_fields > MAX_HEADERS):\n self.limit_request_fields = MAX_HEADERS\n self.limit_request_field_size = cfg.limit_request_field_size\n if self.limit_request_field_size < 0:\n self.limit_request_field_size = DEFAULT_MAX_HEADERFIELD_SIZE\n\n # set max header buffer size\n max_header_field_size = self.limit_request_field_size or DEFAULT_MAX_HEADERFIELD_SIZE\n self.max_buffer_headers = self.limit_request_fields * \\\n (max_header_field_size + 2) + 4\n\n unused = self.parse(self.unreader)\n self.unreader.unread(unused)\n self.set_body_reader()\n\n def parse(self, unreader):\n raise NotImplementedError()\n\n def parse_headers(self, data):\n headers = []\n\n # Split lines on \\r\\n keeping the \\r\\n on each line\n lines = [bytes_to_str(line) + \"\\r\\n\" for line in data.split(b\"\\r\\n\")]\n\n # Parse headers into key/value pairs paying attention\n # to continuation lines.\n while lines:\n if len(headers) >= self.limit_request_fields:\n raise LimitRequestHeaders(\"limit request headers fields\")\n\n # Parse initial header name : value pair.\n curr = lines.pop(0)\n header_length = len(curr)\n if curr.find(\":\") < 0:\n raise InvalidHeader(curr.strip())\n name, value = curr.split(\":\", 1)\n name = name.rstrip(\" \\t\").upper()\n if HEADER_RE.search(name):\n raise InvalidHeaderName(name)\n\n name, value = name.strip(), [value.lstrip()]\n\n # Consume value continuation lines\n while lines and lines[0].startswith((\" \", \"\\t\")):\n curr = lines.pop(0)\n header_length += len(curr)\n if header_length > self.limit_request_field_size > 0:\n raise LimitRequestHeaders(\"limit request headers \"\n + \"fields size\")\n value.append(curr)\n value = ''.join(value).rstrip()\n\n if header_length > self.limit_request_field_size > 0:\n raise LimitRequestHeaders(\"limit request headers fields size\")\n headers.append((name, value))\n return headers\n\n def set_body_reader(self):\n chunked = False\n content_length = None\n for (name, value) in self.headers:\n if name == \"CONTENT-LENGTH\":\n content_length = value\n elif name == \"TRANSFER-ENCODING\":\n chunked = value.lower() == \"chunked\"\n elif name == \"SEC-WEBSOCKET-KEY1\":\n content_length = 8\n\n if chunked:\n self.body = Body(ChunkedReader(self, self.unreader))\n elif content_length is not None:\n try:\n content_length = int(content_length)\n except ValueError:\n raise InvalidHeader(\"CONTENT-LENGTH\", req=self)\n\n if content_length < 0:\n raise InvalidHeader(\"CONTENT-LENGTH\", req=self)\n\n self.body = Body(LengthReader(self.unreader, content_length))\n else:\n self.body = Body(EOFReader(self.unreader))\n\n def should_close(self):\n for (h, v) in self.headers:\n if h == \"CONNECTION\":\n v = v.lower().strip()\n if v == \"close\":\n return True\n elif v == \"keep-alive\":\n return False\n break\n return self.version <= (1, 0)\n\n\nclass Request(Message):\n def __init__(self, cfg, unreader, req_number=1):\n self.method = None\n self.uri = None\n self.path = None\n self.query = None\n self.fragment = None\n\n # get max request line size\n self.limit_request_line = cfg.limit_request_line\n if (self.limit_request_line < 0\n or self.limit_request_line >= MAX_REQUEST_LINE):\n self.limit_request_line = MAX_REQUEST_LINE\n\n self.req_number = req_number\n self.proxy_protocol_info = None\n super(Request, self).__init__(cfg, unreader)\n\n def get_data(self, unreader, buf, stop=False):\n data = unreader.read()\n if not data:\n if stop:\n raise StopIteration()\n raise NoMoreData(buf.getvalue())\n 
buf.write(data)\n\n def parse(self, unreader):\n buf = BytesIO()\n self.get_data(unreader, buf, stop=True)\n\n # get request line\n line, rbuf = self.read_line(unreader, buf, self.limit_request_line)\n\n # proxy protocol\n if self.proxy_protocol(bytes_to_str(line)):\n # get next request line\n buf = BytesIO()\n buf.write(rbuf)\n line, rbuf = self.read_line(unreader, buf, self.limit_request_line)\n\n self.parse_request_line(bytes_to_str(line))\n buf = BytesIO()\n buf.write(rbuf)\n\n # Headers\n data = buf.getvalue()\n idx = data.find(b\"\\r\\n\\r\\n\")\n\n done = data[:2] == b\"\\r\\n\"\n while True:\n idx = data.find(b\"\\r\\n\\r\\n\")\n done = data[:2] == b\"\\r\\n\"\n\n if idx < 0 and not done:\n self.get_data(unreader, buf)\n data = buf.getvalue()\n if len(data) > self.max_buffer_headers:\n raise LimitRequestHeaders(\"max buffer headers\")\n else:\n break\n\n if done:\n self.unreader.unread(data[2:])\n return b\"\"\n\n self.headers = self.parse_headers(data[:idx])\n\n ret = data[idx + 4:]\n buf = BytesIO()\n return ret\n\n def read_line(self, unreader, buf, limit=0):\n data = buf.getvalue()\n\n while True:\n idx = data.find(b\"\\r\\n\")\n if idx >= 0:\n # check if the request line is too large\n if idx > limit > 0:\n raise LimitRequestLine(idx, limit)\n break\n elif len(data) - 2 > limit > 0:\n raise LimitRequestLine(len(data), limit)\n self.get_data(unreader, buf)\n data = buf.getvalue()\n\n return (data[:idx], # request line,\n data[idx + 2:]) # residue in the buffer, skip \\r\\n\n\n def proxy_protocol(self, line):\n \"\"\"\\\n Detect, check and parse proxy protocol.\n\n :raises: ForbiddenProxyRequest, InvalidProxyLine.\n :return: True for proxy protocol line else False\n \"\"\"\n if not self.cfg.proxy_protocol:\n return False\n\n if self.req_number != 1:\n return False\n\n if not line.startswith(\"PROXY\"):\n return False\n\n self.proxy_protocol_access_check()\n self.parse_proxy_protocol(line)\n\n return True\n\n def proxy_protocol_access_check(self):\n # check in allow list\n if isinstance(self.unreader, SocketUnreader):\n try:\n remote_host = self.unreader.sock.getpeername()[0]\n except socket.error as e:\n if e.args[0] == ENOTCONN:\n raise ForbiddenProxyRequest(\"UNKNOW\")\n raise\n if (\"*\" not in self.cfg.proxy_allow_ips and\n remote_host not in self.cfg.proxy_allow_ips):\n raise ForbiddenProxyRequest(remote_host)\n\n def parse_proxy_protocol(self, line):\n bits = line.split()\n\n if len(bits) != 6:\n raise InvalidProxyLine(line)\n\n # Extract data\n proto = bits[1]\n s_addr = bits[2]\n d_addr = bits[3]\n\n # Validation\n if proto not in [\"TCP4\", \"TCP6\"]:\n raise InvalidProxyLine(\"protocol '%s' not supported\" % proto)\n if proto == \"TCP4\":\n try:\n socket.inet_pton(socket.AF_INET, s_addr)\n socket.inet_pton(socket.AF_INET, d_addr)\n except socket.error:\n raise InvalidProxyLine(line)\n elif proto == \"TCP6\":\n try:\n socket.inet_pton(socket.AF_INET6, s_addr)\n socket.inet_pton(socket.AF_INET6, d_addr)\n except socket.error:\n raise InvalidProxyLine(line)\n\n try:\n s_port = int(bits[4])\n d_port = int(bits[5])\n except ValueError:\n raise InvalidProxyLine(\"invalid port %s\" % line)\n\n if not ((0 <= s_port <= 65535) and (0 <= d_port <= 65535)):\n raise InvalidProxyLine(\"invalid port %s\" % line)\n\n # Set data\n self.proxy_protocol_info = {\n \"proxy_protocol\": proto,\n \"client_addr\": s_addr,\n \"client_port\": s_port,\n \"proxy_addr\": d_addr,\n \"proxy_port\": d_port\n }\n\n def parse_request_line(self, line):\n bits = line.split(None, 2)\n if len(bits) != 3:\n 
raise InvalidRequestLine(line)\n\n # Method\n if not METH_RE.match(bits[0]):\n raise InvalidRequestMethod(bits[0])\n self.method = bits[0].upper()\n\n # URI\n # When the path starts with //, urlsplit considers it as a\n # relative uri while the RDF says it shouldnt\n # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2\n # considers it as an absolute url.\n # fix issue #297\n if bits[1].startswith(\"//\"):\n self.uri = bits[1][1:]\n else:\n self.uri = bits[1]\n\n try:\n parts = urlsplit(self.uri)\n except ValueError:\n raise InvalidRequestLine(line)\n self.path = parts.path or \"\"\n self.query = parts.query or \"\"\n self.fragment = parts.fragment or \"\"\n\n # Version\n match = VERSION_RE.match(bits[2])\n if match is None:\n raise InvalidHTTPVersion(bits[2])\n self.version = (int(match.group(1)), int(match.group(2)))\n\n def set_body_reader(self):\n super(Request, self).set_body_reader()\n if isinstance(self.body.reader, EOFReader):\n self.body = Body(LengthReader(self.unreader, 0))\n", "path": "gunicorn/http/message.py"}], "after_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport re\nimport socket\nfrom errno import ENOTCONN\n\nfrom gunicorn._compat import bytes_to_str\nfrom gunicorn.http.unreader import SocketUnreader\nfrom gunicorn.http.body import ChunkedReader, LengthReader, EOFReader, Body\nfrom gunicorn.http.errors import (InvalidHeader, InvalidHeaderName, NoMoreData,\n InvalidRequestLine, InvalidRequestMethod, InvalidHTTPVersion,\n LimitRequestLine, LimitRequestHeaders)\nfrom gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest\nfrom gunicorn.six import BytesIO\nfrom gunicorn._compat import urlsplit\n\nMAX_REQUEST_LINE = 8190\nMAX_HEADERS = 32768\nDEFAULT_MAX_HEADERFIELD_SIZE = 8190\n\nHEADER_RE = re.compile(\"[\\x00-\\x1F\\x7F()<>@,;:\\[\\]={} \\t\\\\\\\\\\\"]\")\nMETH_RE = re.compile(r\"[A-Z0-9$-_.]{3,20}\")\nVERSION_RE = re.compile(r\"HTTP/(\\d+)\\.(\\d+)\")\n\n\nclass Message(object):\n def __init__(self, cfg, unreader):\n self.cfg = cfg\n self.unreader = unreader\n self.version = None\n self.headers = []\n self.trailers = []\n self.body = None\n\n # set headers limits\n self.limit_request_fields = cfg.limit_request_fields\n if (self.limit_request_fields <= 0\n or self.limit_request_fields > MAX_HEADERS):\n self.limit_request_fields = MAX_HEADERS\n self.limit_request_field_size = cfg.limit_request_field_size\n if self.limit_request_field_size < 0:\n self.limit_request_field_size = DEFAULT_MAX_HEADERFIELD_SIZE\n\n # set max header buffer size\n max_header_field_size = self.limit_request_field_size or DEFAULT_MAX_HEADERFIELD_SIZE\n self.max_buffer_headers = self.limit_request_fields * \\\n (max_header_field_size + 2) + 4\n\n unused = self.parse(self.unreader)\n self.unreader.unread(unused)\n self.set_body_reader()\n\n def parse(self, unreader):\n raise NotImplementedError()\n\n def parse_headers(self, data):\n headers = []\n\n # Split lines on \\r\\n keeping the \\r\\n on each line\n lines = [bytes_to_str(line) + \"\\r\\n\" for line in data.split(b\"\\r\\n\")]\n\n # Parse headers into key/value pairs paying attention\n # to continuation lines.\n while lines:\n if len(headers) >= self.limit_request_fields:\n raise LimitRequestHeaders(\"limit request headers fields\")\n\n # Parse initial header name : value pair.\n curr = lines.pop(0)\n header_length = len(curr)\n if curr.find(\":\") < 0:\n raise InvalidHeader(curr.strip())\n name, value = 
curr.split(\":\", 1)\n name = name.rstrip(\" \\t\").upper()\n if HEADER_RE.search(name):\n raise InvalidHeaderName(name)\n\n name, value = name.strip(), [value.lstrip()]\n\n # Consume value continuation lines\n while lines and lines[0].startswith((\" \", \"\\t\")):\n curr = lines.pop(0)\n header_length += len(curr)\n if header_length > self.limit_request_field_size > 0:\n raise LimitRequestHeaders(\"limit request headers \"\n + \"fields size\")\n value.append(curr)\n value = ''.join(value).rstrip()\n\n if header_length > self.limit_request_field_size > 0:\n raise LimitRequestHeaders(\"limit request headers fields size\")\n headers.append((name, value))\n return headers\n\n def set_body_reader(self):\n chunked = False\n content_length = None\n for (name, value) in self.headers:\n if name == \"CONTENT-LENGTH\":\n content_length = value\n elif name == \"TRANSFER-ENCODING\":\n chunked = value.lower() == \"chunked\"\n elif name == \"SEC-WEBSOCKET-KEY1\":\n content_length = 8\n\n if chunked:\n self.body = Body(ChunkedReader(self, self.unreader))\n elif content_length is not None:\n try:\n content_length = int(content_length)\n except ValueError:\n raise InvalidHeader(\"CONTENT-LENGTH\", req=self)\n\n if content_length < 0:\n raise InvalidHeader(\"CONTENT-LENGTH\", req=self)\n\n self.body = Body(LengthReader(self.unreader, content_length))\n else:\n self.body = Body(EOFReader(self.unreader))\n\n def should_close(self):\n for (h, v) in self.headers:\n if h == \"CONNECTION\":\n v = v.lower().strip()\n if v == \"close\":\n return True\n elif v == \"keep-alive\":\n return False\n break\n return self.version <= (1, 0)\n\n\nclass Request(Message):\n def __init__(self, cfg, unreader, req_number=1):\n self.method = None\n self.uri = None\n self.path = None\n self.query = None\n self.fragment = None\n\n # get max request line size\n self.limit_request_line = cfg.limit_request_line\n if (self.limit_request_line < 0\n or self.limit_request_line >= MAX_REQUEST_LINE):\n self.limit_request_line = MAX_REQUEST_LINE\n\n self.req_number = req_number\n self.proxy_protocol_info = None\n super(Request, self).__init__(cfg, unreader)\n\n def get_data(self, unreader, buf, stop=False):\n data = unreader.read()\n if not data:\n if stop:\n raise StopIteration()\n raise NoMoreData(buf.getvalue())\n buf.write(data)\n\n def parse(self, unreader):\n buf = BytesIO()\n self.get_data(unreader, buf, stop=True)\n\n # get request line\n line, rbuf = self.read_line(unreader, buf, self.limit_request_line)\n\n # proxy protocol\n if self.proxy_protocol(bytes_to_str(line)):\n # get next request line\n buf = BytesIO()\n buf.write(rbuf)\n line, rbuf = self.read_line(unreader, buf, self.limit_request_line)\n\n self.parse_request_line(line)\n buf = BytesIO()\n buf.write(rbuf)\n\n # Headers\n data = buf.getvalue()\n idx = data.find(b\"\\r\\n\\r\\n\")\n\n done = data[:2] == b\"\\r\\n\"\n while True:\n idx = data.find(b\"\\r\\n\\r\\n\")\n done = data[:2] == b\"\\r\\n\"\n\n if idx < 0 and not done:\n self.get_data(unreader, buf)\n data = buf.getvalue()\n if len(data) > self.max_buffer_headers:\n raise LimitRequestHeaders(\"max buffer headers\")\n else:\n break\n\n if done:\n self.unreader.unread(data[2:])\n return b\"\"\n\n self.headers = self.parse_headers(data[:idx])\n\n ret = data[idx + 4:]\n buf = BytesIO()\n return ret\n\n def read_line(self, unreader, buf, limit=0):\n data = buf.getvalue()\n\n while True:\n idx = data.find(b\"\\r\\n\")\n if idx >= 0:\n # check if the request line is too large\n if idx > limit > 0:\n raise 
LimitRequestLine(idx, limit)\n break\n elif len(data) - 2 > limit > 0:\n raise LimitRequestLine(len(data), limit)\n self.get_data(unreader, buf)\n data = buf.getvalue()\n\n return (data[:idx], # request line,\n data[idx + 2:]) # residue in the buffer, skip \\r\\n\n\n def proxy_protocol(self, line):\n \"\"\"\\\n Detect, check and parse proxy protocol.\n\n :raises: ForbiddenProxyRequest, InvalidProxyLine.\n :return: True for proxy protocol line else False\n \"\"\"\n if not self.cfg.proxy_protocol:\n return False\n\n if self.req_number != 1:\n return False\n\n if not line.startswith(\"PROXY\"):\n return False\n\n self.proxy_protocol_access_check()\n self.parse_proxy_protocol(line)\n\n return True\n\n def proxy_protocol_access_check(self):\n # check in allow list\n if isinstance(self.unreader, SocketUnreader):\n try:\n remote_host = self.unreader.sock.getpeername()[0]\n except socket.error as e:\n if e.args[0] == ENOTCONN:\n raise ForbiddenProxyRequest(\"UNKNOW\")\n raise\n if (\"*\" not in self.cfg.proxy_allow_ips and\n remote_host not in self.cfg.proxy_allow_ips):\n raise ForbiddenProxyRequest(remote_host)\n\n def parse_proxy_protocol(self, line):\n bits = line.split()\n\n if len(bits) != 6:\n raise InvalidProxyLine(line)\n\n # Extract data\n proto = bits[1]\n s_addr = bits[2]\n d_addr = bits[3]\n\n # Validation\n if proto not in [\"TCP4\", \"TCP6\"]:\n raise InvalidProxyLine(\"protocol '%s' not supported\" % proto)\n if proto == \"TCP4\":\n try:\n socket.inet_pton(socket.AF_INET, s_addr)\n socket.inet_pton(socket.AF_INET, d_addr)\n except socket.error:\n raise InvalidProxyLine(line)\n elif proto == \"TCP6\":\n try:\n socket.inet_pton(socket.AF_INET6, s_addr)\n socket.inet_pton(socket.AF_INET6, d_addr)\n except socket.error:\n raise InvalidProxyLine(line)\n\n try:\n s_port = int(bits[4])\n d_port = int(bits[5])\n except ValueError:\n raise InvalidProxyLine(\"invalid port %s\" % line)\n\n if not ((0 <= s_port <= 65535) and (0 <= d_port <= 65535)):\n raise InvalidProxyLine(\"invalid port %s\" % line)\n\n # Set data\n self.proxy_protocol_info = {\n \"proxy_protocol\": proto,\n \"client_addr\": s_addr,\n \"client_port\": s_port,\n \"proxy_addr\": d_addr,\n \"proxy_port\": d_port\n }\n\n def parse_request_line(self, line_bytes):\n bits = [bytes_to_str(bit) for bit in line_bytes.split(None, 2)]\n if len(bits) != 3:\n raise InvalidRequestLine(bytes_to_str(line_bytes))\n\n # Method\n if not METH_RE.match(bits[0]):\n raise InvalidRequestMethod(bits[0])\n self.method = bits[0].upper()\n\n # URI\n # When the path starts with //, urlsplit considers it as a\n # relative uri while the RDF says it shouldnt\n # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2\n # considers it as an absolute url.\n # fix issue #297\n if bits[1].startswith(\"//\"):\n self.uri = bits[1][1:]\n else:\n self.uri = bits[1]\n\n try:\n parts = urlsplit(self.uri)\n except ValueError:\n raise InvalidRequestLine(bytes_to_str(line_bytes))\n self.path = parts.path or \"\"\n self.query = parts.query or \"\"\n self.fragment = parts.fragment or \"\"\n\n # Version\n match = VERSION_RE.match(bits[2])\n if match is None:\n raise InvalidHTTPVersion(bits[2])\n self.version = (int(match.group(1)), int(match.group(2)))\n\n def set_body_reader(self):\n super(Request, self).set_body_reader()\n if isinstance(self.body.reader, EOFReader):\n self.body = Body(LengthReader(self.unreader, 0))\n", "path": "gunicorn/http/message.py"}]} | 4,081 | 301 |
gh_patches_debug_31050 | rasdani/github-patches | git_diff | litestar-org__litestar-2204 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: pydantic validations aren't being handled by DTOs
### Description
In the documentation of Litestar we have:
> When a value fails pydantic validation, the result will be a ValidationException with the extra key set to the pydantic validation errors. Thus, this data will be made available for the API consumers by default.
But when combined with DTOs in a POST request, if pydantic validation fails the exception is not handled and the client receives an InternalServerError (500) instead of a BadRequest (400):
```
__pydantic_self__.__pydantic_validator__.validate_python(data, self_instance=__pydantic_self__)
pydantic_core._pydantic_core.ValidationError: 1 validation error for User
name
String should have at most 2 characters [type=string_too_long, input_value='abc', input_type=str]
For further information visit https://errors.pydantic.dev/2.2/v/string_too_long
Status code: 500
```
If the validation isn't handled by pydantic (or at least isn't handled **first** by pydantic), it works; for example, an Enum validation returns a proper 400 status code because `msgspec` catches it first:
```python
from enum import Enum
class NameEnum(str, Enum):
a = "A"
b = "B"
# (replace the User class of the example with this)
class User(BaseModel):
name: NameEnum
```
Output for incorrect Enum:
```
File ".../.venv/lib64/python3.11/site-packages/litestar/serialization/msgspec_hooks.py", line 191, in decode_json
raise SerializationException(str(msgspec_error)) from msgspec_error
litestar.exceptions.base_exceptions.SerializationException: Invalid enum value 'abc' - at `$.name`
...
File ".../.venv/lib64/python3.11/site-packages/litestar/routes/http.py", line 186, in _get_response_data
raise ClientException(str(e)) from e
litestar.exceptions.http_exceptions.ClientException: 400: Invalid enum value 'abc' - at `$.name`
```
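For clarity, here is a sketch (not part of the original report) of the idea that the golden diff further below implements: catch pydantic's `ValidationError` inside the DTO and re-raise it as Litestar's `ValidationException`, so the client gets a 400 with the validation errors attached.

```python
# Sketch only -- mirrors the fix shown in the golden diff further below.
# Assumes the `User` model from the MCVE below; `PatchedUserDTO` is a hypothetical name.
from typing import Any

from pydantic import ValidationError
from litestar.contrib.pydantic import PydanticDTO
from litestar.exceptions import ValidationException


class PatchedUserDTO(PydanticDTO[User]):
    def decode_builtins(self, value: dict[str, Any]) -> Any:
        try:
            return super().decode_builtins(value)
        except ValidationError as ex:
            raise ValidationException(extra=ex.errors()) from ex

    def decode_bytes(self, value: bytes) -> Any:
        try:
            return super().decode_bytes(value)
        except ValidationError as ex:
            raise ValidationException(extra=ex.errors()) from ex
```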
### URL to code causing the issue
_No response_
### MCVE
```python
from litestar import Litestar, post
from litestar.contrib.pydantic import PydanticDTO
from litestar.testing import TestClient
from pydantic import BaseModel, Field
class User(BaseModel):
name: str = Field(max_length=2)
UserDTO = PydanticDTO[User]
@post("/user", dto=UserDTO, sync_to_thread=False)
def create_user(data: User) -> User:
return data
with TestClient(Litestar([create_user], debug=True)) as client:
response = client.post("/user", json={"name": "abc"})
print(response.text)
print(f"Status code: {response.status_code}")
assert response.status_code == 201
```
### Steps to reproduce
```bash
1. Execute the MCVE
```
### Screenshots
_No response_
### Logs
_No response_
### Litestar Version
2.0.0rc1
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
That assumption does not generally hold, especially in any kind of virtual filesystem (e.g. a zipped package), so I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
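(Illustrative sketch, not from the issue; the package name is a placeholder.) ``importlib_resources`` exposes package data as a ``Traversable`` that behaves like a directory even when no real directory exists on disk, which is exactly the kind of path a ``DirectoryPath`` check rejects:

```python
# Package data can look like a directory yet live inside a zip/wheel, so a
# pydantic DirectoryPath validation on it fails even though the files are readable.
from importlib_resources import files  # importlib.resources.files in the stdlib on 3.9+

static_root = files("my_package") / "static"     # "my_package" is a hypothetical package
print(static_root.is_dir())                      # True, even inside a zipped package
print((static_root / "index.html").read_text())  # readable without a real filesystem path
```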
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/contrib/pydantic/pydantic_dto_factory.py`
Content:
```
1 from __future__ import annotations
2
3 from dataclasses import replace
4 from typing import TYPE_CHECKING, Collection, Generic, TypeVar
5
6 from litestar.dto.base_dto import AbstractDTO
7 from litestar.dto.data_structures import DTOFieldDefinition
8 from litestar.dto.field import DTO_FIELD_META_KEY, DTOField
9 from litestar.exceptions import MissingDependencyException
10 from litestar.types.empty import Empty
11
12 if TYPE_CHECKING:
13 from typing import Generator
14
15 from litestar.typing import FieldDefinition
16
17
18 try:
19 import pydantic
20
21 if pydantic.VERSION.startswith("2"):
22 from pydantic_core import PydanticUndefined
23 else: # pragma: no cover
24 from pydantic.fields import Undefined as PydanticUndefined # type: ignore
25 except ImportError as e:
26 raise MissingDependencyException("pydantic") from e
27
28 __all__ = ("PydanticDTO",)
29
30 T = TypeVar("T", bound="pydantic.BaseModel | Collection[pydantic.BaseModel]")
31
32
33 class PydanticDTO(AbstractDTO[T], Generic[T]):
34 """Support for domain modelling with Pydantic."""
35
36 @classmethod
37 def generate_field_definitions(
38 cls, model_type: type[pydantic.BaseModel]
39 ) -> Generator[DTOFieldDefinition, None, None]:
40 model_field_definitions = cls.get_model_type_hints(model_type)
41
42 if pydantic.VERSION.startswith("1"): # pragma: no cover
43 model_fields: dict[str, pydantic.fields.FieldInfo] = {k: model_field.field_info for k, model_field in model_type.__fields__.items()} # type: ignore
44 else:
45 model_fields = dict(model_type.model_fields)
46
47 for field_name, field_info in model_fields.items():
48 field_definition = model_field_definitions[field_name]
49 dto_field = (field_definition.extra or {}).pop(DTO_FIELD_META_KEY, DTOField())
50
51 if field_info.default is not PydanticUndefined:
52 default = field_info.default
53 elif field_definition.is_optional:
54 default = None
55 else:
56 default = Empty
57
58 yield replace(
59 DTOFieldDefinition.from_field_definition(
60 field_definition=field_definition,
61 dto_field=dto_field,
62 model_name=model_type.__name__,
63 default_factory=field_info.default_factory
64 if field_info.default_factory and field_info.default_factory is not PydanticUndefined # type: ignore[comparison-overlap]
65 else Empty,
66 ),
67 default=default,
68 name=field_name,
69 )
70
71 @classmethod
72 def detect_nested_field(cls, field_definition: FieldDefinition) -> bool:
73 return field_definition.is_subclass_of(pydantic.BaseModel)
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/litestar/contrib/pydantic/pydantic_dto_factory.py b/litestar/contrib/pydantic/pydantic_dto_factory.py
--- a/litestar/contrib/pydantic/pydantic_dto_factory.py
+++ b/litestar/contrib/pydantic/pydantic_dto_factory.py
@@ -3,20 +3,23 @@
from dataclasses import replace
from typing import TYPE_CHECKING, Collection, Generic, TypeVar
+from typing_extensions import override
+
from litestar.dto.base_dto import AbstractDTO
from litestar.dto.data_structures import DTOFieldDefinition
from litestar.dto.field import DTO_FIELD_META_KEY, DTOField
-from litestar.exceptions import MissingDependencyException
+from litestar.exceptions import MissingDependencyException, ValidationException
from litestar.types.empty import Empty
if TYPE_CHECKING:
- from typing import Generator
+ from typing import Any, Generator
from litestar.typing import FieldDefinition
try:
import pydantic
+ from pydantic import ValidationError
if pydantic.VERSION.startswith("2"):
from pydantic_core import PydanticUndefined
@@ -33,6 +36,20 @@
class PydanticDTO(AbstractDTO[T], Generic[T]):
"""Support for domain modelling with Pydantic."""
+ @override
+ def decode_builtins(self, value: dict[str, Any]) -> Any:
+ try:
+ return super().decode_builtins(value)
+ except ValidationError as ex:
+ raise ValidationException(extra=ex.errors()) from ex
+
+ @override
+ def decode_bytes(self, value: bytes) -> Any:
+ try:
+ return super().decode_bytes(value)
+ except ValidationError as ex:
+ raise ValidationException(extra=ex.errors()) from ex
+
@classmethod
def generate_field_definitions(
cls, model_type: type[pydantic.BaseModel]
| {"golden_diff": "diff --git a/litestar/contrib/pydantic/pydantic_dto_factory.py b/litestar/contrib/pydantic/pydantic_dto_factory.py\n--- a/litestar/contrib/pydantic/pydantic_dto_factory.py\n+++ b/litestar/contrib/pydantic/pydantic_dto_factory.py\n@@ -3,20 +3,23 @@\n from dataclasses import replace\n from typing import TYPE_CHECKING, Collection, Generic, TypeVar\n \n+from typing_extensions import override\n+\n from litestar.dto.base_dto import AbstractDTO\n from litestar.dto.data_structures import DTOFieldDefinition\n from litestar.dto.field import DTO_FIELD_META_KEY, DTOField\n-from litestar.exceptions import MissingDependencyException\n+from litestar.exceptions import MissingDependencyException, ValidationException\n from litestar.types.empty import Empty\n \n if TYPE_CHECKING:\n- from typing import Generator\n+ from typing import Any, Generator\n \n from litestar.typing import FieldDefinition\n \n \n try:\n import pydantic\n+ from pydantic import ValidationError\n \n if pydantic.VERSION.startswith(\"2\"):\n from pydantic_core import PydanticUndefined\n@@ -33,6 +36,20 @@\n class PydanticDTO(AbstractDTO[T], Generic[T]):\n \"\"\"Support for domain modelling with Pydantic.\"\"\"\n \n+ @override\n+ def decode_builtins(self, value: dict[str, Any]) -> Any:\n+ try:\n+ return super().decode_builtins(value)\n+ except ValidationError as ex:\n+ raise ValidationException(extra=ex.errors()) from ex\n+\n+ @override\n+ def decode_bytes(self, value: bytes) -> Any:\n+ try:\n+ return super().decode_bytes(value)\n+ except ValidationError as ex:\n+ raise ValidationException(extra=ex.errors()) from ex\n+\n @classmethod\n def generate_field_definitions(\n cls, model_type: type[pydantic.BaseModel]\n", "issue": "Bug: pydantic validations aren't being handled by DTOs\n### Description\r\n\r\nIn the documentation of Litestar we have:\r\n\r\n> When a value fails pydantic validation, the result will be a ValidationException with the extra key set to the pydantic validation errors. 
Thus, this data will be made available for the API consumers by default.\r\n\r\nBut when combined with DTO's in a post request, if a pydantic validation fails the exception is not handled and the client receives an InternalServerError (500) instead of a BadRequest (400):\r\n\r\n\r\n```\r\n __pydantic_self__.__pydantic_validator__.validate_python(data, self_instance=__pydantic_self__)\r\npydantic_core._pydantic_core.ValidationError: 1 validation error for User\r\nname\r\n String should have at most 2 characters [type=string_too_long, input_value='abc', input_type=str]\r\n For further information visit https://errors.pydantic.dev/2.2/v/string_too_long\r\n\r\nStatus code: 500\r\n```\r\n\r\nIf the validation isn't handled by pydantic (or at least it isn't handled **first** by pydantic) it works, for example an Enum validation returns a proper 400 status code because `msgspec` catches it first:\r\n\r\n```python\r\nfrom enum import Enum\r\n\r\nclass NameEnum(str, Enum):\r\n a = \"A\"\r\n b = \"B\"\r\n\r\n# (replace the User class of the example with this)\r\nclass User(BaseModel):\r\n name: NameEnum\r\n```\r\n\r\nOutput for incorrect Enum:\r\n\r\n```\r\n File \".../.venv/lib64/python3.11/site-packages/litestar/serialization/msgspec_hooks.py\", line 191, in decode_json\r\n raise SerializationException(str(msgspec_error)) from msgspec_error\r\nlitestar.exceptions.base_exceptions.SerializationException: Invalid enum value 'abc' - at `$.name`\r\n\r\n\r\n...\r\n\r\n File \".../.venv/lib64/python3.11/site-packages/litestar/routes/http.py\", line 186, in _get_response_data\r\n raise ClientException(str(e)) from e\r\nlitestar.exceptions.http_exceptions.ClientException: 400: Invalid enum value 'abc' - at `$.name`\r\n```\r\n\r\n### URL to code causing the issue\r\n\r\n_No response_\r\n\r\n### MCVE\r\n\r\n```python\r\nfrom litestar import Litestar, post\r\nfrom litestar.contrib.pydantic import PydanticDTO\r\nfrom litestar.testing import TestClient\r\nfrom pydantic import BaseModel, Field\r\n\r\n\r\nclass User(BaseModel):\r\n name: str = Field(max_length=2)\r\n\r\n\r\nUserDTO = PydanticDTO[User]\r\n\r\n\r\n@post(\"/user\", dto=UserDTO, sync_to_thread=False)\r\ndef create_user(data: User) -> User:\r\n return data\r\n\r\n\r\nwith TestClient(Litestar([create_user], debug=True)) as client:\r\n response = client.post(\"/user\", json={\"name\": \"abc\"})\r\n print(response.text)\r\n print(f\"Status code: {response.status_code}\")\r\n assert response.status_code == 201\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\n```bash\r\n1. 
Execute the MCVE\r\n```\r\n\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Logs\r\n\r\n_No response_\r\n\r\n### Litestar Version\r\n\r\n2.0.0rc1\r\n\r\n### Platform\r\n\r\n- [X] Linux\r\n- [ ] Mac\r\n- [ ] Windows\r\n- [ ] Other (Please specify in the description above)\r\n\r\n<!-- POLAR PLEDGE BADGE START -->\r\n---\r\n\r\n## Funding\r\n* If you would like to see an issue prioritized, make a pledge towards it!\r\n* We receive the pledge once the issue is completed & verified\r\n\r\n<a href=\"https://polar.sh/litestar-org/litestar/issues/2190\">\r\n<picture>\r\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/2190/pledge.svg?darkmode=1\">\r\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/2190/pledge.svg\">\r\n</picture>\r\n</a>\r\n<!-- POLAR PLEDGE BADGE END -->\r\n\nStaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom dataclasses import replace\nfrom typing import TYPE_CHECKING, Collection, Generic, TypeVar\n\nfrom litestar.dto.base_dto import AbstractDTO\nfrom litestar.dto.data_structures import DTOFieldDefinition\nfrom litestar.dto.field import DTO_FIELD_META_KEY, DTOField\nfrom litestar.exceptions import MissingDependencyException\nfrom litestar.types.empty import Empty\n\nif TYPE_CHECKING:\n from typing import Generator\n\n from litestar.typing import FieldDefinition\n\n\ntry:\n import pydantic\n\n if pydantic.VERSION.startswith(\"2\"):\n from pydantic_core import PydanticUndefined\n else: # pragma: no cover\n from pydantic.fields import Undefined as PydanticUndefined # type: ignore\nexcept ImportError as e:\n raise MissingDependencyException(\"pydantic\") from e\n\n__all__ = (\"PydanticDTO\",)\n\nT = TypeVar(\"T\", bound=\"pydantic.BaseModel | Collection[pydantic.BaseModel]\")\n\n\nclass PydanticDTO(AbstractDTO[T], Generic[T]):\n \"\"\"Support for domain modelling with Pydantic.\"\"\"\n\n @classmethod\n def generate_field_definitions(\n cls, model_type: type[pydantic.BaseModel]\n ) -> Generator[DTOFieldDefinition, None, None]:\n model_field_definitions = cls.get_model_type_hints(model_type)\n\n if pydantic.VERSION.startswith(\"1\"): # pragma: no cover\n model_fields: dict[str, pydantic.fields.FieldInfo] = {k: model_field.field_info for k, model_field in model_type.__fields__.items()} # type: ignore\n else:\n model_fields = dict(model_type.model_fields)\n\n for field_name, field_info in model_fields.items():\n field_definition = model_field_definitions[field_name]\n dto_field = (field_definition.extra or {}).pop(DTO_FIELD_META_KEY, DTOField())\n\n if field_info.default is not PydanticUndefined:\n default = field_info.default\n elif field_definition.is_optional:\n default = None\n else:\n default = Empty\n\n yield replace(\n DTOFieldDefinition.from_field_definition(\n 
field_definition=field_definition,\n dto_field=dto_field,\n model_name=model_type.__name__,\n default_factory=field_info.default_factory\n if field_info.default_factory and field_info.default_factory is not PydanticUndefined # type: ignore[comparison-overlap]\n else Empty,\n ),\n default=default,\n name=field_name,\n )\n\n @classmethod\n def detect_nested_field(cls, field_definition: FieldDefinition) -> bool:\n return field_definition.is_subclass_of(pydantic.BaseModel)\n", "path": "litestar/contrib/pydantic/pydantic_dto_factory.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom dataclasses import replace\nfrom typing import TYPE_CHECKING, Collection, Generic, TypeVar\n\nfrom typing_extensions import override\n\nfrom litestar.dto.base_dto import AbstractDTO\nfrom litestar.dto.data_structures import DTOFieldDefinition\nfrom litestar.dto.field import DTO_FIELD_META_KEY, DTOField\nfrom litestar.exceptions import MissingDependencyException, ValidationException\nfrom litestar.types.empty import Empty\n\nif TYPE_CHECKING:\n from typing import Any, Generator\n\n from litestar.typing import FieldDefinition\n\n\ntry:\n import pydantic\n from pydantic import ValidationError\n\n if pydantic.VERSION.startswith(\"2\"):\n from pydantic_core import PydanticUndefined\n else: # pragma: no cover\n from pydantic.fields import Undefined as PydanticUndefined # type: ignore\nexcept ImportError as e:\n raise MissingDependencyException(\"pydantic\") from e\n\n__all__ = (\"PydanticDTO\",)\n\nT = TypeVar(\"T\", bound=\"pydantic.BaseModel | Collection[pydantic.BaseModel]\")\n\n\nclass PydanticDTO(AbstractDTO[T], Generic[T]):\n \"\"\"Support for domain modelling with Pydantic.\"\"\"\n\n @override\n def decode_builtins(self, value: dict[str, Any]) -> Any:\n try:\n return super().decode_builtins(value)\n except ValidationError as ex:\n raise ValidationException(extra=ex.errors()) from ex\n\n @override\n def decode_bytes(self, value: bytes) -> Any:\n try:\n return super().decode_bytes(value)\n except ValidationError as ex:\n raise ValidationException(extra=ex.errors()) from ex\n\n @classmethod\n def generate_field_definitions(\n cls, model_type: type[pydantic.BaseModel]\n ) -> Generator[DTOFieldDefinition, None, None]:\n model_field_definitions = cls.get_model_type_hints(model_type)\n\n if pydantic.VERSION.startswith(\"1\"): # pragma: no cover\n model_fields: dict[str, pydantic.fields.FieldInfo] = {k: model_field.field_info for k, model_field in model_type.__fields__.items()} # type: ignore\n else:\n model_fields = dict(model_type.model_fields)\n\n for field_name, field_info in model_fields.items():\n field_definition = model_field_definitions[field_name]\n dto_field = (field_definition.extra or {}).pop(DTO_FIELD_META_KEY, DTOField())\n\n if field_info.default is not PydanticUndefined:\n default = field_info.default\n elif field_definition.is_optional:\n default = None\n else:\n default = Empty\n\n yield replace(\n DTOFieldDefinition.from_field_definition(\n field_definition=field_definition,\n dto_field=dto_field,\n model_name=model_type.__name__,\n default_factory=field_info.default_factory\n if field_info.default_factory and field_info.default_factory is not PydanticUndefined # type: ignore[comparison-overlap]\n else Empty,\n ),\n default=default,\n name=field_name,\n )\n\n @classmethod\n def detect_nested_field(cls, field_definition: FieldDefinition) -> bool:\n return field_definition.is_subclass_of(pydantic.BaseModel)\n", "path": "litestar/contrib/pydantic/pydantic_dto_factory.py"}]} | 
2,047 | 424 |
gh_patches_debug_263 | rasdani/github-patches | git_diff | numpy__numpy-3235 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
2to3 run `itertools_imports` fixer
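(Illustrative note, not part of the original issue: the `itertools_imports` fixer rewrites Python 2 style itertools imports, dropping names that became builtins and renaming the ones that survived.)

```python
# Roughly what lib2to3's fix_itertools_imports does -- illustrative only.
# Before:
from itertools import imap, izip, ifilterfalse
# After:
from itertools import filterfalse   # imap/izip are removed; the map/zip builtins are used instead
```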
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/py3tool.py`
Content:
```
1 #!/usr/bin/env python3
2 # -*- python -*-
3 """
4 %prog SUBMODULE...
5
6 Hack to pipe submodules of Numpy through 2to3 and build them in-place
7 one-by-one.
8
9 Example usage:
10
11 python3 tools/py3tool.py testing distutils core
12
13 This will copy files to _py3k/numpy, add a dummy __init__.py and
14 version.py on the top level, and copy and 2to3 the files of the three
15 submodules.
16
17 When running py3tool again, only changed files are re-processed, which
18 makes the test-bugfix cycle faster.
19
20 """
21 from __future__ import division, absolute_import, print_function
22
23 from optparse import OptionParser
24 import shutil
25 import os
26 import sys
27 import re
28 import subprocess
29 import fnmatch
30
31 if os.environ.get('USE_2TO3CACHE'):
32 import lib2to3cache
33
34 BASE = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
35 TEMP = os.path.normpath(os.path.join(BASE, '_py3k'))
36
37 SCRIPT_2TO3 = os.path.join(BASE, 'tools', '2to3.py')
38
39 EXTRA_2TO3_FLAGS = {
40 'numpy/core/defchararray.py': '-x unicode',
41 'numpy/compat/py3k.py': '-x unicode',
42 'numpy/ma/timer_comparison.py': 'skip',
43 }
44
45 # Names of fixers to skip when running 2to3. This is a complete list of
46 # available fixers, with fixers not currently skipped commented out.
47 FIXES_TO_SKIP = [
48 'apply',
49 # 'basestring',
50 'buffer',
51 'callable',
52 'dict',
53 'exec',
54 'execfile',
55 'exitfunc',
56 'filter',
57 'funcattrs',
58 'future',
59 'getcwdu',
60 'has_key',
61 # 'idioms',
62 'import',
63 'imports',
64 'imports2',
65 'input',
66 'intern',
67 # 'isinstance',
68 # 'itertools',
69 # 'itertools_imports',
70 # 'long',
71 'map',
72 'metaclass',
73 'methodattrs',
74 'ne',
75 # 'next',
76 # 'nonzero',
77 # 'numliterals',
78 'operator',
79 'paren',
80 'print',
81 'raise',
82 'raw_input',
83 'reduce',
84 # 'renames',
85 'repr',
86 'setliteral',
87 'standarderror',
88 'sys_exc',
89 'throw',
90 'tuple_params',
91 # 'types',
92 # 'unicode',
93 # 'urllib',
94 # 'ws_comma',
95 'xrange',
96 'xreadlines',
97 # 'zip',
98 ]
99
100 skip_fixes= []
101 for _t in FIXES_TO_SKIP:
102 skip_fixes.append('-x')
103 skip_fixes.append(_t)
104
105
106 def main():
107 p = OptionParser(usage=__doc__.strip())
108 p.add_option("--clean", "-c", action="store_true",
109 help="clean source directory")
110 options, args = p.parse_args()
111
112 if not args:
113 p.error('no submodules given')
114 else:
115 dirs = ['numpy/%s' % x for x in map(os.path.basename, args)]
116
117 # Prepare
118 if not os.path.isdir(TEMP):
119 os.makedirs(TEMP)
120
121 # Set up dummy files (for building only submodules)
122 dummy_files = {
123 '__init__.py': 'from numpy.version import version as __version__',
124 'version.py': 'version = "1.4.0.dev"'
125 }
126
127 for fn, content in dummy_files.items():
128 fn = os.path.join(TEMP, 'numpy', fn)
129 if not os.path.isfile(fn):
130 try:
131 os.makedirs(os.path.dirname(fn))
132 except OSError:
133 pass
134 f = open(fn, 'wb+')
135 f.write(content.encode('ascii'))
136 f.close()
137
138 # Environment
139 pp = [os.path.abspath(TEMP)]
140 def getenv():
141 env = dict(os.environ)
142 env.update({'PYTHONPATH': ':'.join(pp)})
143 return env
144
145 # Copy
146 for d in dirs:
147 src = os.path.join(BASE, d)
148 dst = os.path.join(TEMP, d)
149
150 # Run 2to3
151 sync_2to3(dst=dst,
152 src=src,
153 patchfile=os.path.join(TEMP, os.path.basename(d) + '.patch'),
154 clean=options.clean)
155
156 # Run setup.py, falling back to Pdb post-mortem on exceptions
157 setup_py = os.path.join(dst, 'setup.py')
158 if os.path.isfile(setup_py):
159 code = """\
160 import pdb, sys, traceback
161 p = pdb.Pdb()
162 try:
163 import __main__
164 __main__.__dict__.update({
165 "__name__": "__main__", "__file__": "setup.py",
166 "__builtins__": __builtins__})
167 fp = open("setup.py", "rb")
168 try:
169 exec(compile(fp.read(), "setup.py", 'exec'))
170 finally:
171 fp.close()
172 except SystemExit:
173 raise
174 except:
175 traceback.print_exc()
176 t = sys.exc_info()[2]
177 p.interaction(None, t)
178 """
179 ret = subprocess.call([sys.executable, '-c', code,
180 'build_ext', '-i'],
181 cwd=dst,
182 env=getenv())
183 if ret != 0:
184 raise RuntimeError("Build failed.")
185
186 # Run nosetests
187 subprocess.call(['nosetests3', '-v', d], cwd=TEMP)
188
189
190 def walk_sync(dir1, dir2, _seen=None):
191 if _seen is None:
192 seen = {}
193 else:
194 seen = _seen
195
196 if not dir1.endswith(os.path.sep):
197 dir1 = dir1 + os.path.sep
198
199 # Walk through stuff (which we haven't yet gone through) in dir1
200 for root, dirs, files in os.walk(dir1):
201 sub = root[len(dir1):]
202 if sub in seen:
203 dirs = [x for x in dirs if x not in seen[sub][0]]
204 files = [x for x in files if x not in seen[sub][1]]
205 seen[sub][0].extend(dirs)
206 seen[sub][1].extend(files)
207 else:
208 seen[sub] = (dirs, files)
209 if not dirs and not files:
210 continue
211 yield os.path.join(dir1, sub), os.path.join(dir2, sub), dirs, files
212
213 if _seen is None:
214 # Walk through stuff (which we haven't yet gone through) in dir2
215 for root2, root1, dirs, files in walk_sync(dir2, dir1, _seen=seen):
216 yield root1, root2, dirs, files
217
218 def sync_2to3(src, dst, patchfile=None, clean=False):
219 import lib2to3.main
220 from io import StringIO
221
222 to_convert = []
223
224 for src_dir, dst_dir, dirs, files in walk_sync(src, dst):
225 for fn in dirs + files:
226 src_fn = os.path.join(src_dir, fn)
227 dst_fn = os.path.join(dst_dir, fn)
228
229 # skip temporary etc. files
230 if fn.startswith('.#') or fn.endswith('~'):
231 continue
232
233 # remove non-existing
234 if os.path.exists(dst_fn) and not os.path.exists(src_fn):
235 if clean:
236 if os.path.isdir(dst_fn):
237 shutil.rmtree(dst_fn)
238 else:
239 os.unlink(dst_fn)
240 continue
241
242 # make directories
243 if os.path.isdir(src_fn):
244 if not os.path.isdir(dst_fn):
245 os.makedirs(dst_fn)
246 continue
247
248 dst_dir = os.path.dirname(dst_fn)
249 if os.path.isfile(dst_fn) and not os.path.isdir(dst_dir):
250 os.makedirs(dst_dir)
251
252 # don't replace up-to-date files
253 try:
254 if os.path.isfile(dst_fn) and \
255 os.stat(dst_fn).st_mtime >= os.stat(src_fn).st_mtime:
256 continue
257 except OSError:
258 pass
259
260 # copy file
261 shutil.copyfile(src_fn, dst_fn)
262
263 # add .py files to 2to3 list
264 if dst_fn.endswith('.py'):
265 to_convert.append((src_fn, dst_fn))
266
267 # run 2to3
268 flag_sets = {}
269 for fn, dst_fn in to_convert:
270 flag = ''
271 for pat, opt in EXTRA_2TO3_FLAGS.items():
272 if fnmatch.fnmatch(fn, pat):
273 flag = opt
274 break
275 flag_sets.setdefault(flag, []).append(dst_fn)
276
277 if patchfile:
278 p = open(patchfile, 'wb+')
279 else:
280 p = open(os.devnull, 'wb')
281
282 for flags, filenames in flag_sets.items():
283 if flags == 'skip':
284 continue
285
286 _old_stdout = sys.stdout
287 try:
288 sys.stdout = StringIO()
289 opt = []
290 opt.extend(['-w', '-n'])
291 opt.extend(skip_fixes)
292 opt.extend(flags.split())
293 opt.extend(filenames)
294 lib2to3.main.main("lib2to3.fixes", opt)
295 finally:
296 sys.stdout = _old_stdout
297
298 p.close()
299
300 if __name__ == "__main__":
301 main()
302
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/py3tool.py b/tools/py3tool.py
--- a/tools/py3tool.py
+++ b/tools/py3tool.py
@@ -66,7 +66,7 @@
'intern',
# 'isinstance',
# 'itertools',
-# 'itertools_imports',
+ 'itertools_imports',
# 'long',
'map',
'metaclass',
| {"golden_diff": "diff --git a/tools/py3tool.py b/tools/py3tool.py\n--- a/tools/py3tool.py\n+++ b/tools/py3tool.py\n@@ -66,7 +66,7 @@\n 'intern',\n # 'isinstance',\n # 'itertools',\n-# 'itertools_imports',\n+ 'itertools_imports',\n # 'long',\n 'map',\n 'metaclass',\n", "issue": "2to3 run `itertools_imports` fixer\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- python -*-\n\"\"\"\n%prog SUBMODULE...\n\nHack to pipe submodules of Numpy through 2to3 and build them in-place\none-by-one.\n\nExample usage:\n\n python3 tools/py3tool.py testing distutils core\n\nThis will copy files to _py3k/numpy, add a dummy __init__.py and\nversion.py on the top level, and copy and 2to3 the files of the three\nsubmodules.\n\nWhen running py3tool again, only changed files are re-processed, which\nmakes the test-bugfix cycle faster.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nfrom optparse import OptionParser\nimport shutil\nimport os\nimport sys\nimport re\nimport subprocess\nimport fnmatch\n\nif os.environ.get('USE_2TO3CACHE'):\n import lib2to3cache\n\nBASE = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))\nTEMP = os.path.normpath(os.path.join(BASE, '_py3k'))\n\nSCRIPT_2TO3 = os.path.join(BASE, 'tools', '2to3.py')\n\nEXTRA_2TO3_FLAGS = {\n 'numpy/core/defchararray.py': '-x unicode',\n 'numpy/compat/py3k.py': '-x unicode',\n 'numpy/ma/timer_comparison.py': 'skip',\n}\n\n# Names of fixers to skip when running 2to3. This is a complete list of\n# available fixers, with fixers not currently skipped commented out.\nFIXES_TO_SKIP = [\n 'apply',\n# 'basestring',\n 'buffer',\n 'callable',\n 'dict',\n 'exec',\n 'execfile',\n 'exitfunc',\n 'filter',\n 'funcattrs',\n 'future',\n 'getcwdu',\n 'has_key',\n# 'idioms',\n 'import',\n 'imports',\n 'imports2',\n 'input',\n 'intern',\n# 'isinstance',\n# 'itertools',\n# 'itertools_imports',\n# 'long',\n 'map',\n 'metaclass',\n 'methodattrs',\n 'ne',\n# 'next',\n# 'nonzero',\n# 'numliterals',\n 'operator',\n 'paren',\n 'print',\n 'raise',\n 'raw_input',\n 'reduce',\n# 'renames',\n 'repr',\n 'setliteral',\n 'standarderror',\n 'sys_exc',\n 'throw',\n 'tuple_params',\n# 'types',\n# 'unicode',\n# 'urllib',\n# 'ws_comma',\n 'xrange',\n 'xreadlines',\n# 'zip',\n]\n\nskip_fixes= []\nfor _t in FIXES_TO_SKIP:\n skip_fixes.append('-x')\n skip_fixes.append(_t)\n\n\ndef main():\n p = OptionParser(usage=__doc__.strip())\n p.add_option(\"--clean\", \"-c\", action=\"store_true\",\n help=\"clean source directory\")\n options, args = p.parse_args()\n\n if not args:\n p.error('no submodules given')\n else:\n dirs = ['numpy/%s' % x for x in map(os.path.basename, args)]\n\n # Prepare\n if not os.path.isdir(TEMP):\n os.makedirs(TEMP)\n\n # Set up dummy files (for building only submodules)\n dummy_files = {\n '__init__.py': 'from numpy.version import version as __version__',\n 'version.py': 'version = \"1.4.0.dev\"'\n }\n\n for fn, content in dummy_files.items():\n fn = os.path.join(TEMP, 'numpy', fn)\n if not os.path.isfile(fn):\n try:\n os.makedirs(os.path.dirname(fn))\n except OSError:\n pass\n f = open(fn, 'wb+')\n f.write(content.encode('ascii'))\n f.close()\n\n # Environment\n pp = [os.path.abspath(TEMP)]\n def getenv():\n env = dict(os.environ)\n env.update({'PYTHONPATH': ':'.join(pp)})\n return env\n\n # Copy\n for d in dirs:\n src = os.path.join(BASE, d)\n dst = os.path.join(TEMP, d)\n\n # Run 2to3\n sync_2to3(dst=dst,\n src=src,\n patchfile=os.path.join(TEMP, os.path.basename(d) + '.patch'),\n clean=options.clean)\n\n # Run 
setup.py, falling back to Pdb post-mortem on exceptions\n setup_py = os.path.join(dst, 'setup.py')\n if os.path.isfile(setup_py):\n code = \"\"\"\\\nimport pdb, sys, traceback\np = pdb.Pdb()\ntry:\n import __main__\n __main__.__dict__.update({\n \"__name__\": \"__main__\", \"__file__\": \"setup.py\",\n \"__builtins__\": __builtins__})\n fp = open(\"setup.py\", \"rb\")\n try:\n exec(compile(fp.read(), \"setup.py\", 'exec'))\n finally:\n fp.close()\nexcept SystemExit:\n raise\nexcept:\n traceback.print_exc()\n t = sys.exc_info()[2]\n p.interaction(None, t)\n\"\"\"\n ret = subprocess.call([sys.executable, '-c', code,\n 'build_ext', '-i'],\n cwd=dst,\n env=getenv())\n if ret != 0:\n raise RuntimeError(\"Build failed.\")\n\n # Run nosetests\n subprocess.call(['nosetests3', '-v', d], cwd=TEMP)\n\n\ndef walk_sync(dir1, dir2, _seen=None):\n if _seen is None:\n seen = {}\n else:\n seen = _seen\n\n if not dir1.endswith(os.path.sep):\n dir1 = dir1 + os.path.sep\n\n # Walk through stuff (which we haven't yet gone through) in dir1\n for root, dirs, files in os.walk(dir1):\n sub = root[len(dir1):]\n if sub in seen:\n dirs = [x for x in dirs if x not in seen[sub][0]]\n files = [x for x in files if x not in seen[sub][1]]\n seen[sub][0].extend(dirs)\n seen[sub][1].extend(files)\n else:\n seen[sub] = (dirs, files)\n if not dirs and not files:\n continue\n yield os.path.join(dir1, sub), os.path.join(dir2, sub), dirs, files\n\n if _seen is None:\n # Walk through stuff (which we haven't yet gone through) in dir2\n for root2, root1, dirs, files in walk_sync(dir2, dir1, _seen=seen):\n yield root1, root2, dirs, files\n\ndef sync_2to3(src, dst, patchfile=None, clean=False):\n import lib2to3.main\n from io import StringIO\n\n to_convert = []\n\n for src_dir, dst_dir, dirs, files in walk_sync(src, dst):\n for fn in dirs + files:\n src_fn = os.path.join(src_dir, fn)\n dst_fn = os.path.join(dst_dir, fn)\n\n # skip temporary etc. 
files\n if fn.startswith('.#') or fn.endswith('~'):\n continue\n\n # remove non-existing\n if os.path.exists(dst_fn) and not os.path.exists(src_fn):\n if clean:\n if os.path.isdir(dst_fn):\n shutil.rmtree(dst_fn)\n else:\n os.unlink(dst_fn)\n continue\n\n # make directories\n if os.path.isdir(src_fn):\n if not os.path.isdir(dst_fn):\n os.makedirs(dst_fn)\n continue\n\n dst_dir = os.path.dirname(dst_fn)\n if os.path.isfile(dst_fn) and not os.path.isdir(dst_dir):\n os.makedirs(dst_dir)\n\n # don't replace up-to-date files\n try:\n if os.path.isfile(dst_fn) and \\\n os.stat(dst_fn).st_mtime >= os.stat(src_fn).st_mtime:\n continue\n except OSError:\n pass\n\n # copy file\n shutil.copyfile(src_fn, dst_fn)\n\n # add .py files to 2to3 list\n if dst_fn.endswith('.py'):\n to_convert.append((src_fn, dst_fn))\n\n # run 2to3\n flag_sets = {}\n for fn, dst_fn in to_convert:\n flag = ''\n for pat, opt in EXTRA_2TO3_FLAGS.items():\n if fnmatch.fnmatch(fn, pat):\n flag = opt\n break\n flag_sets.setdefault(flag, []).append(dst_fn)\n\n if patchfile:\n p = open(patchfile, 'wb+')\n else:\n p = open(os.devnull, 'wb')\n\n for flags, filenames in flag_sets.items():\n if flags == 'skip':\n continue\n\n _old_stdout = sys.stdout\n try:\n sys.stdout = StringIO()\n opt = []\n opt.extend(['-w', '-n'])\n opt.extend(skip_fixes)\n opt.extend(flags.split())\n opt.extend(filenames)\n lib2to3.main.main(\"lib2to3.fixes\", opt)\n finally:\n sys.stdout = _old_stdout\n\n p.close()\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/py3tool.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- python -*-\n\"\"\"\n%prog SUBMODULE...\n\nHack to pipe submodules of Numpy through 2to3 and build them in-place\none-by-one.\n\nExample usage:\n\n python3 tools/py3tool.py testing distutils core\n\nThis will copy files to _py3k/numpy, add a dummy __init__.py and\nversion.py on the top level, and copy and 2to3 the files of the three\nsubmodules.\n\nWhen running py3tool again, only changed files are re-processed, which\nmakes the test-bugfix cycle faster.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nfrom optparse import OptionParser\nimport shutil\nimport os\nimport sys\nimport re\nimport subprocess\nimport fnmatch\n\nif os.environ.get('USE_2TO3CACHE'):\n import lib2to3cache\n\nBASE = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))\nTEMP = os.path.normpath(os.path.join(BASE, '_py3k'))\n\nSCRIPT_2TO3 = os.path.join(BASE, 'tools', '2to3.py')\n\nEXTRA_2TO3_FLAGS = {\n 'numpy/core/defchararray.py': '-x unicode',\n 'numpy/compat/py3k.py': '-x unicode',\n 'numpy/ma/timer_comparison.py': 'skip',\n}\n\n# Names of fixers to skip when running 2to3. 
This is a complete list of\n# available fixers, with fixers not currently skipped commented out.\nFIXES_TO_SKIP = [\n 'apply',\n# 'basestring',\n 'buffer',\n 'callable',\n 'dict',\n 'exec',\n 'execfile',\n 'exitfunc',\n 'filter',\n 'funcattrs',\n 'future',\n 'getcwdu',\n 'has_key',\n# 'idioms',\n 'import',\n 'imports',\n 'imports2',\n 'input',\n 'intern',\n# 'isinstance',\n# 'itertools',\n 'itertools_imports',\n# 'long',\n 'map',\n 'metaclass',\n 'methodattrs',\n 'ne',\n# 'next',\n# 'nonzero',\n# 'numliterals',\n 'operator',\n 'paren',\n 'print',\n 'raise',\n 'raw_input',\n 'reduce',\n# 'renames',\n 'repr',\n 'setliteral',\n 'standarderror',\n 'sys_exc',\n 'throw',\n 'tuple_params',\n# 'types',\n# 'unicode',\n# 'urllib',\n# 'ws_comma',\n 'xrange',\n 'xreadlines',\n# 'zip',\n]\n\nskip_fixes= []\nfor _t in FIXES_TO_SKIP:\n skip_fixes.append('-x')\n skip_fixes.append(_t)\n\n\ndef main():\n p = OptionParser(usage=__doc__.strip())\n p.add_option(\"--clean\", \"-c\", action=\"store_true\",\n help=\"clean source directory\")\n options, args = p.parse_args()\n\n if not args:\n p.error('no submodules given')\n else:\n dirs = ['numpy/%s' % x for x in map(os.path.basename, args)]\n\n # Prepare\n if not os.path.isdir(TEMP):\n os.makedirs(TEMP)\n\n # Set up dummy files (for building only submodules)\n dummy_files = {\n '__init__.py': 'from numpy.version import version as __version__',\n 'version.py': 'version = \"1.4.0.dev\"'\n }\n\n for fn, content in dummy_files.items():\n fn = os.path.join(TEMP, 'numpy', fn)\n if not os.path.isfile(fn):\n try:\n os.makedirs(os.path.dirname(fn))\n except OSError:\n pass\n f = open(fn, 'wb+')\n f.write(content.encode('ascii'))\n f.close()\n\n # Environment\n pp = [os.path.abspath(TEMP)]\n def getenv():\n env = dict(os.environ)\n env.update({'PYTHONPATH': ':'.join(pp)})\n return env\n\n # Copy\n for d in dirs:\n src = os.path.join(BASE, d)\n dst = os.path.join(TEMP, d)\n\n # Run 2to3\n sync_2to3(dst=dst,\n src=src,\n patchfile=os.path.join(TEMP, os.path.basename(d) + '.patch'),\n clean=options.clean)\n\n # Run setup.py, falling back to Pdb post-mortem on exceptions\n setup_py = os.path.join(dst, 'setup.py')\n if os.path.isfile(setup_py):\n code = \"\"\"\\\nimport pdb, sys, traceback\np = pdb.Pdb()\ntry:\n import __main__\n __main__.__dict__.update({\n \"__name__\": \"__main__\", \"__file__\": \"setup.py\",\n \"__builtins__\": __builtins__})\n fp = open(\"setup.py\", \"rb\")\n try:\n exec(compile(fp.read(), \"setup.py\", 'exec'))\n finally:\n fp.close()\nexcept SystemExit:\n raise\nexcept:\n traceback.print_exc()\n t = sys.exc_info()[2]\n p.interaction(None, t)\n\"\"\"\n ret = subprocess.call([sys.executable, '-c', code,\n 'build_ext', '-i'],\n cwd=dst,\n env=getenv())\n if ret != 0:\n raise RuntimeError(\"Build failed.\")\n\n # Run nosetests\n subprocess.call(['nosetests3', '-v', d], cwd=TEMP)\n\n\ndef walk_sync(dir1, dir2, _seen=None):\n if _seen is None:\n seen = {}\n else:\n seen = _seen\n\n if not dir1.endswith(os.path.sep):\n dir1 = dir1 + os.path.sep\n\n # Walk through stuff (which we haven't yet gone through) in dir1\n for root, dirs, files in os.walk(dir1):\n sub = root[len(dir1):]\n if sub in seen:\n dirs = [x for x in dirs if x not in seen[sub][0]]\n files = [x for x in files if x not in seen[sub][1]]\n seen[sub][0].extend(dirs)\n seen[sub][1].extend(files)\n else:\n seen[sub] = (dirs, files)\n if not dirs and not files:\n continue\n yield os.path.join(dir1, sub), os.path.join(dir2, sub), dirs, files\n\n if _seen is None:\n # Walk through stuff (which we 
haven't yet gone through) in dir2\n for root2, root1, dirs, files in walk_sync(dir2, dir1, _seen=seen):\n yield root1, root2, dirs, files\n\ndef sync_2to3(src, dst, patchfile=None, clean=False):\n import lib2to3.main\n from io import StringIO\n\n to_convert = []\n\n for src_dir, dst_dir, dirs, files in walk_sync(src, dst):\n for fn in dirs + files:\n src_fn = os.path.join(src_dir, fn)\n dst_fn = os.path.join(dst_dir, fn)\n\n # skip temporary etc. files\n if fn.startswith('.#') or fn.endswith('~'):\n continue\n\n # remove non-existing\n if os.path.exists(dst_fn) and not os.path.exists(src_fn):\n if clean:\n if os.path.isdir(dst_fn):\n shutil.rmtree(dst_fn)\n else:\n os.unlink(dst_fn)\n continue\n\n # make directories\n if os.path.isdir(src_fn):\n if not os.path.isdir(dst_fn):\n os.makedirs(dst_fn)\n continue\n\n dst_dir = os.path.dirname(dst_fn)\n if os.path.isfile(dst_fn) and not os.path.isdir(dst_dir):\n os.makedirs(dst_dir)\n\n # don't replace up-to-date files\n try:\n if os.path.isfile(dst_fn) and \\\n os.stat(dst_fn).st_mtime >= os.stat(src_fn).st_mtime:\n continue\n except OSError:\n pass\n\n # copy file\n shutil.copyfile(src_fn, dst_fn)\n\n # add .py files to 2to3 list\n if dst_fn.endswith('.py'):\n to_convert.append((src_fn, dst_fn))\n\n # run 2to3\n flag_sets = {}\n for fn, dst_fn in to_convert:\n flag = ''\n for pat, opt in EXTRA_2TO3_FLAGS.items():\n if fnmatch.fnmatch(fn, pat):\n flag = opt\n break\n flag_sets.setdefault(flag, []).append(dst_fn)\n\n if patchfile:\n p = open(patchfile, 'wb+')\n else:\n p = open(os.devnull, 'wb')\n\n for flags, filenames in flag_sets.items():\n if flags == 'skip':\n continue\n\n _old_stdout = sys.stdout\n try:\n sys.stdout = StringIO()\n opt = []\n opt.extend(['-w', '-n'])\n opt.extend(skip_fixes)\n opt.extend(flags.split())\n opt.extend(filenames)\n lib2to3.main.main(\"lib2to3.fixes\", opt)\n finally:\n sys.stdout = _old_stdout\n\n p.close()\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/py3tool.py"}]} | 3,105 | 91 |
gh_patches_debug_12718 | rasdani/github-patches | git_diff | ray-project__ray-6849 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tune] Feature request: tune.sample_from does not support callable objects.
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux Ubuntu 16.04
- **Ray installed from (source or binary)**: binary
- **Ray version**: 0.7.2
- **Python version**: 3.2
- **Exact command to reproduce**: See below
### Describe the problem
The `tune` sample_from interface is strictly limited to function objects, such as lambdas. This serves most use cases, but there are a number of instances where it's very useful to define a callable object to yield samples. (See trivial example below.) At the moment, providing a callable object returns errors from within tune variant generation, as the non-function-based `sample_from` entries are processed as grid entries. This can be resolved by changing the sample/grid check from a direct check for `FunctionType` (Source location: https://github.com/ray-project/ray/blob/fadfa5f30bb654a74c781eaf8396a35af3ab7760/python/ray/tune/suggest/variant_generator.py#L116) to the builtin function `callable`.
I'm not entirely clear if this is an intentional limitation, and changing this logic will likely require expansion of tune's tests and documentation to cover the new behavior. I would be happy to open a PR for this if a maintainer gives the feature a 👍.
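A quick sanity check of that distinction (hedged sketch; `Normal` is the same toy callable as in the reproduction below):

```python
import random
import types


class Normal:
    def __call__(self, _config):
        return random.normalvariate(mu=0, sigma=1)


sampler = Normal()
# A lambda passes the current check, a callable instance does not:
print(isinstance(lambda spec: 0.0, types.FunctionType))  # True
print(isinstance(sampler, types.FunctionType))           # False
# The builtin callable() accepts both, which is the proposed change:
print(callable(lambda spec: 0.0), callable(sampler))     # True True
```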
### Source code / logs
```python
import random
import ray.tune as tune
from ray.tune.suggest.variant_generator import generate_variants
class Normal:
def __call__(self, _config):
return random.normalvariate(mu=0, sigma=1)
grid_config = {"grid": tune.grid_search(list(range(2)))}
sample_config = {"normal": tune.sample_from(Normal())}
print(grid_config)
print(list(generate_variants(grid_config)))
print(sample_config)
print(list(generate_variants(sample_config)))
```
Results:
```
{'grid': {'grid_search': [0, 1]}}
[('grid=0', {'grid': 0}), ('grid=1', {'grid': 1})]
{'normal': tune.sample_from(<__main__.Normal object at 0x7f08ed1d0f50>)}
Traceback (most recent call last):
File "sample_error.py", line 19, in <module>
print(list(generate_variants(sample_config)))
File "/work/home/lexaf/workspace/alphabeta/.conda/lib/python3.7/site-packages/ray/tune/suggest/variant_generator.py", line 43, in generate_variants
for resolved_vars, spec in _generate_variants(unresolved_spec):
File "/work/home/lexaf/workspace/alphabeta/.conda/lib/python3.7/site-packages/ray/tune/suggest/variant_generator.py", line 123, in _generate_variants
for resolved_spec in grid_search:
File "/work/home/lexaf/workspace/alphabeta/.conda/lib/python3.7/site-packages/ray/tune/suggest/variant_generator.py", line 193, in _grid_search_generator
while value_indices[-1] < len(grid_vars[-1][1]):
TypeError: object of type 'Normal' has no len()
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/tune/suggest/variant_generator.py`
Content:
```
1 import copy
2 import logging
3 import numpy
4 import random
5 import types
6
7 from ray.tune import TuneError
8 from ray.tune.sample import sample_from
9
10 logger = logging.getLogger(__name__)
11
12
13 def generate_variants(unresolved_spec):
14 """Generates variants from a spec (dict) with unresolved values.
15
16 There are two types of unresolved values:
17
18 Grid search: These define a grid search over values. For example, the
19 following grid search values in a spec will produce six distinct
20 variants in combination:
21
22 "activation": grid_search(["relu", "tanh"])
23 "learning_rate": grid_search([1e-3, 1e-4, 1e-5])
24
25 Lambda functions: These are evaluated to produce a concrete value, and
26 can express dependencies or conditional distributions between values.
27 They can also be used to express random search (e.g., by calling
28 into the `random` or `np` module).
29
30 "cpu": lambda spec: spec.config.num_workers
31 "batch_size": lambda spec: random.uniform(1, 1000)
32
33 Finally, to support defining specs in plain JSON / YAML, grid search
34 and lambda functions can also be defined alternatively as follows:
35
36 "activation": {"grid_search": ["relu", "tanh"]}
37 "cpu": {"eval": "spec.config.num_workers"}
38
39 Use `format_vars` to format the returned dict of hyperparameters.
40
41 Yields:
42 (Dict of resolved variables, Spec object)
43 """
44 for resolved_vars, spec in _generate_variants(unresolved_spec):
45 assert not _unresolved_values(spec)
46 yield resolved_vars, spec
47
48
49 def grid_search(values):
50 """Convenience method for specifying grid search over a value.
51
52 Arguments:
53 values: An iterable whose parameters will be gridded.
54 """
55
56 return {"grid_search": values}
57
58
59 _STANDARD_IMPORTS = {
60 "random": random,
61 "np": numpy,
62 }
63
64 _MAX_RESOLUTION_PASSES = 20
65
66
67 def resolve_nested_dict(nested_dict):
68 """Flattens a nested dict by joining keys into tuple of paths.
69
70 Can then be passed into `format_vars`.
71 """
72 res = {}
73 for k, v in nested_dict.items():
74 if isinstance(v, dict):
75 for k_, v_ in resolve_nested_dict(v).items():
76 res[(k, ) + k_] = v_
77 else:
78 res[(k, )] = v
79 return res
80
81
82 def format_vars(resolved_vars):
83 """Formats the resolved variable dict into a single string."""
84 out = []
85 for path, value in sorted(resolved_vars.items()):
86 if path[0] in ["run", "env", "resources_per_trial"]:
87 continue # TrialRunner already has these in the experiment_tag
88 pieces = []
89 last_string = True
90 for k in path[::-1]:
91 if isinstance(k, int):
92 pieces.append(str(k))
93 elif last_string:
94 last_string = False
95 pieces.append(k)
96 pieces.reverse()
97 out.append(_clean_value("_".join(pieces)) + "=" + _clean_value(value))
98 return ",".join(out)
99
100
101 def flatten_resolved_vars(resolved_vars):
102 """Formats the resolved variable dict into a mapping of (str -> value)."""
103 flattened_resolved_vars_dict = {}
104 for pieces, value in resolved_vars.items():
105 if pieces[0] == "config":
106 pieces = pieces[1:]
107 pieces = [str(piece) for piece in pieces]
108 flattened_resolved_vars_dict["/".join(pieces)] = value
109 return flattened_resolved_vars_dict
110
111
112 def _clean_value(value):
113 if isinstance(value, float):
114 return "{:.5}".format(value)
115 else:
116 return str(value).replace("/", "_")
117
118
119 def _generate_variants(spec):
120 spec = copy.deepcopy(spec)
121 unresolved = _unresolved_values(spec)
122 if not unresolved:
123 yield {}, spec
124 return
125
126 grid_vars = []
127 lambda_vars = []
128 for path, value in unresolved.items():
129 if isinstance(value, types.FunctionType):
130 lambda_vars.append((path, value))
131 else:
132 grid_vars.append((path, value))
133 grid_vars.sort()
134
135 grid_search = _grid_search_generator(spec, grid_vars)
136 for resolved_spec in grid_search:
137 resolved_vars = _resolve_lambda_vars(resolved_spec, lambda_vars)
138 for resolved, spec in _generate_variants(resolved_spec):
139 for path, value in grid_vars:
140 resolved_vars[path] = _get_value(spec, path)
141 for k, v in resolved.items():
142 if (k in resolved_vars and v != resolved_vars[k]
143 and _is_resolved(resolved_vars[k])):
144 raise ValueError(
145 "The variable `{}` could not be unambiguously "
146 "resolved to a single value. Consider simplifying "
147 "your configuration.".format(k))
148 resolved_vars[k] = v
149 yield resolved_vars, spec
150
151
152 def _assign_value(spec, path, value):
153 for k in path[:-1]:
154 spec = spec[k]
155 spec[path[-1]] = value
156
157
158 def _get_value(spec, path):
159 for k in path:
160 spec = spec[k]
161 return spec
162
163
164 def _resolve_lambda_vars(spec, lambda_vars):
165 resolved = {}
166 error = True
167 num_passes = 0
168 while error and num_passes < _MAX_RESOLUTION_PASSES:
169 num_passes += 1
170 error = False
171 for path, fn in lambda_vars:
172 try:
173 value = fn(_UnresolvedAccessGuard(spec))
174 except RecursiveDependencyError as e:
175 error = e
176 except Exception:
177 raise ValueError(
178 "Failed to evaluate expression: {}: {}".format(path, fn))
179 else:
180 _assign_value(spec, path, value)
181 resolved[path] = value
182 if error:
183 raise error
184 return resolved
185
186
187 def _grid_search_generator(unresolved_spec, grid_vars):
188 value_indices = [0] * len(grid_vars)
189
190 def increment(i):
191 value_indices[i] += 1
192 if value_indices[i] >= len(grid_vars[i][1]):
193 value_indices[i] = 0
194 if i + 1 < len(value_indices):
195 return increment(i + 1)
196 else:
197 return True
198 return False
199
200 if not grid_vars:
201 yield unresolved_spec
202 return
203
204 while value_indices[-1] < len(grid_vars[-1][1]):
205 spec = copy.deepcopy(unresolved_spec)
206 for i, (path, values) in enumerate(grid_vars):
207 _assign_value(spec, path, values[value_indices[i]])
208 yield spec
209 if grid_vars:
210 done = increment(0)
211 if done:
212 break
213
214
215 def _is_resolved(v):
216 resolved, _ = _try_resolve(v)
217 return resolved
218
219
220 def _try_resolve(v):
221 if isinstance(v, sample_from):
222 # Function to sample from
223 return False, v.func
224 elif isinstance(v, dict) and len(v) == 1 and "eval" in v:
225 # Lambda function in eval syntax
226 return False, lambda spec: eval(
227 v["eval"], _STANDARD_IMPORTS, {"spec": spec})
228 elif isinstance(v, dict) and len(v) == 1 and "grid_search" in v:
229 # Grid search values
230 grid_values = v["grid_search"]
231 if not isinstance(grid_values, list):
232 raise TuneError(
233 "Grid search expected list of values, got: {}".format(
234 grid_values))
235 return False, grid_values
236 return True, v
237
238
239 def _unresolved_values(spec):
240 found = {}
241 for k, v in spec.items():
242 resolved, v = _try_resolve(v)
243 if not resolved:
244 found[(k, )] = v
245 elif isinstance(v, dict):
246 # Recurse into a dict
247 for (path, value) in _unresolved_values(v).items():
248 found[(k, ) + path] = value
249 elif isinstance(v, list):
250 # Recurse into a list
251 for i, elem in enumerate(v):
252 for (path, value) in _unresolved_values({i: elem}).items():
253 found[(k, ) + path] = value
254 return found
255
256
257 class _UnresolvedAccessGuard(dict):
258 def __init__(self, *args, **kwds):
259 super(_UnresolvedAccessGuard, self).__init__(*args, **kwds)
260 self.__dict__ = self
261
262 def __getattribute__(self, item):
263 value = dict.__getattribute__(self, item)
264 if not _is_resolved(value):
265 raise RecursiveDependencyError(
266 "`{}` recursively depends on {}".format(item, value))
267 elif isinstance(value, dict):
268 return _UnresolvedAccessGuard(value)
269 else:
270 return value
271
272
273 class RecursiveDependencyError(Exception):
274 def __init__(self, msg):
275 Exception.__init__(self, msg)
276
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/ray/tune/suggest/variant_generator.py b/python/ray/tune/suggest/variant_generator.py
--- a/python/ray/tune/suggest/variant_generator.py
+++ b/python/ray/tune/suggest/variant_generator.py
@@ -2,7 +2,6 @@
import logging
import numpy
import random
-import types
from ray.tune import TuneError
from ray.tune.sample import sample_from
@@ -126,7 +125,7 @@
grid_vars = []
lambda_vars = []
for path, value in unresolved.items():
- if isinstance(value, types.FunctionType):
+ if callable(value):
lambda_vars.append((path, value))
else:
grid_vars.append((path, value))
| {"golden_diff": "diff --git a/python/ray/tune/suggest/variant_generator.py b/python/ray/tune/suggest/variant_generator.py\n--- a/python/ray/tune/suggest/variant_generator.py\n+++ b/python/ray/tune/suggest/variant_generator.py\n@@ -2,7 +2,6 @@\n import logging\n import numpy\n import random\n-import types\n \n from ray.tune import TuneError\n from ray.tune.sample import sample_from\n@@ -126,7 +125,7 @@\n grid_vars = []\n lambda_vars = []\n for path, value in unresolved.items():\n- if isinstance(value, types.FunctionType):\n+ if callable(value):\n lambda_vars.append((path, value))\n else:\n grid_vars.append((path, value))\n", "issue": "[tune] Feature request: tune.sample_from does not support callable objects. \n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux Ubuntu 16.04\r\n- **Ray installed from (source or binary)**: binary\r\n- **Ray version**: 0.7.2\r\n- **Python version**: 3.2\r\n- **Exact command to reproduce**: See below\r\n\r\n### Describe the problem\r\n\r\nThe `tune` sample_from interface is strictly limited to function objects, such as lambdas. This serves most use cases, but there are a number of instances where it's very useful to define a callable object to yield samples. (See trivial example below.) At the moment, providing a callable object returns errors from within tune variant generation, as the non-function-based `sample_from` entries are processed in grid entries. This can be resolved by changeing the sample/grid check from a direct check for `FunctionType` (Source location: https://github.com/ray-project/ray/blob/fadfa5f30bb654a74c781eaf8396a35af3ab7760/python/ray/tune/suggest/variant_generator.py#L116) to the builtin function `callable`.\r\n\r\nI'm not entirely clear if this is an intentional limitation, and changing this logic will likely require expansion of tune's tests and documentation to cover the new behavior. 
I would be happy to open a PR for this if a maintainer gives the feature a \ud83d\udc4d.\r\n\r\n### Source code / logs\r\n\r\n```python\r\n\r\nimport random\r\n\r\nimport ray.tune as tune\r\nfrom ray.tune.suggest.variant_generator import generate_variants\r\n\r\nclass Normal:\r\n def __call__(self, _config):\r\n return random.normalvariate(mu=0, sigma=1)\r\n\r\n\r\ngrid_config = {\"grid\": tune.grid_search(list(range(2)))}\r\nsample_config = {\"normal\": tune.sample_from(Normal())}\r\n\r\n\r\nprint(grid_config)\r\nprint(list(generate_variants(grid_config)))\r\n\r\nprint(sample_config)\r\nprint(list(generate_variants(sample_config)))\r\n\r\n```\r\n\r\nResults:\r\n\r\n```\r\n{'grid': {'grid_search': [0, 1]}}\r\n[('grid=0', {'grid': 0}), ('grid=1', {'grid': 1})]\r\n{'normal': tune.sample_from(<__main__.Normal object at 0x7f08ed1d0f50>)}\r\nTraceback (most recent call last):\r\n File \"sample_error.py\", line 19, in <module>\r\n print(list(generate_variants(sample_config)))\r\n File \"/work/home/lexaf/workspace/alphabeta/.conda/lib/python3.7/site-packages/ray/tune/suggest/variant_generator.py\", line 43, in generate_variants\r\n for resolved_vars, spec in _generate_variants(unresolved_spec):\r\n File \"/work/home/lexaf/workspace/alphabeta/.conda/lib/python3.7/site-packages/ray/tune/suggest/variant_generator.py\", line 123, in _generate_variants\r\n for resolved_spec in grid_search:\r\n File \"/work/home/lexaf/workspace/alphabeta/.conda/lib/python3.7/site-packages/ray/tune/suggest/variant_generator.py\", line 193, in _grid_search_generator\r\n while value_indices[-1] < len(grid_vars[-1][1]):\r\nTypeError: object of type 'Normal' has no len()\r\n```\r\n\n", "before_files": [{"content": "import copy\nimport logging\nimport numpy\nimport random\nimport types\n\nfrom ray.tune import TuneError\nfrom ray.tune.sample import sample_from\n\nlogger = logging.getLogger(__name__)\n\n\ndef generate_variants(unresolved_spec):\n \"\"\"Generates variants from a spec (dict) with unresolved values.\n\n There are two types of unresolved values:\n\n Grid search: These define a grid search over values. 
For example, the\n following grid search values in a spec will produce six distinct\n variants in combination:\n\n \"activation\": grid_search([\"relu\", \"tanh\"])\n \"learning_rate\": grid_search([1e-3, 1e-4, 1e-5])\n\n Lambda functions: These are evaluated to produce a concrete value, and\n can express dependencies or conditional distributions between values.\n They can also be used to express random search (e.g., by calling\n into the `random` or `np` module).\n\n \"cpu\": lambda spec: spec.config.num_workers\n \"batch_size\": lambda spec: random.uniform(1, 1000)\n\n Finally, to support defining specs in plain JSON / YAML, grid search\n and lambda functions can also be defined alternatively as follows:\n\n \"activation\": {\"grid_search\": [\"relu\", \"tanh\"]}\n \"cpu\": {\"eval\": \"spec.config.num_workers\"}\n\n Use `format_vars` to format the returned dict of hyperparameters.\n\n Yields:\n (Dict of resolved variables, Spec object)\n \"\"\"\n for resolved_vars, spec in _generate_variants(unresolved_spec):\n assert not _unresolved_values(spec)\n yield resolved_vars, spec\n\n\ndef grid_search(values):\n \"\"\"Convenience method for specifying grid search over a value.\n\n Arguments:\n values: An iterable whose parameters will be gridded.\n \"\"\"\n\n return {\"grid_search\": values}\n\n\n_STANDARD_IMPORTS = {\n \"random\": random,\n \"np\": numpy,\n}\n\n_MAX_RESOLUTION_PASSES = 20\n\n\ndef resolve_nested_dict(nested_dict):\n \"\"\"Flattens a nested dict by joining keys into tuple of paths.\n\n Can then be passed into `format_vars`.\n \"\"\"\n res = {}\n for k, v in nested_dict.items():\n if isinstance(v, dict):\n for k_, v_ in resolve_nested_dict(v).items():\n res[(k, ) + k_] = v_\n else:\n res[(k, )] = v\n return res\n\n\ndef format_vars(resolved_vars):\n \"\"\"Formats the resolved variable dict into a single string.\"\"\"\n out = []\n for path, value in sorted(resolved_vars.items()):\n if path[0] in [\"run\", \"env\", \"resources_per_trial\"]:\n continue # TrialRunner already has these in the experiment_tag\n pieces = []\n last_string = True\n for k in path[::-1]:\n if isinstance(k, int):\n pieces.append(str(k))\n elif last_string:\n last_string = False\n pieces.append(k)\n pieces.reverse()\n out.append(_clean_value(\"_\".join(pieces)) + \"=\" + _clean_value(value))\n return \",\".join(out)\n\n\ndef flatten_resolved_vars(resolved_vars):\n \"\"\"Formats the resolved variable dict into a mapping of (str -> value).\"\"\"\n flattened_resolved_vars_dict = {}\n for pieces, value in resolved_vars.items():\n if pieces[0] == \"config\":\n pieces = pieces[1:]\n pieces = [str(piece) for piece in pieces]\n flattened_resolved_vars_dict[\"/\".join(pieces)] = value\n return flattened_resolved_vars_dict\n\n\ndef _clean_value(value):\n if isinstance(value, float):\n return \"{:.5}\".format(value)\n else:\n return str(value).replace(\"/\", \"_\")\n\n\ndef _generate_variants(spec):\n spec = copy.deepcopy(spec)\n unresolved = _unresolved_values(spec)\n if not unresolved:\n yield {}, spec\n return\n\n grid_vars = []\n lambda_vars = []\n for path, value in unresolved.items():\n if isinstance(value, types.FunctionType):\n lambda_vars.append((path, value))\n else:\n grid_vars.append((path, value))\n grid_vars.sort()\n\n grid_search = _grid_search_generator(spec, grid_vars)\n for resolved_spec in grid_search:\n resolved_vars = _resolve_lambda_vars(resolved_spec, lambda_vars)\n for resolved, spec in _generate_variants(resolved_spec):\n for path, value in grid_vars:\n resolved_vars[path] = _get_value(spec, 
path)\n for k, v in resolved.items():\n if (k in resolved_vars and v != resolved_vars[k]\n and _is_resolved(resolved_vars[k])):\n raise ValueError(\n \"The variable `{}` could not be unambiguously \"\n \"resolved to a single value. Consider simplifying \"\n \"your configuration.\".format(k))\n resolved_vars[k] = v\n yield resolved_vars, spec\n\n\ndef _assign_value(spec, path, value):\n for k in path[:-1]:\n spec = spec[k]\n spec[path[-1]] = value\n\n\ndef _get_value(spec, path):\n for k in path:\n spec = spec[k]\n return spec\n\n\ndef _resolve_lambda_vars(spec, lambda_vars):\n resolved = {}\n error = True\n num_passes = 0\n while error and num_passes < _MAX_RESOLUTION_PASSES:\n num_passes += 1\n error = False\n for path, fn in lambda_vars:\n try:\n value = fn(_UnresolvedAccessGuard(spec))\n except RecursiveDependencyError as e:\n error = e\n except Exception:\n raise ValueError(\n \"Failed to evaluate expression: {}: {}\".format(path, fn))\n else:\n _assign_value(spec, path, value)\n resolved[path] = value\n if error:\n raise error\n return resolved\n\n\ndef _grid_search_generator(unresolved_spec, grid_vars):\n value_indices = [0] * len(grid_vars)\n\n def increment(i):\n value_indices[i] += 1\n if value_indices[i] >= len(grid_vars[i][1]):\n value_indices[i] = 0\n if i + 1 < len(value_indices):\n return increment(i + 1)\n else:\n return True\n return False\n\n if not grid_vars:\n yield unresolved_spec\n return\n\n while value_indices[-1] < len(grid_vars[-1][1]):\n spec = copy.deepcopy(unresolved_spec)\n for i, (path, values) in enumerate(grid_vars):\n _assign_value(spec, path, values[value_indices[i]])\n yield spec\n if grid_vars:\n done = increment(0)\n if done:\n break\n\n\ndef _is_resolved(v):\n resolved, _ = _try_resolve(v)\n return resolved\n\n\ndef _try_resolve(v):\n if isinstance(v, sample_from):\n # Function to sample from\n return False, v.func\n elif isinstance(v, dict) and len(v) == 1 and \"eval\" in v:\n # Lambda function in eval syntax\n return False, lambda spec: eval(\n v[\"eval\"], _STANDARD_IMPORTS, {\"spec\": spec})\n elif isinstance(v, dict) and len(v) == 1 and \"grid_search\" in v:\n # Grid search values\n grid_values = v[\"grid_search\"]\n if not isinstance(grid_values, list):\n raise TuneError(\n \"Grid search expected list of values, got: {}\".format(\n grid_values))\n return False, grid_values\n return True, v\n\n\ndef _unresolved_values(spec):\n found = {}\n for k, v in spec.items():\n resolved, v = _try_resolve(v)\n if not resolved:\n found[(k, )] = v\n elif isinstance(v, dict):\n # Recurse into a dict\n for (path, value) in _unresolved_values(v).items():\n found[(k, ) + path] = value\n elif isinstance(v, list):\n # Recurse into a list\n for i, elem in enumerate(v):\n for (path, value) in _unresolved_values({i: elem}).items():\n found[(k, ) + path] = value\n return found\n\n\nclass _UnresolvedAccessGuard(dict):\n def __init__(self, *args, **kwds):\n super(_UnresolvedAccessGuard, self).__init__(*args, **kwds)\n self.__dict__ = self\n\n def __getattribute__(self, item):\n value = dict.__getattribute__(self, item)\n if not _is_resolved(value):\n raise RecursiveDependencyError(\n \"`{}` recursively depends on {}\".format(item, value))\n elif isinstance(value, dict):\n return _UnresolvedAccessGuard(value)\n else:\n return value\n\n\nclass RecursiveDependencyError(Exception):\n def __init__(self, msg):\n Exception.__init__(self, msg)\n", "path": "python/ray/tune/suggest/variant_generator.py"}], "after_files": [{"content": "import copy\nimport logging\nimport 
numpy\nimport random\n\nfrom ray.tune import TuneError\nfrom ray.tune.sample import sample_from\n\nlogger = logging.getLogger(__name__)\n\n\ndef generate_variants(unresolved_spec):\n \"\"\"Generates variants from a spec (dict) with unresolved values.\n\n There are two types of unresolved values:\n\n Grid search: These define a grid search over values. For example, the\n following grid search values in a spec will produce six distinct\n variants in combination:\n\n \"activation\": grid_search([\"relu\", \"tanh\"])\n \"learning_rate\": grid_search([1e-3, 1e-4, 1e-5])\n\n Lambda functions: These are evaluated to produce a concrete value, and\n can express dependencies or conditional distributions between values.\n They can also be used to express random search (e.g., by calling\n into the `random` or `np` module).\n\n \"cpu\": lambda spec: spec.config.num_workers\n \"batch_size\": lambda spec: random.uniform(1, 1000)\n\n Finally, to support defining specs in plain JSON / YAML, grid search\n and lambda functions can also be defined alternatively as follows:\n\n \"activation\": {\"grid_search\": [\"relu\", \"tanh\"]}\n \"cpu\": {\"eval\": \"spec.config.num_workers\"}\n\n Use `format_vars` to format the returned dict of hyperparameters.\n\n Yields:\n (Dict of resolved variables, Spec object)\n \"\"\"\n for resolved_vars, spec in _generate_variants(unresolved_spec):\n assert not _unresolved_values(spec)\n yield resolved_vars, spec\n\n\ndef grid_search(values):\n \"\"\"Convenience method for specifying grid search over a value.\n\n Arguments:\n values: An iterable whose parameters will be gridded.\n \"\"\"\n\n return {\"grid_search\": values}\n\n\n_STANDARD_IMPORTS = {\n \"random\": random,\n \"np\": numpy,\n}\n\n_MAX_RESOLUTION_PASSES = 20\n\n\ndef resolve_nested_dict(nested_dict):\n \"\"\"Flattens a nested dict by joining keys into tuple of paths.\n\n Can then be passed into `format_vars`.\n \"\"\"\n res = {}\n for k, v in nested_dict.items():\n if isinstance(v, dict):\n for k_, v_ in resolve_nested_dict(v).items():\n res[(k, ) + k_] = v_\n else:\n res[(k, )] = v\n return res\n\n\ndef format_vars(resolved_vars):\n \"\"\"Formats the resolved variable dict into a single string.\"\"\"\n out = []\n for path, value in sorted(resolved_vars.items()):\n if path[0] in [\"run\", \"env\", \"resources_per_trial\"]:\n continue # TrialRunner already has these in the experiment_tag\n pieces = []\n last_string = True\n for k in path[::-1]:\n if isinstance(k, int):\n pieces.append(str(k))\n elif last_string:\n last_string = False\n pieces.append(k)\n pieces.reverse()\n out.append(_clean_value(\"_\".join(pieces)) + \"=\" + _clean_value(value))\n return \",\".join(out)\n\n\ndef flatten_resolved_vars(resolved_vars):\n \"\"\"Formats the resolved variable dict into a mapping of (str -> value).\"\"\"\n flattened_resolved_vars_dict = {}\n for pieces, value in resolved_vars.items():\n if pieces[0] == \"config\":\n pieces = pieces[1:]\n pieces = [str(piece) for piece in pieces]\n flattened_resolved_vars_dict[\"/\".join(pieces)] = value\n return flattened_resolved_vars_dict\n\n\ndef _clean_value(value):\n if isinstance(value, float):\n return \"{:.5}\".format(value)\n else:\n return str(value).replace(\"/\", \"_\")\n\n\ndef _generate_variants(spec):\n spec = copy.deepcopy(spec)\n unresolved = _unresolved_values(spec)\n if not unresolved:\n yield {}, spec\n return\n\n grid_vars = []\n lambda_vars = []\n for path, value in unresolved.items():\n if callable(value):\n lambda_vars.append((path, value))\n else:\n 
grid_vars.append((path, value))\n grid_vars.sort()\n\n grid_search = _grid_search_generator(spec, grid_vars)\n for resolved_spec in grid_search:\n resolved_vars = _resolve_lambda_vars(resolved_spec, lambda_vars)\n for resolved, spec in _generate_variants(resolved_spec):\n for path, value in grid_vars:\n resolved_vars[path] = _get_value(spec, path)\n for k, v in resolved.items():\n if (k in resolved_vars and v != resolved_vars[k]\n and _is_resolved(resolved_vars[k])):\n raise ValueError(\n \"The variable `{}` could not be unambiguously \"\n \"resolved to a single value. Consider simplifying \"\n \"your configuration.\".format(k))\n resolved_vars[k] = v\n yield resolved_vars, spec\n\n\ndef _assign_value(spec, path, value):\n for k in path[:-1]:\n spec = spec[k]\n spec[path[-1]] = value\n\n\ndef _get_value(spec, path):\n for k in path:\n spec = spec[k]\n return spec\n\n\ndef _resolve_lambda_vars(spec, lambda_vars):\n resolved = {}\n error = True\n num_passes = 0\n while error and num_passes < _MAX_RESOLUTION_PASSES:\n num_passes += 1\n error = False\n for path, fn in lambda_vars:\n try:\n value = fn(_UnresolvedAccessGuard(spec))\n except RecursiveDependencyError as e:\n error = e\n except Exception:\n raise ValueError(\n \"Failed to evaluate expression: {}: {}\".format(path, fn))\n else:\n _assign_value(spec, path, value)\n resolved[path] = value\n if error:\n raise error\n return resolved\n\n\ndef _grid_search_generator(unresolved_spec, grid_vars):\n value_indices = [0] * len(grid_vars)\n\n def increment(i):\n value_indices[i] += 1\n if value_indices[i] >= len(grid_vars[i][1]):\n value_indices[i] = 0\n if i + 1 < len(value_indices):\n return increment(i + 1)\n else:\n return True\n return False\n\n if not grid_vars:\n yield unresolved_spec\n return\n\n while value_indices[-1] < len(grid_vars[-1][1]):\n spec = copy.deepcopy(unresolved_spec)\n for i, (path, values) in enumerate(grid_vars):\n _assign_value(spec, path, values[value_indices[i]])\n yield spec\n if grid_vars:\n done = increment(0)\n if done:\n break\n\n\ndef _is_resolved(v):\n resolved, _ = _try_resolve(v)\n return resolved\n\n\ndef _try_resolve(v):\n if isinstance(v, sample_from):\n # Function to sample from\n return False, v.func\n elif isinstance(v, dict) and len(v) == 1 and \"eval\" in v:\n # Lambda function in eval syntax\n return False, lambda spec: eval(\n v[\"eval\"], _STANDARD_IMPORTS, {\"spec\": spec})\n elif isinstance(v, dict) and len(v) == 1 and \"grid_search\" in v:\n # Grid search values\n grid_values = v[\"grid_search\"]\n if not isinstance(grid_values, list):\n raise TuneError(\n \"Grid search expected list of values, got: {}\".format(\n grid_values))\n return False, grid_values\n return True, v\n\n\ndef _unresolved_values(spec):\n found = {}\n for k, v in spec.items():\n resolved, v = _try_resolve(v)\n if not resolved:\n found[(k, )] = v\n elif isinstance(v, dict):\n # Recurse into a dict\n for (path, value) in _unresolved_values(v).items():\n found[(k, ) + path] = value\n elif isinstance(v, list):\n # Recurse into a list\n for i, elem in enumerate(v):\n for (path, value) in _unresolved_values({i: elem}).items():\n found[(k, ) + path] = value\n return found\n\n\nclass _UnresolvedAccessGuard(dict):\n def __init__(self, *args, **kwds):\n super(_UnresolvedAccessGuard, self).__init__(*args, **kwds)\n self.__dict__ = self\n\n def __getattribute__(self, item):\n value = dict.__getattribute__(self, item)\n if not _is_resolved(value):\n raise RecursiveDependencyError(\n \"`{}` recursively depends on {}\".format(item, 
value))\n elif isinstance(value, dict):\n return _UnresolvedAccessGuard(value)\n else:\n return value\n\n\nclass RecursiveDependencyError(Exception):\n def __init__(self, msg):\n Exception.__init__(self, msg)\n", "path": "python/ray/tune/suggest/variant_generator.py"}]} | 3,683 | 165 |
gh_patches_debug_37155 | rasdani/github-patches | git_diff | pallets__werkzeug-1712 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
set charset in shareddatamiddleware
# Description
UTF-8 JavaScript files served by the SharedDataMiddleware do not contain a charset in the `Content-Type` response header. This leads to the JavaScript being interpreted as US-ASCII by browsers (https://github.com/posativ/isso/issues/607).
**Current**
```
Content-Type: application/javascript
```
**Expected**
```
Content-Type: application/javascript; charset=UTF-8
```
The mime type is deduced using Python's mimetypes module and the filename:
https://github.com/pallets/werkzeug/blob/aa9676f5cd5ab7ee08b09f38df22ef3e3c564d87/src/werkzeug/middleware/shared_data.py#L256-L257
It is sent without appending an encoding:
https://github.com/pallets/werkzeug/blob/aa9676f5cd5ab7ee08b09f38df22ef3e3c564d87/src/werkzeug/middleware/shared_data.py#L281
Other usages seem to suggest that a call to `get_content_type` is necessary to append the charset, if applicable:
https://github.com/pallets/werkzeug/blob/e76aac8294626e24e1075e665cbf9657b88c4301/src/werkzeug/wrappers/common_descriptors.py#L146
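For context, the stdlib call referenced above never reports a charset on its own; a small check (assuming a typical platform mime map):

```python
import mimetypes

# Typically ('application/javascript', None); the second element is a transfer
# encoding such as 'gzip' for compressed files, never a text charset, so any
# charset parameter has to be appended by the middleware itself.
print(mimetypes.guess_type("embed.min.js"))
```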
# Possible solutions
I am not familiar with the codebase, but would it be possible to call `get_content_type`? I tried to modify it myself, but I don't know where to get the encoding from inside `SharedDataMiddleware`.
My problem is solved when I hardcode 'utf-8' as the charset:
```python
("Content-Type", get_content_type(mime_type, 'utf-8'))
```
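A standalone sketch of that idea (hypothetical helper, not the actual werkzeug API; which mime types deserve a charset is an assumption here):

```python
import mimetypes


def guess_content_type(filename, fallback="text/plain", charset="utf-8"):
    # Hypothetical helper: append a charset to text-like mime types,
    # as proposed above; real werkzeug may pick a different rule.
    mime_type = mimetypes.guess_type(filename)[0] or fallback
    if mime_type.startswith("text/") or mime_type in (
        "application/javascript",
        "application/xml",
    ):
        mime_type = "{}; charset={}".format(mime_type, charset)
    return mime_type


print(guess_content_type("embed.min.js"))  # e.g. 'application/javascript; charset=utf-8'
```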
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/werkzeug/middleware/shared_data.py`
Content:
```
1 """
2 Serve Shared Static Files
3 =========================
4
5 .. autoclass:: SharedDataMiddleware
6 :members: is_allowed
7
8 :copyright: 2007 Pallets
9 :license: BSD-3-Clause
10 """
11 import mimetypes
12 import os
13 import pkgutil
14 import posixpath
15 from datetime import datetime
16 from io import BytesIO
17 from time import mktime
18 from time import time
19 from zlib import adler32
20
21 from .._compat import PY2
22 from .._compat import string_types
23 from ..filesystem import get_filesystem_encoding
24 from ..http import http_date
25 from ..http import is_resource_modified
26 from ..security import safe_join
27 from ..wsgi import get_path_info
28 from ..wsgi import wrap_file
29
30
31 class SharedDataMiddleware(object):
32
33 """A WSGI middleware that provides static content for development
34 environments or simple server setups. Usage is quite simple::
35
36 import os
37 from werkzeug.wsgi import SharedDataMiddleware
38
39 app = SharedDataMiddleware(app, {
40 '/static': os.path.join(os.path.dirname(__file__), 'static')
41 })
42
43 The contents of the folder ``./shared`` will now be available on
44 ``http://example.com/shared/``. This is pretty useful during development
45 because a standalone media server is not required. One can also mount
46 files on the root folder and still continue to use the application because
47 the shared data middleware forwards all unhandled requests to the
48 application, even if the requests are below one of the shared folders.
49
50 If `pkg_resources` is available you can also tell the middleware to serve
51 files from package data::
52
53 app = SharedDataMiddleware(app, {
54 '/static': ('myapplication', 'static')
55 })
56
57 This will then serve the ``static`` folder in the `myapplication`
58 Python package.
59
60 The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
61 rules for files that are not accessible from the web. If `cache` is set to
62 `False` no caching headers are sent.
63
64 Currently the middleware does not support non ASCII filenames. If the
65 encoding on the file system happens to be the encoding of the URI it may
66 work but this could also be by accident. We strongly suggest using ASCII
67 only file names for static files.
68
69 The middleware will guess the mimetype using the Python `mimetype`
70 module. If it's unable to figure out the charset it will fall back
71 to `fallback_mimetype`.
72
73 .. versionchanged:: 0.5
74 The cache timeout is configurable now.
75
76 .. versionadded:: 0.6
77 The `fallback_mimetype` parameter was added.
78
79 :param app: the application to wrap. If you don't want to wrap an
80 application you can pass it :exc:`NotFound`.
81 :param exports: a list or dict of exported files and folders.
82 :param disallow: a list of :func:`~fnmatch.fnmatch` rules.
83 :param fallback_mimetype: the fallback mimetype for unknown files.
84 :param cache: enable or disable caching headers.
85 :param cache_timeout: the cache timeout in seconds for the headers.
86 """
87
88 def __init__(
89 self,
90 app,
91 exports,
92 disallow=None,
93 cache=True,
94 cache_timeout=60 * 60 * 12,
95 fallback_mimetype="text/plain",
96 ):
97 self.app = app
98 self.exports = []
99 self.cache = cache
100 self.cache_timeout = cache_timeout
101
102 if hasattr(exports, "items"):
103 exports = exports.items()
104
105 for key, value in exports:
106 if isinstance(value, tuple):
107 loader = self.get_package_loader(*value)
108 elif isinstance(value, string_types):
109 if os.path.isfile(value):
110 loader = self.get_file_loader(value)
111 else:
112 loader = self.get_directory_loader(value)
113 else:
114 raise TypeError("unknown def %r" % value)
115
116 self.exports.append((key, loader))
117
118 if disallow is not None:
119 from fnmatch import fnmatch
120
121 self.is_allowed = lambda x: not fnmatch(x, disallow)
122
123 self.fallback_mimetype = fallback_mimetype
124
125 def is_allowed(self, filename):
126 """Subclasses can override this method to disallow the access to
127 certain files. However by providing `disallow` in the constructor
128 this method is overwritten.
129 """
130 return True
131
132 def _opener(self, filename):
133 return lambda: (
134 open(filename, "rb"),
135 datetime.utcfromtimestamp(os.path.getmtime(filename)),
136 int(os.path.getsize(filename)),
137 )
138
139 def get_file_loader(self, filename):
140 return lambda x: (os.path.basename(filename), self._opener(filename))
141
142 def get_package_loader(self, package, package_path):
143 loadtime = datetime.utcnow()
144 provider = pkgutil.get_loader(package)
145
146 if hasattr(provider, "get_resource_reader"):
147 # Python 3
148 reader = provider.get_resource_reader(package)
149
150 def loader(path):
151 if path is None:
152 return None, None
153
154 path = safe_join(package_path, path)
155 basename = posixpath.basename(path)
156
157 try:
158 resource = reader.open_resource(path)
159 except IOError:
160 return None, None
161
162 if isinstance(resource, BytesIO):
163 return (
164 basename,
165 lambda: (resource, loadtime, len(resource.getvalue())),
166 )
167
168 return (
169 basename,
170 lambda: (
171 resource,
172 datetime.utcfromtimestamp(os.path.getmtime(resource.name)),
173 os.path.getsize(resource.name),
174 ),
175 )
176
177 else:
178 # Python 2
179 package_filename = provider.get_filename(package)
180 is_filesystem = os.path.exists(package_filename)
181 root = os.path.join(os.path.dirname(package_filename), package_path)
182
183 def loader(path):
184 if path is None:
185 return None, None
186
187 path = safe_join(root, path)
188 basename = posixpath.basename(path)
189
190 if is_filesystem:
191 if not os.path.isfile(path):
192 return None, None
193
194 return basename, self._opener(path)
195
196 try:
197 data = provider.get_data(path)
198 except IOError:
199 return None, None
200
201 return basename, lambda: (BytesIO(data), loadtime, len(data))
202
203 return loader
204
205 def get_directory_loader(self, directory):
206 def loader(path):
207 if path is not None:
208 path = safe_join(directory, path)
209 else:
210 path = directory
211
212 if os.path.isfile(path):
213 return os.path.basename(path), self._opener(path)
214
215 return None, None
216
217 return loader
218
219 def generate_etag(self, mtime, file_size, real_filename):
220 if not isinstance(real_filename, bytes):
221 real_filename = real_filename.encode(get_filesystem_encoding())
222
223 return "wzsdm-%d-%s-%s" % (
224 mktime(mtime.timetuple()),
225 file_size,
226 adler32(real_filename) & 0xFFFFFFFF,
227 )
228
229 def __call__(self, environ, start_response):
230 path = get_path_info(environ)
231
232 if PY2:
233 path = path.encode(get_filesystem_encoding())
234
235 file_loader = None
236
237 for search_path, loader in self.exports:
238 if search_path == path:
239 real_filename, file_loader = loader(None)
240
241 if file_loader is not None:
242 break
243
244 if not search_path.endswith("/"):
245 search_path += "/"
246
247 if path.startswith(search_path):
248 real_filename, file_loader = loader(path[len(search_path) :])
249
250 if file_loader is not None:
251 break
252
253 if file_loader is None or not self.is_allowed(real_filename):
254 return self.app(environ, start_response)
255
256 guessed_type = mimetypes.guess_type(real_filename)
257 mime_type = guessed_type[0] or self.fallback_mimetype
258 f, mtime, file_size = file_loader()
259
260 headers = [("Date", http_date())]
261
262 if self.cache:
263 timeout = self.cache_timeout
264 etag = self.generate_etag(mtime, file_size, real_filename)
265 headers += [
266 ("Etag", '"%s"' % etag),
267 ("Cache-Control", "max-age=%d, public" % timeout),
268 ]
269
270 if not is_resource_modified(environ, etag, last_modified=mtime):
271 f.close()
272 start_response("304 Not Modified", headers)
273 return []
274
275 headers.append(("Expires", http_date(time() + timeout)))
276 else:
277 headers.append(("Cache-Control", "public"))
278
279 headers.extend(
280 (
281 ("Content-Type", mime_type),
282 ("Content-Length", str(file_size)),
283 ("Last-Modified", http_date(mtime)),
284 )
285 )
286 start_response("200 OK", headers)
287 return wrap_file(environ, f)
288
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/werkzeug/middleware/shared_data.py b/src/werkzeug/middleware/shared_data.py
--- a/src/werkzeug/middleware/shared_data.py
+++ b/src/werkzeug/middleware/shared_data.py
@@ -24,6 +24,7 @@
from ..http import http_date
from ..http import is_resource_modified
from ..security import safe_join
+from ..utils import get_content_type
from ..wsgi import get_path_info
from ..wsgi import wrap_file
@@ -70,19 +71,24 @@
module. If it's unable to figure out the charset it will fall back
to `fallback_mimetype`.
- .. versionchanged:: 0.5
- The cache timeout is configurable now.
-
- .. versionadded:: 0.6
- The `fallback_mimetype` parameter was added.
-
:param app: the application to wrap. If you don't want to wrap an
application you can pass it :exc:`NotFound`.
:param exports: a list or dict of exported files and folders.
:param disallow: a list of :func:`~fnmatch.fnmatch` rules.
- :param fallback_mimetype: the fallback mimetype for unknown files.
:param cache: enable or disable caching headers.
:param cache_timeout: the cache timeout in seconds for the headers.
+ :param fallback_mimetype: The fallback mimetype for unknown files.
+
+ .. versionchanged:: 1.0
+ The default ``fallback_mimetype`` is
+ ``application/octet-stream``. If a filename looks like a text
+ mimetype, the ``utf-8`` charset is added to it.
+
+ .. versionadded:: 0.6
+ Added ``fallback_mimetype``.
+
+ .. versionchanged:: 0.5
+ Added ``cache_timeout``.
"""
def __init__(
@@ -92,7 +98,7 @@
disallow=None,
cache=True,
cache_timeout=60 * 60 * 12,
- fallback_mimetype="text/plain",
+ fallback_mimetype="application/octet-stream",
):
self.app = app
self.exports = []
@@ -254,7 +260,7 @@
return self.app(environ, start_response)
guessed_type = mimetypes.guess_type(real_filename)
- mime_type = guessed_type[0] or self.fallback_mimetype
+ mime_type = get_content_type(guessed_type[0] or self.fallback_mimetype, "utf-8")
f, mtime, file_size = file_loader()
headers = [("Date", http_date())]
| {"golden_diff": "diff --git a/src/werkzeug/middleware/shared_data.py b/src/werkzeug/middleware/shared_data.py\n--- a/src/werkzeug/middleware/shared_data.py\n+++ b/src/werkzeug/middleware/shared_data.py\n@@ -24,6 +24,7 @@\n from ..http import http_date\n from ..http import is_resource_modified\n from ..security import safe_join\n+from ..utils import get_content_type\n from ..wsgi import get_path_info\n from ..wsgi import wrap_file\n \n@@ -70,19 +71,24 @@\n module. If it's unable to figure out the charset it will fall back\n to `fallback_mimetype`.\n \n- .. versionchanged:: 0.5\n- The cache timeout is configurable now.\n-\n- .. versionadded:: 0.6\n- The `fallback_mimetype` parameter was added.\n-\n :param app: the application to wrap. If you don't want to wrap an\n application you can pass it :exc:`NotFound`.\n :param exports: a list or dict of exported files and folders.\n :param disallow: a list of :func:`~fnmatch.fnmatch` rules.\n- :param fallback_mimetype: the fallback mimetype for unknown files.\n :param cache: enable or disable caching headers.\n :param cache_timeout: the cache timeout in seconds for the headers.\n+ :param fallback_mimetype: The fallback mimetype for unknown files.\n+\n+ .. versionchanged:: 1.0\n+ The default ``fallback_mimetype`` is\n+ ``application/octet-stream``. If a filename looks like a text\n+ mimetype, the ``utf-8`` charset is added to it.\n+\n+ .. versionadded:: 0.6\n+ Added ``fallback_mimetype``.\n+\n+ .. versionchanged:: 0.5\n+ Added ``cache_timeout``.\n \"\"\"\n \n def __init__(\n@@ -92,7 +98,7 @@\n disallow=None,\n cache=True,\n cache_timeout=60 * 60 * 12,\n- fallback_mimetype=\"text/plain\",\n+ fallback_mimetype=\"application/octet-stream\",\n ):\n self.app = app\n self.exports = []\n@@ -254,7 +260,7 @@\n return self.app(environ, start_response)\n \n guessed_type = mimetypes.guess_type(real_filename)\n- mime_type = guessed_type[0] or self.fallback_mimetype\n+ mime_type = get_content_type(guessed_type[0] or self.fallback_mimetype, \"utf-8\")\n f, mtime, file_size = file_loader()\n \n headers = [(\"Date\", http_date())]\n", "issue": "set charset in shareddatamiddleware\n# Description\r\nUTF-8 Javascript files served by the SharedDataMiddleware do not contain a charset in the `Content-Type` response header. This leads to issues of javascript interpreted as US-ASCII by browsers (https://github.com/posativ/isso/issues/607).\r\n\r\n**Current**\r\n```\r\nContent-Type: application/javascript\r\n```\r\n**Expected**\r\n```\r\nContent-Type: application/javascript; charset=UTF-8\r\n```\r\n\r\nThe mime type is deducted by using python's mimetypes module and the filename:\r\n\r\nhttps://github.com/pallets/werkzeug/blob/aa9676f5cd5ab7ee08b09f38df22ef3e3c564d87/src/werkzeug/middleware/shared_data.py#L256-L257\r\n\r\nIt is sent without appending an encoding:\r\n\r\nhttps://github.com/pallets/werkzeug/blob/aa9676f5cd5ab7ee08b09f38df22ef3e3c564d87/src/werkzeug/middleware/shared_data.py#L281\r\n\r\nOther usages seem to suggest that a call to `get_content_type` is necessary to append the charset, if applicable:\r\n\r\nhttps://github.com/pallets/werkzeug/blob/e76aac8294626e24e1075e665cbf9657b88c4301/src/werkzeug/wrappers/common_descriptors.py#L146\r\n\r\n# Possible solutions\r\nI am not familiar with the codebase, but would it be possible to call `get_content_type`? 
I tried to modify it myself, but I don't know where to get the encoding from inside `SharedDataMiddleware`.\r\n\r\nMy problem is solved when I hardcode 'utf-8' as charset:\r\n```python\r\n(\"Content-Type\", get_content_type(mime_type, 'utf-8'))\r\n```\n", "before_files": [{"content": "\"\"\"\nServe Shared Static Files\n=========================\n\n.. autoclass:: SharedDataMiddleware\n :members: is_allowed\n\n:copyright: 2007 Pallets\n:license: BSD-3-Clause\n\"\"\"\nimport mimetypes\nimport os\nimport pkgutil\nimport posixpath\nfrom datetime import datetime\nfrom io import BytesIO\nfrom time import mktime\nfrom time import time\nfrom zlib import adler32\n\nfrom .._compat import PY2\nfrom .._compat import string_types\nfrom ..filesystem import get_filesystem_encoding\nfrom ..http import http_date\nfrom ..http import is_resource_modified\nfrom ..security import safe_join\nfrom ..wsgi import get_path_info\nfrom ..wsgi import wrap_file\n\n\nclass SharedDataMiddleware(object):\n\n \"\"\"A WSGI middleware that provides static content for development\n environments or simple server setups. Usage is quite simple::\n\n import os\n from werkzeug.wsgi import SharedDataMiddleware\n\n app = SharedDataMiddleware(app, {\n '/static': os.path.join(os.path.dirname(__file__), 'static')\n })\n\n The contents of the folder ``./shared`` will now be available on\n ``http://example.com/shared/``. This is pretty useful during development\n because a standalone media server is not required. One can also mount\n files on the root folder and still continue to use the application because\n the shared data middleware forwards all unhandled requests to the\n application, even if the requests are below one of the shared folders.\n\n If `pkg_resources` is available you can also tell the middleware to serve\n files from package data::\n\n app = SharedDataMiddleware(app, {\n '/static': ('myapplication', 'static')\n })\n\n This will then serve the ``static`` folder in the `myapplication`\n Python package.\n\n The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`\n rules for files that are not accessible from the web. If `cache` is set to\n `False` no caching headers are sent.\n\n Currently the middleware does not support non ASCII filenames. If the\n encoding on the file system happens to be the encoding of the URI it may\n work but this could also be by accident. We strongly suggest using ASCII\n only file names for static files.\n\n The middleware will guess the mimetype using the Python `mimetype`\n module. If it's unable to figure out the charset it will fall back\n to `fallback_mimetype`.\n\n .. versionchanged:: 0.5\n The cache timeout is configurable now.\n\n .. versionadded:: 0.6\n The `fallback_mimetype` parameter was added.\n\n :param app: the application to wrap. 
If you don't want to wrap an\n application you can pass it :exc:`NotFound`.\n :param exports: a list or dict of exported files and folders.\n :param disallow: a list of :func:`~fnmatch.fnmatch` rules.\n :param fallback_mimetype: the fallback mimetype for unknown files.\n :param cache: enable or disable caching headers.\n :param cache_timeout: the cache timeout in seconds for the headers.\n \"\"\"\n\n def __init__(\n self,\n app,\n exports,\n disallow=None,\n cache=True,\n cache_timeout=60 * 60 * 12,\n fallback_mimetype=\"text/plain\",\n ):\n self.app = app\n self.exports = []\n self.cache = cache\n self.cache_timeout = cache_timeout\n\n if hasattr(exports, \"items\"):\n exports = exports.items()\n\n for key, value in exports:\n if isinstance(value, tuple):\n loader = self.get_package_loader(*value)\n elif isinstance(value, string_types):\n if os.path.isfile(value):\n loader = self.get_file_loader(value)\n else:\n loader = self.get_directory_loader(value)\n else:\n raise TypeError(\"unknown def %r\" % value)\n\n self.exports.append((key, loader))\n\n if disallow is not None:\n from fnmatch import fnmatch\n\n self.is_allowed = lambda x: not fnmatch(x, disallow)\n\n self.fallback_mimetype = fallback_mimetype\n\n def is_allowed(self, filename):\n \"\"\"Subclasses can override this method to disallow the access to\n certain files. However by providing `disallow` in the constructor\n this method is overwritten.\n \"\"\"\n return True\n\n def _opener(self, filename):\n return lambda: (\n open(filename, \"rb\"),\n datetime.utcfromtimestamp(os.path.getmtime(filename)),\n int(os.path.getsize(filename)),\n )\n\n def get_file_loader(self, filename):\n return lambda x: (os.path.basename(filename), self._opener(filename))\n\n def get_package_loader(self, package, package_path):\n loadtime = datetime.utcnow()\n provider = pkgutil.get_loader(package)\n\n if hasattr(provider, \"get_resource_reader\"):\n # Python 3\n reader = provider.get_resource_reader(package)\n\n def loader(path):\n if path is None:\n return None, None\n\n path = safe_join(package_path, path)\n basename = posixpath.basename(path)\n\n try:\n resource = reader.open_resource(path)\n except IOError:\n return None, None\n\n if isinstance(resource, BytesIO):\n return (\n basename,\n lambda: (resource, loadtime, len(resource.getvalue())),\n )\n\n return (\n basename,\n lambda: (\n resource,\n datetime.utcfromtimestamp(os.path.getmtime(resource.name)),\n os.path.getsize(resource.name),\n ),\n )\n\n else:\n # Python 2\n package_filename = provider.get_filename(package)\n is_filesystem = os.path.exists(package_filename)\n root = os.path.join(os.path.dirname(package_filename), package_path)\n\n def loader(path):\n if path is None:\n return None, None\n\n path = safe_join(root, path)\n basename = posixpath.basename(path)\n\n if is_filesystem:\n if not os.path.isfile(path):\n return None, None\n\n return basename, self._opener(path)\n\n try:\n data = provider.get_data(path)\n except IOError:\n return None, None\n\n return basename, lambda: (BytesIO(data), loadtime, len(data))\n\n return loader\n\n def get_directory_loader(self, directory):\n def loader(path):\n if path is not None:\n path = safe_join(directory, path)\n else:\n path = directory\n\n if os.path.isfile(path):\n return os.path.basename(path), self._opener(path)\n\n return None, None\n\n return loader\n\n def generate_etag(self, mtime, file_size, real_filename):\n if not isinstance(real_filename, bytes):\n real_filename = real_filename.encode(get_filesystem_encoding())\n\n return 
\"wzsdm-%d-%s-%s\" % (\n mktime(mtime.timetuple()),\n file_size,\n adler32(real_filename) & 0xFFFFFFFF,\n )\n\n def __call__(self, environ, start_response):\n path = get_path_info(environ)\n\n if PY2:\n path = path.encode(get_filesystem_encoding())\n\n file_loader = None\n\n for search_path, loader in self.exports:\n if search_path == path:\n real_filename, file_loader = loader(None)\n\n if file_loader is not None:\n break\n\n if not search_path.endswith(\"/\"):\n search_path += \"/\"\n\n if path.startswith(search_path):\n real_filename, file_loader = loader(path[len(search_path) :])\n\n if file_loader is not None:\n break\n\n if file_loader is None or not self.is_allowed(real_filename):\n return self.app(environ, start_response)\n\n guessed_type = mimetypes.guess_type(real_filename)\n mime_type = guessed_type[0] or self.fallback_mimetype\n f, mtime, file_size = file_loader()\n\n headers = [(\"Date\", http_date())]\n\n if self.cache:\n timeout = self.cache_timeout\n etag = self.generate_etag(mtime, file_size, real_filename)\n headers += [\n (\"Etag\", '\"%s\"' % etag),\n (\"Cache-Control\", \"max-age=%d, public\" % timeout),\n ]\n\n if not is_resource_modified(environ, etag, last_modified=mtime):\n f.close()\n start_response(\"304 Not Modified\", headers)\n return []\n\n headers.append((\"Expires\", http_date(time() + timeout)))\n else:\n headers.append((\"Cache-Control\", \"public\"))\n\n headers.extend(\n (\n (\"Content-Type\", mime_type),\n (\"Content-Length\", str(file_size)),\n (\"Last-Modified\", http_date(mtime)),\n )\n )\n start_response(\"200 OK\", headers)\n return wrap_file(environ, f)\n", "path": "src/werkzeug/middleware/shared_data.py"}], "after_files": [{"content": "\"\"\"\nServe Shared Static Files\n=========================\n\n.. autoclass:: SharedDataMiddleware\n :members: is_allowed\n\n:copyright: 2007 Pallets\n:license: BSD-3-Clause\n\"\"\"\nimport mimetypes\nimport os\nimport pkgutil\nimport posixpath\nfrom datetime import datetime\nfrom io import BytesIO\nfrom time import mktime\nfrom time import time\nfrom zlib import adler32\n\nfrom .._compat import PY2\nfrom .._compat import string_types\nfrom ..filesystem import get_filesystem_encoding\nfrom ..http import http_date\nfrom ..http import is_resource_modified\nfrom ..security import safe_join\nfrom ..utils import get_content_type\nfrom ..wsgi import get_path_info\nfrom ..wsgi import wrap_file\n\n\nclass SharedDataMiddleware(object):\n\n \"\"\"A WSGI middleware that provides static content for development\n environments or simple server setups. Usage is quite simple::\n\n import os\n from werkzeug.wsgi import SharedDataMiddleware\n\n app = SharedDataMiddleware(app, {\n '/static': os.path.join(os.path.dirname(__file__), 'static')\n })\n\n The contents of the folder ``./shared`` will now be available on\n ``http://example.com/shared/``. This is pretty useful during development\n because a standalone media server is not required. 
One can also mount\n files on the root folder and still continue to use the application because\n the shared data middleware forwards all unhandled requests to the\n application, even if the requests are below one of the shared folders.\n\n If `pkg_resources` is available you can also tell the middleware to serve\n files from package data::\n\n app = SharedDataMiddleware(app, {\n '/static': ('myapplication', 'static')\n })\n\n This will then serve the ``static`` folder in the `myapplication`\n Python package.\n\n The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`\n rules for files that are not accessible from the web. If `cache` is set to\n `False` no caching headers are sent.\n\n Currently the middleware does not support non ASCII filenames. If the\n encoding on the file system happens to be the encoding of the URI it may\n work but this could also be by accident. We strongly suggest using ASCII\n only file names for static files.\n\n The middleware will guess the mimetype using the Python `mimetype`\n module. If it's unable to figure out the charset it will fall back\n to `fallback_mimetype`.\n\n :param app: the application to wrap. If you don't want to wrap an\n application you can pass it :exc:`NotFound`.\n :param exports: a list or dict of exported files and folders.\n :param disallow: a list of :func:`~fnmatch.fnmatch` rules.\n :param cache: enable or disable caching headers.\n :param cache_timeout: the cache timeout in seconds for the headers.\n :param fallback_mimetype: The fallback mimetype for unknown files.\n\n .. versionchanged:: 1.0\n The default ``fallback_mimetype`` is\n ``application/octet-stream``. If a filename looks like a text\n mimetype, the ``utf-8`` charset is added to it.\n\n .. versionadded:: 0.6\n Added ``fallback_mimetype``.\n\n .. versionchanged:: 0.5\n Added ``cache_timeout``.\n \"\"\"\n\n def __init__(\n self,\n app,\n exports,\n disallow=None,\n cache=True,\n cache_timeout=60 * 60 * 12,\n fallback_mimetype=\"application/octet-stream\",\n ):\n self.app = app\n self.exports = []\n self.cache = cache\n self.cache_timeout = cache_timeout\n\n if hasattr(exports, \"items\"):\n exports = exports.items()\n\n for key, value in exports:\n if isinstance(value, tuple):\n loader = self.get_package_loader(*value)\n elif isinstance(value, string_types):\n if os.path.isfile(value):\n loader = self.get_file_loader(value)\n else:\n loader = self.get_directory_loader(value)\n else:\n raise TypeError(\"unknown def %r\" % value)\n\n self.exports.append((key, loader))\n\n if disallow is not None:\n from fnmatch import fnmatch\n\n self.is_allowed = lambda x: not fnmatch(x, disallow)\n\n self.fallback_mimetype = fallback_mimetype\n\n def is_allowed(self, filename):\n \"\"\"Subclasses can override this method to disallow the access to\n certain files. 
However by providing `disallow` in the constructor\n this method is overwritten.\n \"\"\"\n return True\n\n def _opener(self, filename):\n return lambda: (\n open(filename, \"rb\"),\n datetime.utcfromtimestamp(os.path.getmtime(filename)),\n int(os.path.getsize(filename)),\n )\n\n def get_file_loader(self, filename):\n return lambda x: (os.path.basename(filename), self._opener(filename))\n\n def get_package_loader(self, package, package_path):\n loadtime = datetime.utcnow()\n provider = pkgutil.get_loader(package)\n\n if hasattr(provider, \"get_resource_reader\"):\n # Python 3\n reader = provider.get_resource_reader(package)\n\n def loader(path):\n if path is None:\n return None, None\n\n path = safe_join(package_path, path)\n basename = posixpath.basename(path)\n\n try:\n resource = reader.open_resource(path)\n except IOError:\n return None, None\n\n if isinstance(resource, BytesIO):\n return (\n basename,\n lambda: (resource, loadtime, len(resource.getvalue())),\n )\n\n return (\n basename,\n lambda: (\n resource,\n datetime.utcfromtimestamp(os.path.getmtime(resource.name)),\n os.path.getsize(resource.name),\n ),\n )\n\n else:\n # Python 2\n package_filename = provider.get_filename(package)\n is_filesystem = os.path.exists(package_filename)\n root = os.path.join(os.path.dirname(package_filename), package_path)\n\n def loader(path):\n if path is None:\n return None, None\n\n path = safe_join(root, path)\n basename = posixpath.basename(path)\n\n if is_filesystem:\n if not os.path.isfile(path):\n return None, None\n\n return basename, self._opener(path)\n\n try:\n data = provider.get_data(path)\n except IOError:\n return None, None\n\n return basename, lambda: (BytesIO(data), loadtime, len(data))\n\n return loader\n\n def get_directory_loader(self, directory):\n def loader(path):\n if path is not None:\n path = safe_join(directory, path)\n else:\n path = directory\n\n if os.path.isfile(path):\n return os.path.basename(path), self._opener(path)\n\n return None, None\n\n return loader\n\n def generate_etag(self, mtime, file_size, real_filename):\n if not isinstance(real_filename, bytes):\n real_filename = real_filename.encode(get_filesystem_encoding())\n\n return \"wzsdm-%d-%s-%s\" % (\n mktime(mtime.timetuple()),\n file_size,\n adler32(real_filename) & 0xFFFFFFFF,\n )\n\n def __call__(self, environ, start_response):\n path = get_path_info(environ)\n\n if PY2:\n path = path.encode(get_filesystem_encoding())\n\n file_loader = None\n\n for search_path, loader in self.exports:\n if search_path == path:\n real_filename, file_loader = loader(None)\n\n if file_loader is not None:\n break\n\n if not search_path.endswith(\"/\"):\n search_path += \"/\"\n\n if path.startswith(search_path):\n real_filename, file_loader = loader(path[len(search_path) :])\n\n if file_loader is not None:\n break\n\n if file_loader is None or not self.is_allowed(real_filename):\n return self.app(environ, start_response)\n\n guessed_type = mimetypes.guess_type(real_filename)\n mime_type = get_content_type(guessed_type[0] or self.fallback_mimetype, \"utf-8\")\n f, mtime, file_size = file_loader()\n\n headers = [(\"Date\", http_date())]\n\n if self.cache:\n timeout = self.cache_timeout\n etag = self.generate_etag(mtime, file_size, real_filename)\n headers += [\n (\"Etag\", '\"%s\"' % etag),\n (\"Cache-Control\", \"max-age=%d, public\" % timeout),\n ]\n\n if not is_resource_modified(environ, etag, last_modified=mtime):\n f.close()\n start_response(\"304 Not Modified\", headers)\n return []\n\n headers.append((\"Expires\", 
http_date(time() + timeout)))\n else:\n headers.append((\"Cache-Control\", \"public\"))\n\n headers.extend(\n (\n (\"Content-Type\", mime_type),\n (\"Content-Length\", str(file_size)),\n (\"Last-Modified\", http_date(mtime)),\n )\n )\n start_response(\"200 OK\", headers)\n return wrap_file(environ, f)\n", "path": "src/werkzeug/middleware/shared_data.py"}]} | 3,379 | 590 |
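The accepted patch above hands the charset handling to `get_content_type`; a minimal sketch of the behaviour it relies on (assuming `werkzeug.utils.get_content_type` is available and takes a mimetype plus a charset — the exact set of mimetypes treated as text-like varies by werkzeug version):

```python
# Minimal sketch of the helper used by the patch above; assumes werkzeug is installed.
from werkzeug.utils import get_content_type

print(get_content_type("text/html", "utf-8"))  # text/html; charset=utf-8
print(get_content_type("image/png", "utf-8"))  # image/png  (binary types stay unchanged)
```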
gh_patches_debug_38425 | rasdani/github-patches | git_diff | python-pillow__Pillow-5258 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PyQt6 Support
Hi everyone! I'm updating my app to PyQt6 and found a problem.
The ImageQt.ImageQt class raises an error when I try to use it with QPainter:
```pycon
>>> page_image = ImageQt.ImageQt(page)
>>> painter = QtGui.QPainter()
>>> painter.begin(page_image)
TypeError: begin(self, QPaintDevice): argument 1 has unexpected type 'ImageQt'
```
Regards
Python 3.8.6
Ubuntu 20.04
PyQt6 6.0.1
Pillow 8.1.0
--- END ISSUE ---
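For context, PyQt6 exposes enum members only through their fully scoped enum types, which is why code written against PyQt5's flat attributes breaks; a hedged sketch of the version-neutral lookup (the same indirection the accepted patch further down uses):

```python
# Hedged sketch: resolve the enum container once, then use identical member
# names under either binding. Assumes at least one of PyQt6/PyQt5 is installed.
try:
    from PyQt6.QtGui import QImage   # PyQt6: enums are fully scoped
    qt_format = QImage.Format        # e.g. QImage.Format.Format_ARGB32
except ImportError:
    from PyQt5.QtGui import QImage   # PyQt5: flat attributes still work
    qt_format = QImage               # e.g. QImage.Format_ARGB32

fmt = qt_format.Format_ARGB32
```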
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/PIL/ImageQt.py`
Content:
```
1 #
2 # The Python Imaging Library.
3 # $Id$
4 #
5 # a simple Qt image interface.
6 #
7 # history:
8 # 2006-06-03 fl: created
9 # 2006-06-04 fl: inherit from QImage instead of wrapping it
10 # 2006-06-05 fl: removed toimage helper; move string support to ImageQt
11 # 2013-11-13 fl: add support for Qt5 ([email protected])
12 #
13 # Copyright (c) 2006 by Secret Labs AB
14 # Copyright (c) 2006 by Fredrik Lundh
15 #
16 # See the README file for information on usage and redistribution.
17 #
18
19 import sys
20 from io import BytesIO
21
22 from . import Image
23 from ._util import isPath
24
25 qt_versions = [
26 ["side6", "PySide6"],
27 ["5", "PyQt5"],
28 ["side2", "PySide2"],
29 ]
30
31 # If a version has already been imported, attempt it first
32 qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True)
33 for qt_version, qt_module in qt_versions:
34 try:
35 if qt_module == "PySide6":
36 from PySide6.QtCore import QBuffer, QIODevice
37 from PySide6.QtGui import QImage, QPixmap, qRgba
38 elif qt_module == "PyQt5":
39 from PyQt5.QtCore import QBuffer, QIODevice
40 from PyQt5.QtGui import QImage, QPixmap, qRgba
41 elif qt_module == "PySide2":
42 from PySide2.QtCore import QBuffer, QIODevice
43 from PySide2.QtGui import QImage, QPixmap, qRgba
44 except (ImportError, RuntimeError):
45 continue
46 qt_is_installed = True
47 break
48 else:
49 qt_is_installed = False
50 qt_version = None
51
52
53 def rgb(r, g, b, a=255):
54 """(Internal) Turns an RGB color into a Qt compatible color integer."""
55 # use qRgb to pack the colors, and then turn the resulting long
56 # into a negative integer with the same bitpattern.
57 return qRgba(r, g, b, a) & 0xFFFFFFFF
58
59
60 def fromqimage(im):
61 """
62 :param im: A PIL Image object, or a file name
63 (given either as Python string or a PyQt string object)
64 """
65 buffer = QBuffer()
66 buffer.open(QIODevice.ReadWrite)
67 # preserve alpha channel with png
68 # otherwise ppm is more friendly with Image.open
69 if im.hasAlphaChannel():
70 im.save(buffer, "png")
71 else:
72 im.save(buffer, "ppm")
73
74 b = BytesIO()
75 b.write(buffer.data())
76 buffer.close()
77 b.seek(0)
78
79 return Image.open(b)
80
81
82 def fromqpixmap(im):
83 return fromqimage(im)
84 # buffer = QBuffer()
85 # buffer.open(QIODevice.ReadWrite)
86 # # im.save(buffer)
87 # # What if png doesn't support some image features like animation?
88 # im.save(buffer, 'ppm')
89 # bytes_io = BytesIO()
90 # bytes_io.write(buffer.data())
91 # buffer.close()
92 # bytes_io.seek(0)
93 # return Image.open(bytes_io)
94
95
96 def align8to32(bytes, width, mode):
97 """
98 converts each scanline of data from 8 bit to 32 bit aligned
99 """
100
101 bits_per_pixel = {"1": 1, "L": 8, "P": 8}[mode]
102
103 # calculate bytes per line and the extra padding if needed
104 bits_per_line = bits_per_pixel * width
105 full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8)
106 bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0)
107
108 extra_padding = -bytes_per_line % 4
109
110 # already 32 bit aligned by luck
111 if not extra_padding:
112 return bytes
113
114 new_data = []
115 for i in range(len(bytes) // bytes_per_line):
116 new_data.append(
117 bytes[i * bytes_per_line : (i + 1) * bytes_per_line]
118 + b"\x00" * extra_padding
119 )
120
121 return b"".join(new_data)
122
123
124 def _toqclass_helper(im):
125 data = None
126 colortable = None
127
128 # handle filename, if given instead of image name
129 if hasattr(im, "toUtf8"):
130 # FIXME - is this really the best way to do this?
131 im = str(im.toUtf8(), "utf-8")
132 if isPath(im):
133 im = Image.open(im)
134
135 if im.mode == "1":
136 format = QImage.Format_Mono
137 elif im.mode == "L":
138 format = QImage.Format_Indexed8
139 colortable = []
140 for i in range(256):
141 colortable.append(rgb(i, i, i))
142 elif im.mode == "P":
143 format = QImage.Format_Indexed8
144 colortable = []
145 palette = im.getpalette()
146 for i in range(0, len(palette), 3):
147 colortable.append(rgb(*palette[i : i + 3]))
148 elif im.mode == "RGB":
149 data = im.tobytes("raw", "BGRX")
150 format = QImage.Format_RGB32
151 elif im.mode == "RGBA":
152 data = im.tobytes("raw", "BGRA")
153 format = QImage.Format_ARGB32
154 else:
155 raise ValueError(f"unsupported image mode {repr(im.mode)}")
156
157 __data = data or align8to32(im.tobytes(), im.size[0], im.mode)
158 return {"data": __data, "im": im, "format": format, "colortable": colortable}
159
160
161 if qt_is_installed:
162
163 class ImageQt(QImage):
164 def __init__(self, im):
165 """
166 An PIL image wrapper for Qt. This is a subclass of PyQt's QImage
167 class.
168
169 :param im: A PIL Image object, or a file name (given either as
170 Python string or a PyQt string object).
171 """
172 im_data = _toqclass_helper(im)
173 # must keep a reference, or Qt will crash!
174 # All QImage constructors that take data operate on an existing
175 # buffer, so this buffer has to hang on for the life of the image.
176 # Fixes https://github.com/python-pillow/Pillow/issues/1370
177 self.__data = im_data["data"]
178 super().__init__(
179 self.__data,
180 im_data["im"].size[0],
181 im_data["im"].size[1],
182 im_data["format"],
183 )
184 if im_data["colortable"]:
185 self.setColorTable(im_data["colortable"])
186
187
188 def toqimage(im):
189 return ImageQt(im)
190
191
192 def toqpixmap(im):
193 # # This doesn't work. For now using a dumb approach.
194 # im_data = _toqclass_helper(im)
195 # result = QPixmap(im_data['im'].size[0], im_data['im'].size[1])
196 # result.loadFromData(im_data['data'])
197 # Fix some strange bug that causes
198 if im.mode == "RGB":
199 im = im.convert("RGBA")
200
201 qimage = toqimage(im)
202 return QPixmap.fromImage(qimage)
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/PIL/ImageQt.py b/src/PIL/ImageQt.py
--- a/src/PIL/ImageQt.py
+++ b/src/PIL/ImageQt.py
@@ -23,6 +23,7 @@
from ._util import isPath
qt_versions = [
+ ["6", "PyQt6"],
["side6", "PySide6"],
["5", "PyQt5"],
["side2", "PySide2"],
@@ -32,7 +33,10 @@
qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True)
for qt_version, qt_module in qt_versions:
try:
- if qt_module == "PySide6":
+ if qt_module == "PyQt6":
+ from PyQt6.QtCore import QBuffer, QIODevice
+ from PyQt6.QtGui import QImage, QPixmap, qRgba
+ elif qt_module == "PySide6":
from PySide6.QtCore import QBuffer, QIODevice
from PySide6.QtGui import QImage, QPixmap, qRgba
elif qt_module == "PyQt5":
@@ -63,7 +67,8 @@
(given either as Python string or a PyQt string object)
"""
buffer = QBuffer()
- buffer.open(QIODevice.ReadWrite)
+ qt_openmode = QIODevice.OpenMode if qt_version == "6" else QIODevice
+ buffer.open(qt_openmode.ReadWrite)
# preserve alpha channel with png
# otherwise ppm is more friendly with Image.open
if im.hasAlphaChannel():
@@ -132,25 +137,26 @@
if isPath(im):
im = Image.open(im)
+ qt_format = QImage.Format if qt_version == "6" else QImage
if im.mode == "1":
- format = QImage.Format_Mono
+ format = qt_format.Format_Mono
elif im.mode == "L":
- format = QImage.Format_Indexed8
+ format = qt_format.Format_Indexed8
colortable = []
for i in range(256):
colortable.append(rgb(i, i, i))
elif im.mode == "P":
- format = QImage.Format_Indexed8
+ format = qt_format.Format_Indexed8
colortable = []
palette = im.getpalette()
for i in range(0, len(palette), 3):
colortable.append(rgb(*palette[i : i + 3]))
elif im.mode == "RGB":
data = im.tobytes("raw", "BGRX")
- format = QImage.Format_RGB32
+ format = qt_format.Format_RGB32
elif im.mode == "RGBA":
data = im.tobytes("raw", "BGRA")
- format = QImage.Format_ARGB32
+ format = qt_format.Format_ARGB32
else:
raise ValueError(f"unsupported image mode {repr(im.mode)}")
| {"golden_diff": "diff --git a/src/PIL/ImageQt.py b/src/PIL/ImageQt.py\n--- a/src/PIL/ImageQt.py\n+++ b/src/PIL/ImageQt.py\n@@ -23,6 +23,7 @@\n from ._util import isPath\n \n qt_versions = [\n+ [\"6\", \"PyQt6\"],\n [\"side6\", \"PySide6\"],\n [\"5\", \"PyQt5\"],\n [\"side2\", \"PySide2\"],\n@@ -32,7 +33,10 @@\n qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True)\n for qt_version, qt_module in qt_versions:\n try:\n- if qt_module == \"PySide6\":\n+ if qt_module == \"PyQt6\":\n+ from PyQt6.QtCore import QBuffer, QIODevice\n+ from PyQt6.QtGui import QImage, QPixmap, qRgba\n+ elif qt_module == \"PySide6\":\n from PySide6.QtCore import QBuffer, QIODevice\n from PySide6.QtGui import QImage, QPixmap, qRgba\n elif qt_module == \"PyQt5\":\n@@ -63,7 +67,8 @@\n (given either as Python string or a PyQt string object)\n \"\"\"\n buffer = QBuffer()\n- buffer.open(QIODevice.ReadWrite)\n+ qt_openmode = QIODevice.OpenMode if qt_version == \"6\" else QIODevice\n+ buffer.open(qt_openmode.ReadWrite)\n # preserve alpha channel with png\n # otherwise ppm is more friendly with Image.open\n if im.hasAlphaChannel():\n@@ -132,25 +137,26 @@\n if isPath(im):\n im = Image.open(im)\n \n+ qt_format = QImage.Format if qt_version == \"6\" else QImage\n if im.mode == \"1\":\n- format = QImage.Format_Mono\n+ format = qt_format.Format_Mono\n elif im.mode == \"L\":\n- format = QImage.Format_Indexed8\n+ format = qt_format.Format_Indexed8\n colortable = []\n for i in range(256):\n colortable.append(rgb(i, i, i))\n elif im.mode == \"P\":\n- format = QImage.Format_Indexed8\n+ format = qt_format.Format_Indexed8\n colortable = []\n palette = im.getpalette()\n for i in range(0, len(palette), 3):\n colortable.append(rgb(*palette[i : i + 3]))\n elif im.mode == \"RGB\":\n data = im.tobytes(\"raw\", \"BGRX\")\n- format = QImage.Format_RGB32\n+ format = qt_format.Format_RGB32\n elif im.mode == \"RGBA\":\n data = im.tobytes(\"raw\", \"BGRA\")\n- format = QImage.Format_ARGB32\n+ format = qt_format.Format_ARGB32\n else:\n raise ValueError(f\"unsupported image mode {repr(im.mode)}\")\n", "issue": "PyQt6 Support\nHi everyone!. I'm updating my app to PyQt6 and found a problem.\r\n\r\nImageQt.ImageQt class raises an error when try to use with QPainter:\r\n\r\n```pycon\r\n>>> page_image = ImageQt.ImageQt(page)\r\n>>> painter = QtGui.QPainter()\r\n>>> painter.begin(page_image)\r\nTypeError: begin(self, QPaintDevice): argument 1 has unexpected type 'ImageQt'\r\n```\r\n\r\nRegards\r\n\r\nPython 3.8.6\r\nUbuntu 20.04\r\nPyQt6 6.0.1\r\nPillow 8.1.0\n", "before_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# a simple Qt image interface.\n#\n# history:\n# 2006-06-03 fl: created\n# 2006-06-04 fl: inherit from QImage instead of wrapping it\n# 2006-06-05 fl: removed toimage helper; move string support to ImageQt\n# 2013-11-13 fl: add support for Qt5 ([email protected])\n#\n# Copyright (c) 2006 by Secret Labs AB\n# Copyright (c) 2006 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport sys\nfrom io import BytesIO\n\nfrom . 
import Image\nfrom ._util import isPath\n\nqt_versions = [\n [\"side6\", \"PySide6\"],\n [\"5\", \"PyQt5\"],\n [\"side2\", \"PySide2\"],\n]\n\n# If a version has already been imported, attempt it first\nqt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True)\nfor qt_version, qt_module in qt_versions:\n try:\n if qt_module == \"PySide6\":\n from PySide6.QtCore import QBuffer, QIODevice\n from PySide6.QtGui import QImage, QPixmap, qRgba\n elif qt_module == \"PyQt5\":\n from PyQt5.QtCore import QBuffer, QIODevice\n from PyQt5.QtGui import QImage, QPixmap, qRgba\n elif qt_module == \"PySide2\":\n from PySide2.QtCore import QBuffer, QIODevice\n from PySide2.QtGui import QImage, QPixmap, qRgba\n except (ImportError, RuntimeError):\n continue\n qt_is_installed = True\n break\nelse:\n qt_is_installed = False\n qt_version = None\n\n\ndef rgb(r, g, b, a=255):\n \"\"\"(Internal) Turns an RGB color into a Qt compatible color integer.\"\"\"\n # use qRgb to pack the colors, and then turn the resulting long\n # into a negative integer with the same bitpattern.\n return qRgba(r, g, b, a) & 0xFFFFFFFF\n\n\ndef fromqimage(im):\n \"\"\"\n :param im: A PIL Image object, or a file name\n (given either as Python string or a PyQt string object)\n \"\"\"\n buffer = QBuffer()\n buffer.open(QIODevice.ReadWrite)\n # preserve alpha channel with png\n # otherwise ppm is more friendly with Image.open\n if im.hasAlphaChannel():\n im.save(buffer, \"png\")\n else:\n im.save(buffer, \"ppm\")\n\n b = BytesIO()\n b.write(buffer.data())\n buffer.close()\n b.seek(0)\n\n return Image.open(b)\n\n\ndef fromqpixmap(im):\n return fromqimage(im)\n # buffer = QBuffer()\n # buffer.open(QIODevice.ReadWrite)\n # # im.save(buffer)\n # # What if png doesn't support some image features like animation?\n # im.save(buffer, 'ppm')\n # bytes_io = BytesIO()\n # bytes_io.write(buffer.data())\n # buffer.close()\n # bytes_io.seek(0)\n # return Image.open(bytes_io)\n\n\ndef align8to32(bytes, width, mode):\n \"\"\"\n converts each scanline of data from 8 bit to 32 bit aligned\n \"\"\"\n\n bits_per_pixel = {\"1\": 1, \"L\": 8, \"P\": 8}[mode]\n\n # calculate bytes per line and the extra padding if needed\n bits_per_line = bits_per_pixel * width\n full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8)\n bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0)\n\n extra_padding = -bytes_per_line % 4\n\n # already 32 bit aligned by luck\n if not extra_padding:\n return bytes\n\n new_data = []\n for i in range(len(bytes) // bytes_per_line):\n new_data.append(\n bytes[i * bytes_per_line : (i + 1) * bytes_per_line]\n + b\"\\x00\" * extra_padding\n )\n\n return b\"\".join(new_data)\n\n\ndef _toqclass_helper(im):\n data = None\n colortable = None\n\n # handle filename, if given instead of image name\n if hasattr(im, \"toUtf8\"):\n # FIXME - is this really the best way to do this?\n im = str(im.toUtf8(), \"utf-8\")\n if isPath(im):\n im = Image.open(im)\n\n if im.mode == \"1\":\n format = QImage.Format_Mono\n elif im.mode == \"L\":\n format = QImage.Format_Indexed8\n colortable = []\n for i in range(256):\n colortable.append(rgb(i, i, i))\n elif im.mode == \"P\":\n format = QImage.Format_Indexed8\n colortable = []\n palette = im.getpalette()\n for i in range(0, len(palette), 3):\n colortable.append(rgb(*palette[i : i + 3]))\n elif im.mode == \"RGB\":\n data = im.tobytes(\"raw\", \"BGRX\")\n format = QImage.Format_RGB32\n elif im.mode == \"RGBA\":\n data = im.tobytes(\"raw\", \"BGRA\")\n format = 
QImage.Format_ARGB32\n else:\n raise ValueError(f\"unsupported image mode {repr(im.mode)}\")\n\n __data = data or align8to32(im.tobytes(), im.size[0], im.mode)\n return {\"data\": __data, \"im\": im, \"format\": format, \"colortable\": colortable}\n\n\nif qt_is_installed:\n\n class ImageQt(QImage):\n def __init__(self, im):\n \"\"\"\n An PIL image wrapper for Qt. This is a subclass of PyQt's QImage\n class.\n\n :param im: A PIL Image object, or a file name (given either as\n Python string or a PyQt string object).\n \"\"\"\n im_data = _toqclass_helper(im)\n # must keep a reference, or Qt will crash!\n # All QImage constructors that take data operate on an existing\n # buffer, so this buffer has to hang on for the life of the image.\n # Fixes https://github.com/python-pillow/Pillow/issues/1370\n self.__data = im_data[\"data\"]\n super().__init__(\n self.__data,\n im_data[\"im\"].size[0],\n im_data[\"im\"].size[1],\n im_data[\"format\"],\n )\n if im_data[\"colortable\"]:\n self.setColorTable(im_data[\"colortable\"])\n\n\ndef toqimage(im):\n return ImageQt(im)\n\n\ndef toqpixmap(im):\n # # This doesn't work. For now using a dumb approach.\n # im_data = _toqclass_helper(im)\n # result = QPixmap(im_data['im'].size[0], im_data['im'].size[1])\n # result.loadFromData(im_data['data'])\n # Fix some strange bug that causes\n if im.mode == \"RGB\":\n im = im.convert(\"RGBA\")\n\n qimage = toqimage(im)\n return QPixmap.fromImage(qimage)\n", "path": "src/PIL/ImageQt.py"}], "after_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# a simple Qt image interface.\n#\n# history:\n# 2006-06-03 fl: created\n# 2006-06-04 fl: inherit from QImage instead of wrapping it\n# 2006-06-05 fl: removed toimage helper; move string support to ImageQt\n# 2013-11-13 fl: add support for Qt5 ([email protected])\n#\n# Copyright (c) 2006 by Secret Labs AB\n# Copyright (c) 2006 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport sys\nfrom io import BytesIO\n\nfrom . 
import Image\nfrom ._util import isPath\n\nqt_versions = [\n [\"6\", \"PyQt6\"],\n [\"side6\", \"PySide6\"],\n [\"5\", \"PyQt5\"],\n [\"side2\", \"PySide2\"],\n]\n\n# If a version has already been imported, attempt it first\nqt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True)\nfor qt_version, qt_module in qt_versions:\n try:\n if qt_module == \"PyQt6\":\n from PyQt6.QtCore import QBuffer, QIODevice\n from PyQt6.QtGui import QImage, QPixmap, qRgba\n elif qt_module == \"PySide6\":\n from PySide6.QtCore import QBuffer, QIODevice\n from PySide6.QtGui import QImage, QPixmap, qRgba\n elif qt_module == \"PyQt5\":\n from PyQt5.QtCore import QBuffer, QIODevice\n from PyQt5.QtGui import QImage, QPixmap, qRgba\n elif qt_module == \"PySide2\":\n from PySide2.QtCore import QBuffer, QIODevice\n from PySide2.QtGui import QImage, QPixmap, qRgba\n except (ImportError, RuntimeError):\n continue\n qt_is_installed = True\n break\nelse:\n qt_is_installed = False\n qt_version = None\n\n\ndef rgb(r, g, b, a=255):\n \"\"\"(Internal) Turns an RGB color into a Qt compatible color integer.\"\"\"\n # use qRgb to pack the colors, and then turn the resulting long\n # into a negative integer with the same bitpattern.\n return qRgba(r, g, b, a) & 0xFFFFFFFF\n\n\ndef fromqimage(im):\n \"\"\"\n :param im: A PIL Image object, or a file name\n (given either as Python string or a PyQt string object)\n \"\"\"\n buffer = QBuffer()\n qt_openmode = QIODevice.OpenMode if qt_version == \"6\" else QIODevice\n buffer.open(qt_openmode.ReadWrite)\n # preserve alpha channel with png\n # otherwise ppm is more friendly with Image.open\n if im.hasAlphaChannel():\n im.save(buffer, \"png\")\n else:\n im.save(buffer, \"ppm\")\n\n b = BytesIO()\n b.write(buffer.data())\n buffer.close()\n b.seek(0)\n\n return Image.open(b)\n\n\ndef fromqpixmap(im):\n return fromqimage(im)\n # buffer = QBuffer()\n # buffer.open(QIODevice.ReadWrite)\n # # im.save(buffer)\n # # What if png doesn't support some image features like animation?\n # im.save(buffer, 'ppm')\n # bytes_io = BytesIO()\n # bytes_io.write(buffer.data())\n # buffer.close()\n # bytes_io.seek(0)\n # return Image.open(bytes_io)\n\n\ndef align8to32(bytes, width, mode):\n \"\"\"\n converts each scanline of data from 8 bit to 32 bit aligned\n \"\"\"\n\n bits_per_pixel = {\"1\": 1, \"L\": 8, \"P\": 8}[mode]\n\n # calculate bytes per line and the extra padding if needed\n bits_per_line = bits_per_pixel * width\n full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8)\n bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0)\n\n extra_padding = -bytes_per_line % 4\n\n # already 32 bit aligned by luck\n if not extra_padding:\n return bytes\n\n new_data = []\n for i in range(len(bytes) // bytes_per_line):\n new_data.append(\n bytes[i * bytes_per_line : (i + 1) * bytes_per_line]\n + b\"\\x00\" * extra_padding\n )\n\n return b\"\".join(new_data)\n\n\ndef _toqclass_helper(im):\n data = None\n colortable = None\n\n # handle filename, if given instead of image name\n if hasattr(im, \"toUtf8\"):\n # FIXME - is this really the best way to do this?\n im = str(im.toUtf8(), \"utf-8\")\n if isPath(im):\n im = Image.open(im)\n\n qt_format = QImage.Format if qt_version == \"6\" else QImage\n if im.mode == \"1\":\n format = qt_format.Format_Mono\n elif im.mode == \"L\":\n format = qt_format.Format_Indexed8\n colortable = []\n for i in range(256):\n colortable.append(rgb(i, i, i))\n elif im.mode == \"P\":\n format = qt_format.Format_Indexed8\n 
colortable = []\n palette = im.getpalette()\n for i in range(0, len(palette), 3):\n colortable.append(rgb(*palette[i : i + 3]))\n elif im.mode == \"RGB\":\n data = im.tobytes(\"raw\", \"BGRX\")\n format = qt_format.Format_RGB32\n elif im.mode == \"RGBA\":\n data = im.tobytes(\"raw\", \"BGRA\")\n format = qt_format.Format_ARGB32\n else:\n raise ValueError(f\"unsupported image mode {repr(im.mode)}\")\n\n __data = data or align8to32(im.tobytes(), im.size[0], im.mode)\n return {\"data\": __data, \"im\": im, \"format\": format, \"colortable\": colortable}\n\n\nif qt_is_installed:\n\n class ImageQt(QImage):\n def __init__(self, im):\n \"\"\"\n An PIL image wrapper for Qt. This is a subclass of PyQt's QImage\n class.\n\n :param im: A PIL Image object, or a file name (given either as\n Python string or a PyQt string object).\n \"\"\"\n im_data = _toqclass_helper(im)\n # must keep a reference, or Qt will crash!\n # All QImage constructors that take data operate on an existing\n # buffer, so this buffer has to hang on for the life of the image.\n # Fixes https://github.com/python-pillow/Pillow/issues/1370\n self.__data = im_data[\"data\"]\n super().__init__(\n self.__data,\n im_data[\"im\"].size[0],\n im_data[\"im\"].size[1],\n im_data[\"format\"],\n )\n if im_data[\"colortable\"]:\n self.setColorTable(im_data[\"colortable\"])\n\n\ndef toqimage(im):\n return ImageQt(im)\n\n\ndef toqpixmap(im):\n # # This doesn't work. For now using a dumb approach.\n # im_data = _toqclass_helper(im)\n # result = QPixmap(im_data['im'].size[0], im_data['im'].size[1])\n # result.loadFromData(im_data['data'])\n # Fix some strange bug that causes\n if im.mode == \"RGB\":\n im = im.convert(\"RGBA\")\n\n qimage = toqimage(im)\n return QPixmap.fromImage(qimage)\n", "path": "src/PIL/ImageQt.py"}]} | 2,529 | 656 |
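With that patch applied and PyQt6 installed, the reporter's snippet should run, since `ImageQt.ImageQt` then subclasses the PyQt6 `QImage`; a hedged sketch of the check (assumes PyQt6 and a Pillow build containing the fix above):

```python
# Hedged re-run of the original repro against the patched module.
from PyQt6.QtGui import QGuiApplication, QPainter  # import PyQt6 first so ImageQt picks it
from PIL import Image, ImageQt

app = QGuiApplication([])                    # a Qt application must exist before painting
page = Image.new("RGB", (100, 100), "white")
page_image = ImageQt.ImageQt(page)

painter = QPainter()
assert painter.begin(page_image)             # previously raised TypeError under PyQt6
painter.end()
```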
gh_patches_debug_34003 | rasdani/github-patches | git_diff | UTNkar__moore-191 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
List of Superusers
<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->
### Description
Make it possible to sort by level under /admin/users/ so admins show up and you get a proper overview.
Or: Make a list of users with superuser access to the website
### Steps to Reproduce
1. [First Step]
2. [Second Step]
3. [and so on...]
<!-- Please select the appropriate "topic category"/blue and "issue type"/yellow label -->
--- END ISSUE ---
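One hedged sketch of the requested overview (illustrative only, not necessarily the fix the repository ships): register an extra Wagtail `ModelAdmin` over `Member` that lists and filters on `is_superuser`, following the conventions of the `wagtail_hooks.py` shown below:

```python
# Hypothetical addition to members/wagtail_hooks.py; class name, icon and menu
# placement are illustrative assumptions, not taken from the repository.
from django.utils.translation import ugettext_lazy as _
from wagtail.contrib.modeladmin.options import ModelAdmin, modeladmin_register

from members.models import Member


class SuperuserAdmin(ModelAdmin):
    model = Member
    menu_label = _('Superusers')
    menu_icon = 'fa-user-secret'
    menu_order = 530
    add_to_settings_menu = False
    list_display = ('username', 'first_name', 'last_name', 'is_superuser')
    list_filter = ('is_superuser',)
    search_fields = ('username', 'first_name', 'last_name')


modeladmin_register(SuperuserAdmin)
```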
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/members/wagtail_hooks.py`
Content:
```
1 from django.forms import CheckboxSelectMultiple
2 from wagtail.contrib.modeladmin.options import ModelAdmin, ModelAdminGroup, \
3 modeladmin_register
4 from wagtail.contrib.modeladmin.views import EditView, CreateView
5 from wagtail.wagtailadmin.edit_handlers import TabbedInterface, ObjectList, \
6 FieldPanel
7
8 from members.models import StudyProgram, Section
9 from django.utils.translation import ugettext_lazy as _
10
11
12 class StudyProgramEditHandler:
13 def get_edit_handler_class(self):
14 edit_handler = TabbedInterface([
15 ObjectList([
16 FieldPanel('name_en'),
17 FieldPanel('name_sv'),
18 FieldPanel('degree'),
19 ], heading=_('General'),
20 ),
21 # TODO: http://stackoverflow.com/questions/43188124/
22 # ObjectList([
23 # FieldPanel('sections', widget=CheckboxSelectMultiple),
24 # ], heading=_('Sections'),
25 # ),
26 ])
27 return edit_handler.bind_to_model(self.model)
28
29
30 class StudyProgramEditView(StudyProgramEditHandler, EditView):
31 pass
32
33
34 class StudyProgramCreateView(StudyProgramEditHandler, CreateView):
35 pass
36
37
38 class StudyProgramAdmin(ModelAdmin):
39 model = StudyProgram
40 menu_label = _('Study Program')
41 menu_icon = 'fa-graduation-cap'
42 menu_order = 510
43 add_to_settings_menu = False
44 create_view_class = StudyProgramCreateView
45 edit_view_class = StudyProgramEditView
46 list_display = ('degree', 'name_en', 'name_sv')
47 search_fields = ('name_en', 'name_sv')
48
49
50 class SectionEditHandler:
51 def get_edit_handler_class(self):
52 edit_handler = TabbedInterface([
53 ObjectList([
54 FieldPanel('name_en'),
55 FieldPanel('name_sv'),
56 FieldPanel('abbreviation'),
57 ], heading=_('General'), ),
58 ObjectList([
59 FieldPanel('studies', widget=CheckboxSelectMultiple),
60 ], heading=_('Studies'), ),
61 ])
62 return edit_handler.bind_to_model(self.model)
63
64
65 class SectionEditView(SectionEditHandler, EditView):
66 pass
67
68
69 class SectionCreateView(SectionEditHandler, CreateView):
70 pass
71
72
73 class SectionAdmin(ModelAdmin):
74 model = Section
75 menu_label = _('Sections')
76 menu_icon = 'fa-eye'
77 menu_order = 520
78 add_to_settings_menu = False
79 create_view_class = SectionCreateView
80 edit_view_class = SectionEditView
81 list_display = ('abbreviation', 'name_en', 'name_sv')
82 search_fields = ('name_en', 'name_sv', 'abbreviation')
83
84
85 class EducationAdminGroup(ModelAdminGroup):
86 menu_label = _('Education')
87 menu_icon = 'fa-university'
88 menu_order = 450
89 items = (StudyProgramAdmin, SectionAdmin)
90
91
92 modeladmin_register(EducationAdminGroup)
93
```
Path: `src/members/models.py`
Content:
```
1 import requests
2 from django.conf import settings
3 from django.contrib.auth.models import AbstractUser
4 from django.core import validators
5 from django.db import models
6 from django.db.models import ManyToManyField
7 from django.utils import timezone
8 from django.utils.translation import ugettext_lazy as _
9 from requests.auth import HTTPDigestAuth
10 from simple_email_confirmation.models import SimpleEmailConfirmationUserMixin
11
12 from utils.translation import TranslatedField
13
14
15 class StudyProgram(models.Model):
16 """This class describes a university study program"""
17
18 class Meta:
19 verbose_name = _('study program')
20 verbose_name_plural = _('study programs')
21
22 DEGREE_CHOICES = (
23 ('bsc', _('Bachelor of Science')),
24 ('msc', _('Master of Science')),
25 ('be', _('Bachelor of Engineering')),
26 ('msceng', _('Master of Science in Engineering')),
27 )
28
29 name_en = models.CharField(
30 max_length=255,
31 verbose_name=_('English program name'),
32 help_text=_('Enter the name of the study program'),
33 blank=False,
34 )
35
36 name_sv = models.CharField(
37 max_length=255,
38 verbose_name=_('Swedish program name'),
39 help_text=_('Enter the name of the study program'),
40 blank=False,
41 )
42
43 name = TranslatedField('name_en', 'name_sv')
44
45 degree = models.CharField(
46 max_length=20,
47 choices=DEGREE_CHOICES,
48 verbose_name=_('Degree type'),
49 blank=True,
50 )
51
52 def __str__(self) -> str:
53 if self.degree:
54 return _('%(degree_type)s in %(study_program)s') % {
55 'degree_type': self.get_degree_display(),
56 'study_program': self.name,
57 }
58 else:
59 return self.name.__str__()
60
61
62 class Section(models.Model):
63 """This class represent a study section"""
64
65 class Meta:
66 verbose_name = _('section')
67 verbose_name_plural = _('sections')
68
69 name_en = models.CharField(
70 max_length=255,
71 verbose_name=_('English section name'),
72 help_text=_('Enter the name of the section'),
73 blank=False,
74 )
75
76 name_sv = models.CharField(
77 max_length=255,
78 verbose_name=_('Swedish section name'),
79 help_text=_('Enter the name of the section'),
80 blank=False,
81 )
82
83 name = TranslatedField('name_en', 'name_sv')
84
85 abbreviation = models.CharField(
86 max_length=130,
87 verbose_name=_('Section abbreviation'),
88 help_text=_('Enter the abbreviation for the section'),
89 blank=True,
90 )
91
92 studies = ManyToManyField(
93 StudyProgram,
94 related_name='sections',
95 blank=True,
96 )
97
98 def __str__(self) -> str:
99 if self.abbreviation:
100 return '%s - %s' % (self.abbreviation, self.name)
101 else:
102 return self.name.__str__()
103
104
105 class Member(SimpleEmailConfirmationUserMixin, AbstractUser):
106 """This class describes a member"""
107
108 # ---- Personal information ------
109
110 birthday = models.DateField(
111 verbose_name=_('Birthday'),
112 null=True
113 )
114
115 person_number_ext = models.CharField(
116 max_length=4,
117 verbose_name=_('Person number extension'),
118 help_text=_('Enter the last four digits of your Swedish person '
119 'number, given by the Swedish tax authority'),
120 validators=[validators.RegexValidator(
121 regex=r'^\d{4}$',
122 message=_('The person number extension consists of four numbers'),
123 )],
124 unique_for_date="birthday",
125 blank=True,
126 )
127
128 # ---- Membership information ------
129
130 MEMBERSHIP_CHOICES = (
131 ('unknown', _('Unknown')),
132 ('nonmember', _('Nonmember')),
133 ('member', _('Member')),
134 ('alumnus', _('Alumnus')),
135 )
136
137 status = models.CharField(
138 max_length=20,
139 choices=MEMBERSHIP_CHOICES,
140 verbose_name=_('Membership status'),
141 blank=False,
142 default='unknown'
143 )
144 status_changed = models.DateTimeField(
145 default=timezone.now,
146 null=False,
147 )
148
149 # ---- Contact information ------
150
151 phone_number = models.CharField(
152 max_length=20,
153 verbose_name=_('Phone number'),
154 help_text=_('Enter a phone number so UTN may reach you'),
155 validators=[validators.RegexValidator(
156 regex=r'^\+?\d+$',
157 message=_('Please enter a valid phone number'),
158 )],
159 blank=True,
160 )
161
162 # ---- University information ------
163
164 registration_year = models.CharField(
165 max_length=4,
166 verbose_name=_('Registration year'),
167 help_text=_('Enter the year you started studying at the TakNat '
168 'faculty'),
169 validators=[validators.RegexValidator(
170 regex=r'^\d{4}$',
171 message=_('Please enter a valid year')
172 )],
173 blank=True,
174 )
175
176 study = models.ForeignKey(
177 StudyProgram,
178 verbose_name=_('Study program'),
179 on_delete=models.SET_NULL,
180 null=True,
181 blank=True,
182 )
183
184 section = models.ForeignKey(
185 Section,
186 verbose_name=_('Member of section'),
187 on_delete=models.SET_NULL,
188 null=True,
189 blank=True,
190 )
191
192 def __str__(self) -> str:
193 if self.first_name and self.last_name:
194 return '%s %s' % (self.first_name, self.last_name)
195 else:
196 return self.username
197
198 def person_number(self) -> str:
199 if self.birthday is None or self.person_number_ext is None:
200 return ''
201 else:
202 return '%s-%s' % (self.birthday.strftime('%Y%m%d'),
203 self.person_number_ext)
204
205 def update_status(self, data=None):
206 if data is None:
207 if self.person_number() == '':
208 return
209 try:
210 r = requests.get(
211 'https://register.utn.se/api.php',
212 auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,
213 settings.MEMBERSHIP_API_PASSWORD),
214 params={
215 'action': 'check',
216 'person_number': self.person_number().replace('-', '')
217 },
218 )
219 data = r.json().get('status')
220 except requests.exceptions.ConnectionError:
221 data = 'unknown'
222 except ValueError:
223 return
224
225 if data == 'member':
226 self.status = 'member'
227 elif data == 'nonmember':
228 if self.status in ['unknown', 'nonmember']:
229 self.status = 'nonmember'
230 else:
231 self.status = 'alumnus'
232
233 self.status_changed = timezone.now()
234
235 def remove_old_email(self):
236 for email in self.get_unconfirmed_emails() or []:
237 self.remove_email(email)
238 for email in self.get_confirmed_emails():
239 if email != self.email:
240 self.remove_email(email)
241
```
Path: `src/members/signals.py`
Content:
```
1 import datetime
2 from django.conf import settings
3 from django.core.mail import EmailMultiAlternatives
4 from django.db.models.signals import pre_save
5 from django.dispatch import receiver
6 from django.template import loader
7 from django.utils import timezone
8 from simple_email_confirmation import unconfirmed_email_created
9
10 from members.models import Member
11
12
13 @receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')
14 def send_confirmation_email(sender, email, user=None, **kwargs):
15 user = user or sender
16 if user is not None:
17 context = {
18 'email': email,
19 'domain': settings.BASE_URL,
20 'site_name': settings.WAGTAIL_SITE_NAME,
21 'token': user.get_confirmation_key(email),
22 'new_user': user.get_confirmed_emails() == []
23 }
24
25 subject = loader.render_to_string(
26 'members/email_change_subject.txt', context)
27 # Email subject *must not* contain newlines
28 subject = ''.join(subject.splitlines())
29 body = loader.render_to_string('members/email_change_email.html',
30 context)
31
32 email_message = EmailMultiAlternatives(subject, body, None, [email])
33 email_message.send()
34
35
36 @receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')
37 def check_membership(sender, instance, **kwargs):
38 if timezone.now() - instance.status_changed > datetime.timedelta(1)\
39 or instance.status == 'unknown':
40 instance.update_status()
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/members/models.py b/src/members/models.py
--- a/src/members/models.py
+++ b/src/members/models.py
@@ -191,7 +191,7 @@
def __str__(self) -> str:
if self.first_name and self.last_name:
- return '%s %s' % (self.first_name, self.last_name)
+ return self.get_full_name()
else:
return self.username
diff --git a/src/members/signals.py b/src/members/signals.py
--- a/src/members/signals.py
+++ b/src/members/signals.py
@@ -12,7 +12,7 @@
@receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')
def send_confirmation_email(sender, email, user=None, **kwargs):
- user = user or sender
+ user = user or sender # TODO: use user.send_email
if user is not None:
context = {
'email': email,
diff --git a/src/members/wagtail_hooks.py b/src/members/wagtail_hooks.py
--- a/src/members/wagtail_hooks.py
+++ b/src/members/wagtail_hooks.py
@@ -1,12 +1,14 @@
from django.forms import CheckboxSelectMultiple
+from django.template import loader
+from django.utils.translation import ugettext_lazy as _
from wagtail.contrib.modeladmin.options import ModelAdmin, ModelAdminGroup, \
modeladmin_register
from wagtail.contrib.modeladmin.views import EditView, CreateView
from wagtail.wagtailadmin.edit_handlers import TabbedInterface, ObjectList, \
FieldPanel
+from wagtail.wagtailcore import hooks
-from members.models import StudyProgram, Section
-from django.utils.translation import ugettext_lazy as _
+from members.models import StudyProgram, Section, Member
class StudyProgramEditHandler:
@@ -90,3 +92,22 @@
modeladmin_register(EducationAdminGroup)
+
+
+class SuperUserPanel(object):
+ order = 1000
+
+ def __init__(self, request):
+ self.request = request
+
+ def render(self):
+ c = {
+ 'supers': Member.objects.filter(is_superuser=True),
+ 'user': self.request.user
+ }
+ return loader.get_template('members/admin_panel.html').render(c)
+
+
[email protected]('construct_homepage_panels')
+def add_super_user_panel(request, panels):
+ return panels.append(SuperUserPanel(request))
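A note on this patch (annotation added here, not part of the original record): the `__str__` change relies on Django's built-in `AbstractUser.get_full_name()`, and the new homepage panel assumes a `members/admin_panel.html` template exists to render the superuser list. Roughly, the built-in method does the same formatting the old code did by hand:

```python
# Rough sketch (assumption, not from this repository) of what
# django.contrib.auth.models.AbstractUser.get_full_name() does,
# which is why it can replace the manual '%s %s' formatting above.
def get_full_name(self):
    full_name = '%s %s' % (self.first_name, self.last_name)
    return full_name.strip()
```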
| {"golden_diff": "diff --git a/src/members/models.py b/src/members/models.py\n--- a/src/members/models.py\n+++ b/src/members/models.py\n@@ -191,7 +191,7 @@\n \n def __str__(self) -> str:\n if self.first_name and self.last_name:\n- return '%s %s' % (self.first_name, self.last_name)\n+ return self.get_full_name()\n else:\n return self.username\n \ndiff --git a/src/members/signals.py b/src/members/signals.py\n--- a/src/members/signals.py\n+++ b/src/members/signals.py\n@@ -12,7 +12,7 @@\n \n @receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')\n def send_confirmation_email(sender, email, user=None, **kwargs):\n- user = user or sender\n+ user = user or sender # TODO: use user.send_email\n if user is not None:\n context = {\n 'email': email,\ndiff --git a/src/members/wagtail_hooks.py b/src/members/wagtail_hooks.py\n--- a/src/members/wagtail_hooks.py\n+++ b/src/members/wagtail_hooks.py\n@@ -1,12 +1,14 @@\n from django.forms import CheckboxSelectMultiple\n+from django.template import loader\n+from django.utils.translation import ugettext_lazy as _\n from wagtail.contrib.modeladmin.options import ModelAdmin, ModelAdminGroup, \\\n modeladmin_register\n from wagtail.contrib.modeladmin.views import EditView, CreateView\n from wagtail.wagtailadmin.edit_handlers import TabbedInterface, ObjectList, \\\n FieldPanel\n+from wagtail.wagtailcore import hooks\n \n-from members.models import StudyProgram, Section\n-from django.utils.translation import ugettext_lazy as _\n+from members.models import StudyProgram, Section, Member\n \n \n class StudyProgramEditHandler:\n@@ -90,3 +92,22 @@\n \n \n modeladmin_register(EducationAdminGroup)\n+\n+\n+class SuperUserPanel(object):\n+ order = 1000\n+\n+ def __init__(self, request):\n+ self.request = request\n+\n+ def render(self):\n+ c = {\n+ 'supers': Member.objects.filter(is_superuser=True),\n+ 'user': self.request.user\n+ }\n+ return loader.get_template('members/admin_panel.html').render(c)\n+\n+\[email protected]('construct_homepage_panels')\n+def add_super_user_panel(request, panels):\n+ return panels.append(SuperUserPanel(request))\n", "issue": "List of Superusers\n<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->\r\n\r\n### Description\r\n\r\nMaking it possible to sort after lvel under /admin/users/ so admins shows up and you get a proper overview. \r\n\r\nOr: Make a list of users with superuser access to the website\r\n\r\n### Steps to Reproduce\r\n\r\n1. [First Step]\r\n2. [Second Step]\r\n3. 
[and so on...]\r\n\r\n<!-- Please select the appropriate \"topic category\"/blue and \"issue type\"/yellow label -->\r\n\n", "before_files": [{"content": "from django.forms import CheckboxSelectMultiple\nfrom wagtail.contrib.modeladmin.options import ModelAdmin, ModelAdminGroup, \\\n modeladmin_register\nfrom wagtail.contrib.modeladmin.views import EditView, CreateView\nfrom wagtail.wagtailadmin.edit_handlers import TabbedInterface, ObjectList, \\\n FieldPanel\n\nfrom members.models import StudyProgram, Section\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass StudyProgramEditHandler:\n def get_edit_handler_class(self):\n edit_handler = TabbedInterface([\n ObjectList([\n FieldPanel('name_en'),\n FieldPanel('name_sv'),\n FieldPanel('degree'),\n ], heading=_('General'),\n ),\n # TODO: http://stackoverflow.com/questions/43188124/\n # ObjectList([\n # FieldPanel('sections', widget=CheckboxSelectMultiple),\n # ], heading=_('Sections'),\n # ),\n ])\n return edit_handler.bind_to_model(self.model)\n\n\nclass StudyProgramEditView(StudyProgramEditHandler, EditView):\n pass\n\n\nclass StudyProgramCreateView(StudyProgramEditHandler, CreateView):\n pass\n\n\nclass StudyProgramAdmin(ModelAdmin):\n model = StudyProgram\n menu_label = _('Study Program')\n menu_icon = 'fa-graduation-cap'\n menu_order = 510\n add_to_settings_menu = False\n create_view_class = StudyProgramCreateView\n edit_view_class = StudyProgramEditView\n list_display = ('degree', 'name_en', 'name_sv')\n search_fields = ('name_en', 'name_sv')\n\n\nclass SectionEditHandler:\n def get_edit_handler_class(self):\n edit_handler = TabbedInterface([\n ObjectList([\n FieldPanel('name_en'),\n FieldPanel('name_sv'),\n FieldPanel('abbreviation'),\n ], heading=_('General'), ),\n ObjectList([\n FieldPanel('studies', widget=CheckboxSelectMultiple),\n ], heading=_('Studies'), ),\n ])\n return edit_handler.bind_to_model(self.model)\n\n\nclass SectionEditView(SectionEditHandler, EditView):\n pass\n\n\nclass SectionCreateView(SectionEditHandler, CreateView):\n pass\n\n\nclass SectionAdmin(ModelAdmin):\n model = Section\n menu_label = _('Sections')\n menu_icon = 'fa-eye'\n menu_order = 520\n add_to_settings_menu = False\n create_view_class = SectionCreateView\n edit_view_class = SectionEditView\n list_display = ('abbreviation', 'name_en', 'name_sv')\n search_fields = ('name_en', 'name_sv', 'abbreviation')\n\n\nclass EducationAdminGroup(ModelAdminGroup):\n menu_label = _('Education')\n menu_icon = 'fa-university'\n menu_order = 450\n items = (StudyProgramAdmin, SectionAdmin)\n\n\nmodeladmin_register(EducationAdminGroup)\n", "path": "src/members/wagtail_hooks.py"}, {"content": "import requests\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.core import validators\nfrom django.db import models\nfrom django.db.models import ManyToManyField\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\nfrom requests.auth import HTTPDigestAuth\nfrom simple_email_confirmation.models import SimpleEmailConfirmationUserMixin\n\nfrom utils.translation import TranslatedField\n\n\nclass StudyProgram(models.Model):\n \"\"\"This class describes a university study program\"\"\"\n\n class Meta:\n verbose_name = _('study program')\n verbose_name_plural = _('study programs')\n\n DEGREE_CHOICES = (\n ('bsc', _('Bachelor of Science')),\n ('msc', _('Master of Science')),\n ('be', _('Bachelor of Engineering')),\n ('msceng', _('Master of Science in Engineering')),\n )\n\n name_en = 
models.CharField(\n max_length=255,\n verbose_name=_('English program name'),\n help_text=_('Enter the name of the study program'),\n blank=False,\n )\n\n name_sv = models.CharField(\n max_length=255,\n verbose_name=_('Swedish program name'),\n help_text=_('Enter the name of the study program'),\n blank=False,\n )\n\n name = TranslatedField('name_en', 'name_sv')\n\n degree = models.CharField(\n max_length=20,\n choices=DEGREE_CHOICES,\n verbose_name=_('Degree type'),\n blank=True,\n )\n\n def __str__(self) -> str:\n if self.degree:\n return _('%(degree_type)s in %(study_program)s') % {\n 'degree_type': self.get_degree_display(),\n 'study_program': self.name,\n }\n else:\n return self.name.__str__()\n\n\nclass Section(models.Model):\n \"\"\"This class represent a study section\"\"\"\n\n class Meta:\n verbose_name = _('section')\n verbose_name_plural = _('sections')\n\n name_en = models.CharField(\n max_length=255,\n verbose_name=_('English section name'),\n help_text=_('Enter the name of the section'),\n blank=False,\n )\n\n name_sv = models.CharField(\n max_length=255,\n verbose_name=_('Swedish section name'),\n help_text=_('Enter the name of the section'),\n blank=False,\n )\n\n name = TranslatedField('name_en', 'name_sv')\n\n abbreviation = models.CharField(\n max_length=130,\n verbose_name=_('Section abbreviation'),\n help_text=_('Enter the abbreviation for the section'),\n blank=True,\n )\n\n studies = ManyToManyField(\n StudyProgram,\n related_name='sections',\n blank=True,\n )\n\n def __str__(self) -> str:\n if self.abbreviation:\n return '%s - %s' % (self.abbreviation, self.name)\n else:\n return self.name.__str__()\n\n\nclass Member(SimpleEmailConfirmationUserMixin, AbstractUser):\n \"\"\"This class describes a member\"\"\"\n\n # ---- Personal information ------\n\n birthday = models.DateField(\n verbose_name=_('Birthday'),\n null=True\n )\n\n person_number_ext = models.CharField(\n max_length=4,\n verbose_name=_('Person number extension'),\n help_text=_('Enter the last four digits of your Swedish person '\n 'number, given by the Swedish tax authority'),\n validators=[validators.RegexValidator(\n regex=r'^\\d{4}$',\n message=_('The person number extension consists of four numbers'),\n )],\n unique_for_date=\"birthday\",\n blank=True,\n )\n\n # ---- Membership information ------\n\n MEMBERSHIP_CHOICES = (\n ('unknown', _('Unknown')),\n ('nonmember', _('Nonmember')),\n ('member', _('Member')),\n ('alumnus', _('Alumnus')),\n )\n\n status = models.CharField(\n max_length=20,\n choices=MEMBERSHIP_CHOICES,\n verbose_name=_('Membership status'),\n blank=False,\n default='unknown'\n )\n status_changed = models.DateTimeField(\n default=timezone.now,\n null=False,\n )\n\n # ---- Contact information ------\n\n phone_number = models.CharField(\n max_length=20,\n verbose_name=_('Phone number'),\n help_text=_('Enter a phone number so UTN may reach you'),\n validators=[validators.RegexValidator(\n regex=r'^\\+?\\d+$',\n message=_('Please enter a valid phone number'),\n )],\n blank=True,\n )\n\n # ---- University information ------\n\n registration_year = models.CharField(\n max_length=4,\n verbose_name=_('Registration year'),\n help_text=_('Enter the year you started studying at the TakNat '\n 'faculty'),\n validators=[validators.RegexValidator(\n regex=r'^\\d{4}$',\n message=_('Please enter a valid year')\n )],\n blank=True,\n )\n\n study = models.ForeignKey(\n StudyProgram,\n verbose_name=_('Study program'),\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n )\n\n section = 
models.ForeignKey(\n Section,\n verbose_name=_('Member of section'),\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n )\n\n def __str__(self) -> str:\n if self.first_name and self.last_name:\n return '%s %s' % (self.first_name, self.last_name)\n else:\n return self.username\n\n def person_number(self) -> str:\n if self.birthday is None or self.person_number_ext is None:\n return ''\n else:\n return '%s-%s' % (self.birthday.strftime('%Y%m%d'),\n self.person_number_ext)\n\n def update_status(self, data=None):\n if data is None:\n if self.person_number() == '':\n return\n try:\n r = requests.get(\n 'https://register.utn.se/api.php',\n auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,\n settings.MEMBERSHIP_API_PASSWORD),\n params={\n 'action': 'check',\n 'person_number': self.person_number().replace('-', '')\n },\n )\n data = r.json().get('status')\n except requests.exceptions.ConnectionError:\n data = 'unknown'\n except ValueError:\n return\n\n if data == 'member':\n self.status = 'member'\n elif data == 'nonmember':\n if self.status in ['unknown', 'nonmember']:\n self.status = 'nonmember'\n else:\n self.status = 'alumnus'\n\n self.status_changed = timezone.now()\n\n def remove_old_email(self):\n for email in self.get_unconfirmed_emails() or []:\n self.remove_email(email)\n for email in self.get_confirmed_emails():\n if email != self.email:\n self.remove_email(email)\n", "path": "src/members/models.py"}, {"content": "import datetime\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom django.template import loader\nfrom django.utils import timezone\nfrom simple_email_confirmation import unconfirmed_email_created\n\nfrom members.models import Member\n\n\n@receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')\ndef send_confirmation_email(sender, email, user=None, **kwargs):\n user = user or sender\n if user is not None:\n context = {\n 'email': email,\n 'domain': settings.BASE_URL,\n 'site_name': settings.WAGTAIL_SITE_NAME,\n 'token': user.get_confirmation_key(email),\n 'new_user': user.get_confirmed_emails() == []\n }\n\n subject = loader.render_to_string(\n 'members/email_change_subject.txt', context)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n body = loader.render_to_string('members/email_change_email.html',\n context)\n\n email_message = EmailMultiAlternatives(subject, body, None, [email])\n email_message.send()\n\n\n@receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')\ndef check_membership(sender, instance, **kwargs):\n if timezone.now() - instance.status_changed > datetime.timedelta(1)\\\n or instance.status == 'unknown':\n instance.update_status()\n", "path": "src/members/signals.py"}], "after_files": [{"content": "from django.forms import CheckboxSelectMultiple\nfrom django.template import loader\nfrom django.utils.translation import ugettext_lazy as _\nfrom wagtail.contrib.modeladmin.options import ModelAdmin, ModelAdminGroup, \\\n modeladmin_register\nfrom wagtail.contrib.modeladmin.views import EditView, CreateView\nfrom wagtail.wagtailadmin.edit_handlers import TabbedInterface, ObjectList, \\\n FieldPanel\nfrom wagtail.wagtailcore import hooks\n\nfrom members.models import StudyProgram, Section, Member\n\n\nclass StudyProgramEditHandler:\n def get_edit_handler_class(self):\n edit_handler = TabbedInterface([\n ObjectList([\n FieldPanel('name_en'),\n 
FieldPanel('name_sv'),\n FieldPanel('degree'),\n ], heading=_('General'),\n ),\n # TODO: http://stackoverflow.com/questions/43188124/\n # ObjectList([\n # FieldPanel('sections', widget=CheckboxSelectMultiple),\n # ], heading=_('Sections'),\n # ),\n ])\n return edit_handler.bind_to_model(self.model)\n\n\nclass StudyProgramEditView(StudyProgramEditHandler, EditView):\n pass\n\n\nclass StudyProgramCreateView(StudyProgramEditHandler, CreateView):\n pass\n\n\nclass StudyProgramAdmin(ModelAdmin):\n model = StudyProgram\n menu_label = _('Study Program')\n menu_icon = 'fa-graduation-cap'\n menu_order = 510\n add_to_settings_menu = False\n create_view_class = StudyProgramCreateView\n edit_view_class = StudyProgramEditView\n list_display = ('degree', 'name_en', 'name_sv')\n search_fields = ('name_en', 'name_sv')\n\n\nclass SectionEditHandler:\n def get_edit_handler_class(self):\n edit_handler = TabbedInterface([\n ObjectList([\n FieldPanel('name_en'),\n FieldPanel('name_sv'),\n FieldPanel('abbreviation'),\n ], heading=_('General'), ),\n ObjectList([\n FieldPanel('studies', widget=CheckboxSelectMultiple),\n ], heading=_('Studies'), ),\n ])\n return edit_handler.bind_to_model(self.model)\n\n\nclass SectionEditView(SectionEditHandler, EditView):\n pass\n\n\nclass SectionCreateView(SectionEditHandler, CreateView):\n pass\n\n\nclass SectionAdmin(ModelAdmin):\n model = Section\n menu_label = _('Sections')\n menu_icon = 'fa-eye'\n menu_order = 520\n add_to_settings_menu = False\n create_view_class = SectionCreateView\n edit_view_class = SectionEditView\n list_display = ('abbreviation', 'name_en', 'name_sv')\n search_fields = ('name_en', 'name_sv', 'abbreviation')\n\n\nclass EducationAdminGroup(ModelAdminGroup):\n menu_label = _('Education')\n menu_icon = 'fa-university'\n menu_order = 450\n items = (StudyProgramAdmin, SectionAdmin)\n\n\nmodeladmin_register(EducationAdminGroup)\n\n\nclass SuperUserPanel(object):\n order = 1000\n\n def __init__(self, request):\n self.request = request\n\n def render(self):\n c = {\n 'supers': Member.objects.filter(is_superuser=True),\n 'user': self.request.user\n }\n return loader.get_template('members/admin_panel.html').render(c)\n\n\[email protected]('construct_homepage_panels')\ndef add_super_user_panel(request, panels):\n return panels.append(SuperUserPanel(request))\n", "path": "src/members/wagtail_hooks.py"}, {"content": "import requests\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.core import validators\nfrom django.db import models\nfrom django.db.models import ManyToManyField\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\nfrom requests.auth import HTTPDigestAuth\nfrom simple_email_confirmation.models import SimpleEmailConfirmationUserMixin\n\nfrom utils.translation import TranslatedField\n\n\nclass StudyProgram(models.Model):\n \"\"\"This class describes a university study program\"\"\"\n\n class Meta:\n verbose_name = _('study program')\n verbose_name_plural = _('study programs')\n\n DEGREE_CHOICES = (\n ('bsc', _('Bachelor of Science')),\n ('msc', _('Master of Science')),\n ('be', _('Bachelor of Engineering')),\n ('msceng', _('Master of Science in Engineering')),\n )\n\n name_en = models.CharField(\n max_length=255,\n verbose_name=_('English program name'),\n help_text=_('Enter the name of the study program'),\n blank=False,\n )\n\n name_sv = models.CharField(\n max_length=255,\n verbose_name=_('Swedish program name'),\n help_text=_('Enter the name of the study 
program'),\n blank=False,\n )\n\n name = TranslatedField('name_en', 'name_sv')\n\n degree = models.CharField(\n max_length=20,\n choices=DEGREE_CHOICES,\n verbose_name=_('Degree type'),\n blank=True,\n )\n\n def __str__(self) -> str:\n if self.degree:\n return _('%(degree_type)s in %(study_program)s') % {\n 'degree_type': self.get_degree_display(),\n 'study_program': self.name,\n }\n else:\n return self.name.__str__()\n\n\nclass Section(models.Model):\n \"\"\"This class represent a study section\"\"\"\n\n class Meta:\n verbose_name = _('section')\n verbose_name_plural = _('sections')\n\n name_en = models.CharField(\n max_length=255,\n verbose_name=_('English section name'),\n help_text=_('Enter the name of the section'),\n blank=False,\n )\n\n name_sv = models.CharField(\n max_length=255,\n verbose_name=_('Swedish section name'),\n help_text=_('Enter the name of the section'),\n blank=False,\n )\n\n name = TranslatedField('name_en', 'name_sv')\n\n abbreviation = models.CharField(\n max_length=130,\n verbose_name=_('Section abbreviation'),\n help_text=_('Enter the abbreviation for the section'),\n blank=True,\n )\n\n studies = ManyToManyField(\n StudyProgram,\n related_name='sections',\n blank=True,\n )\n\n def __str__(self) -> str:\n if self.abbreviation:\n return '%s - %s' % (self.abbreviation, self.name)\n else:\n return self.name.__str__()\n\n\nclass Member(SimpleEmailConfirmationUserMixin, AbstractUser):\n \"\"\"This class describes a member\"\"\"\n\n # ---- Personal information ------\n\n birthday = models.DateField(\n verbose_name=_('Birthday'),\n null=True\n )\n\n person_number_ext = models.CharField(\n max_length=4,\n verbose_name=_('Person number extension'),\n help_text=_('Enter the last four digits of your Swedish person '\n 'number, given by the Swedish tax authority'),\n validators=[validators.RegexValidator(\n regex=r'^\\d{4}$',\n message=_('The person number extension consists of four numbers'),\n )],\n unique_for_date=\"birthday\",\n blank=True,\n )\n\n # ---- Membership information ------\n\n MEMBERSHIP_CHOICES = (\n ('unknown', _('Unknown')),\n ('nonmember', _('Nonmember')),\n ('member', _('Member')),\n ('alumnus', _('Alumnus')),\n )\n\n status = models.CharField(\n max_length=20,\n choices=MEMBERSHIP_CHOICES,\n verbose_name=_('Membership status'),\n blank=False,\n default='unknown'\n )\n status_changed = models.DateTimeField(\n default=timezone.now,\n null=False,\n )\n\n # ---- Contact information ------\n\n phone_number = models.CharField(\n max_length=20,\n verbose_name=_('Phone number'),\n help_text=_('Enter a phone number so UTN may reach you'),\n validators=[validators.RegexValidator(\n regex=r'^\\+?\\d+$',\n message=_('Please enter a valid phone number'),\n )],\n blank=True,\n )\n\n # ---- University information ------\n\n registration_year = models.CharField(\n max_length=4,\n verbose_name=_('Registration year'),\n help_text=_('Enter the year you started studying at the TakNat '\n 'faculty'),\n validators=[validators.RegexValidator(\n regex=r'^\\d{4}$',\n message=_('Please enter a valid year')\n )],\n blank=True,\n )\n\n study = models.ForeignKey(\n StudyProgram,\n verbose_name=_('Study program'),\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n )\n\n section = models.ForeignKey(\n Section,\n verbose_name=_('Member of section'),\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n )\n\n def __str__(self) -> str:\n if self.first_name and self.last_name:\n return self.get_full_name()\n else:\n return self.username\n\n def person_number(self) -> str:\n 
if self.birthday is None or self.person_number_ext is None:\n return ''\n else:\n return '%s-%s' % (self.birthday.strftime('%Y%m%d'),\n self.person_number_ext)\n\n def update_status(self, data=None):\n if data is None:\n if self.person_number() == '':\n return\n try:\n r = requests.get(\n 'https://register.utn.se/api.php',\n auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,\n settings.MEMBERSHIP_API_PASSWORD),\n params={\n 'action': 'check',\n 'person_number': self.person_number().replace('-', '')\n },\n )\n data = r.json().get('status')\n except requests.exceptions.ConnectionError:\n data = 'unknown'\n except ValueError:\n return\n\n if data == 'member':\n self.status = 'member'\n elif data == 'nonmember':\n if self.status in ['unknown', 'nonmember']:\n self.status = 'nonmember'\n else:\n self.status = 'alumnus'\n\n self.status_changed = timezone.now()\n\n def remove_old_email(self):\n for email in self.get_unconfirmed_emails() or []:\n self.remove_email(email)\n for email in self.get_confirmed_emails():\n if email != self.email:\n self.remove_email(email)\n", "path": "src/members/models.py"}, {"content": "import datetime\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom django.template import loader\nfrom django.utils import timezone\nfrom simple_email_confirmation import unconfirmed_email_created\n\nfrom members.models import Member\n\n\n@receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')\ndef send_confirmation_email(sender, email, user=None, **kwargs):\n user = user or sender # TODO: use user.send_email\n if user is not None:\n context = {\n 'email': email,\n 'domain': settings.BASE_URL,\n 'site_name': settings.WAGTAIL_SITE_NAME,\n 'token': user.get_confirmation_key(email),\n 'new_user': user.get_confirmed_emails() == []\n }\n\n subject = loader.render_to_string(\n 'members/email_change_subject.txt', context)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n body = loader.render_to_string('members/email_change_email.html',\n context)\n\n email_message = EmailMultiAlternatives(subject, body, None, [email])\n email_message.send()\n\n\n@receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')\ndef check_membership(sender, instance, **kwargs):\n if timezone.now() - instance.status_changed > datetime.timedelta(1)\\\n or instance.status == 'unknown':\n instance.update_status()\n", "path": "src/members/signals.py"}]} | 3,632 | 552 |
gh_patches_debug_8940 | rasdani/github-patches | git_diff | cowrie__cowrie-802 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
adding root:x:!password to userdb.txt doesn't exclude root/password as valid credentials
Fresh install.
I tried to exclude 'password' or 'abc123' from valid passwords for the user root
Now the file looks like:
```
root:x:!root
root:x:!123456
root:x:!password
root:x:*
```
Restarted cowrie, but no way to deny login with root/password credentials
Maybe some sort of problem with the new regexp checking?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cowrie/core/auth.py`
Content:
```
1 # Copyright (c) 2009-2014 Upi Tamminen <[email protected]>
2 # See the COPYRIGHT file for more information
3
4 """
5 This module contains ...
6 """
7
8 from __future__ import division, absolute_import
9
10 import re
11 import json
12 from os import path
13 from random import randint
14
15 from twisted.python import log
16
17 from cowrie.core.config import CONFIG
18
19 class UserDB(object):
20 """
21 By Walter de Jong <[email protected]>
22 """
23
24 def __init__(self):
25 self.userdb = {}
26 self.userdb_file = '{}/userdb.txt'.format(CONFIG.get('honeypot', 'data_path'))
27 self.load()
28
29
30 def load(self):
31 """
32 load the user db
33 """
34
35 with open(self.userdb_file, 'rb') as f:
36 while True:
37 rawline = f.readline()
38 if not rawline:
39 break
40
41 line = rawline.strip()
42 if not line:
43 continue
44
45 if line.startswith(b'#'):
46 continue
47
48 login, passwd = re.split(br':\w+:', line, 1)
49 self.adduser(login, passwd)
50
51
52 def checklogin(self, thelogin, thepasswd, src_ip='0.0.0.0'):
53 for credentials, policy in self.userdb.items():
54 login, passwd = credentials
55
56 if self.match_rule(login, thelogin):
57 if self.match_rule(passwd, thepasswd):
58 return policy
59
60 return False
61
62
63 def match_rule(self, rule, input):
64 if type(rule) is bytes:
65 return rule in [b'*', input]
66 else:
67 return bool(rule.search(input))
68
69
70 def re_or_str(self, rule):
71 """
72 Convert a /.../ type rule to a regex, otherwise return the string as-is
73 """
74 res = re.match(br'/(.+)/(i)?$', rule)
75 if res:
76 return re.compile(res.group(1), re.IGNORECASE if res.group(2) else 0)
77
78 return rule
79
80
81 def adduser(self, login, passwd):
82 login = self.re_or_str(login)
83
84 if passwd.startswith(b'!'):
85 policy = False
86 passwd = passwd[1:]
87 else:
88 policy = True
89
90 passwd = self.re_or_str(passwd)
91 self.userdb[(login, passwd)] = policy
92
93
94 class AuthRandom(object):
95 """
96 Alternative class that defines the checklogin() method.
97 Users will be authenticated after a random number of attempts.
98 """
99
100 def __init__(self):
101 # Default values
102 self.mintry, self.maxtry, self.maxcache = 2, 5, 10
103
104 # Are there auth_class parameters?
105 if CONFIG.has_option('honeypot', 'auth_class_parameters'):
106 parameters = CONFIG.get('honeypot', 'auth_class_parameters')
107 parlist = parameters.split(',')
108 if len(parlist) == 3:
109 self.mintry = int(parlist[0])
110 self.maxtry = int(parlist[1])
111 self.maxcache = int(parlist[2])
112
113 if self.maxtry < self.mintry:
114 self.maxtry = self.mintry + 1
115 log.msg("maxtry < mintry, adjusting maxtry to: {}".format(self.maxtry))
116 self.uservar = {}
117 self.uservar_file = '{}/auth_random.json'.format(CONFIG.get('honeypot', 'state_path'))
118 self.loadvars()
119
120
121 def loadvars(self):
122 """
123 Load user vars from json file
124 """
125 if path.isfile(self.uservar_file):
126 with open(self.uservar_file, 'rb') as fp:
127 try:
128 self.uservar = json.load(fp)
129 except:
130 self.uservar = {}
131
132
133 def savevars(self):
134 """
135 Save the user vars to json file
136 """
137 data = self.uservar
138 # Note: this is subject to races between cowrie logins
139 with open(self.uservar_file, 'wb') as fp:
140 json.dump(data, fp)
141
142
143 def checklogin(self, thelogin, thepasswd, src_ip):
144 """
145 Every new source IP will have to try a random number of times between
146 'mintry' and 'maxtry' before succeeding to login.
147 All username/password combinations must be different.
148 The successful login combination is stored with the IP address.
149 Successful username/passwords pairs are also cached for 'maxcache' times.
150 This is to allow access for returns from different IP addresses.
151 Variables are saved in 'uservar.json' in the data directory.
152 """
153
154 auth = False
155 userpass = thelogin + ':' + thepasswd
156
157 if not 'cache' in self.uservar:
158 self.uservar['cache'] = []
159 cache = self.uservar['cache']
160
161 # Check if it is the first visit from src_ip
162 if src_ip not in self.uservar:
163 self.uservar[src_ip] = {}
164 ipinfo = self.uservar[src_ip]
165 ipinfo['try'] = 0
166 if userpass in cache:
167 log.msg("first time for {}, found cached: {}".format(src_ip, userpass))
168 ipinfo['max'] = 1
169 ipinfo['user'] = thelogin
170 ipinfo['pw'] = thepasswd
171 auth = True
172 self.savevars()
173 return auth
174 else:
175 ipinfo['max'] = randint(self.mintry, self.maxtry)
176 log.msg("first time for {}, need: {}".format(src_ip, ipinfo['max']))
177
178 ipinfo = self.uservar[src_ip]
179
180 # Fill in missing variables
181 if not 'max' in ipinfo:
182 ipinfo['max'] = randint(self.mintry, self.maxtry)
183 if not 'try' in ipinfo:
184 ipinfo['try'] = 0
185 if not 'tried' in ipinfo:
186 ipinfo['tried'] = []
187
188 # Don't count repeated username/password combinations
189 if userpass in ipinfo['tried']:
190 log.msg('already tried this combination')
191 self.savevars()
192 return auth
193
194 ipinfo['try'] += 1
195 attempts = ipinfo['try']
196 need = ipinfo['max']
197 log.msg("login attempt: {}".format(attempts))
198
199 # Check if enough login attempts are tried
200 if attempts < need:
201 self.uservar[src_ip]['tried'].append(userpass)
202 elif attempts == need:
203 ipinfo['user'] = thelogin
204 ipinfo['pw'] = thepasswd
205 cache.append(userpass)
206 if len(cache) > self.maxcache:
207 cache.pop(0)
208 auth = True
209 # Returning after successful login
210 elif attempts > need:
211 if not 'user' in ipinfo or not 'pw' in ipinfo:
212 log.msg('return, but username or password not set!!!')
213 ipinfo['tried'].append(userpass)
214 ipinfo['try'] = 1
215 else:
216 log.msg("login return, expect: [{}/{}]".format(ipinfo['user'], ipinfo['pw']))
217 if thelogin == ipinfo['user'] and thepasswd == ipinfo['pw']:
218 auth = True
219 self.savevars()
220 return auth
221
222
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cowrie/core/auth.py b/cowrie/core/auth.py
--- a/cowrie/core/auth.py
+++ b/cowrie/core/auth.py
@@ -11,6 +11,7 @@
import json
from os import path
from random import randint
+from collections import OrderedDict
from twisted.python import log
@@ -22,7 +23,7 @@
"""
def __init__(self):
- self.userdb = {}
+ self.userdb = OrderedDict()
self.userdb_file = '{}/userdb.txt'.format(CONFIG.get('honeypot', 'data_path'))
self.load()
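Why this one-line change fixes the report (annotation added here, hedged): `checklogin()` returns the policy of the first rule that matches, so the `root:x:!password` deny entries only take effect if they are evaluated before the `root:x:*` wildcard. A plain dict on Python versions without guaranteed insertion order may iterate the wildcard first; `OrderedDict` preserves the order of `userdb.txt`. A minimal illustration of the ordering effect (simplified matching, not the real cowrie code):

```python
from collections import OrderedDict

# Simplified stand-in for UserDB.checklogin(); real rules may also be regexes.
userdb = OrderedDict()
userdb[(b'root', b'password')] = False  # from "root:x:!password" (deny)
userdb[(b'root', b'*')] = True          # from "root:x:*" (allow anything else)

def checklogin(login, passwd):
    for (l, p), policy in userdb.items():
        if l in (b'*', login) and p in (b'*', passwd):
            return policy
    return False

assert checklogin(b'root', b'password') is False  # deny rule wins only if seen first
assert checklogin(b'root', b'hunter2') is True
```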
| {"golden_diff": "diff --git a/cowrie/core/auth.py b/cowrie/core/auth.py\n--- a/cowrie/core/auth.py\n+++ b/cowrie/core/auth.py\n@@ -11,6 +11,7 @@\n import json\n from os import path\n from random import randint\n+from collections import OrderedDict\n \n from twisted.python import log\n \n@@ -22,7 +23,7 @@\n \"\"\"\n \n def __init__(self):\n- self.userdb = {}\n+ self.userdb = OrderedDict()\n self.userdb_file = '{}/userdb.txt'.format(CONFIG.get('honeypot', 'data_path'))\n self.load()\n", "issue": "adding root:x:!password to userdb.txt doesn't exclude root/password as valid credentials\nFresh install.\r\n\r\nI tried to exclude 'password' or 'abc123' from valid passwords for the user root\r\n\r\nNow file looks like \r\n```\r\nroot:x:!root\r\nroot:x:!123456\r\nroot:x:!password\r\nroot:x:*\r\n```\r\n\r\nRetarted cowrie, but no way to deny login with root/password credentials\r\n\r\nMaybe, some sort of problem with new regexp checking?\n", "before_files": [{"content": "# Copyright (c) 2009-2014 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\n\"\"\"\nThis module contains ...\n\"\"\"\n\nfrom __future__ import division, absolute_import\n\nimport re\nimport json\nfrom os import path\nfrom random import randint\n\nfrom twisted.python import log\n\nfrom cowrie.core.config import CONFIG\n\nclass UserDB(object):\n \"\"\"\n By Walter de Jong <[email protected]>\n \"\"\"\n\n def __init__(self):\n self.userdb = {}\n self.userdb_file = '{}/userdb.txt'.format(CONFIG.get('honeypot', 'data_path'))\n self.load()\n\n\n def load(self):\n \"\"\"\n load the user db\n \"\"\"\n\n with open(self.userdb_file, 'rb') as f:\n while True:\n rawline = f.readline()\n if not rawline:\n break\n\n line = rawline.strip()\n if not line:\n continue\n\n if line.startswith(b'#'):\n continue\n\n login, passwd = re.split(br':\\w+:', line, 1)\n self.adduser(login, passwd)\n\n\n def checklogin(self, thelogin, thepasswd, src_ip='0.0.0.0'):\n for credentials, policy in self.userdb.items():\n login, passwd = credentials\n\n if self.match_rule(login, thelogin):\n if self.match_rule(passwd, thepasswd):\n return policy\n\n return False\n\n\n def match_rule(self, rule, input):\n if type(rule) is bytes:\n return rule in [b'*', input]\n else:\n return bool(rule.search(input))\n\n\n def re_or_str(self, rule):\n \"\"\"\n Convert a /.../ type rule to a regex, otherwise return the string as-is\n \"\"\"\n res = re.match(br'/(.+)/(i)?$', rule)\n if res:\n return re.compile(res.group(1), re.IGNORECASE if res.group(2) else 0)\n\n return rule\n\n\n def adduser(self, login, passwd):\n login = self.re_or_str(login)\n\n if passwd.startswith(b'!'):\n policy = False\n passwd = passwd[1:]\n else:\n policy = True\n\n passwd = self.re_or_str(passwd)\n self.userdb[(login, passwd)] = policy\n\n\nclass AuthRandom(object):\n \"\"\"\n Alternative class that defines the checklogin() method.\n Users will be authenticated after a random number of attempts.\n \"\"\"\n\n def __init__(self):\n # Default values\n self.mintry, self.maxtry, self.maxcache = 2, 5, 10\n\n # Are there auth_class parameters?\n if CONFIG.has_option('honeypot', 'auth_class_parameters'):\n parameters = CONFIG.get('honeypot', 'auth_class_parameters')\n parlist = parameters.split(',')\n if len(parlist) == 3:\n self.mintry = int(parlist[0])\n self.maxtry = int(parlist[1])\n self.maxcache = int(parlist[2])\n\n if self.maxtry < self.mintry:\n self.maxtry = self.mintry + 1\n log.msg(\"maxtry < mintry, adjusting maxtry to: {}\".format(self.maxtry))\n self.uservar = {}\n 
self.uservar_file = '{}/auth_random.json'.format(CONFIG.get('honeypot', 'state_path'))\n self.loadvars()\n\n\n def loadvars(self):\n \"\"\"\n Load user vars from json file\n \"\"\"\n if path.isfile(self.uservar_file):\n with open(self.uservar_file, 'rb') as fp:\n try:\n self.uservar = json.load(fp)\n except:\n self.uservar = {}\n\n\n def savevars(self):\n \"\"\"\n Save the user vars to json file\n \"\"\"\n data = self.uservar\n # Note: this is subject to races between cowrie logins\n with open(self.uservar_file, 'wb') as fp:\n json.dump(data, fp)\n\n\n def checklogin(self, thelogin, thepasswd, src_ip):\n \"\"\"\n Every new source IP will have to try a random number of times between\n 'mintry' and 'maxtry' before succeeding to login.\n All username/password combinations must be different.\n The successful login combination is stored with the IP address.\n Successful username/passwords pairs are also cached for 'maxcache' times.\n This is to allow access for returns from different IP addresses.\n Variables are saved in 'uservar.json' in the data directory.\n \"\"\"\n\n auth = False\n userpass = thelogin + ':' + thepasswd\n\n if not 'cache' in self.uservar:\n self.uservar['cache'] = []\n cache = self.uservar['cache']\n\n # Check if it is the first visit from src_ip\n if src_ip not in self.uservar:\n self.uservar[src_ip] = {}\n ipinfo = self.uservar[src_ip]\n ipinfo['try'] = 0\n if userpass in cache:\n log.msg(\"first time for {}, found cached: {}\".format(src_ip, userpass))\n ipinfo['max'] = 1\n ipinfo['user'] = thelogin\n ipinfo['pw'] = thepasswd\n auth = True\n self.savevars()\n return auth\n else:\n ipinfo['max'] = randint(self.mintry, self.maxtry)\n log.msg(\"first time for {}, need: {}\".format(src_ip, ipinfo['max']))\n\n ipinfo = self.uservar[src_ip]\n\n # Fill in missing variables\n if not 'max' in ipinfo:\n ipinfo['max'] = randint(self.mintry, self.maxtry)\n if not 'try' in ipinfo:\n ipinfo['try'] = 0\n if not 'tried' in ipinfo:\n ipinfo['tried'] = []\n\n # Don't count repeated username/password combinations\n if userpass in ipinfo['tried']:\n log.msg('already tried this combination')\n self.savevars()\n return auth\n\n ipinfo['try'] += 1\n attempts = ipinfo['try']\n need = ipinfo['max']\n log.msg(\"login attempt: {}\".format(attempts))\n\n # Check if enough login attempts are tried\n if attempts < need:\n self.uservar[src_ip]['tried'].append(userpass)\n elif attempts == need:\n ipinfo['user'] = thelogin\n ipinfo['pw'] = thepasswd\n cache.append(userpass)\n if len(cache) > self.maxcache:\n cache.pop(0)\n auth = True\n # Returning after successful login\n elif attempts > need:\n if not 'user' in ipinfo or not 'pw' in ipinfo:\n log.msg('return, but username or password not set!!!')\n ipinfo['tried'].append(userpass)\n ipinfo['try'] = 1\n else:\n log.msg(\"login return, expect: [{}/{}]\".format(ipinfo['user'], ipinfo['pw']))\n if thelogin == ipinfo['user'] and thepasswd == ipinfo['pw']:\n auth = True\n self.savevars()\n return auth\n\n", "path": "cowrie/core/auth.py"}], "after_files": [{"content": "# Copyright (c) 2009-2014 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\n\"\"\"\nThis module contains ...\n\"\"\"\n\nfrom __future__ import division, absolute_import\n\nimport re\nimport json\nfrom os import path\nfrom random import randint\nfrom collections import OrderedDict\n\nfrom twisted.python import log\n\nfrom cowrie.core.config import CONFIG\n\nclass UserDB(object):\n \"\"\"\n By Walter de Jong <[email protected]>\n \"\"\"\n\n def __init__(self):\n 
self.userdb = OrderedDict()\n self.userdb_file = '{}/userdb.txt'.format(CONFIG.get('honeypot', 'data_path'))\n self.load()\n\n\n def load(self):\n \"\"\"\n load the user db\n \"\"\"\n\n with open(self.userdb_file, 'rb') as f:\n while True:\n rawline = f.readline()\n if not rawline:\n break\n\n line = rawline.strip()\n if not line:\n continue\n\n if line.startswith(b'#'):\n continue\n\n login, passwd = re.split(br':\\w+:', line, 1)\n self.adduser(login, passwd)\n\n\n def checklogin(self, thelogin, thepasswd, src_ip='0.0.0.0'):\n for credentials, policy in self.userdb.items():\n login, passwd = credentials\n\n if self.match_rule(login, thelogin):\n if self.match_rule(passwd, thepasswd):\n return policy\n\n return False\n\n\n def match_rule(self, rule, input):\n if type(rule) is bytes:\n return rule in [b'*', input]\n else:\n return bool(rule.search(input))\n\n\n def re_or_str(self, rule):\n \"\"\"\n Convert a /.../ type rule to a regex, otherwise return the string as-is\n \"\"\"\n res = re.match(br'/(.+)/(i)?$', rule)\n if res:\n return re.compile(res.group(1), re.IGNORECASE if res.group(2) else 0)\n\n return rule\n\n\n def adduser(self, login, passwd):\n login = self.re_or_str(login)\n\n if passwd.startswith(b'!'):\n policy = False\n passwd = passwd[1:]\n else:\n policy = True\n\n passwd = self.re_or_str(passwd)\n self.userdb[(login, passwd)] = policy\n\n\nclass AuthRandom(object):\n \"\"\"\n Alternative class that defines the checklogin() method.\n Users will be authenticated after a random number of attempts.\n \"\"\"\n\n def __init__(self):\n # Default values\n self.mintry, self.maxtry, self.maxcache = 2, 5, 10\n\n # Are there auth_class parameters?\n if CONFIG.has_option('honeypot', 'auth_class_parameters'):\n parameters = CONFIG.get('honeypot', 'auth_class_parameters')\n parlist = parameters.split(',')\n if len(parlist) == 3:\n self.mintry = int(parlist[0])\n self.maxtry = int(parlist[1])\n self.maxcache = int(parlist[2])\n\n if self.maxtry < self.mintry:\n self.maxtry = self.mintry + 1\n log.msg(\"maxtry < mintry, adjusting maxtry to: {}\".format(self.maxtry))\n self.uservar = {}\n self.uservar_file = '{}/auth_random.json'.format(CONFIG.get('honeypot', 'state_path'))\n self.loadvars()\n\n\n def loadvars(self):\n \"\"\"\n Load user vars from json file\n \"\"\"\n if path.isfile(self.uservar_file):\n with open(self.uservar_file, 'rb') as fp:\n try:\n self.uservar = json.load(fp)\n except:\n self.uservar = {}\n\n\n def savevars(self):\n \"\"\"\n Save the user vars to json file\n \"\"\"\n data = self.uservar\n # Note: this is subject to races between cowrie logins\n with open(self.uservar_file, 'wb') as fp:\n json.dump(data, fp)\n\n\n def checklogin(self, thelogin, thepasswd, src_ip):\n \"\"\"\n Every new source IP will have to try a random number of times between\n 'mintry' and 'maxtry' before succeeding to login.\n All username/password combinations must be different.\n The successful login combination is stored with the IP address.\n Successful username/passwords pairs are also cached for 'maxcache' times.\n This is to allow access for returns from different IP addresses.\n Variables are saved in 'uservar.json' in the data directory.\n \"\"\"\n\n auth = False\n userpass = thelogin + ':' + thepasswd\n\n if not 'cache' in self.uservar:\n self.uservar['cache'] = []\n cache = self.uservar['cache']\n\n # Check if it is the first visit from src_ip\n if src_ip not in self.uservar:\n self.uservar[src_ip] = {}\n ipinfo = self.uservar[src_ip]\n ipinfo['try'] = 0\n if userpass in cache:\n 
log.msg(\"first time for {}, found cached: {}\".format(src_ip, userpass))\n ipinfo['max'] = 1\n ipinfo['user'] = thelogin\n ipinfo['pw'] = thepasswd\n auth = True\n self.savevars()\n return auth\n else:\n ipinfo['max'] = randint(self.mintry, self.maxtry)\n log.msg(\"first time for {}, need: {}\".format(src_ip, ipinfo['max']))\n\n ipinfo = self.uservar[src_ip]\n\n # Fill in missing variables\n if not 'max' in ipinfo:\n ipinfo['max'] = randint(self.mintry, self.maxtry)\n if not 'try' in ipinfo:\n ipinfo['try'] = 0\n if not 'tried' in ipinfo:\n ipinfo['tried'] = []\n\n # Don't count repeated username/password combinations\n if userpass in ipinfo['tried']:\n log.msg('already tried this combination')\n self.savevars()\n return auth\n\n ipinfo['try'] += 1\n attempts = ipinfo['try']\n need = ipinfo['max']\n log.msg(\"login attempt: {}\".format(attempts))\n\n # Check if enough login attempts are tried\n if attempts < need:\n self.uservar[src_ip]['tried'].append(userpass)\n elif attempts == need:\n ipinfo['user'] = thelogin\n ipinfo['pw'] = thepasswd\n cache.append(userpass)\n if len(cache) > self.maxcache:\n cache.pop(0)\n auth = True\n # Returning after successful login\n elif attempts > need:\n if not 'user' in ipinfo or not 'pw' in ipinfo:\n log.msg('return, but username or password not set!!!')\n ipinfo['tried'].append(userpass)\n ipinfo['try'] = 1\n else:\n log.msg(\"login return, expect: [{}/{}]\".format(ipinfo['user'], ipinfo['pw']))\n if thelogin == ipinfo['user'] and thepasswd == ipinfo['pw']:\n auth = True\n self.savevars()\n return auth\n\n", "path": "cowrie/core/auth.py"}]} | 2,530 | 138 |
gh_patches_debug_1572 | rasdani/github-patches | git_diff | hylang__hy-2070 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The manual is missing module names
It looks like the big doc reorganization ended up omitting the actual module names of the various contrib and extra modules; for example, the section named "Walk" documents the functions and macros in `hy.contrib.walk`, but doesn't mention the name `hy.contrib.walk` or otherwise indicate how to bring the enumerated names into scope.
Pointed out in https://github.com/hylang/hy/issues/2065#issuecomment-842377526.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # This file is execfile()d with the current directory set to its containing dir.
4
5 import re, os, sys, time, html
6
7 sys.path.insert(0, os.path.abspath('..'))
8
9 extensions = [
10 'sphinx.ext.napoleon',
11 'sphinx.ext.intersphinx',
12 'sphinx.ext.autodoc',
13 'sphinxcontrib.hydomain',
14 ]
15
16 from get_version import __version__ as hy_version
17
18 # Read the Docs might dirty its checkout, so strip the dirty flag.
19 hy_version = re.sub(r'[+.]dirty\Z', '', hy_version)
20
21 templates_path = ['_templates']
22 source_suffix = '.rst'
23
24 master_doc = 'index'
25
26 # General information about the project.
27 project = 'hy'
28 copyright = '%s the authors' % time.strftime('%Y')
29
30 # The version info for the project you're documenting, acts as replacement for
31 # |version| and |release|, also used in various other places throughout the
32 # built documents.
33 #
34 # The short X.Y version.
35 version = ".".join(hy_version.split(".")[:-1])
36 # The full version, including alpha/beta/rc tags.
37 release = hy_version
38 hy_descriptive_version = html.escape(hy_version)
39 if "+" in hy_version:
40 hy_descriptive_version += " <strong style='color: red;'>(unstable)</strong>"
41
42 exclude_patterns = ['_build', 'coreteam.rst']
43 add_module_names = False
44
45 pygments_style = 'sphinx'
46
47 import sphinx_rtd_theme
48 html_theme = 'sphinx_rtd_theme'
49 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
50
51 # Add any paths that contain custom static files (such as style sheets) here,
52 # relative to this directory. They are copied after the builtin static files,
53 # so a file named "default.css" will overwrite the builtin "default.css".
54 html_static_path = ['_static']
55
56 html_use_smartypants = False
57 html_show_sphinx = False
58
59 html_context = dict(
60 hy_descriptive_version = hy_descriptive_version)
61
62 highlight_language = 'clojure'
63
64 intersphinx_mapping = dict(
65 py = ('https://docs.python.org/3/', None))
66 # ** Generate Cheatsheet
67 import json
68 from pathlib import Path
69 from itertools import zip_longest
70
71 def refize(spec):
72 role = ':hy:func:'
73 if isinstance(spec, dict):
74 _name = spec['name']
75 uri = spec['uri']
76 if spec.get('internal'):
77 role = ':ref:'
78 else:
79 uri = spec
80 _name = str.split(uri, '.')[-1]
81 return '{}`{} <{}>`'.format(role, _name, uri)
82
83
84 def format_refs(refs, indent):
85 args = [iter(map(refize, refs))]
86 ref_groups = zip_longest(*args, fillvalue="")
87 return str.join(
88 ' \\\n' + ' ' * (indent + 3),
89 [str.join(' ', ref_group) for ref_group in ref_groups],
90 )
91
92
93 def format_row(category, divider_loc):
94 return '{title: <{width}} | {methods}'.format(
95 width=divider_loc,
96 title=category['name'],
97 methods=format_refs(category['methods'], divider_loc)
98 )
99
100
101 def format_table(table_spec):
102 table_name = table_spec['name']
103 categories = table_spec['categories']
104 longest_cat_name = max([len(category['name']) for category in categories])
105 table = [
106 table_name,
107 '-' * len(table_name),
108 '',
109 '=' * longest_cat_name + ' ' + '=' * 25,
110 *(format_row(category, longest_cat_name) for category in categories),
111 '=' * longest_cat_name + ' ' + '=' * 25,
112 ''
113 ]
114 return '\n'.join(table)
115
116
117 # Modifications to the cheatsheet should be added in `cheatsheet.json`
118 cheatsheet_spec = json.loads(Path('./docs/cheatsheet.json').read_text())
119 cheatsheet = [
120 '..',
121 ' DO NOT MODIFY THIS FILE. IT IS AUTO GENERATED BY ``conf.py``',
122 ' If you need to change or add methods, modify ``cheatsheet_spec`` in ``conf.py``',
123 '',
124 '.. _cheatsheet:',
125 '',
126 'Cheatsheet',
127 '==========',
128 '',
129 *map(format_table, cheatsheet_spec),
130 ]
131 Path('./docs/cheatsheet.rst').write_text('\n'.join(cheatsheet))
132
133
134 # ** Sphinx App Setup
135
136
137 def setup(app):
138 app.add_css_file('overrides.css')
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -40,7 +40,7 @@
hy_descriptive_version += " <strong style='color: red;'>(unstable)</strong>"
exclude_patterns = ['_build', 'coreteam.rst']
-add_module_names = False
+add_module_names = True
pygments_style = 'sphinx'
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -40,7 +40,7 @@\n hy_descriptive_version += \" <strong style='color: red;'>(unstable)</strong>\"\n \n exclude_patterns = ['_build', 'coreteam.rst']\n-add_module_names = False\n+add_module_names = True\n \n pygments_style = 'sphinx'\n", "issue": "The manual is missing module names\nIt looks like the big doc reorganization ended up omitting the actual module names of the various contrib and extra modules; for example, the section named \"Walk\" documents the functions and macros in `hy.contrib.walk`, but doesn't mention the name `hy.contrib.walk` or otherwise indicate how to bring the enumerated names into scope.\r\n\r\nPointed out in https://github.com/hylang/hy/issues/2065#issuecomment-842377526.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# This file is execfile()d with the current directory set to its containing dir.\n\nimport re, os, sys, time, html\n\nsys.path.insert(0, os.path.abspath('..'))\n\nextensions = [\n 'sphinx.ext.napoleon',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.autodoc',\n 'sphinxcontrib.hydomain',\n]\n\nfrom get_version import __version__ as hy_version\n\n# Read the Docs might dirty its checkout, so strip the dirty flag.\nhy_version = re.sub(r'[+.]dirty\\Z', '', hy_version)\n\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\n\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'hy'\ncopyright = '%s the authors' % time.strftime('%Y')\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \".\".join(hy_version.split(\".\")[:-1])\n# The full version, including alpha/beta/rc tags.\nrelease = hy_version\nhy_descriptive_version = html.escape(hy_version)\nif \"+\" in hy_version:\n hy_descriptive_version += \" <strong style='color: red;'>(unstable)</strong>\"\n\nexclude_patterns = ['_build', 'coreteam.rst']\nadd_module_names = False\n\npygments_style = 'sphinx'\n\nimport sphinx_rtd_theme\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_use_smartypants = False\nhtml_show_sphinx = False\n\nhtml_context = dict(\n hy_descriptive_version = hy_descriptive_version)\n\nhighlight_language = 'clojure'\n\nintersphinx_mapping = dict(\n py = ('https://docs.python.org/3/', None))\n# ** Generate Cheatsheet\nimport json\nfrom pathlib import Path\nfrom itertools import zip_longest\n\ndef refize(spec):\n role = ':hy:func:'\n if isinstance(spec, dict):\n _name = spec['name']\n uri = spec['uri']\n if spec.get('internal'):\n role = ':ref:'\n else:\n uri = spec\n _name = str.split(uri, '.')[-1]\n return '{}`{} <{}>`'.format(role, _name, uri)\n\n\ndef format_refs(refs, indent):\n args = [iter(map(refize, refs))]\n ref_groups = zip_longest(*args, fillvalue=\"\")\n return str.join(\n ' \\\\\\n' + ' ' * (indent + 3),\n [str.join(' ', ref_group) for ref_group in ref_groups],\n )\n\n\ndef format_row(category, divider_loc):\n return '{title: <{width}} | {methods}'.format(\n width=divider_loc,\n title=category['name'],\n methods=format_refs(category['methods'], divider_loc)\n )\n\n\ndef format_table(table_spec):\n table_name = table_spec['name']\n categories = table_spec['categories']\n longest_cat_name = max([len(category['name']) for category in categories])\n table = [\n table_name,\n '-' * len(table_name),\n '',\n '=' * longest_cat_name + ' ' + '=' * 25,\n *(format_row(category, longest_cat_name) for category in categories),\n '=' * longest_cat_name + ' ' + '=' * 25,\n ''\n ]\n return '\\n'.join(table)\n\n\n# Modifications to the cheatsheet should be added in `cheatsheet.json`\ncheatsheet_spec = json.loads(Path('./docs/cheatsheet.json').read_text())\ncheatsheet = [\n '..',\n ' DO NOT MODIFY THIS FILE. IT IS AUTO GENERATED BY ``conf.py``',\n ' If you need to change or add methods, modify ``cheatsheet_spec`` in ``conf.py``',\n '',\n '.. 
_cheatsheet:',\n '',\n 'Cheatsheet',\n '==========',\n '',\n *map(format_table, cheatsheet_spec),\n]\nPath('./docs/cheatsheet.rst').write_text('\\n'.join(cheatsheet))\n\n\n# ** Sphinx App Setup\n\n\ndef setup(app):\n app.add_css_file('overrides.css')\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# This file is execfile()d with the current directory set to its containing dir.\n\nimport re, os, sys, time, html\n\nsys.path.insert(0, os.path.abspath('..'))\n\nextensions = [\n 'sphinx.ext.napoleon',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.autodoc',\n 'sphinxcontrib.hydomain',\n]\n\nfrom get_version import __version__ as hy_version\n\n# Read the Docs might dirty its checkout, so strip the dirty flag.\nhy_version = re.sub(r'[+.]dirty\\Z', '', hy_version)\n\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\n\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'hy'\ncopyright = '%s the authors' % time.strftime('%Y')\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \".\".join(hy_version.split(\".\")[:-1])\n# The full version, including alpha/beta/rc tags.\nrelease = hy_version\nhy_descriptive_version = html.escape(hy_version)\nif \"+\" in hy_version:\n hy_descriptive_version += \" <strong style='color: red;'>(unstable)</strong>\"\n\nexclude_patterns = ['_build', 'coreteam.rst']\nadd_module_names = True\n\npygments_style = 'sphinx'\n\nimport sphinx_rtd_theme\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_use_smartypants = False\nhtml_show_sphinx = False\n\nhtml_context = dict(\n hy_descriptive_version = hy_descriptive_version)\n\nhighlight_language = 'clojure'\n\nintersphinx_mapping = dict(\n py = ('https://docs.python.org/3/', None))\n# ** Generate Cheatsheet\nimport json\nfrom pathlib import Path\nfrom itertools import zip_longest\n\ndef refize(spec):\n role = ':hy:func:'\n if isinstance(spec, dict):\n _name = spec['name']\n uri = spec['uri']\n if spec.get('internal'):\n role = ':ref:'\n else:\n uri = spec\n _name = str.split(uri, '.')[-1]\n return '{}`{} <{}>`'.format(role, _name, uri)\n\n\ndef format_refs(refs, indent):\n args = [iter(map(refize, refs))]\n ref_groups = zip_longest(*args, fillvalue=\"\")\n return str.join(\n ' \\\\\\n' + ' ' * (indent + 3),\n [str.join(' ', ref_group) for ref_group in ref_groups],\n )\n\n\ndef format_row(category, divider_loc):\n return '{title: <{width}} | {methods}'.format(\n width=divider_loc,\n title=category['name'],\n methods=format_refs(category['methods'], divider_loc)\n )\n\n\ndef format_table(table_spec):\n table_name = table_spec['name']\n categories = table_spec['categories']\n longest_cat_name = max([len(category['name']) for category in categories])\n table = [\n table_name,\n '-' * len(table_name),\n '',\n '=' * longest_cat_name + ' ' + '=' * 25,\n *(format_row(category, longest_cat_name) for category in categories),\n '=' * longest_cat_name + ' ' + '=' * 25,\n ''\n ]\n return '\\n'.join(table)\n\n\n# Modifications to the cheatsheet should be added in `cheatsheet.json`\ncheatsheet_spec = json.loads(Path('./docs/cheatsheet.json').read_text())\ncheatsheet = [\n '..',\n ' DO NOT MODIFY THIS FILE. IT IS AUTO GENERATED BY ``conf.py``',\n ' If you need to change or add methods, modify ``cheatsheet_spec`` in ``conf.py``',\n '',\n '.. _cheatsheet:',\n '',\n 'Cheatsheet',\n '==========',\n '',\n *map(format_table, cheatsheet_spec),\n]\nPath('./docs/cheatsheet.rst').write_text('\\n'.join(cheatsheet))\n\n\n# ** Sphinx App Setup\n\n\ndef setup(app):\n app.add_css_file('overrides.css')\n", "path": "docs/conf.py"}]} | 1,669 | 92 |
gh_patches_debug_23171 | rasdani/github-patches | git_diff | vas3k__vas3k.club-1117 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove the Autoplay and Loop attributes from videos uploaded directly to the club
### Discussed in https://github.com/vas3k/vas3k.club/discussions/1113
<div type='discussions-op-text'>
<sup>Originally posted by **resaixe** November 4, 2020</sup>
Currently, videos uploaded directly to the club get the attributes `autoplay="autoplay" loop="loop"`.
For example, [this comment](https://vas3k.club/post/5942/#comment-6e78d5e4-7e7d-496e-a75a-e6516f0255f8) currently renders like this:
```html
<video src="https://i.vas3k.club/d6600209ae05582854a384bb1ce5cfe23c64697b8f111b21ceabdfd72fce72bc.mp4" controls="controls" autoplay="autoplay" loop="loop" muted="muted" playsinline=""></video>
```
I propose removing these attributes so that users can decide for themselves when and how many times they want to watch a video. It would then look like this:
```html
<video src="https://i.vas3k.club/d6600209ae05582854a384bb1ce5cfe23c64697b8f111b21ceabdfd72fce72bc.mp4" controls="controls" muted="muted" playsinline=""></video>
```</div>
--- END ISSUE ---
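Not part of the original discussion, but a quick way to check what the club's renderer actually emits for an uploaded video is to call the renderer method directly. This sketch assumes the `common.markdown.club_renderer` module shown in the files below is importable from the project root; the sample URL is illustrative.

```python
from common.markdown.club_renderer import ClubRenderer

renderer = ClubRenderer()
html = renderer.video("https://i.vas3k.club/example.mp4")
print(html)
# Current output includes "controls autoplay loop muted playsinline";
# after removing the attributes, "autoplay" and "loop" should no longer appear.
```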
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `common/markdown/email_renderer.py`
Content:
```
1 from mistune import escape_html
2
3 from common.markdown.club_renderer import ClubRenderer
4 from common.regexp import YOUTUBE_RE
5
6
7 class EmailRenderer(ClubRenderer):
8 def simple_image(self, src, alt="", title=None):
9 return f"""<img src="{src}" alt="{alt}" width="600" border="0"><br>{title or ""}"""
10
11 def youtube(self, src, alt="", title=None):
12 youtube_match = YOUTUBE_RE.match(src)
13 youtube_id = escape_html(youtube_match.group(1) or "")
14 return f'<a href="{escape_html(src)}"><span class="ratio-16-9 video-preview" ' \
15 f'style="background-image: url(\'https://img.youtube.com/vi/{escape_html(youtube_id)}/0.jpg\');">' \
16 f'</span></a><br>{escape_html(title or "")}'
17
18 def video(self, src, alt="", title=None):
19 return f'<video src="{escape_html(src)}" controls autoplay loop muted playsinline>{alt}</video><br>{title or ""}'
20
21 def tweet(self, src, alt="", title=None):
22 return f'<a href="{escape_html(src)}">{escape_html(src)}</a><br>{escape_html(title or "")}'
23
24 def heading(self, text, level):
25 tag = f"h{level}"
26 return f"<{tag}>{text}</{tag}>\n"
27
```
Path: `common/markdown/club_renderer.py`
Content:
```
1 import html
2 import mistune
3 from urllib.parse import unquote
4 from mistune import escape_html
5 from slugify import slugify
6
7 from common.regexp import IMAGE_RE, VIDEO_RE, YOUTUBE_RE, TWITTER_RE, USERNAME_RE
8
9 IMAGE_CSS_CLASSES = {
10 "-": "text-body-image-full"
11 }
12
13
14 class ClubRenderer(mistune.HTMLRenderer):
15 def text(self, text):
16 text = escape_html(text)
17 text = USERNAME_RE.sub(r' <a href="/user/\1/">@\1</a>', text)
18 return text
19
20 def paragraph(self, text):
21 text = text.replace("\n", "<br>\n") # Mistune 2.0 broke newlines, let's hack it =/
22 return f"<p>{text}</p>\n"
23
24 def heading(self, text, level):
25 tag = f"h{level}"
26 anchor = slugify(text[:24])
27 return f"<{tag} id=\"{anchor}\"><a href=\"#{anchor}\">{text}</a></{tag}>\n"
28
29 def link(self, link, text=None, title=None):
30 if not text and not title:
31 # it's a pure link (without link tag) and we can try to parse it
32 embed = self.embed(link, text or "", title or "")
33 if embed:
34 return embed
35
36 if text is None:
37 text = link
38
39 # here's some magic of unescape->unquote->escape
40 # to fix cyrillic (and other non-latin) wikipedia URLs
41 return f'<a href="{self._safe_url(link)}">{html.escape(unquote(html.unescape(text or link)))}</a>'
42
43 def image(self, src, alt="", title=None):
44 embed = self.embed(src, alt, title)
45 if embed:
46 return embed
47
48 # users can try to "hack" our parser by using non-image urls
49 # so, if its not an image or video, display it as a link to avoid auto-loading
50 return f'<a href="{escape_html(src)}">{escape_html(src)}</a>'
51
52 def embed(self, src, alt="", title=None):
53 if IMAGE_RE.match(src):
54 return self.simple_image(src, alt, title)
55
56 if YOUTUBE_RE.match(src):
57 return self.youtube(src, alt, title)
58
59 if VIDEO_RE.match(src):
60 return self.video(src, alt, title)
61
62 if TWITTER_RE.match(src):
63 return self.tweet(src, alt, title)
64
65 return None
66
67 def simple_image(self, src, alt="", title=None):
68 css_classes = ""
69 title = title or alt
70 if title in IMAGE_CSS_CLASSES:
71 css_classes = IMAGE_CSS_CLASSES[title]
72
73 image_tag = f'<img src="{escape_html(src)}" alt="{escape_html(title)}">'
74 caption = f"<figcaption>{escape_html(title)}</figcaption>" if title else ""
75 return f'<figure class="{css_classes}">{image_tag}{caption}</figure>'
76
77 def youtube(self, src, alt="", title=None):
78 youtube_match = YOUTUBE_RE.match(src)
79 playlist = ""
80 if youtube_match.group(2):
81 playlist = f"list={escape_html(youtube_match.group(2))}&listType=playlist&"
82 video_tag = (
83 f'<span class="ratio-16-9">'
84 f'<iframe loading="lazy" src="https://www.youtube.com/embed/{escape_html(youtube_match.group(1) or "")}'
85 f'?{playlist}autoplay=0&controls=1&showinfo=1&vq=hd1080"'
86 f'allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; fullscreen"'
87 f'allowfullscreen></iframe>'
88 f"</span>"
89 )
90 caption = f"<figcaption>{escape_html(title)}</figcaption>" if title else ""
91 return f"<figure>{video_tag}{caption}</figure>"
92
93 def video(self, src, alt="", title=None):
94 video_tag = (
95 f'<video src="{escape_html(src)}" controls autoplay loop muted playsinline>{escape_html(alt)}</video>'
96 )
97 caption = f"<figcaption>{escape_html(title)}</figcaption>" if title else ""
98 return f"<figure>{video_tag}{caption}</figure>"
99
100 def tweet(self, src, alt="", title=None):
101 tweet_match = TWITTER_RE.match(src)
102 twitter_tag = f'<blockquote class="twitter-tweet" tw-align-center>' \
103 f'<a href="{tweet_match.group(1)}"></a></blockquote><br>' \
104 f'<a href="{src}" target="_blank">{src}</a>'
105 return twitter_tag
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/common/markdown/club_renderer.py b/common/markdown/club_renderer.py
--- a/common/markdown/club_renderer.py
+++ b/common/markdown/club_renderer.py
@@ -92,7 +92,7 @@
def video(self, src, alt="", title=None):
video_tag = (
- f'<video src="{escape_html(src)}" controls autoplay loop muted playsinline>{escape_html(alt)}</video>'
+ f'<video src="{escape_html(src)}" controls muted playsinline>{escape_html(alt)}</video>'
)
caption = f"<figcaption>{escape_html(title)}</figcaption>" if title else ""
return f"<figure>{video_tag}{caption}</figure>"
diff --git a/common/markdown/email_renderer.py b/common/markdown/email_renderer.py
--- a/common/markdown/email_renderer.py
+++ b/common/markdown/email_renderer.py
@@ -16,7 +16,7 @@
f'</span></a><br>{escape_html(title or "")}'
def video(self, src, alt="", title=None):
- return f'<video src="{escape_html(src)}" controls autoplay loop muted playsinline>{alt}</video><br>{title or ""}'
+ return f'<video src="{escape_html(src)}" controls muted playsinline>{alt}</video><br>{title or ""}'
def tweet(self, src, alt="", title=None):
return f'<a href="{escape_html(src)}">{escape_html(src)}</a><br>{escape_html(title or "")}'
| {"golden_diff": "diff --git a/common/markdown/club_renderer.py b/common/markdown/club_renderer.py\n--- a/common/markdown/club_renderer.py\n+++ b/common/markdown/club_renderer.py\n@@ -92,7 +92,7 @@\n \n def video(self, src, alt=\"\", title=None):\n video_tag = (\n- f'<video src=\"{escape_html(src)}\" controls autoplay loop muted playsinline>{escape_html(alt)}</video>'\n+ f'<video src=\"{escape_html(src)}\" controls muted playsinline>{escape_html(alt)}</video>'\n )\n caption = f\"<figcaption>{escape_html(title)}</figcaption>\" if title else \"\"\n return f\"<figure>{video_tag}{caption}</figure>\"\ndiff --git a/common/markdown/email_renderer.py b/common/markdown/email_renderer.py\n--- a/common/markdown/email_renderer.py\n+++ b/common/markdown/email_renderer.py\n@@ -16,7 +16,7 @@\n f'</span></a><br>{escape_html(title or \"\")}'\n \n def video(self, src, alt=\"\", title=None):\n- return f'<video src=\"{escape_html(src)}\" controls autoplay loop muted playsinline>{alt}</video><br>{title or \"\"}'\n+ return f'<video src=\"{escape_html(src)}\" controls muted playsinline>{alt}</video><br>{title or \"\"}'\n \n def tweet(self, src, alt=\"\", title=None):\n return f'<a href=\"{escape_html(src)}\">{escape_html(src)}</a><br>{escape_html(title or \"\")}'\n", "issue": "\u0423\u0431\u0440\u0430\u0442\u044c \u0430\u0442\u0440\u0438\u0431\u0443\u0442\u044b Autoplay \u0438 Loop \u0434\u043b\u044f \u0432\u0438\u0434\u0435\u043e, \u0437\u0430\u043b\u0438\u0432\u0430\u0435\u043c\u044b\u0445 \u043d\u0430\u043f\u0440\u044f\u043c\u0443\u044e \u0432 \u043a\u043b\u0443\u0431\n### Discussed in https://github.com/vas3k/vas3k.club/discussions/1113\r\n\r\n<div type='discussions-op-text'>\r\n\r\n<sup>Originally posted by **resaixe** November 4, 2020</sup>\r\n\u0421\u0435\u0439\u0447\u0430\u0441 \u0432\u0438\u0434\u0435\u043e, \u043a\u043e\u0442\u043e\u0440\u044b\u0435 \u0437\u0430\u043b\u0438\u0432\u0430\u044e\u0442\u0441\u044f \u043d\u0430\u043f\u0440\u044f\u043c\u0443\u044e \u0432 \u043a\u043b\u0443\u0431, \u0438\u043c\u0435\u044e\u0442 \u0430\u0442\u0440\u0438\u0431\u0443\u0442\u044b `autoplay=\"autoplay\" loop=\"loop\"`. \r\n\u041d\u0430\u043f\u0440\u0438\u043c\u0435\u0440, \u0432 [\u044d\u0442\u043e\u043c \u043a\u043e\u043c\u043c\u0435\u043d\u0442\u0430\u0440\u0438\u0438](https://vas3k.club/post/5942/#comment-6e78d5e4-7e7d-496e-a75a-e6516f0255f8) \u0441\u0435\u0439\u0447\u0430\u0441 \u0442\u0430\u043a:\r\n```html\r\n<video src=\"https://i.vas3k.club/d6600209ae05582854a384bb1ce5cfe23c64697b8f111b21ceabdfd72fce72bc.mp4\" controls=\"controls\" autoplay=\"autoplay\" loop=\"loop\" muted=\"muted\" playsinline=\"\"></video>\r\n```\r\n\r\n\u041f\u0440\u0435\u0434\u043b\u0430\u0433\u0430\u044e \u0443\u0431\u0440\u0430\u0442\u044c \u044d\u0442\u0438 \u0430\u0442\u0440\u0438\u0431\u0443\u0442\u044b, \u0447\u0442\u043e\u0431\u044b \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u0438 \u0441\u0430\u043c\u0438 \u0440\u0435\u0448\u0430\u043b\u0438, \u043a\u043e\u0433\u0434\u0430 \u0438 \u0441\u043a\u043e\u043b\u044c\u043a\u043e \u0440\u0430\u0437 \u043e\u043d\u0438 \u0445\u043e\u0442\u044f\u0442 \u043f\u043e\u0441\u043c\u043e\u0442\u0440\u0435\u0442\u044c \u0432\u0438\u0434\u0435\u043e. 
\u0427\u0442\u043e\u0431\u044b \u0441\u0442\u0430\u043b\u043e \u0442\u0430\u043a:\r\n```html\r\n<video src=\"https://i.vas3k.club/d6600209ae05582854a384bb1ce5cfe23c64697b8f111b21ceabdfd72fce72bc.mp4\" controls=\"controls\" muted=\"muted\" playsinline=\"\"></video>\r\n```</div>\n", "before_files": [{"content": "from mistune import escape_html\n\nfrom common.markdown.club_renderer import ClubRenderer\nfrom common.regexp import YOUTUBE_RE\n\n\nclass EmailRenderer(ClubRenderer):\n def simple_image(self, src, alt=\"\", title=None):\n return f\"\"\"<img src=\"{src}\" alt=\"{alt}\" width=\"600\" border=\"0\"><br>{title or \"\"}\"\"\"\n\n def youtube(self, src, alt=\"\", title=None):\n youtube_match = YOUTUBE_RE.match(src)\n youtube_id = escape_html(youtube_match.group(1) or \"\")\n return f'<a href=\"{escape_html(src)}\"><span class=\"ratio-16-9 video-preview\" ' \\\n f'style=\"background-image: url(\\'https://img.youtube.com/vi/{escape_html(youtube_id)}/0.jpg\\');\">' \\\n f'</span></a><br>{escape_html(title or \"\")}'\n\n def video(self, src, alt=\"\", title=None):\n return f'<video src=\"{escape_html(src)}\" controls autoplay loop muted playsinline>{alt}</video><br>{title or \"\"}'\n\n def tweet(self, src, alt=\"\", title=None):\n return f'<a href=\"{escape_html(src)}\">{escape_html(src)}</a><br>{escape_html(title or \"\")}'\n\n def heading(self, text, level):\n tag = f\"h{level}\"\n return f\"<{tag}>{text}</{tag}>\\n\"\n", "path": "common/markdown/email_renderer.py"}, {"content": "import html\nimport mistune\nfrom urllib.parse import unquote\nfrom mistune import escape_html\nfrom slugify import slugify\n\nfrom common.regexp import IMAGE_RE, VIDEO_RE, YOUTUBE_RE, TWITTER_RE, USERNAME_RE\n\nIMAGE_CSS_CLASSES = {\n \"-\": \"text-body-image-full\"\n}\n\n\nclass ClubRenderer(mistune.HTMLRenderer):\n def text(self, text):\n text = escape_html(text)\n text = USERNAME_RE.sub(r' <a href=\"/user/\\1/\">@\\1</a>', text)\n return text\n\n def paragraph(self, text):\n text = text.replace(\"\\n\", \"<br>\\n\") # Mistune 2.0 broke newlines, let's hack it =/\n return f\"<p>{text}</p>\\n\"\n\n def heading(self, text, level):\n tag = f\"h{level}\"\n anchor = slugify(text[:24])\n return f\"<{tag} id=\\\"{anchor}\\\"><a href=\\\"#{anchor}\\\">{text}</a></{tag}>\\n\"\n\n def link(self, link, text=None, title=None):\n if not text and not title:\n # it's a pure link (without link tag) and we can try to parse it\n embed = self.embed(link, text or \"\", title or \"\")\n if embed:\n return embed\n\n if text is None:\n text = link\n\n # here's some magic of unescape->unquote->escape\n # to fix cyrillic (and other non-latin) wikipedia URLs\n return f'<a href=\"{self._safe_url(link)}\">{html.escape(unquote(html.unescape(text or link)))}</a>'\n\n def image(self, src, alt=\"\", title=None):\n embed = self.embed(src, alt, title)\n if embed:\n return embed\n\n # users can try to \"hack\" our parser by using non-image urls\n # so, if its not an image or video, display it as a link to avoid auto-loading\n return f'<a href=\"{escape_html(src)}\">{escape_html(src)}</a>'\n\n def embed(self, src, alt=\"\", title=None):\n if IMAGE_RE.match(src):\n return self.simple_image(src, alt, title)\n\n if YOUTUBE_RE.match(src):\n return self.youtube(src, alt, title)\n\n if VIDEO_RE.match(src):\n return self.video(src, alt, title)\n\n if TWITTER_RE.match(src):\n return self.tweet(src, alt, title)\n\n return None\n\n def simple_image(self, src, alt=\"\", title=None):\n css_classes = \"\"\n title = title or alt\n if title in 
IMAGE_CSS_CLASSES:\n css_classes = IMAGE_CSS_CLASSES[title]\n\n image_tag = f'<img src=\"{escape_html(src)}\" alt=\"{escape_html(title)}\">'\n caption = f\"<figcaption>{escape_html(title)}</figcaption>\" if title else \"\"\n return f'<figure class=\"{css_classes}\">{image_tag}{caption}</figure>'\n\n def youtube(self, src, alt=\"\", title=None):\n youtube_match = YOUTUBE_RE.match(src)\n playlist = \"\"\n if youtube_match.group(2):\n playlist = f\"list={escape_html(youtube_match.group(2))}&listType=playlist&\"\n video_tag = (\n f'<span class=\"ratio-16-9\">'\n f'<iframe loading=\"lazy\" src=\"https://www.youtube.com/embed/{escape_html(youtube_match.group(1) or \"\")}'\n f'?{playlist}autoplay=0&controls=1&showinfo=1&vq=hd1080\"'\n f'allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; fullscreen\"'\n f'allowfullscreen></iframe>'\n f\"</span>\"\n )\n caption = f\"<figcaption>{escape_html(title)}</figcaption>\" if title else \"\"\n return f\"<figure>{video_tag}{caption}</figure>\"\n\n def video(self, src, alt=\"\", title=None):\n video_tag = (\n f'<video src=\"{escape_html(src)}\" controls autoplay loop muted playsinline>{escape_html(alt)}</video>'\n )\n caption = f\"<figcaption>{escape_html(title)}</figcaption>\" if title else \"\"\n return f\"<figure>{video_tag}{caption}</figure>\"\n\n def tweet(self, src, alt=\"\", title=None):\n tweet_match = TWITTER_RE.match(src)\n twitter_tag = f'<blockquote class=\"twitter-tweet\" tw-align-center>' \\\n f'<a href=\"{tweet_match.group(1)}\"></a></blockquote><br>' \\\n f'<a href=\"{src}\" target=\"_blank\">{src}</a>'\n return twitter_tag\n", "path": "common/markdown/club_renderer.py"}], "after_files": [{"content": "from mistune import escape_html\n\nfrom common.markdown.club_renderer import ClubRenderer\nfrom common.regexp import YOUTUBE_RE\n\n\nclass EmailRenderer(ClubRenderer):\n def simple_image(self, src, alt=\"\", title=None):\n return f\"\"\"<img src=\"{src}\" alt=\"{alt}\" width=\"600\" border=\"0\"><br>{title or \"\"}\"\"\"\n\n def youtube(self, src, alt=\"\", title=None):\n youtube_match = YOUTUBE_RE.match(src)\n youtube_id = escape_html(youtube_match.group(1) or \"\")\n return f'<a href=\"{escape_html(src)}\"><span class=\"ratio-16-9 video-preview\" ' \\\n f'style=\"background-image: url(\\'https://img.youtube.com/vi/{escape_html(youtube_id)}/0.jpg\\');\">' \\\n f'</span></a><br>{escape_html(title or \"\")}'\n\n def video(self, src, alt=\"\", title=None):\n return f'<video src=\"{escape_html(src)}\" controls muted playsinline>{alt}</video><br>{title or \"\"}'\n\n def tweet(self, src, alt=\"\", title=None):\n return f'<a href=\"{escape_html(src)}\">{escape_html(src)}</a><br>{escape_html(title or \"\")}'\n\n def heading(self, text, level):\n tag = f\"h{level}\"\n return f\"<{tag}>{text}</{tag}>\\n\"\n", "path": "common/markdown/email_renderer.py"}, {"content": "import html\nimport mistune\nfrom urllib.parse import unquote\nfrom mistune import escape_html\nfrom slugify import slugify\n\nfrom common.regexp import IMAGE_RE, VIDEO_RE, YOUTUBE_RE, TWITTER_RE, USERNAME_RE\n\nIMAGE_CSS_CLASSES = {\n \"-\": \"text-body-image-full\"\n}\n\n\nclass ClubRenderer(mistune.HTMLRenderer):\n def text(self, text):\n text = escape_html(text)\n text = USERNAME_RE.sub(r' <a href=\"/user/\\1/\">@\\1</a>', text)\n return text\n\n def paragraph(self, text):\n text = text.replace(\"\\n\", \"<br>\\n\") # Mistune 2.0 broke newlines, let's hack it =/\n return f\"<p>{text}</p>\\n\"\n\n def heading(self, text, level):\n tag = 
f\"h{level}\"\n anchor = slugify(text[:24])\n return f\"<{tag} id=\\\"{anchor}\\\"><a href=\\\"#{anchor}\\\">{text}</a></{tag}>\\n\"\n\n def link(self, link, text=None, title=None):\n if not text and not title:\n # it's a pure link (without link tag) and we can try to parse it\n embed = self.embed(link, text or \"\", title or \"\")\n if embed:\n return embed\n\n if text is None:\n text = link\n\n # here's some magic of unescape->unquote->escape\n # to fix cyrillic (and other non-latin) wikipedia URLs\n return f'<a href=\"{self._safe_url(link)}\">{html.escape(unquote(html.unescape(text or link)))}</a>'\n\n def image(self, src, alt=\"\", title=None):\n embed = self.embed(src, alt, title)\n if embed:\n return embed\n\n # users can try to \"hack\" our parser by using non-image urls\n # so, if its not an image or video, display it as a link to avoid auto-loading\n return f'<a href=\"{escape_html(src)}\">{escape_html(src)}</a>'\n\n def embed(self, src, alt=\"\", title=None):\n if IMAGE_RE.match(src):\n return self.simple_image(src, alt, title)\n\n if YOUTUBE_RE.match(src):\n return self.youtube(src, alt, title)\n\n if VIDEO_RE.match(src):\n return self.video(src, alt, title)\n\n if TWITTER_RE.match(src):\n return self.tweet(src, alt, title)\n\n return None\n\n def simple_image(self, src, alt=\"\", title=None):\n css_classes = \"\"\n title = title or alt\n if title in IMAGE_CSS_CLASSES:\n css_classes = IMAGE_CSS_CLASSES[title]\n\n image_tag = f'<img src=\"{escape_html(src)}\" alt=\"{escape_html(title)}\">'\n caption = f\"<figcaption>{escape_html(title)}</figcaption>\" if title else \"\"\n return f'<figure class=\"{css_classes}\">{image_tag}{caption}</figure>'\n\n def youtube(self, src, alt=\"\", title=None):\n youtube_match = YOUTUBE_RE.match(src)\n playlist = \"\"\n if youtube_match.group(2):\n playlist = f\"list={escape_html(youtube_match.group(2))}&listType=playlist&\"\n video_tag = (\n f'<span class=\"ratio-16-9\">'\n f'<iframe loading=\"lazy\" src=\"https://www.youtube.com/embed/{escape_html(youtube_match.group(1) or \"\")}'\n f'?{playlist}autoplay=0&controls=1&showinfo=1&vq=hd1080\"'\n f'allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; fullscreen\"'\n f'allowfullscreen></iframe>'\n f\"</span>\"\n )\n caption = f\"<figcaption>{escape_html(title)}</figcaption>\" if title else \"\"\n return f\"<figure>{video_tag}{caption}</figure>\"\n\n def video(self, src, alt=\"\", title=None):\n video_tag = (\n f'<video src=\"{escape_html(src)}\" controls muted playsinline>{escape_html(alt)}</video>'\n )\n caption = f\"<figcaption>{escape_html(title)}</figcaption>\" if title else \"\"\n return f\"<figure>{video_tag}{caption}</figure>\"\n\n def tweet(self, src, alt=\"\", title=None):\n tweet_match = TWITTER_RE.match(src)\n twitter_tag = f'<blockquote class=\"twitter-tweet\" tw-align-center>' \\\n f'<a href=\"{tweet_match.group(1)}\"></a></blockquote><br>' \\\n f'<a href=\"{src}\" target=\"_blank\">{src}</a>'\n return twitter_tag\n", "path": "common/markdown/club_renderer.py"}]} | 2,257 | 325 |
gh_patches_debug_16394 | rasdani/github-patches | git_diff | yt-project__yt-2265 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dynamic parallel objects StopIteration error in Python 3.7
### Bug report
**Bug summary**
Dynamic parallel objects result in a [`StopIteration` error stemming from a new behavior introduced in Python 3.7](https://stackoverflow.com/a/51701040/10892982).
**Code for reproduction**
Given `test.py`:
```python
import yt
yt.enable_parallelism()
for x in yt.parallel_objects(range(10), dynamic=True):
print(x)
```
Run:
```console
$ mpirun python test.py
```
**Actual outcome**
```console
...
File "test.py", line 4, in <module>
for x in yt.parallel_objects(range(10), dynamic=True):
File "/Users/qobilidop/work/rse/yt-project/yt-dev/yt/utilities/parallel_tools/parallel_analysis_interface.py", line 481, in parallel_objects
storage=storage):
File "/Users/qobilidop/work/rse/yt-project/yt-dev/yt/utilities/parallel_tools/task_queue.py", line 174, in dynamic_parallel_objects
for task in my_q:
P001 yt : [ERROR ] 2019-05-27 12:40:34,817 RuntimeError: generator raised StopIteration
...
```
**Expected outcome**
No error.
**Version Information**
* Python Version: 3.7
* yt version: master branch (3.6.dev0)
--- END ISSUE ---
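For readers unfamiliar with the Python 3.7 change behind this error (PEP 479, which is what the linked answer describes): a `StopIteration` that escapes a generator's body is converted into `RuntimeError: generator raised StopIteration` instead of silently ending iteration. The standalone sketch below is independent of yt and only illustrates the failure mode and the iterator-protocol style that avoids it; all names are made up for the example.

```python
def generator_style(get_next):
    # Pre-3.7 idiom: rely on get_next() raising StopIteration to stop the loop.
    while True:
        yield get_next()  # StopIteration escaping here becomes RuntimeError on 3.7+


class IteratorStyle:
    # PEP 479-safe idiom: implement the iterator protocol directly, where
    # raising StopIteration from __next__ is the normal way to finish.
    def __init__(self, get_next):
        self.get_next = get_next

    def __iter__(self):
        return self

    def __next__(self):
        return self.get_next()


def make_get_next(items):
    it = iter(items)
    return lambda: next(it)  # raises StopIteration once the items run out


print(list(IteratorStyle(make_get_next([1, 2, 3]))))  # [1, 2, 3]

try:
    list(generator_style(make_get_next([1, 2, 3])))
except RuntimeError as exc:
    print("generator style fails on Python 3.7+:", exc)
```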
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt/utilities/parallel_tools/task_queue.py`
Content:
```
1 """
2 Task queue in yt
3
4
5
6 """
7
8 #-----------------------------------------------------------------------------
9 # Copyright (c) 2013, yt Development Team.
10 #
11 # Distributed under the terms of the Modified BSD License.
12 #
13 # The full license is in the file COPYING.txt, distributed with this software.
14 #-----------------------------------------------------------------------------
15
16 import numpy as np
17
18 from yt.funcs import mylog
19 from .parallel_analysis_interface import \
20 communication_system, \
21 _get_comm, \
22 parallel_capable, \
23 ResultsStorage
24
25 messages = dict(
26 task = dict(msg = 'next'),
27 result = dict(msg = 'result'),
28 task_req = dict(msg = 'task_req'),
29 end = dict(msg = 'no_more_tasks'),
30 )
31
32 class TaskQueueNonRoot(object):
33 def __init__(self, tasks, comm, subcomm):
34 self.tasks = tasks
35 self.results = {}
36 self.comm = comm
37 self.subcomm = subcomm
38
39 def send_result(self, result):
40 new_msg = messages['result'].copy()
41 new_msg['value'] = result
42 if self.subcomm.rank == 0:
43 self.comm.comm.send(new_msg, dest = 0, tag=1)
44 self.subcomm.barrier()
45
46 def get_next(self):
47 msg = messages['task_req'].copy()
48 if self.subcomm.rank == 0:
49 self.comm.comm.send(msg, dest = 0, tag=1)
50 msg = self.comm.comm.recv(source = 0, tag=2)
51 msg = self.subcomm.bcast(msg, root=0)
52 if msg['msg'] == messages['end']['msg']:
53 mylog.debug("Notified to end")
54 raise StopIteration
55 return msg['value']
56
57 def __iter__(self):
58 while 1:
59 yield self.get_next()
60
61 def run(self, callable):
62 for task in self:
63 self.send_result(callable(task))
64 return self.finalize()
65
66 def finalize(self, vals = None):
67 return self.comm.comm.bcast(vals, root = 0)
68
69 class TaskQueueRoot(TaskQueueNonRoot):
70 def __init__(self, tasks, comm, njobs):
71 self.njobs = njobs
72 self.tasks = tasks
73 self.results = {}
74 self.assignments = {}
75 self._notified = 0
76 self._current = 0
77 self._remaining = len(self.tasks)
78 self.comm = comm
79 # Set up threading here
80 # self.dist = threading.Thread(target=self.handle_assignments)
81 # self.dist.daemon = True
82 # self.dist.start()
83
84 def run(self, func = None):
85 self.comm.probe_loop(1, self.handle_assignment)
86 return self.finalize(self.results)
87
88 def insert_result(self, source_id, result):
89 task_id = self.assignments[source_id]
90 self.results[task_id] = result
91
92 def assign_task(self, source_id):
93 if self._remaining == 0:
94 mylog.debug("Notifying %s to end", source_id)
95 msg = messages['end'].copy()
96 self._notified += 1
97 else:
98 msg = messages['task'].copy()
99 task_id = self._current
100 task = self.tasks[task_id]
101 self.assignments[source_id] = task_id
102 self._current += 1
103 self._remaining -= 1
104 msg['value'] = task
105 self.comm.comm.send(msg, dest = source_id, tag = 2)
106
107 def handle_assignment(self, status):
108 msg = self.comm.comm.recv(source = status.source, tag = 1)
109 if msg['msg'] == messages['result']['msg']:
110 self.insert_result(status.source, msg['value'])
111 elif msg['msg'] == messages['task_req']['msg']:
112 self.assign_task(status.source)
113 else:
114 mylog.error("GOT AN UNKNOWN MESSAGE: %s", msg)
115 raise RuntimeError
116 if self._notified >= self.njobs:
117 raise StopIteration
118
119 def task_queue(func, tasks, njobs=0):
120 comm = _get_comm(())
121 if not parallel_capable:
122 mylog.error("Cannot create task queue for serial process.")
123 raise RuntimeError
124 my_size = comm.comm.size
125 if njobs <= 0:
126 njobs = my_size - 1
127 if njobs >= my_size:
128 mylog.error("You have asked for %s jobs, but only %s processors are available.",
129 njobs, (my_size - 1))
130 raise RuntimeError
131 my_rank = comm.rank
132 all_new_comms = np.array_split(np.arange(1, my_size), njobs)
133 all_new_comms.insert(0, np.array([0]))
134 for i,comm_set in enumerate(all_new_comms):
135 if my_rank in comm_set:
136 my_new_id = i
137 break
138 subcomm = communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
139
140 if comm.comm.rank == 0:
141 my_q = TaskQueueRoot(tasks, comm, njobs)
142 else:
143 my_q = TaskQueueNonRoot(None, comm, subcomm)
144 communication_system.pop()
145 return my_q.run(func)
146
147 def dynamic_parallel_objects(tasks, njobs=0, storage=None, broadcast=True):
148 comm = _get_comm(())
149 if not parallel_capable:
150 mylog.error("Cannot create task queue for serial process.")
151 raise RuntimeError
152 my_size = comm.comm.size
153 if njobs <= 0:
154 njobs = my_size - 1
155 if njobs >= my_size:
156 mylog.error("You have asked for %s jobs, but only %s processors are available.",
157 njobs, (my_size - 1))
158 raise RuntimeError
159 my_rank = comm.rank
160 all_new_comms = np.array_split(np.arange(1, my_size), njobs)
161 all_new_comms.insert(0, np.array([0]))
162 for i,comm_set in enumerate(all_new_comms):
163 if my_rank in comm_set:
164 my_new_id = i
165 break
166 subcomm = communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
167
168 if comm.comm.rank == 0:
169 my_q = TaskQueueRoot(tasks, comm, njobs)
170 my_q.comm.probe_loop(1, my_q.handle_assignment)
171 else:
172 my_q = TaskQueueNonRoot(None, comm, subcomm)
173 if storage is None:
174 for task in my_q:
175 yield task
176 else:
177 for task in my_q:
178 rstore = ResultsStorage()
179 yield rstore, task
180 my_q.send_result(rstore.result)
181
182 if storage is not None:
183 if broadcast:
184 my_results = my_q.comm.comm.bcast(my_q.results, root=0)
185 else:
186 my_results = my_q.results
187 storage.update(my_results)
188
189 communication_system.pop()
190
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yt/utilities/parallel_tools/task_queue.py b/yt/utilities/parallel_tools/task_queue.py
--- a/yt/utilities/parallel_tools/task_queue.py
+++ b/yt/utilities/parallel_tools/task_queue.py
@@ -43,7 +43,7 @@
self.comm.comm.send(new_msg, dest = 0, tag=1)
self.subcomm.barrier()
- def get_next(self):
+ def __next__(self):
msg = messages['task_req'].copy()
if self.subcomm.rank == 0:
self.comm.comm.send(msg, dest = 0, tag=1)
@@ -54,9 +54,11 @@
raise StopIteration
return msg['value']
+ # For Python 2 compatibility
+ next = __next__
+
def __iter__(self):
- while 1:
- yield self.get_next()
+ return self
def run(self, callable):
for task in self:
| {"golden_diff": "diff --git a/yt/utilities/parallel_tools/task_queue.py b/yt/utilities/parallel_tools/task_queue.py\n--- a/yt/utilities/parallel_tools/task_queue.py\n+++ b/yt/utilities/parallel_tools/task_queue.py\n@@ -43,7 +43,7 @@\n self.comm.comm.send(new_msg, dest = 0, tag=1)\n self.subcomm.barrier()\n \n- def get_next(self):\n+ def __next__(self):\n msg = messages['task_req'].copy()\n if self.subcomm.rank == 0:\n self.comm.comm.send(msg, dest = 0, tag=1)\n@@ -54,9 +54,11 @@\n raise StopIteration\n return msg['value']\n \n+ # For Python 2 compatibility\n+ next = __next__\n+\n def __iter__(self):\n- while 1:\n- yield self.get_next()\n+ return self\n \n def run(self, callable):\n for task in self:\n", "issue": "Dynamic parallel objects StopIteration error in Python 3.7\n### Bug report\r\n\r\n**Bug summary**\r\n\r\nDynamic parallel objects would result in [`StopIteration` error stemming from a Python 3.7 new behavior](https://stackoverflow.com/a/51701040/10892982).\r\n\r\n**Code for reproduction**\r\n\r\nGiven `test.py`:\r\n```python\r\nimport yt\r\nyt.enable_parallelism()\r\n\r\nfor x in yt.parallel_objects(range(10), dynamic=True):\r\n print(x)\r\n```\r\n\r\nRun:\r\n```console\r\n$ mpirun python test.py\r\n```\r\n\r\n**Actual outcome**\r\n\r\n```console\r\n...\r\n File \"test.py\", line 4, in <module>\r\n for x in yt.parallel_objects(range(10), dynamic=True):\r\n File \"/Users/qobilidop/work/rse/yt-project/yt-dev/yt/utilities/parallel_tools/parallel_analysis_interface.py\", line 481, in parallel_objects\r\n storage=storage):\r\n File \"/Users/qobilidop/work/rse/yt-project/yt-dev/yt/utilities/parallel_tools/task_queue.py\", line 174, in dynamic_parallel_objects\r\n for task in my_q:\r\nP001 yt : [ERROR ] 2019-05-27 12:40:34,817 RuntimeError: generator raised StopIteration\r\n...\r\n```\r\n\r\n**Expected outcome**\r\n\r\nNo error.\r\n\r\n**Version Information**\r\n\r\n * Python Version: 3.7\r\n * yt version: master branch (3.6.dev0)\r\n\n", "before_files": [{"content": "\"\"\"\nTask queue in yt\n\n\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport numpy as np\n\nfrom yt.funcs import mylog\nfrom .parallel_analysis_interface import \\\n communication_system, \\\n _get_comm, \\\n parallel_capable, \\\n ResultsStorage\n\nmessages = dict(\n task = dict(msg = 'next'),\n result = dict(msg = 'result'),\n task_req = dict(msg = 'task_req'),\n end = dict(msg = 'no_more_tasks'),\n)\n\nclass TaskQueueNonRoot(object):\n def __init__(self, tasks, comm, subcomm):\n self.tasks = tasks\n self.results = {}\n self.comm = comm\n self.subcomm = subcomm\n\n def send_result(self, result):\n new_msg = messages['result'].copy()\n new_msg['value'] = result\n if self.subcomm.rank == 0:\n self.comm.comm.send(new_msg, dest = 0, tag=1)\n self.subcomm.barrier()\n\n def get_next(self):\n msg = messages['task_req'].copy()\n if self.subcomm.rank == 0:\n self.comm.comm.send(msg, dest = 0, tag=1)\n msg = self.comm.comm.recv(source = 0, tag=2)\n msg = self.subcomm.bcast(msg, root=0)\n if msg['msg'] == messages['end']['msg']:\n mylog.debug(\"Notified to end\")\n raise StopIteration\n return msg['value']\n\n def __iter__(self):\n while 1:\n yield self.get_next()\n\n def run(self, callable):\n for task in 
self:\n self.send_result(callable(task))\n return self.finalize()\n\n def finalize(self, vals = None):\n return self.comm.comm.bcast(vals, root = 0)\n\nclass TaskQueueRoot(TaskQueueNonRoot):\n def __init__(self, tasks, comm, njobs):\n self.njobs = njobs\n self.tasks = tasks\n self.results = {}\n self.assignments = {}\n self._notified = 0\n self._current = 0\n self._remaining = len(self.tasks)\n self.comm = comm\n # Set up threading here\n # self.dist = threading.Thread(target=self.handle_assignments)\n # self.dist.daemon = True\n # self.dist.start()\n\n def run(self, func = None):\n self.comm.probe_loop(1, self.handle_assignment)\n return self.finalize(self.results)\n\n def insert_result(self, source_id, result):\n task_id = self.assignments[source_id]\n self.results[task_id] = result\n\n def assign_task(self, source_id):\n if self._remaining == 0:\n mylog.debug(\"Notifying %s to end\", source_id)\n msg = messages['end'].copy()\n self._notified += 1\n else:\n msg = messages['task'].copy()\n task_id = self._current\n task = self.tasks[task_id]\n self.assignments[source_id] = task_id\n self._current += 1\n self._remaining -= 1\n msg['value'] = task\n self.comm.comm.send(msg, dest = source_id, tag = 2)\n\n def handle_assignment(self, status):\n msg = self.comm.comm.recv(source = status.source, tag = 1)\n if msg['msg'] == messages['result']['msg']:\n self.insert_result(status.source, msg['value'])\n elif msg['msg'] == messages['task_req']['msg']:\n self.assign_task(status.source)\n else:\n mylog.error(\"GOT AN UNKNOWN MESSAGE: %s\", msg)\n raise RuntimeError\n if self._notified >= self.njobs:\n raise StopIteration\n\ndef task_queue(func, tasks, njobs=0):\n comm = _get_comm(())\n if not parallel_capable:\n mylog.error(\"Cannot create task queue for serial process.\")\n raise RuntimeError\n my_size = comm.comm.size\n if njobs <= 0:\n njobs = my_size - 1\n if njobs >= my_size:\n mylog.error(\"You have asked for %s jobs, but only %s processors are available.\",\n njobs, (my_size - 1))\n raise RuntimeError\n my_rank = comm.rank\n all_new_comms = np.array_split(np.arange(1, my_size), njobs)\n all_new_comms.insert(0, np.array([0]))\n for i,comm_set in enumerate(all_new_comms):\n if my_rank in comm_set:\n my_new_id = i\n break\n subcomm = communication_system.push_with_ids(all_new_comms[my_new_id].tolist())\n \n if comm.comm.rank == 0:\n my_q = TaskQueueRoot(tasks, comm, njobs)\n else:\n my_q = TaskQueueNonRoot(None, comm, subcomm)\n communication_system.pop()\n return my_q.run(func)\n\ndef dynamic_parallel_objects(tasks, njobs=0, storage=None, broadcast=True):\n comm = _get_comm(())\n if not parallel_capable:\n mylog.error(\"Cannot create task queue for serial process.\")\n raise RuntimeError\n my_size = comm.comm.size\n if njobs <= 0:\n njobs = my_size - 1\n if njobs >= my_size:\n mylog.error(\"You have asked for %s jobs, but only %s processors are available.\",\n njobs, (my_size - 1))\n raise RuntimeError\n my_rank = comm.rank\n all_new_comms = np.array_split(np.arange(1, my_size), njobs)\n all_new_comms.insert(0, np.array([0]))\n for i,comm_set in enumerate(all_new_comms):\n if my_rank in comm_set:\n my_new_id = i\n break\n subcomm = communication_system.push_with_ids(all_new_comms[my_new_id].tolist())\n \n if comm.comm.rank == 0:\n my_q = TaskQueueRoot(tasks, comm, njobs)\n my_q.comm.probe_loop(1, my_q.handle_assignment)\n else:\n my_q = TaskQueueNonRoot(None, comm, subcomm)\n if storage is None:\n for task in my_q:\n yield task\n else:\n for task in my_q:\n rstore = ResultsStorage()\n yield 
rstore, task\n my_q.send_result(rstore.result)\n\n if storage is not None:\n if broadcast:\n my_results = my_q.comm.comm.bcast(my_q.results, root=0)\n else:\n my_results = my_q.results\n storage.update(my_results)\n\n communication_system.pop()\n", "path": "yt/utilities/parallel_tools/task_queue.py"}], "after_files": [{"content": "\"\"\"\nTask queue in yt\n\n\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport numpy as np\n\nfrom yt.funcs import mylog\nfrom .parallel_analysis_interface import \\\n communication_system, \\\n _get_comm, \\\n parallel_capable, \\\n ResultsStorage\n\nmessages = dict(\n task = dict(msg = 'next'),\n result = dict(msg = 'result'),\n task_req = dict(msg = 'task_req'),\n end = dict(msg = 'no_more_tasks'),\n)\n\nclass TaskQueueNonRoot(object):\n def __init__(self, tasks, comm, subcomm):\n self.tasks = tasks\n self.results = {}\n self.comm = comm\n self.subcomm = subcomm\n\n def send_result(self, result):\n new_msg = messages['result'].copy()\n new_msg['value'] = result\n if self.subcomm.rank == 0:\n self.comm.comm.send(new_msg, dest = 0, tag=1)\n self.subcomm.barrier()\n\n def __next__(self):\n msg = messages['task_req'].copy()\n if self.subcomm.rank == 0:\n self.comm.comm.send(msg, dest = 0, tag=1)\n msg = self.comm.comm.recv(source = 0, tag=2)\n msg = self.subcomm.bcast(msg, root=0)\n if msg['msg'] == messages['end']['msg']:\n mylog.debug(\"Notified to end\")\n raise StopIteration\n return msg['value']\n\n # For Python 2 compatibility\n next = __next__\n\n def __iter__(self):\n return self\n\n def run(self, callable):\n for task in self:\n self.send_result(callable(task))\n return self.finalize()\n\n def finalize(self, vals = None):\n return self.comm.comm.bcast(vals, root = 0)\n\nclass TaskQueueRoot(TaskQueueNonRoot):\n def __init__(self, tasks, comm, njobs):\n self.njobs = njobs\n self.tasks = tasks\n self.results = {}\n self.assignments = {}\n self._notified = 0\n self._current = 0\n self._remaining = len(self.tasks)\n self.comm = comm\n # Set up threading here\n # self.dist = threading.Thread(target=self.handle_assignments)\n # self.dist.daemon = True\n # self.dist.start()\n\n def run(self, func = None):\n self.comm.probe_loop(1, self.handle_assignment)\n return self.finalize(self.results)\n\n def insert_result(self, source_id, result):\n task_id = self.assignments[source_id]\n self.results[task_id] = result\n\n def assign_task(self, source_id):\n if self._remaining == 0:\n mylog.debug(\"Notifying %s to end\", source_id)\n msg = messages['end'].copy()\n self._notified += 1\n else:\n msg = messages['task'].copy()\n task_id = self._current\n task = self.tasks[task_id]\n self.assignments[source_id] = task_id\n self._current += 1\n self._remaining -= 1\n msg['value'] = task\n self.comm.comm.send(msg, dest = source_id, tag = 2)\n\n def handle_assignment(self, status):\n msg = self.comm.comm.recv(source = status.source, tag = 1)\n if msg['msg'] == messages['result']['msg']:\n self.insert_result(status.source, msg['value'])\n elif msg['msg'] == messages['task_req']['msg']:\n self.assign_task(status.source)\n else:\n mylog.error(\"GOT AN UNKNOWN MESSAGE: %s\", msg)\n raise RuntimeError\n if self._notified >= self.njobs:\n raise StopIteration\n\ndef 
task_queue(func, tasks, njobs=0):\n comm = _get_comm(())\n if not parallel_capable:\n mylog.error(\"Cannot create task queue for serial process.\")\n raise RuntimeError\n my_size = comm.comm.size\n if njobs <= 0:\n njobs = my_size - 1\n if njobs >= my_size:\n mylog.error(\"You have asked for %s jobs, but only %s processors are available.\",\n njobs, (my_size - 1))\n raise RuntimeError\n my_rank = comm.rank\n all_new_comms = np.array_split(np.arange(1, my_size), njobs)\n all_new_comms.insert(0, np.array([0]))\n for i,comm_set in enumerate(all_new_comms):\n if my_rank in comm_set:\n my_new_id = i\n break\n subcomm = communication_system.push_with_ids(all_new_comms[my_new_id].tolist())\n \n if comm.comm.rank == 0:\n my_q = TaskQueueRoot(tasks, comm, njobs)\n else:\n my_q = TaskQueueNonRoot(None, comm, subcomm)\n communication_system.pop()\n return my_q.run(func)\n\ndef dynamic_parallel_objects(tasks, njobs=0, storage=None, broadcast=True):\n comm = _get_comm(())\n if not parallel_capable:\n mylog.error(\"Cannot create task queue for serial process.\")\n raise RuntimeError\n my_size = comm.comm.size\n if njobs <= 0:\n njobs = my_size - 1\n if njobs >= my_size:\n mylog.error(\"You have asked for %s jobs, but only %s processors are available.\",\n njobs, (my_size - 1))\n raise RuntimeError\n my_rank = comm.rank\n all_new_comms = np.array_split(np.arange(1, my_size), njobs)\n all_new_comms.insert(0, np.array([0]))\n for i,comm_set in enumerate(all_new_comms):\n if my_rank in comm_set:\n my_new_id = i\n break\n subcomm = communication_system.push_with_ids(all_new_comms[my_new_id].tolist())\n \n if comm.comm.rank == 0:\n my_q = TaskQueueRoot(tasks, comm, njobs)\n my_q.comm.probe_loop(1, my_q.handle_assignment)\n else:\n my_q = TaskQueueNonRoot(None, comm, subcomm)\n if storage is None:\n for task in my_q:\n yield task\n else:\n for task in my_q:\n rstore = ResultsStorage()\n yield rstore, task\n my_q.send_result(rstore.result)\n\n if storage is not None:\n if broadcast:\n my_results = my_q.comm.comm.bcast(my_q.results, root=0)\n else:\n my_results = my_q.results\n storage.update(my_results)\n\n communication_system.pop()\n", "path": "yt/utilities/parallel_tools/task_queue.py"}]} | 2,554 | 220 |
gh_patches_debug_7312 | rasdani/github-patches | git_diff | spack__spack-41502 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Installation issue: tcl with <= gcc@11
### Steps to reproduce
I started from the most up-to-date version of Spack as of 05-12-2023 and hit a glitch when trying to compile `tcl`:
spack install -v [email protected]
It appears to be a result of this edit, [40946](https://github.com/spack/spack/pull/40946), which added:
filter_compiler_wrappers("tclConfig.sh", relative_root="lib")
I think the error is related to the `filter_compiler_wrappers` function trying to replace lines containing `gcc` and `g++`; however, the file in question, `tclConfig.sh`, only has one line containing `gcc` and none containing `g++`:
# C compiler to use for compilation.
TCL_CC='/mnt/sda/cadtools/spack_git/lib/spack/env/gcc/gcc'
My base system, Ubuntu 22.04, has `[email protected]` as its default. I have stripped this back to a clean checkout of Spack and gone straight into a compile of `tcl`.
If I compile with `[email protected]` instead, I do not get the error; the installation is successful. Also, if I comment out that one line from the `tcl/package.py` file, the installation is successful using `[email protected]`.
### Error message
```
==> [2023-12-05-10:43:44.657378] Find complete: /mnt/sda/cadtools/spack_install/opt/linux-ubuntu22.04-skylake/gcc-11.4.0/tcl/8.6.11-yvwju3kstvxvughqov6ommkuxr7mzrpm/lib ('tclConfig.sh',)
==> [2023-12-05-10:43:44.658905] FILTER FILE: /mnt/sda/cadtools/spack_install/opt/linux-ubuntu22.04-skylake/gcc-11.4.0/tcl/8.6.11-yvwju3kstvxvughqov6ommkuxr7mzrpm/lib/tclConfig.sh [replacing "/mnt/sda/cadtools/spack_git/lib/spack/env/gcc/gcc"]
==> [2023-12-05-10:43:44.659794] FILTER FILE: /mnt/sda/cadtools/spack_install/opt/linux-ubuntu22.04-skylake/gcc-11.4.0/tcl/8.6.11-yvwju3kstvxvughqov6ommkuxr7mzrpm/lib/tclConfig.sh [replacing "/mnt/sda/cadtools/spack_git/lib/spack/env/gcc/g\+\+"]
==> Error: AttributeError: 'NoneType' object has no attribute 'replace'
```
### Information on your system
* **Spack:** 0.22.0.dev0 (798770f9e54bfd3cb1a52af4a9bd2937f826018e)
* **Python:** 3.11.6
* **Platform:** linux-ubuntu22.04-skylake
* **Concretizer:** clingo
### General information
- [X] I have run `spack debug report` and reported the version of Spack/Python/Platform
- [X] I have searched the issues of this repo and believe this is not a duplicate
- [X] I have run the failing commands in debug mode and reported the output
--- END ISSUE ---
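Not part of the original report: the `'NoneType' object has no attribute 'replace'` traceback is the signature of a filter-file-style replacement being handed `None` as the replacement string, which is what happens when a compiler entry has no Fortran compiler configured (so `f77`/`fc` are unset). The standalone sketch below only illustrates that mechanism and the guard the fix adds; the paths, wrapper names, and helper are made up and are not Spack's real API.

```python
import re


def filter_string(text, match, repl):
    # Simplified stand-in for a filter_file(..., string=True) call:
    # replace every literal occurrence of `match` with `repl`.
    return re.sub(re.escape(match), repl.replace("\\", r"\\"), text)


# Hypothetical compiler entry: C and C++ configured, no Fortran compilers.
compilers = {"cc": "/usr/bin/gcc", "cxx": "/usr/bin/g++", "f77": None, "fc": None}
wrappers = {"cc": "/spack/env/gcc/gcc", "cxx": "/spack/env/gcc/g++",
            "f77": "/spack/env/gcc/gfortran", "fc": "/spack/env/gcc/gfortran"}

config = "TCL_CC='/spack/env/gcc/gcc'"
for name, real_compiler in compilers.items():
    if real_compiler is None:
        continue  # the guard the fix introduces: skip compilers that are not set
    config = filter_string(config, wrappers[name], real_compiler)

print(config)  # TCL_CC='/usr/bin/gcc'

# Without the guard, filter_string(config, wrappers["fc"], None) raises
# AttributeError: 'NoneType' object has no attribute 'replace' -- the error above.
```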
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/spack/spack/mixins.py`
Content:
```
1 # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 """This module contains additional behavior that can be attached to any given
7 package.
8 """
9 import os
10
11 import llnl.util.filesystem
12
13 import spack.builder
14
15
16 def filter_compiler_wrappers(*files, **kwargs):
17 """Substitutes any path referring to a Spack compiler wrapper with the
18 path of the underlying compiler that has been used.
19
20 If this isn't done, the files will have CC, CXX, F77, and FC set to
21 Spack's generic cc, c++, f77, and f90. We want them to be bound to
22 whatever compiler they were built with.
23
24 Args:
25 *files: files to be filtered relative to the search root (which is,
26 by default, the installation prefix)
27
28 **kwargs: allowed keyword arguments
29
30 after
31 specifies after which phase the files should be
32 filtered (defaults to 'install')
33
34 relative_root
35 path relative to prefix where to start searching for
36 the files to be filtered. If not set the install prefix
37 wil be used as the search root. **It is highly recommended
38 to set this, as searching from the installation prefix may
39 affect performance severely in some cases**.
40
41 ignore_absent, backup
42 these two keyword arguments, if present, will be forwarded
43 to ``filter_file`` (see its documentation for more information
44 on their behavior)
45
46 recursive
47 this keyword argument, if present, will be forwarded to
48 ``find`` (see its documentation for more information on the
49 behavior)
50 """
51 after = kwargs.get("after", "install")
52 relative_root = kwargs.get("relative_root", None)
53
54 filter_kwargs = {
55 "ignore_absent": kwargs.get("ignore_absent", True),
56 "backup": kwargs.get("backup", False),
57 "string": True,
58 }
59
60 find_kwargs = {"recursive": kwargs.get("recursive", False)}
61
62 def _filter_compiler_wrappers_impl(pkg_or_builder):
63 pkg = getattr(pkg_or_builder, "pkg", pkg_or_builder)
64 # Compute the absolute path of the search root
65 root = os.path.join(pkg.prefix, relative_root) if relative_root else pkg.prefix
66
67 # Compute the absolute path of the files to be filtered and
68 # remove links from the list.
69 abs_files = llnl.util.filesystem.find(root, files, **find_kwargs)
70 abs_files = [x for x in abs_files if not os.path.islink(x)]
71
72 x = llnl.util.filesystem.FileFilter(*abs_files)
73
74 compiler_vars = [
75 ("CC", pkg.compiler.cc),
76 ("CXX", pkg.compiler.cxx),
77 ("F77", pkg.compiler.f77),
78 ("FC", pkg.compiler.fc),
79 ]
80
81 # Some paths to the compiler wrappers might be substrings of the others.
82 # For example:
83 # CC=/path/to/spack/lib/spack/env/cc (realpath to the wrapper)
84 # FC=/path/to/spack/lib/spack/env/cce/ftn
85 # Therefore, we perform the filtering in the reversed sorted order of
86 # the substituted strings. If, however, the strings are identical (e.g.
87 # both CC and FC are set using realpath), the filtering is done
88 # according to the order in compiler_vars. To achieve that, we populate
89 # the following array with tuples of three elements: path to the
90 # wrapper, negated index of the variable in compiler_vars, path to the
91 # real compiler. This way, the reversed sorted order of the resulting
92 # array is the order of replacements that we need.
93 replacements = []
94
95 for idx, (env_var, compiler_path) in enumerate(compiler_vars):
96 if env_var in os.environ:
97 # filter spack wrapper and links to spack wrapper in case
98 # build system runs realpath
99 wrapper = os.environ[env_var]
100 for wrapper_path in (wrapper, os.path.realpath(wrapper)):
101 replacements.append((wrapper_path, -idx, compiler_path))
102
103 for wrapper_path, _, compiler_path in sorted(replacements, reverse=True):
104 x.filter(wrapper_path, compiler_path, **filter_kwargs)
105
106 # Remove this linking flag if present (it turns RPATH into RUNPATH)
107 x.filter("{0}--enable-new-dtags".format(pkg.compiler.linker_arg), "", **filter_kwargs)
108
109 # NAG compiler is usually mixed with GCC, which has a different
110 # prefix for linker arguments.
111 if pkg.compiler.name == "nag":
112 x.filter("-Wl,--enable-new-dtags", "", **filter_kwargs)
113
114 spack.builder.run_after(after)(_filter_compiler_wrappers_impl)
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/spack/spack/mixins.py b/lib/spack/spack/mixins.py
--- a/lib/spack/spack/mixins.py
+++ b/lib/spack/spack/mixins.py
@@ -93,7 +93,7 @@
replacements = []
for idx, (env_var, compiler_path) in enumerate(compiler_vars):
- if env_var in os.environ:
+ if env_var in os.environ and compiler_path is not None:
# filter spack wrapper and links to spack wrapper in case
# build system runs realpath
wrapper = os.environ[env_var]
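The added `compiler_path is not None` guard matters because a configured compiler need not define all four languages: `pkg.compiler.fc` or `pkg.compiler.f77` can be `None`, and passing `None` as the replacement into `x.filter()` is consistent with the reported `'NoneType' object has no attribute 'replace'` failure. A schematic illustration with invented values:

```python
# Illustrative only; the paths are made up and this is not Spack code.
import os

compiler_vars = [("CC", "/usr/bin/gcc-11"), ("FC", None)]  # toolchain without a Fortran compiler
for env_var, compiler_path in compiler_vars:
    if env_var in os.environ and compiler_path is not None:  # the guard added by the patch
        pass  # only substitute wrappers whose real compiler is actually known
```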
| {"golden_diff": "diff --git a/lib/spack/spack/mixins.py b/lib/spack/spack/mixins.py\n--- a/lib/spack/spack/mixins.py\n+++ b/lib/spack/spack/mixins.py\n@@ -93,7 +93,7 @@\n replacements = []\n \n for idx, (env_var, compiler_path) in enumerate(compiler_vars):\n- if env_var in os.environ:\n+ if env_var in os.environ and compiler_path is not None:\n # filter spack wrapper and links to spack wrapper in case\n # build system runs realpath\n wrapper = os.environ[env_var]\n", "issue": "Installation issue: tcl with <= gcc@11\n### Steps to reproduce\r\n\r\nI started the most up to date version of spack, 05-12-2023, and had a glitch when trying to compile `tcl`:\r\n\r\n spack install -v [email protected]\r\n \r\nit appears to be as a result of the addition of this edit [40946](https://github.com/spack/spack/pull/40946)\r\n\r\n filter_compiler_wrappers(\"tclConfig.sh\", relative_root=\"lib\")\r\n \r\nI think the error is related to the `filter_compile_wrappers function` trying to replace a line containing `gcc` and `g++`, however the file in question, `tclConfig.sh` only has one line containing `gcc`, no `g++`:\r\n\r\n # C compiler to use for compilation.\r\n TCL_CC='/mnt/sda/cadtools/spack_git/lib/spack/env/gcc/gcc'\r\n\r\n\r\nMy base system, ubuntu22.04, has `[email protected]` as its default. I have stripped this back to a clean checkout of spack and go straight into a compile of `tcl`.\r\n\r\nIf I compile and instead use `[email protected].` it do not get the error, the installation is successful. Also if I comment out that one line from the `tcl/package.py` file, the installation is successful using `[email protected]`.\r\n\r\n### Error message\r\n\r\n```\r\n==> [2023-12-05-10:43:44.657378] Find complete: /mnt/sda/cadtools/spack_install/opt/linux-ubuntu22.04-skylake/gcc-11.4.0/tcl/8.6.11-yvwju3kstvxvughqov6ommkuxr7mzrpm/lib ('tclConfig.sh',)\r\n==> [2023-12-05-10:43:44.658905] FILTER FILE: /mnt/sda/cadtools/spack_install/opt/linux-ubuntu22.04-skylake/gcc-11.4.0/tcl/8.6.11-yvwju3kstvxvughqov6ommkuxr7mzrpm/lib/tclConfig.sh [replacing \"/mnt/sda/cadtools/spack_git/lib/spack/env/gcc/gcc\"]\r\n==> [2023-12-05-10:43:44.659794] FILTER FILE: /mnt/sda/cadtools/spack_install/opt/linux-ubuntu22.04-skylake/gcc-11.4.0/tcl/8.6.11-yvwju3kstvxvughqov6ommkuxr7mzrpm/lib/tclConfig.sh [replacing \"/mnt/sda/cadtools/spack_git/lib/spack/env/gcc/g\\+\\+\"]\r\n==> Error: AttributeError: 'NoneType' object has no attribute 'replace'\r\n```\r\n\r\n### Information on your system\r\n\r\n* **Spack:** 0.22.0.dev0 (798770f9e54bfd3cb1a52af4a9bd2937f826018e)\r\n* **Python:** 3.11.6\r\n* **Platform:** linux-ubuntu22.04-skylake\r\n* **Concretizer:** clingo\r\n\r\n\r\n### General information\r\n\r\n- [X] I have run `spack debug report` and reported the version of Spack/Python/Platform\r\n- [X] I have searched the issues of this repo and believe this is not a duplicate\r\n- [X] I have run the failing commands in debug mode and reported the output\n", "before_files": [{"content": "# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\n\"\"\"This module contains additional behavior that can be attached to any given\npackage.\n\"\"\"\nimport os\n\nimport llnl.util.filesystem\n\nimport spack.builder\n\n\ndef filter_compiler_wrappers(*files, **kwargs):\n \"\"\"Substitutes any path referring to a Spack compiler wrapper with the\n path of the underlying compiler that has been used.\n\n If this isn't done, the files will have CC, CXX, F77, and FC set to\n Spack's generic cc, c++, f77, and f90. We want them to be bound to\n whatever compiler they were built with.\n\n Args:\n *files: files to be filtered relative to the search root (which is,\n by default, the installation prefix)\n\n **kwargs: allowed keyword arguments\n\n after\n specifies after which phase the files should be\n filtered (defaults to 'install')\n\n relative_root\n path relative to prefix where to start searching for\n the files to be filtered. If not set the install prefix\n wil be used as the search root. **It is highly recommended\n to set this, as searching from the installation prefix may\n affect performance severely in some cases**.\n\n ignore_absent, backup\n these two keyword arguments, if present, will be forwarded\n to ``filter_file`` (see its documentation for more information\n on their behavior)\n\n recursive\n this keyword argument, if present, will be forwarded to\n ``find`` (see its documentation for more information on the\n behavior)\n \"\"\"\n after = kwargs.get(\"after\", \"install\")\n relative_root = kwargs.get(\"relative_root\", None)\n\n filter_kwargs = {\n \"ignore_absent\": kwargs.get(\"ignore_absent\", True),\n \"backup\": kwargs.get(\"backup\", False),\n \"string\": True,\n }\n\n find_kwargs = {\"recursive\": kwargs.get(\"recursive\", False)}\n\n def _filter_compiler_wrappers_impl(pkg_or_builder):\n pkg = getattr(pkg_or_builder, \"pkg\", pkg_or_builder)\n # Compute the absolute path of the search root\n root = os.path.join(pkg.prefix, relative_root) if relative_root else pkg.prefix\n\n # Compute the absolute path of the files to be filtered and\n # remove links from the list.\n abs_files = llnl.util.filesystem.find(root, files, **find_kwargs)\n abs_files = [x for x in abs_files if not os.path.islink(x)]\n\n x = llnl.util.filesystem.FileFilter(*abs_files)\n\n compiler_vars = [\n (\"CC\", pkg.compiler.cc),\n (\"CXX\", pkg.compiler.cxx),\n (\"F77\", pkg.compiler.f77),\n (\"FC\", pkg.compiler.fc),\n ]\n\n # Some paths to the compiler wrappers might be substrings of the others.\n # For example:\n # CC=/path/to/spack/lib/spack/env/cc (realpath to the wrapper)\n # FC=/path/to/spack/lib/spack/env/cce/ftn\n # Therefore, we perform the filtering in the reversed sorted order of\n # the substituted strings. If, however, the strings are identical (e.g.\n # both CC and FC are set using realpath), the filtering is done\n # according to the order in compiler_vars. To achieve that, we populate\n # the following array with tuples of three elements: path to the\n # wrapper, negated index of the variable in compiler_vars, path to the\n # real compiler. 
This way, the reversed sorted order of the resulting\n # array is the order of replacements that we need.\n replacements = []\n\n for idx, (env_var, compiler_path) in enumerate(compiler_vars):\n if env_var in os.environ:\n # filter spack wrapper and links to spack wrapper in case\n # build system runs realpath\n wrapper = os.environ[env_var]\n for wrapper_path in (wrapper, os.path.realpath(wrapper)):\n replacements.append((wrapper_path, -idx, compiler_path))\n\n for wrapper_path, _, compiler_path in sorted(replacements, reverse=True):\n x.filter(wrapper_path, compiler_path, **filter_kwargs)\n\n # Remove this linking flag if present (it turns RPATH into RUNPATH)\n x.filter(\"{0}--enable-new-dtags\".format(pkg.compiler.linker_arg), \"\", **filter_kwargs)\n\n # NAG compiler is usually mixed with GCC, which has a different\n # prefix for linker arguments.\n if pkg.compiler.name == \"nag\":\n x.filter(\"-Wl,--enable-new-dtags\", \"\", **filter_kwargs)\n\n spack.builder.run_after(after)(_filter_compiler_wrappers_impl)\n", "path": "lib/spack/spack/mixins.py"}], "after_files": [{"content": "# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\n\"\"\"This module contains additional behavior that can be attached to any given\npackage.\n\"\"\"\nimport os\n\nimport llnl.util.filesystem\n\nimport spack.builder\n\n\ndef filter_compiler_wrappers(*files, **kwargs):\n \"\"\"Substitutes any path referring to a Spack compiler wrapper with the\n path of the underlying compiler that has been used.\n\n If this isn't done, the files will have CC, CXX, F77, and FC set to\n Spack's generic cc, c++, f77, and f90. We want them to be bound to\n whatever compiler they were built with.\n\n Args:\n *files: files to be filtered relative to the search root (which is,\n by default, the installation prefix)\n\n **kwargs: allowed keyword arguments\n\n after\n specifies after which phase the files should be\n filtered (defaults to 'install')\n\n relative_root\n path relative to prefix where to start searching for\n the files to be filtered. If not set the install prefix\n wil be used as the search root. 
**It is highly recommended\n to set this, as searching from the installation prefix may\n affect performance severely in some cases**.\n\n ignore_absent, backup\n these two keyword arguments, if present, will be forwarded\n to ``filter_file`` (see its documentation for more information\n on their behavior)\n\n recursive\n this keyword argument, if present, will be forwarded to\n ``find`` (see its documentation for more information on the\n behavior)\n \"\"\"\n after = kwargs.get(\"after\", \"install\")\n relative_root = kwargs.get(\"relative_root\", None)\n\n filter_kwargs = {\n \"ignore_absent\": kwargs.get(\"ignore_absent\", True),\n \"backup\": kwargs.get(\"backup\", False),\n \"string\": True,\n }\n\n find_kwargs = {\"recursive\": kwargs.get(\"recursive\", False)}\n\n def _filter_compiler_wrappers_impl(pkg_or_builder):\n pkg = getattr(pkg_or_builder, \"pkg\", pkg_or_builder)\n # Compute the absolute path of the search root\n root = os.path.join(pkg.prefix, relative_root) if relative_root else pkg.prefix\n\n # Compute the absolute path of the files to be filtered and\n # remove links from the list.\n abs_files = llnl.util.filesystem.find(root, files, **find_kwargs)\n abs_files = [x for x in abs_files if not os.path.islink(x)]\n\n x = llnl.util.filesystem.FileFilter(*abs_files)\n\n compiler_vars = [\n (\"CC\", pkg.compiler.cc),\n (\"CXX\", pkg.compiler.cxx),\n (\"F77\", pkg.compiler.f77),\n (\"FC\", pkg.compiler.fc),\n ]\n\n # Some paths to the compiler wrappers might be substrings of the others.\n # For example:\n # CC=/path/to/spack/lib/spack/env/cc (realpath to the wrapper)\n # FC=/path/to/spack/lib/spack/env/cce/ftn\n # Therefore, we perform the filtering in the reversed sorted order of\n # the substituted strings. If, however, the strings are identical (e.g.\n # both CC and FC are set using realpath), the filtering is done\n # according to the order in compiler_vars. To achieve that, we populate\n # the following array with tuples of three elements: path to the\n # wrapper, negated index of the variable in compiler_vars, path to the\n # real compiler. This way, the reversed sorted order of the resulting\n # array is the order of replacements that we need.\n replacements = []\n\n for idx, (env_var, compiler_path) in enumerate(compiler_vars):\n if env_var in os.environ and compiler_path is not None:\n # filter spack wrapper and links to spack wrapper in case\n # build system runs realpath\n wrapper = os.environ[env_var]\n for wrapper_path in (wrapper, os.path.realpath(wrapper)):\n replacements.append((wrapper_path, -idx, compiler_path))\n\n for wrapper_path, _, compiler_path in sorted(replacements, reverse=True):\n x.filter(wrapper_path, compiler_path, **filter_kwargs)\n\n # Remove this linking flag if present (it turns RPATH into RUNPATH)\n x.filter(\"{0}--enable-new-dtags\".format(pkg.compiler.linker_arg), \"\", **filter_kwargs)\n\n # NAG compiler is usually mixed with GCC, which has a different\n # prefix for linker arguments.\n if pkg.compiler.name == \"nag\":\n x.filter(\"-Wl,--enable-new-dtags\", \"\", **filter_kwargs)\n\n spack.builder.run_after(after)(_filter_compiler_wrappers_impl)\n", "path": "lib/spack/spack/mixins.py"}]} | 2,404 | 137 |
gh_patches_debug_6727 | rasdani/github-patches | git_diff | saulpw__visidata-2018 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Parquet Large String columns are typed vlen by default
**Small description**
I have a parquet file which contains string columns:
[australian_public_holidays_2014_2020.zip](https://github.com/saulpw/visidata/files/12389440/australian_public_holidays_2014_2020.zip)
**Expected result**
In DuckDB, it looks like this:
```
D select * from 'australian_public_holidays_2014_2020.parquet';
┌────────────┬──────────────────────┬──────────────────────┬────────────────────────────────────────────────────┬──────────────┐
│ Date │ Holiday_Name │ Information │ More_Information │ Jurisdiction │
│ date │ varchar │ varchar │ varchar │ varchar │
├────────────┼──────────────────────┼──────────────────────┼────────────────────────────────────────────────────┼──────────────┤
│ 2014-01-01 │ New Year's Day │ New Year's Day is . │ │ act │
│ 2014-01-27 │ Australia Day │ Always celebrated . │ │ act │
│ 2014-03-10 │ Canberra Day │ Held on the second. │ http://www.cmd.act.gov.au/communication/holidays │ act │
```
**Actual result with screenshot**
In Visidata with pyarrow installed, it looks like this:
```
Date Holiday_Name Information More_Information Jurisdiction
2014-01-01 14 87 3
2014-01-27 13 31 3
2014-03-10 12 148 48 3
```
Note that the numbers are the _exact_ length of the missing strings!
**Steps to reproduce with sample data and a .vd**
```
vd australian_public_holidays_2014_2020.parquet
```
(Skipping the command-log as simply opening the file reproduces the issue)
**Additional context**
```
❯ vd --version
saul.pw/VisiData v2.11.1
```
Python 3.10.12
--- END ISSUE ---
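As a quick sanity check on why exact string lengths show up, one can inspect the parquet schema directly (a minimal sketch, assuming pyarrow is installed and the sample file has been extracted next to the script):

```python
import pyarrow.parquet as pq

# Hypothetical check, not part of the original report:
schema = pq.read_schema("australian_public_holidays_2014_2020.parquet")
for field in schema:
    print(field.name, field.type)  # per the report's title, the text columns come back as large_string
```

Arrow columns typed `large_string` are mapped to `vlen` in the loader's type map below, so VisiData displays only the length of each value, which matches the numbers in the actual-result table above.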
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `visidata/loaders/arrow.py`
Content:
```
1 from collections import defaultdict
2
3 from visidata import Sheet, VisiData, TypedWrapper, anytype, date, vlen, Column, vd
4
5
6
7 @VisiData.api
8 def open_arrow(vd, p):
9 'Apache Arrow IPC file format'
10 return ArrowSheet(p.name, source=p)
11
12
13 @VisiData.api
14 def open_arrows(vd, p):
15 'Apache Arrow IPC streaming format'
16 return ArrowSheet(p.name, source=p)
17
18
19 def arrow_to_vdtype(t):
20 pa = vd.importExternal('pyarrow')
21
22 arrow_to_vd_typemap = {
23 pa.lib.Type_BOOL: bool,
24 pa.lib.Type_UINT8: int,
25 pa.lib.Type_UINT16: int,
26 pa.lib.Type_UINT32: int,
27 pa.lib.Type_UINT64: int,
28 pa.lib.Type_INT8: int,
29 pa.lib.Type_INT16: int,
30 pa.lib.Type_INT32: int,
31 pa.lib.Type_INT64: int,
32 pa.lib.Type_HALF_FLOAT: float,
33 pa.lib.Type_FLOAT: float,
34 pa.lib.Type_DOUBLE: float,
35 # pa.lib.Type_DECIMAL128: Decimal128Scalar,
36 # pa.lib.Type_DECIMAL256: Decimal256Scalar,
37 pa.lib.Type_DATE32: date,
38 pa.lib.Type_DATE64: date,
39 pa.lib.Type_TIME32: date,
40 pa.lib.Type_TIME64: date,
41 pa.lib.Type_TIMESTAMP: date,
42 pa.lib.Type_DURATION: int,
43 pa.lib.Type_BINARY: bytes,
44 pa.lib.Type_LARGE_BINARY: vlen,
45 # pa.lib.Type_FIXED_SIZE_BINARY: bytes,
46 # pa.lib.Type_STRING: str,
47 pa.lib.Type_LARGE_STRING: vlen,
48 # pa.lib.Type_LIST: list,
49 # pa.lib.Type_LARGE_LIST: list,
50 # pa.lib.Type_FIXED_SIZE_LIST: list,
51 # pa.lib.Type_STRUCT: dict,
52 # pa.lib.Type_MAP: dict,
53 # pa.lib.Type_DICTIONARY: dict,
54 # pa.lib.Type_SPARSE_UNION: UnionScalar,
55 # pa.lib.Type_DENSE_UNION: UnionScalar,
56 }
57 return arrow_to_vd_typemap.get(t.id, anytype)
58
59 class ArrowSheet(Sheet):
60 def iterload(self):
61 pa = vd.importExternal('pyarrow')
62
63 try:
64 with pa.OSFile(str(self.source), 'rb') as fp:
65 self.coldata = pa.ipc.open_file(fp).read_all()
66 except pa.lib.ArrowInvalid as e:
67 with pa.OSFile(str(self.source), 'rb') as fp:
68 self.coldata = pa.ipc.open_stream(fp).read_all()
69
70 self.columns = []
71 for colnum, col in enumerate(self.coldata):
72 coltype = arrow_to_vdtype(self.coldata.schema.types[colnum])
73 colname = self.coldata.schema.names[colnum]
74
75 self.addColumn(Column(colname, type=coltype, expr=colnum,
76 getter=lambda c,r: c.sheet.coldata[c.expr][r[0]].as_py()))
77
78 for rownum in range(max(len(c) for c in self.coldata)):
79 yield [rownum]
80
81
82 @VisiData.api
83 def save_arrow(vd, p, sheet, streaming=False):
84 pa = vd.importExternal('pyarrow')
85 np = vd.importExternal('numpy')
86
87 typemap = {
88 anytype: pa.string(),
89 int: pa.int64(),
90 vlen: pa.int64(),
91 float: pa.float64(),
92 str: pa.string(),
93 date: pa.date64(),
94 }
95
96 for t in vd.numericTypes:
97 if t not in typemap:
98 typemap[t] = pa.float64()
99
100 databycol = defaultdict(list) # col -> [values]
101
102 for typedvals in sheet.iterdispvals(format=False):
103 for col, val in typedvals.items():
104 if isinstance(val, TypedWrapper):
105 val = None
106
107 databycol[col].append(val)
108
109 data = [pa.array(vals, type=typemap.get(col.type, pa.string())) for col, vals in databycol.items()]
110
111 schema = pa.schema([
112 (c.name, typemap.get(c.type, pa.string()))
113 for c in sheet.visibleCols
114 ])
115 with p.open_bytes(mode='w') as outf:
116 if streaming:
117 with pa.ipc.new_stream(outf, schema) as writer:
118 writer.write_batch(pa.record_batch(data, names=[c.name for c in sheet.visibleCols]))
119 else:
120 with pa.ipc.new_file(outf, schema) as writer:
121 writer.write_batch(pa.record_batch(data, names=[c.name for c in sheet.visibleCols]))
122
123
124 @VisiData.api
125 def save_arrows(vd, p, sheet):
126 return vd.save_arrow(p, sheet, streaming=True)
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/visidata/loaders/arrow.py b/visidata/loaders/arrow.py
--- a/visidata/loaders/arrow.py
+++ b/visidata/loaders/arrow.py
@@ -44,7 +44,7 @@
pa.lib.Type_LARGE_BINARY: vlen,
# pa.lib.Type_FIXED_SIZE_BINARY: bytes,
# pa.lib.Type_STRING: str,
- pa.lib.Type_LARGE_STRING: vlen,
+# pa.lib.Type_LARGE_STRING: vlen, #2003
# pa.lib.Type_LIST: list,
# pa.lib.Type_LARGE_LIST: list,
# pa.lib.Type_FIXED_SIZE_LIST: list,
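With the large-string entry commented out, that Arrow type id no longer hits the map, so `arrow_to_vdtype()` falls back to the default of its `.get()` call and such columns become `anytype`, meaning the string values themselves are displayed instead of their lengths. A minimal self-contained analogue of the lookup:

```python
# Toy analogue with simplified names, not actual loader code:
typemap = {"large_binary": "vlen"}             # the "large_string" entry has been removed
print(typemap.get("large_string", "anytype"))  # lookup misses, falls back to anytype
```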
| {"golden_diff": "diff --git a/visidata/loaders/arrow.py b/visidata/loaders/arrow.py\n--- a/visidata/loaders/arrow.py\n+++ b/visidata/loaders/arrow.py\n@@ -44,7 +44,7 @@\n pa.lib.Type_LARGE_BINARY: vlen,\n # pa.lib.Type_FIXED_SIZE_BINARY: bytes,\n # pa.lib.Type_STRING: str,\n- pa.lib.Type_LARGE_STRING: vlen,\n+# pa.lib.Type_LARGE_STRING: vlen, #2003\n # pa.lib.Type_LIST: list,\n # pa.lib.Type_LARGE_LIST: list,\n # pa.lib.Type_FIXED_SIZE_LIST: list,\n", "issue": "Parquet Large String columns are typed vlen by default\n**Small description**\r\n\r\nI have a parquet file which contains string columns:\r\n[australian_public_holidays_2014_2020.zip](https://github.com/saulpw/visidata/files/12389440/australian_public_holidays_2014_2020.zip)\r\n\r\n\r\n**Expected result**\r\n\r\nIn DuckDB, it looks like this:\r\n\r\n\r\n```\r\nD select * from 'australian_public_holidays_2014_2020.parquet';\r\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\r\n\u2502 Date \u2502 Holiday_Name \u2502 Information \u2502 More_Information \u2502 Jurisdiction \u2502\r\n\u2502 date \u2502 varchar \u2502 varchar \u2502 varchar \u2502 varchar \u2502\r\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\r\n\u2502 2014-01-01 \u2502 New Year's Day \u2502 New Year's Day is . \u2502 \u2502 act \u2502\r\n\u2502 2014-01-27 \u2502 Australia Day \u2502 Always celebrated . \u2502 \u2502 act \u2502\r\n\u2502 2014-03-10 \u2502 Canberra Day \u2502 Held on the second. 
\u2502 http://www.cmd.act.gov.au/communication/holidays \u2502 act \u2502\r\n```\r\n\r\n**Actual result with screenshot**\r\n\r\nIn Visidata with pyarrow installed, it looks like this:\r\n\r\n```\r\nDate Holiday_Name Information More_Information Jurisdiction \r\n2014-01-01 14 87 3 \r\n2014-01-27 13 31 3 \r\n2014-03-10 12 148 48 3 \r\n```\r\n\r\nNote that the numbers are the _exact_ length of the missing strings!\r\n\r\n**Steps to reproduce with sample data and a .vd**\r\n\r\n```\r\nvd australian_public_holidays_2014_2020.parquet\r\n```\r\n\r\n(Skipping the command-log as simply opening the file reproduces the issue)\r\n\r\n**Additional context**\r\n\r\n```\r\n\u276f vd --version\r\nsaul.pw/VisiData v2.11.1\r\n```\r\n\r\nPython 3.10.12\r\n\n", "before_files": [{"content": "from collections import defaultdict\n\nfrom visidata import Sheet, VisiData, TypedWrapper, anytype, date, vlen, Column, vd\n\n\n\[email protected]\ndef open_arrow(vd, p):\n 'Apache Arrow IPC file format'\n return ArrowSheet(p.name, source=p)\n\n\[email protected]\ndef open_arrows(vd, p):\n 'Apache Arrow IPC streaming format'\n return ArrowSheet(p.name, source=p)\n\n\ndef arrow_to_vdtype(t):\n pa = vd.importExternal('pyarrow')\n\n arrow_to_vd_typemap = {\n pa.lib.Type_BOOL: bool,\n pa.lib.Type_UINT8: int,\n pa.lib.Type_UINT16: int,\n pa.lib.Type_UINT32: int,\n pa.lib.Type_UINT64: int,\n pa.lib.Type_INT8: int,\n pa.lib.Type_INT16: int,\n pa.lib.Type_INT32: int,\n pa.lib.Type_INT64: int,\n pa.lib.Type_HALF_FLOAT: float,\n pa.lib.Type_FLOAT: float,\n pa.lib.Type_DOUBLE: float,\n# pa.lib.Type_DECIMAL128: Decimal128Scalar,\n# pa.lib.Type_DECIMAL256: Decimal256Scalar,\n pa.lib.Type_DATE32: date,\n pa.lib.Type_DATE64: date,\n pa.lib.Type_TIME32: date,\n pa.lib.Type_TIME64: date,\n pa.lib.Type_TIMESTAMP: date,\n pa.lib.Type_DURATION: int,\n pa.lib.Type_BINARY: bytes,\n pa.lib.Type_LARGE_BINARY: vlen,\n# pa.lib.Type_FIXED_SIZE_BINARY: bytes,\n# pa.lib.Type_STRING: str,\n pa.lib.Type_LARGE_STRING: vlen,\n# pa.lib.Type_LIST: list,\n# pa.lib.Type_LARGE_LIST: list,\n# pa.lib.Type_FIXED_SIZE_LIST: list,\n# pa.lib.Type_STRUCT: dict,\n# pa.lib.Type_MAP: dict,\n# pa.lib.Type_DICTIONARY: dict,\n# pa.lib.Type_SPARSE_UNION: UnionScalar,\n# pa.lib.Type_DENSE_UNION: UnionScalar,\n }\n return arrow_to_vd_typemap.get(t.id, anytype)\n\nclass ArrowSheet(Sheet):\n def iterload(self):\n pa = vd.importExternal('pyarrow')\n\n try:\n with pa.OSFile(str(self.source), 'rb') as fp:\n self.coldata = pa.ipc.open_file(fp).read_all()\n except pa.lib.ArrowInvalid as e:\n with pa.OSFile(str(self.source), 'rb') as fp:\n self.coldata = pa.ipc.open_stream(fp).read_all()\n\n self.columns = []\n for colnum, col in enumerate(self.coldata):\n coltype = arrow_to_vdtype(self.coldata.schema.types[colnum])\n colname = self.coldata.schema.names[colnum]\n\n self.addColumn(Column(colname, type=coltype, expr=colnum,\n getter=lambda c,r: c.sheet.coldata[c.expr][r[0]].as_py()))\n\n for rownum in range(max(len(c) for c in self.coldata)):\n yield [rownum]\n\n\[email protected]\ndef save_arrow(vd, p, sheet, streaming=False):\n pa = vd.importExternal('pyarrow')\n np = vd.importExternal('numpy')\n\n typemap = {\n anytype: pa.string(),\n int: pa.int64(),\n vlen: pa.int64(),\n float: pa.float64(),\n str: pa.string(),\n date: pa.date64(),\n }\n\n for t in vd.numericTypes:\n if t not in typemap:\n typemap[t] = pa.float64()\n\n databycol = defaultdict(list) # col -> [values]\n\n for typedvals in sheet.iterdispvals(format=False):\n for col, val in typedvals.items():\n if isinstance(val, 
TypedWrapper):\n val = None\n\n databycol[col].append(val)\n\n data = [pa.array(vals, type=typemap.get(col.type, pa.string())) for col, vals in databycol.items()]\n\n schema = pa.schema([\n (c.name, typemap.get(c.type, pa.string()))\n for c in sheet.visibleCols\n ])\n with p.open_bytes(mode='w') as outf:\n if streaming:\n with pa.ipc.new_stream(outf, schema) as writer:\n writer.write_batch(pa.record_batch(data, names=[c.name for c in sheet.visibleCols]))\n else:\n with pa.ipc.new_file(outf, schema) as writer:\n writer.write_batch(pa.record_batch(data, names=[c.name for c in sheet.visibleCols]))\n\n\[email protected]\ndef save_arrows(vd, p, sheet):\n return vd.save_arrow(p, sheet, streaming=True)\n", "path": "visidata/loaders/arrow.py"}], "after_files": [{"content": "from collections import defaultdict\n\nfrom visidata import Sheet, VisiData, TypedWrapper, anytype, date, vlen, Column, vd\n\n\n\[email protected]\ndef open_arrow(vd, p):\n 'Apache Arrow IPC file format'\n return ArrowSheet(p.name, source=p)\n\n\[email protected]\ndef open_arrows(vd, p):\n 'Apache Arrow IPC streaming format'\n return ArrowSheet(p.name, source=p)\n\n\ndef arrow_to_vdtype(t):\n pa = vd.importExternal('pyarrow')\n\n arrow_to_vd_typemap = {\n pa.lib.Type_BOOL: bool,\n pa.lib.Type_UINT8: int,\n pa.lib.Type_UINT16: int,\n pa.lib.Type_UINT32: int,\n pa.lib.Type_UINT64: int,\n pa.lib.Type_INT8: int,\n pa.lib.Type_INT16: int,\n pa.lib.Type_INT32: int,\n pa.lib.Type_INT64: int,\n pa.lib.Type_HALF_FLOAT: float,\n pa.lib.Type_FLOAT: float,\n pa.lib.Type_DOUBLE: float,\n# pa.lib.Type_DECIMAL128: Decimal128Scalar,\n# pa.lib.Type_DECIMAL256: Decimal256Scalar,\n pa.lib.Type_DATE32: date,\n pa.lib.Type_DATE64: date,\n pa.lib.Type_TIME32: date,\n pa.lib.Type_TIME64: date,\n pa.lib.Type_TIMESTAMP: date,\n pa.lib.Type_DURATION: int,\n pa.lib.Type_BINARY: bytes,\n pa.lib.Type_LARGE_BINARY: vlen,\n# pa.lib.Type_FIXED_SIZE_BINARY: bytes,\n# pa.lib.Type_STRING: str,\n# pa.lib.Type_LARGE_STRING: vlen, #2003\n# pa.lib.Type_LIST: list,\n# pa.lib.Type_LARGE_LIST: list,\n# pa.lib.Type_FIXED_SIZE_LIST: list,\n# pa.lib.Type_STRUCT: dict,\n# pa.lib.Type_MAP: dict,\n# pa.lib.Type_DICTIONARY: dict,\n# pa.lib.Type_SPARSE_UNION: UnionScalar,\n# pa.lib.Type_DENSE_UNION: UnionScalar,\n }\n return arrow_to_vd_typemap.get(t.id, anytype)\n\nclass ArrowSheet(Sheet):\n def iterload(self):\n pa = vd.importExternal('pyarrow')\n\n try:\n with pa.OSFile(str(self.source), 'rb') as fp:\n self.coldata = pa.ipc.open_file(fp).read_all()\n except pa.lib.ArrowInvalid as e:\n with pa.OSFile(str(self.source), 'rb') as fp:\n self.coldata = pa.ipc.open_stream(fp).read_all()\n\n self.columns = []\n for colnum, col in enumerate(self.coldata):\n coltype = arrow_to_vdtype(self.coldata.schema.types[colnum])\n colname = self.coldata.schema.names[colnum]\n\n self.addColumn(Column(colname, type=coltype, expr=colnum,\n getter=lambda c,r: c.sheet.coldata[c.expr][r[0]].as_py()))\n\n for rownum in range(max(len(c) for c in self.coldata)):\n yield [rownum]\n\n\[email protected]\ndef save_arrow(vd, p, sheet, streaming=False):\n pa = vd.importExternal('pyarrow')\n np = vd.importExternal('numpy')\n\n typemap = {\n anytype: pa.string(),\n int: pa.int64(),\n vlen: pa.int64(),\n float: pa.float64(),\n str: pa.string(),\n date: pa.date64(),\n }\n\n for t in vd.numericTypes:\n if t not in typemap:\n typemap[t] = pa.float64()\n\n databycol = defaultdict(list) # col -> [values]\n\n for typedvals in sheet.iterdispvals(format=False):\n for col, val in typedvals.items():\n if isinstance(val, 
TypedWrapper):\n val = None\n\n databycol[col].append(val)\n\n data = [pa.array(vals, type=typemap.get(col.type, pa.string())) for col, vals in databycol.items()]\n\n schema = pa.schema([\n (c.name, typemap.get(c.type, pa.string()))\n for c in sheet.visibleCols\n ])\n with p.open_bytes(mode='w') as outf:\n if streaming:\n with pa.ipc.new_stream(outf, schema) as writer:\n writer.write_batch(pa.record_batch(data, names=[c.name for c in sheet.visibleCols]))\n else:\n with pa.ipc.new_file(outf, schema) as writer:\n writer.write_batch(pa.record_batch(data, names=[c.name for c in sheet.visibleCols]))\n\n\[email protected]\ndef save_arrows(vd, p, sheet):\n return vd.save_arrow(p, sheet, streaming=True)\n", "path": "visidata/loaders/arrow.py"}]} | 2,135 | 145 |
gh_patches_debug_2849 | rasdani/github-patches | git_diff | ipython__ipython-9202 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IPythonLexer test failure with pygments 2.1
Jenkins is showing us a test failure on Windows:
https://jenkins.jupyter.org/job/windows-multi/_pyversion=3.5,label=windows/504/testReport/
It looks like `$HOME` is unexpectedly being split into two tokens. I suspect it's failing since pygments 2.1 was released. I see the same locally on Linux, but it's not failing on Travis (maybe we're not installing pygments so those tests are skipped?).
@abalkin and @chebee7i have done the most significant work on the lexer machinery. Any ideas?
--- END ISSUE ---
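One way to check whether a given environment will even run the lexer tests is to see if pygments is importable there (a sketch, not project code):

```python
# Hypothetical environment check, not taken from IPython's test suite:
try:
    import pygments
    print("pygments", pygments.__version__)  # the report suspects a tokenization change in 2.1
except ImportError:
    # per the report's hypothesis, the lexer tests are skipped when pygments is absent
    print("pygments not installed, so the IPythonLexer tests would be skipped")
```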
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """Setup script for IPython.
4
5 Under Posix environments it works like a typical setup.py script.
6 Under Windows, the command sdist is not supported, since IPython
7 requires utilities which are not available under Windows."""
8
9 #-----------------------------------------------------------------------------
10 # Copyright (c) 2008-2011, IPython Development Team.
11 # Copyright (c) 2001-2007, Fernando Perez <[email protected]>
12 # Copyright (c) 2001, Janko Hauser <[email protected]>
13 # Copyright (c) 2001, Nathaniel Gray <[email protected]>
14 #
15 # Distributed under the terms of the Modified BSD License.
16 #
17 # The full license is in the file COPYING.rst, distributed with this software.
18 #-----------------------------------------------------------------------------
19
20 #-----------------------------------------------------------------------------
21 # Minimal Python version sanity check
22 #-----------------------------------------------------------------------------
23 from __future__ import print_function
24
25 import sys
26
27 # This check is also made in IPython/__init__, don't forget to update both when
28 # changing Python version requirements.
29 v = sys.version_info
30 if v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):
31 error = "ERROR: IPython requires Python version 2.7 or 3.3 or above."
32 print(error, file=sys.stderr)
33 sys.exit(1)
34
35 PY3 = (sys.version_info[0] >= 3)
36
37 # At least we're on the python version we need, move on.
38
39 #-------------------------------------------------------------------------------
40 # Imports
41 #-------------------------------------------------------------------------------
42
43 # Stdlib imports
44 import os
45
46 from glob import glob
47
48 # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
49 # update it when the contents of directories change.
50 if os.path.exists('MANIFEST'): os.remove('MANIFEST')
51
52 from distutils.core import setup
53
54 # Our own imports
55 from setupbase import target_update
56
57 from setupbase import (
58 setup_args,
59 find_packages,
60 find_package_data,
61 check_package_data_first,
62 find_entry_points,
63 build_scripts_entrypt,
64 find_data_files,
65 git_prebuild,
66 install_symlinked,
67 install_lib_symlink,
68 install_scripts_for_symlink,
69 unsymlink,
70 )
71
72 isfile = os.path.isfile
73 pjoin = os.path.join
74
75 #-------------------------------------------------------------------------------
76 # Handle OS specific things
77 #-------------------------------------------------------------------------------
78
79 if os.name in ('nt','dos'):
80 os_name = 'windows'
81 else:
82 os_name = os.name
83
84 # Under Windows, 'sdist' has not been supported. Now that the docs build with
85 # Sphinx it might work, but let's not turn it on until someone confirms that it
86 # actually works.
87 if os_name == 'windows' and 'sdist' in sys.argv:
88 print('The sdist command is not available under Windows. Exiting.')
89 sys.exit(1)
90
91
92 #-------------------------------------------------------------------------------
93 # Things related to the IPython documentation
94 #-------------------------------------------------------------------------------
95
96 # update the manuals when building a source dist
97 if len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):
98
99 # List of things to be updated. Each entry is a triplet of args for
100 # target_update()
101 to_update = [
102 ('docs/man/ipython.1.gz',
103 ['docs/man/ipython.1'],
104 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz'),
105 ]
106
107
108 [ target_update(*t) for t in to_update ]
109
110 #---------------------------------------------------------------------------
111 # Find all the packages, package data, and data_files
112 #---------------------------------------------------------------------------
113
114 packages = find_packages()
115 package_data = find_package_data()
116
117 data_files = find_data_files()
118
119 setup_args['packages'] = packages
120 setup_args['package_data'] = package_data
121 setup_args['data_files'] = data_files
122
123 #---------------------------------------------------------------------------
124 # custom distutils commands
125 #---------------------------------------------------------------------------
126 # imports here, so they are after setuptools import if there was one
127 from distutils.command.sdist import sdist
128 from distutils.command.upload import upload
129
130 class UploadWindowsInstallers(upload):
131
132 description = "Upload Windows installers to PyPI (only used from tools/release_windows.py)"
133 user_options = upload.user_options + [
134 ('files=', 'f', 'exe file (or glob) to upload')
135 ]
136 def initialize_options(self):
137 upload.initialize_options(self)
138 meta = self.distribution.metadata
139 base = '{name}-{version}'.format(
140 name=meta.get_name(),
141 version=meta.get_version()
142 )
143 self.files = os.path.join('dist', '%s.*.exe' % base)
144
145 def run(self):
146 for dist_file in glob(self.files):
147 self.upload_file('bdist_wininst', 'any', dist_file)
148
149 setup_args['cmdclass'] = {
150 'build_py': \
151 check_package_data_first(git_prebuild('IPython')),
152 'sdist' : git_prebuild('IPython', sdist),
153 'upload_wininst' : UploadWindowsInstallers,
154 'symlink': install_symlinked,
155 'install_lib_symlink': install_lib_symlink,
156 'install_scripts_sym': install_scripts_for_symlink,
157 'unsymlink': unsymlink,
158 }
159
160
161 #---------------------------------------------------------------------------
162 # Handle scripts, dependencies, and setuptools specific things
163 #---------------------------------------------------------------------------
164
165 # For some commands, use setuptools. Note that we do NOT list install here!
166 # If you want a setuptools-enhanced install, just run 'setupegg.py install'
167 needs_setuptools = set(('develop', 'release', 'bdist_egg', 'bdist_rpm',
168 'bdist', 'bdist_dumb', 'bdist_wininst', 'bdist_wheel',
169 'egg_info', 'easy_install', 'upload', 'install_egg_info',
170 ))
171
172 if len(needs_setuptools.intersection(sys.argv)) > 0:
173 import setuptools
174
175 # This dict is used for passing extra arguments that are setuptools
176 # specific to setup
177 setuptools_extra_args = {}
178
179 # setuptools requirements
180
181 extras_require = dict(
182 parallel = ['ipyparallel'],
183 qtconsole = ['qtconsole'],
184 doc = ['Sphinx>=1.3'],
185 test = ['nose>=0.10.1', 'requests', 'testpath'],
186 terminal = [],
187 kernel = ['ipykernel'],
188 nbformat = ['nbformat'],
189 notebook = ['notebook', 'ipywidgets'],
190 nbconvert = ['nbconvert'],
191 )
192 install_requires = [
193 'setuptools>=18.5',
194 'decorator',
195 'pickleshare',
196 'simplegeneric>0.8',
197 'traitlets',
198 ]
199
200 # Platform-specific dependencies:
201 # This is the correct way to specify these,
202 # but requires pip >= 6. pip < 6 ignores these.
203
204 extras_require.update({
205 ':sys_platform != "win32"': ['pexpect'],
206 ':sys_platform == "darwin"': ['appnope'],
207 ':sys_platform == "darwin" and platform_python_implementation == "CPython"': ['gnureadline'],
208 'terminal:sys_platform == "win32"': ['pyreadline>=2'],
209 'test:python_version == "2.7"': ['mock'],
210 })
211 # FIXME: re-specify above platform dependencies for pip < 6
212 # These would result in non-portable bdists.
213 if not any(arg.startswith('bdist') for arg in sys.argv):
214 if sys.version_info < (3, 3):
215 extras_require['test'].append('mock')
216
217 if sys.platform == 'darwin':
218 install_requires.extend(['appnope'])
219 have_readline = False
220 try:
221 import readline
222 except ImportError:
223 pass
224 else:
225 if 'libedit' not in readline.__doc__:
226 have_readline = True
227 if not have_readline:
228 install_requires.extend(['gnureadline'])
229
230 if sys.platform.startswith('win'):
231 extras_require['terminal'].append('pyreadline>=2.0')
232 else:
233 install_requires.append('pexpect')
234
235 # workaround pypa/setuptools#147, where setuptools misspells
236 # platform_python_implementation as python_implementation
237 if 'setuptools' in sys.modules:
238 for key in list(extras_require):
239 if 'platform_python_implementation' in key:
240 new_key = key.replace('platform_python_implementation', 'python_implementation')
241 extras_require[new_key] = extras_require.pop(key)
242
243 everything = set()
244 for key, deps in extras_require.items():
245 if ':' not in key:
246 everything.update(deps)
247 extras_require['all'] = everything
248
249 if 'setuptools' in sys.modules:
250 setuptools_extra_args['zip_safe'] = False
251 setuptools_extra_args['entry_points'] = {
252 'console_scripts': find_entry_points(),
253 'pygments.lexers': [
254 'ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer',
255 'ipython = IPython.lib.lexers:IPythonLexer',
256 'ipython3 = IPython.lib.lexers:IPython3Lexer',
257 ],
258 }
259 setup_args['extras_require'] = extras_require
260 requires = setup_args['install_requires'] = install_requires
261
262 # Script to be run by the windows binary installer after the default setup
263 # routine, to add shortcuts and similar windows-only things. Windows
264 # post-install scripts MUST reside in the scripts/ dir, otherwise distutils
265 # doesn't find them.
266 if 'bdist_wininst' in sys.argv:
267 if len(sys.argv) > 2 and \
268 ('sdist' in sys.argv or 'bdist_rpm' in sys.argv):
269 print("ERROR: bdist_wininst must be run alone. Exiting.", file=sys.stderr)
270 sys.exit(1)
271 setup_args['data_files'].append(
272 ['Scripts', ('scripts/ipython.ico', 'scripts/ipython_nb.ico')])
273 setup_args['scripts'] = [pjoin('scripts','ipython_win_post_install.py')]
274 setup_args['options'] = {"bdist_wininst":
275 {"install_script":
276 "ipython_win_post_install.py"}}
277
278 else:
279 # scripts has to be a non-empty list, or install_scripts isn't called
280 setup_args['scripts'] = [e.split('=')[0].strip() for e in find_entry_points()]
281
282 setup_args['cmdclass']['build_scripts'] = build_scripts_entrypt
283
284 #---------------------------------------------------------------------------
285 # Do the actual setup now
286 #---------------------------------------------------------------------------
287
288 setup_args.update(setuptools_extra_args)
289
290
291
292 def main():
293 setup(**setup_args)
294
295 if __name__ == '__main__':
296 main()
297
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -182,7 +182,7 @@
parallel = ['ipyparallel'],
qtconsole = ['qtconsole'],
doc = ['Sphinx>=1.3'],
- test = ['nose>=0.10.1', 'requests', 'testpath'],
+ test = ['nose>=0.10.1', 'requests', 'testpath', 'pygments'],
terminal = [],
kernel = ['ipykernel'],
nbformat = ['nbformat'],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -182,7 +182,7 @@\n parallel = ['ipyparallel'],\n qtconsole = ['qtconsole'],\n doc = ['Sphinx>=1.3'],\n- test = ['nose>=0.10.1', 'requests', 'testpath'],\n+ test = ['nose>=0.10.1', 'requests', 'testpath', 'pygments'],\n terminal = [],\n kernel = ['ipykernel'],\n nbformat = ['nbformat'],\n", "issue": "IPythonLexer test failure with pygments 2.1\nJenkins is showing us a test failure on Windows:\n\nhttps://jenkins.jupyter.org/job/windows-multi/_pyversion=3.5,label=windows/504/testReport/\n\nIt looks like `$HOME` is unexpectedly being split into two tokens. I suspect it's failing since pygments 2.1 was released. I see the same locally on Linux, but it's not failing on Travis (maybe we're not installing pygments so those tests are skipped?).\n\n@abalkin and @chebee7i have done the most signficant work on the lexer machinery. Any ideas?\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Setup script for IPython.\n\nUnder Posix environments it works like a typical setup.py script.\nUnder Windows, the command sdist is not supported, since IPython\nrequires utilities which are not available under Windows.\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2008-2011, IPython Development Team.\n# Copyright (c) 2001-2007, Fernando Perez <[email protected]>\n# Copyright (c) 2001, Janko Hauser <[email protected]>\n# Copyright (c) 2001, Nathaniel Gray <[email protected]>\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.rst, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\nimport sys\n\n# This check is also made in IPython/__init__, don't forget to update both when\n# changing Python version requirements.\nv = sys.version_info\nif v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):\n error = \"ERROR: IPython requires Python version 2.7 or 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\nPY3 = (sys.version_info[0] >= 3)\n\n# At least we're on the python version we need, move on.\n\n#-------------------------------------------------------------------------------\n# Imports\n#-------------------------------------------------------------------------------\n\n# Stdlib imports\nimport os\n\nfrom glob import glob\n\n# BEFORE importing distutils, remove MANIFEST. 
distutils doesn't properly\n# update it when the contents of directories change.\nif os.path.exists('MANIFEST'): os.remove('MANIFEST')\n\nfrom distutils.core import setup\n\n# Our own imports\nfrom setupbase import target_update\n\nfrom setupbase import (\n setup_args,\n find_packages,\n find_package_data,\n check_package_data_first,\n find_entry_points,\n build_scripts_entrypt,\n find_data_files,\n git_prebuild,\n install_symlinked,\n install_lib_symlink,\n install_scripts_for_symlink,\n unsymlink,\n)\n\nisfile = os.path.isfile\npjoin = os.path.join\n\n#-------------------------------------------------------------------------------\n# Handle OS specific things\n#-------------------------------------------------------------------------------\n\nif os.name in ('nt','dos'):\n os_name = 'windows'\nelse:\n os_name = os.name\n\n# Under Windows, 'sdist' has not been supported. Now that the docs build with\n# Sphinx it might work, but let's not turn it on until someone confirms that it\n# actually works.\nif os_name == 'windows' and 'sdist' in sys.argv:\n print('The sdist command is not available under Windows. Exiting.')\n sys.exit(1)\n\n\n#-------------------------------------------------------------------------------\n# Things related to the IPython documentation\n#-------------------------------------------------------------------------------\n\n# update the manuals when building a source dist\nif len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):\n\n # List of things to be updated. Each entry is a triplet of args for\n # target_update()\n to_update = [\n ('docs/man/ipython.1.gz',\n ['docs/man/ipython.1'],\n 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz'),\n ]\n\n\n [ target_update(*t) for t in to_update ]\n\n#---------------------------------------------------------------------------\n# Find all the packages, package data, and data_files\n#---------------------------------------------------------------------------\n\npackages = find_packages()\npackage_data = find_package_data()\n\ndata_files = find_data_files()\n\nsetup_args['packages'] = packages\nsetup_args['package_data'] = package_data\nsetup_args['data_files'] = data_files\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n# imports here, so they are after setuptools import if there was one\nfrom distutils.command.sdist import sdist\nfrom distutils.command.upload import upload\n\nclass UploadWindowsInstallers(upload):\n\n description = \"Upload Windows installers to PyPI (only used from tools/release_windows.py)\"\n user_options = upload.user_options + [\n ('files=', 'f', 'exe file (or glob) to upload')\n ]\n def initialize_options(self):\n upload.initialize_options(self)\n meta = self.distribution.metadata\n base = '{name}-{version}'.format(\n name=meta.get_name(),\n version=meta.get_version()\n )\n self.files = os.path.join('dist', '%s.*.exe' % base)\n\n def run(self):\n for dist_file in glob(self.files):\n self.upload_file('bdist_wininst', 'any', dist_file)\n\nsetup_args['cmdclass'] = {\n 'build_py': \\\n check_package_data_first(git_prebuild('IPython')),\n 'sdist' : git_prebuild('IPython', sdist),\n 'upload_wininst' : UploadWindowsInstallers,\n 'symlink': install_symlinked,\n 'install_lib_symlink': install_lib_symlink,\n 'install_scripts_sym': install_scripts_for_symlink,\n 'unsymlink': unsymlink,\n}\n\n\n#---------------------------------------------------------------------------\n# 
Handle scripts, dependencies, and setuptools specific things\n#---------------------------------------------------------------------------\n\n# For some commands, use setuptools. Note that we do NOT list install here!\n# If you want a setuptools-enhanced install, just run 'setupegg.py install'\nneeds_setuptools = set(('develop', 'release', 'bdist_egg', 'bdist_rpm',\n 'bdist', 'bdist_dumb', 'bdist_wininst', 'bdist_wheel',\n 'egg_info', 'easy_install', 'upload', 'install_egg_info',\n ))\n\nif len(needs_setuptools.intersection(sys.argv)) > 0:\n import setuptools\n\n# This dict is used for passing extra arguments that are setuptools\n# specific to setup\nsetuptools_extra_args = {}\n\n# setuptools requirements\n\nextras_require = dict(\n parallel = ['ipyparallel'],\n qtconsole = ['qtconsole'],\n doc = ['Sphinx>=1.3'],\n test = ['nose>=0.10.1', 'requests', 'testpath'],\n terminal = [],\n kernel = ['ipykernel'],\n nbformat = ['nbformat'],\n notebook = ['notebook', 'ipywidgets'],\n nbconvert = ['nbconvert'],\n)\ninstall_requires = [\n 'setuptools>=18.5',\n 'decorator',\n 'pickleshare',\n 'simplegeneric>0.8',\n 'traitlets',\n]\n\n# Platform-specific dependencies:\n# This is the correct way to specify these,\n# but requires pip >= 6. pip < 6 ignores these.\n\nextras_require.update({\n ':sys_platform != \"win32\"': ['pexpect'],\n ':sys_platform == \"darwin\"': ['appnope'],\n ':sys_platform == \"darwin\" and platform_python_implementation == \"CPython\"': ['gnureadline'],\n 'terminal:sys_platform == \"win32\"': ['pyreadline>=2'],\n 'test:python_version == \"2.7\"': ['mock'],\n})\n# FIXME: re-specify above platform dependencies for pip < 6\n# These would result in non-portable bdists.\nif not any(arg.startswith('bdist') for arg in sys.argv):\n if sys.version_info < (3, 3):\n extras_require['test'].append('mock')\n\n if sys.platform == 'darwin':\n install_requires.extend(['appnope'])\n have_readline = False\n try:\n import readline\n except ImportError:\n pass\n else:\n if 'libedit' not in readline.__doc__:\n have_readline = True\n if not have_readline:\n install_requires.extend(['gnureadline'])\n\n if sys.platform.startswith('win'):\n extras_require['terminal'].append('pyreadline>=2.0')\n else:\n install_requires.append('pexpect')\n \n # workaround pypa/setuptools#147, where setuptools misspells\n # platform_python_implementation as python_implementation\n if 'setuptools' in sys.modules:\n for key in list(extras_require):\n if 'platform_python_implementation' in key:\n new_key = key.replace('platform_python_implementation', 'python_implementation')\n extras_require[new_key] = extras_require.pop(key)\n\neverything = set()\nfor key, deps in extras_require.items():\n if ':' not in key:\n everything.update(deps)\nextras_require['all'] = everything\n\nif 'setuptools' in sys.modules:\n setuptools_extra_args['zip_safe'] = False\n setuptools_extra_args['entry_points'] = {\n 'console_scripts': find_entry_points(),\n 'pygments.lexers': [\n 'ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer',\n 'ipython = IPython.lib.lexers:IPythonLexer',\n 'ipython3 = IPython.lib.lexers:IPython3Lexer',\n ],\n }\n setup_args['extras_require'] = extras_require\n requires = setup_args['install_requires'] = install_requires\n\n # Script to be run by the windows binary installer after the default setup\n # routine, to add shortcuts and similar windows-only things. 
Windows\n # post-install scripts MUST reside in the scripts/ dir, otherwise distutils\n # doesn't find them.\n if 'bdist_wininst' in sys.argv:\n if len(sys.argv) > 2 and \\\n ('sdist' in sys.argv or 'bdist_rpm' in sys.argv):\n print(\"ERROR: bdist_wininst must be run alone. Exiting.\", file=sys.stderr)\n sys.exit(1)\n setup_args['data_files'].append(\n ['Scripts', ('scripts/ipython.ico', 'scripts/ipython_nb.ico')])\n setup_args['scripts'] = [pjoin('scripts','ipython_win_post_install.py')]\n setup_args['options'] = {\"bdist_wininst\":\n {\"install_script\":\n \"ipython_win_post_install.py\"}}\n\nelse:\n # scripts has to be a non-empty list, or install_scripts isn't called\n setup_args['scripts'] = [e.split('=')[0].strip() for e in find_entry_points()]\n\n setup_args['cmdclass']['build_scripts'] = build_scripts_entrypt\n\n#---------------------------------------------------------------------------\n# Do the actual setup now\n#---------------------------------------------------------------------------\n\nsetup_args.update(setuptools_extra_args)\n\n\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Setup script for IPython.\n\nUnder Posix environments it works like a typical setup.py script.\nUnder Windows, the command sdist is not supported, since IPython\nrequires utilities which are not available under Windows.\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2008-2011, IPython Development Team.\n# Copyright (c) 2001-2007, Fernando Perez <[email protected]>\n# Copyright (c) 2001, Janko Hauser <[email protected]>\n# Copyright (c) 2001, Nathaniel Gray <[email protected]>\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.rst, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\nimport sys\n\n# This check is also made in IPython/__init__, don't forget to update both when\n# changing Python version requirements.\nv = sys.version_info\nif v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):\n error = \"ERROR: IPython requires Python version 2.7 or 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\nPY3 = (sys.version_info[0] >= 3)\n\n# At least we're on the python version we need, move on.\n\n#-------------------------------------------------------------------------------\n# Imports\n#-------------------------------------------------------------------------------\n\n# Stdlib imports\nimport os\n\nfrom glob import glob\n\n# BEFORE importing distutils, remove MANIFEST. 
distutils doesn't properly\n# update it when the contents of directories change.\nif os.path.exists('MANIFEST'): os.remove('MANIFEST')\n\nfrom distutils.core import setup\n\n# Our own imports\nfrom setupbase import target_update\n\nfrom setupbase import (\n setup_args,\n find_packages,\n find_package_data,\n check_package_data_first,\n find_entry_points,\n build_scripts_entrypt,\n find_data_files,\n git_prebuild,\n install_symlinked,\n install_lib_symlink,\n install_scripts_for_symlink,\n unsymlink,\n)\n\nisfile = os.path.isfile\npjoin = os.path.join\n\n#-------------------------------------------------------------------------------\n# Handle OS specific things\n#-------------------------------------------------------------------------------\n\nif os.name in ('nt','dos'):\n os_name = 'windows'\nelse:\n os_name = os.name\n\n# Under Windows, 'sdist' has not been supported. Now that the docs build with\n# Sphinx it might work, but let's not turn it on until someone confirms that it\n# actually works.\nif os_name == 'windows' and 'sdist' in sys.argv:\n print('The sdist command is not available under Windows. Exiting.')\n sys.exit(1)\n\n\n#-------------------------------------------------------------------------------\n# Things related to the IPython documentation\n#-------------------------------------------------------------------------------\n\n# update the manuals when building a source dist\nif len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):\n\n # List of things to be updated. Each entry is a triplet of args for\n # target_update()\n to_update = [\n ('docs/man/ipython.1.gz',\n ['docs/man/ipython.1'],\n 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz'),\n ]\n\n\n [ target_update(*t) for t in to_update ]\n\n#---------------------------------------------------------------------------\n# Find all the packages, package data, and data_files\n#---------------------------------------------------------------------------\n\npackages = find_packages()\npackage_data = find_package_data()\n\ndata_files = find_data_files()\n\nsetup_args['packages'] = packages\nsetup_args['package_data'] = package_data\nsetup_args['data_files'] = data_files\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n# imports here, so they are after setuptools import if there was one\nfrom distutils.command.sdist import sdist\nfrom distutils.command.upload import upload\n\nclass UploadWindowsInstallers(upload):\n\n description = \"Upload Windows installers to PyPI (only used from tools/release_windows.py)\"\n user_options = upload.user_options + [\n ('files=', 'f', 'exe file (or glob) to upload')\n ]\n def initialize_options(self):\n upload.initialize_options(self)\n meta = self.distribution.metadata\n base = '{name}-{version}'.format(\n name=meta.get_name(),\n version=meta.get_version()\n )\n self.files = os.path.join('dist', '%s.*.exe' % base)\n\n def run(self):\n for dist_file in glob(self.files):\n self.upload_file('bdist_wininst', 'any', dist_file)\n\nsetup_args['cmdclass'] = {\n 'build_py': \\\n check_package_data_first(git_prebuild('IPython')),\n 'sdist' : git_prebuild('IPython', sdist),\n 'upload_wininst' : UploadWindowsInstallers,\n 'symlink': install_symlinked,\n 'install_lib_symlink': install_lib_symlink,\n 'install_scripts_sym': install_scripts_for_symlink,\n 'unsymlink': unsymlink,\n}\n\n\n#---------------------------------------------------------------------------\n# 
Handle scripts, dependencies, and setuptools specific things\n#---------------------------------------------------------------------------\n\n# For some commands, use setuptools. Note that we do NOT list install here!\n# If you want a setuptools-enhanced install, just run 'setupegg.py install'\nneeds_setuptools = set(('develop', 'release', 'bdist_egg', 'bdist_rpm',\n 'bdist', 'bdist_dumb', 'bdist_wininst', 'bdist_wheel',\n 'egg_info', 'easy_install', 'upload', 'install_egg_info',\n ))\n\nif len(needs_setuptools.intersection(sys.argv)) > 0:\n import setuptools\n\n# This dict is used for passing extra arguments that are setuptools\n# specific to setup\nsetuptools_extra_args = {}\n\n# setuptools requirements\n\nextras_require = dict(\n parallel = ['ipyparallel'],\n qtconsole = ['qtconsole'],\n doc = ['Sphinx>=1.3'],\n test = ['nose>=0.10.1', 'requests', 'testpath', 'pygments'],\n terminal = [],\n kernel = ['ipykernel'],\n nbformat = ['nbformat'],\n notebook = ['notebook', 'ipywidgets'],\n nbconvert = ['nbconvert'],\n)\ninstall_requires = [\n 'setuptools>=18.5',\n 'decorator',\n 'pickleshare',\n 'simplegeneric>0.8',\n 'traitlets',\n]\n\n# Platform-specific dependencies:\n# This is the correct way to specify these,\n# but requires pip >= 6. pip < 6 ignores these.\n\nextras_require.update({\n ':sys_platform != \"win32\"': ['pexpect'],\n ':sys_platform == \"darwin\"': ['appnope'],\n ':sys_platform == \"darwin\" and platform_python_implementation == \"CPython\"': ['gnureadline'],\n 'terminal:sys_platform == \"win32\"': ['pyreadline>=2'],\n 'test:python_version == \"2.7\"': ['mock'],\n})\n# FIXME: re-specify above platform dependencies for pip < 6\n# These would result in non-portable bdists.\nif not any(arg.startswith('bdist') for arg in sys.argv):\n if sys.version_info < (3, 3):\n extras_require['test'].append('mock')\n\n if sys.platform == 'darwin':\n install_requires.extend(['appnope'])\n have_readline = False\n try:\n import readline\n except ImportError:\n pass\n else:\n if 'libedit' not in readline.__doc__:\n have_readline = True\n if not have_readline:\n install_requires.extend(['gnureadline'])\n\n if sys.platform.startswith('win'):\n extras_require['terminal'].append('pyreadline>=2.0')\n else:\n install_requires.append('pexpect')\n \n # workaround pypa/setuptools#147, where setuptools misspells\n # platform_python_implementation as python_implementation\n if 'setuptools' in sys.modules:\n for key in list(extras_require):\n if 'platform_python_implementation' in key:\n new_key = key.replace('platform_python_implementation', 'python_implementation')\n extras_require[new_key] = extras_require.pop(key)\n\neverything = set()\nfor key, deps in extras_require.items():\n if ':' not in key:\n everything.update(deps)\nextras_require['all'] = everything\n\nif 'setuptools' in sys.modules:\n setuptools_extra_args['zip_safe'] = False\n setuptools_extra_args['entry_points'] = {\n 'console_scripts': find_entry_points(),\n 'pygments.lexers': [\n 'ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer',\n 'ipython = IPython.lib.lexers:IPythonLexer',\n 'ipython3 = IPython.lib.lexers:IPython3Lexer',\n ],\n }\n setup_args['extras_require'] = extras_require\n requires = setup_args['install_requires'] = install_requires\n\n # Script to be run by the windows binary installer after the default setup\n # routine, to add shortcuts and similar windows-only things. 
Windows\n # post-install scripts MUST reside in the scripts/ dir, otherwise distutils\n # doesn't find them.\n if 'bdist_wininst' in sys.argv:\n if len(sys.argv) > 2 and \\\n ('sdist' in sys.argv or 'bdist_rpm' in sys.argv):\n print(\"ERROR: bdist_wininst must be run alone. Exiting.\", file=sys.stderr)\n sys.exit(1)\n setup_args['data_files'].append(\n ['Scripts', ('scripts/ipython.ico', 'scripts/ipython_nb.ico')])\n setup_args['scripts'] = [pjoin('scripts','ipython_win_post_install.py')]\n setup_args['options'] = {\"bdist_wininst\":\n {\"install_script\":\n \"ipython_win_post_install.py\"}}\n\nelse:\n # scripts has to be a non-empty list, or install_scripts isn't called\n setup_args['scripts'] = [e.split('=')[0].strip() for e in find_entry_points()]\n\n setup_args['cmdclass']['build_scripts'] = build_scripts_entrypt\n\n#---------------------------------------------------------------------------\n# Do the actual setup now\n#---------------------------------------------------------------------------\n\nsetup_args.update(setuptools_extra_args)\n\n\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py"}]} | 3,507 | 129 |
gh_patches_debug_11472 | rasdani/github-patches | git_diff | google__trax-1138 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Optimizer tree_init returns a slots list, but tree_update returns a slots tuple
### Description
`tree_init` and `tree_update` are not consistent. One returns a list for the slots, the other a tuple.
It is a super minor detail but I was trying to conditionally run a `tree_update` with `jax.cond`, and this minor difference made that break, since the PyTreeDefs were different.
Casting the slots list comprehension to a tuple ([here](https://github.com/google/trax/blob/0ca17db895c7d9bb203e66e074f49e9481b87513/trax/optimizers/base.py#L119-L120)) solved this for me, but I'm not sure if you want to go with tuple or list, so I raise an issue instead of a PR.
### Environment information
```
OS: Ubuntu 18.04
$ pip freeze | grep trax
-e git+git@github.com:google/trax.git@0ca17db895c7d9bb203e66e074f49e9481b87513#egg=trax
(latest commit from Sep 30)
$ pip freeze | grep tensor
tensorflow==2.3.1
$ pip freeze | grep jax
jax==0.2.0
jaxlib @ https://storage.googleapis.com/jax-releases/cuda110/jaxlib-0.1.55-cp36-none-manylinux2010_x86_64.whl
$ python -V
Python 3.6.9
```
### For bugs: reproduction and error logs
You can add the following lines to `optimizers_test.py` and see the behavior.
```
# Steps to reproduce:
# Show that tree_update returns slots in a tuple not list
old_slots = opt_2.slots
grad_tree = np.zeros_like(weight_tree)
_, new_slots, _ = opt_2.tree_update(1, grad_tree, weight_tree, opt_2.slots, opt_2.opt_params)
self.assertIsInstance(old_slots, list) # PASS
self.assertIsInstance(opt_2.slots, list) # FAIL. it's a tuple
self.assertIsInstance(new_slots, list) # FAIL. it's a tuple
```
```
# Error logs:
TypeError: true_fun and false_fun output must have same type structure, got PyTreeDef(tuple, [PyTreeDef(dict[['dyn']], [PyTreeDef(dict[['ff']], [PyTreeDef(dict[['dense0', 'dense1', 'dense2', 'dense_last']], [PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*])])])]),PyTreeDef(tuple, [PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*])]),PyTreeDef(dict[['gradients_l2', 'weights_l2']], [*,*])]) and PyTreeDef(tuple, [PyTreeDef(dict[['dyn']], [PyTreeDef(dict[['ff']], [PyTreeDef(dict[['dense0', 'dense1', 'dense2', 'dense_last']], [PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*])])])]),PyTreeDef(list, [PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*])]),PyTreeDef(dict[['gradients_l2', 'weights_l2']], [*,*])]).
```
--- END ISSUE ---
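A minimal standalone sketch of the structure mismatch described above (toy functions, not trax code; only `jax` is required):

```python
# Sketch only: jax.lax.cond requires both branches to return pytrees with the
# *same* tree structure, and a list and a tuple of the same leaves do not match.
import jax
import jax.numpy as jnp

def slots_as_list(x):   # mimics tree_init, which builds a list
    return [x, x + 1]

def slots_as_tuple(x):  # mimics tree_update, which ends up with a tuple
    return (x, x + 1)

x = jnp.ones(3)
list_def = jax.tree_util.tree_structure(slots_as_list(x))
tuple_def = jax.tree_util.tree_structure(slots_as_tuple(x))
print(list_def == tuple_def)  # False -> branches with these outputs cannot be combined
# Using these two functions as the branches of jax.lax.cond raises:
#   TypeError: true_fun and false_fun output must have same type structure
```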
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `trax/optimizers/base.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2020 The Trax Authors.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 # Lint as: python3
17 """Trax base optimizer class."""
18
19 from trax import fastmath
20 from trax.fastmath import numpy as jnp
21
22
23 class Optimizer(object):
24 """Base class for optimizers that work hand in hand with Trax layers.
25
26 To define an optimizer subclass, specify its behavior with respect to a
27 single level/node in the network (e.g., a single dense layer):
28
29 - `init`: how to create/initialize optimizer-internal weights ("slots")
30 whose shape matches the node's weight shape.
31 - `update`: how to use gradient information to update node weights and
32 optimizer slots.
33
34 The Trax runtime combines these node-local computations into weight updates
35 and slot updates for the whole tree of layers in the model.
36 """
37
38 def __init__(self, learning_rate=0.01, clip_grad_norm=None,
39 **init_opt_params):
40 """Sets initial hyperparameter values for this optimizer.
41
42 Takes initial optimizer parameters as keyword arguments. These values can
43 be changed between training steps, e.g., for learning rate schedules.
44
45 If you want your subclass to expose hyperparameters for gin configuration,
46 override this constructor and use explicitly named keyword arguments. See
47 `momentum.Momentum.__init__` for one such example.
48
49 Args:
50 learning_rate: The initial learning rate.
51 clip_grad_norm: float; the value to which gradients will be clipped.
52 **init_opt_params: Initial values of any additional optimizer parameters.
53 """
54 init_opt_params['learning_rate'] = learning_rate
55 self._init_opt_params = {
56 name: jnp.array(value) for (name, value) in init_opt_params.items()
57 }
58 self._slots = None
59 # Gradient clipping happens with respect to the norm of the whole gradient
60 # tree, so it is not passed to single-slot updates, but done in this class
61 # for the whole gradient tree.
62 self._clip_grad_norm = clip_grad_norm
63
64 def init(self, weights):
65 """Creates optimizer slots for the given parameters.
66
67 Args:
68 weights: Trainable weights for one layer. Optimizer slots typically match
69 the data shape and type of the given layer weights.
70 """
71 raise NotImplementedError
72
73 def update(self, step, grads, weights, slots, opt_params):
74 """Computes one step's worth of updates.
75
76 The update computes both new weights for the layer/node and new slot values
77 for the optimizer.
78
79 Args:
80 step: Current step number in the training process.
81 grads: Gradients for the weights of the sublayer.
82 weights: Current weights for the sublayer.
83 slots: Optimizer slots.
84 opt_params: Optimizer hyperparameters (e.g. learning rate, momentum).
85
86 Returns:
87 Tuple of (new_weights, new_slots).
88 """
89 raise NotImplementedError
90
91 @property
92 def slots(self):
93 return self._slots
94
95 @slots.setter
96 def slots(self, slots):
97 self._slots = slots
98
99 @property
100 def opt_params(self):
101 return self._init_opt_params
102
103 @opt_params.setter
104 def opt_params(self, opt_params):
105 self._init_opt_params = opt_params
106
107 def tree_init(self, weight_tree):
108 """Assembles node-local initializations into full-tree initialization.
109
110 Args:
111 weight_tree: Weights for an entire model, in a tree that matches the
112 model's layer structure.
113
114 Returns:
115 Tuple `(slots, opt_params)`, where `slots` are the initialized optimizer
116 slot values and `opt_params` are optimizer hyperparameters (e.g.,
117 learning rate, momentum).
118 """
119 self._slots = [self.init(weight)
120 for weight in fastmath.tree_flatten(weight_tree)]
121 return (
122 self._slots,
123 self._init_opt_params,
124 )
125
126 def tree_update(self, step, grad_tree, weight_tree, slots, opt_params):
127 """Assembles node-local weight and slot updates for the full layer tree.
128
129 Args:
130 step: Current step number in the training process.
131 grad_tree: Gradients for the entire model, in a tree that matches the
132 model's layer structure.
133 weight_tree: Current weights for the entire model, in a tree that matches
134 the model's layer structure.
135 slots: Optimizer slots.
136 opt_params: Optimizer hyperparameters (e.g. learning rate, momentum).
137
138 Returns:
139 Tuple `(weights, slots)`, where `weights` are the optimizer-updated
140 weights for the whole model (in a tree matching the model's layer
141 structure) and `slots` are the updated optimizer slot values.
142 """
143 grads_flat = fastmath.tree_flatten(grad_tree)
144 grads_norm = self._l2_norm(grads_flat)
145 if self._clip_grad_norm is not None:
146 max_norm = self._clip_grad_norm
147 grads_flat = [jnp.where(grads_norm < max_norm, # pylint: disable=g-complex-comprehension
148 g,
149 g * (max_norm / grads_norm))
150 for g in grads_flat]
151 weights_flat = fastmath.tree_flatten(weight_tree)
152 weights_norm = self._l2_norm(weights_flat)
153 updated_pairs = [
154 self._update_and_check(step, grad, weight, slot, opt_params)
155 for (grad, weight, slot) in zip(grads_flat, weights_flat, slots)
156 ]
157 new_weights_flat, self.slots = zip(*updated_pairs)
158 new_weights, _ = fastmath.tree_unflatten(new_weights_flat, weight_tree)
159 metrics = {'gradients_l2': grads_norm, 'weights_l2': weights_norm}
160 return new_weights, self.slots, metrics
161
162 def _l2_norm(self, flat_list):
163 """Returns the aggregate L2 norm of a list of tensors."""
164 if fastmath.is_backend(fastmath.Backend.JAX):
165 norm = jnp.sqrt(sum(jnp.vdot(x, x) for x in flat_list))
166 else: # TODO(lukaszkaiser): add vdot to TF-numpy
167 norm = jnp.sqrt(sum(jnp.sum(x*x) for x in flat_list))
168 return norm
169
170 def _update_and_check(self, step, grads, weights, slots, opt_params):
171 """Updates a single weight array and checks types."""
172 new_weights, new_slots = self.update(
173 step, grads, weights, slots, opt_params)
174 if isinstance(weights, jnp.ndarray):
175 if not isinstance(new_weights, jnp.ndarray):
176 raise ValueError(
177 f'New weight values should be of type jnp.ndarray or a subclass; '
178 f'instead got {type(new_weights)}.')
179 if new_weights.dtype != weights.dtype:
180 raise ValueError(
181 f'New weight values dtype ({new_weights.dtype}) does not match '
182 f'the old one ({weights.dtype}).')
183 return new_weights, new_slots
184
185
186 class SGD(Optimizer):
187 """Stochastic gradient descent (SGD) optimizer.
188
189 A simple optimizer with no weights ("slots") of its own.
190 """
191
192 def init(self, weights):
193 return None
194
195 def update(self, step, grads, weights, slots, opt_params):
196 del step, slots
197 lr = opt_params['learning_rate']
198 new_weights = weights - (lr * grads).astype(weights.dtype)
199 return new_weights, None
200
201
202 # Utilities.
203
204
205 def l2_norm(tree):
206 """Compute the l2 norm of a pytree of arrays. Useful for weight decay."""
207 leaves = fastmath.tree_flatten(tree)
208 return jnp.sqrt(sum(jnp.vdot(x, x) for x in leaves))
209
210
211 def clip_grads(grad_tree, max_norm):
212 """Clip gradients stored as a pytree of arrays to maximum norm `max_norm`."""
213 norm = l2_norm(grad_tree)
214 normalize = lambda g: jnp.where(norm < max_norm, g, g * (max_norm / norm))
215 return fastmath.nested_map(grad_tree, normalize)
216
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/trax/optimizers/base.py b/trax/optimizers/base.py
--- a/trax/optimizers/base.py
+++ b/trax/optimizers/base.py
@@ -154,7 +154,7 @@
self._update_and_check(step, grad, weight, slot, opt_params)
for (grad, weight, slot) in zip(grads_flat, weights_flat, slots)
]
- new_weights_flat, self.slots = zip(*updated_pairs)
+ new_weights_flat, self.slots = map(list, zip(*updated_pairs))
new_weights, _ = fastmath.tree_unflatten(new_weights_flat, weight_tree)
metrics = {'gradients_l2': grads_norm, 'weights_l2': weights_norm}
return new_weights, self.slots, metrics
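A standalone illustration of why the patched line changes the slot container (plain Python with toy values, not trax objects):

```python
# zip(*pairs) always yields tuples, which is why the slots came back as a tuple
# before the patch; wrapping each result in list() restores the list form that
# tree_init produces.
updated_pairs = [("w0", "s0"), ("w1", "s1")]

weights, slots = zip(*updated_pairs)                        # ('w0', 'w1'), ('s0', 's1')
weights_list, slots_list = map(list, zip(*updated_pairs))   # ['w0', 'w1'], ['s0', 's1']

assert isinstance(slots, tuple)       # old behaviour
assert isinstance(slots_list, list)   # new behaviour, consistent with tree_init
```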
| {"golden_diff": "diff --git a/trax/optimizers/base.py b/trax/optimizers/base.py\n--- a/trax/optimizers/base.py\n+++ b/trax/optimizers/base.py\n@@ -154,7 +154,7 @@\n self._update_and_check(step, grad, weight, slot, opt_params)\n for (grad, weight, slot) in zip(grads_flat, weights_flat, slots)\n ]\n- new_weights_flat, self.slots = zip(*updated_pairs)\n+ new_weights_flat, self.slots = map(list, zip(*updated_pairs))\n new_weights, _ = fastmath.tree_unflatten(new_weights_flat, weight_tree)\n metrics = {'gradients_l2': grads_norm, 'weights_l2': weights_norm}\n return new_weights, self.slots, metrics\n", "issue": "Optimizer tree_init returns a slots list, but tree_update returns a slots tuple\n### Description\r\n\r\n`tree_init` and `tree_update` are not consistent. One returns a list for the slots, the other a tuple.\r\n\r\nIt is a super minor detail but I was trying to conditionally run a `tree_update` with `jax.cond`, and this minor difference made that break, since the PyTreeDefs were different.\r\n\r\nCasting the slots list comprehension to a tuple ([here](https://github.com/google/trax/blob/0ca17db895c7d9bb203e66e074f49e9481b87513/trax/optimizers/base.py#L119-L120)) solved this for me, but I'm not sure if you want to go with tuple or list so I raise an issue instead of PR.\r\n\r\n### Environment information\r\n```\r\nOS: Ubuntu 18.04\r\n\r\n$ pip freeze | grep trax\r\n-e [email protected]:google/trax.git@0ca17db895c7d9bb203e66e074f49e9481b87513#egg=trax\r\n(latest commit from Sep 30)\r\n\r\n$ pip freeze | grep tensor\r\ntensorflow==2.3.1\r\n\r\n$ pip freeze | grep jax\r\njax==0.2.0\r\njaxlib @ https://storage.googleapis.com/jax-releases/cuda110/jaxlib-0.1.55-cp36-none-manylinux2010_x86_64.whl\r\n\r\n$ python -V\r\nPython 3.6.9\r\n```\r\n\r\n### For bugs: reproduction and error logs\r\n\r\nYou can add the following lines to `optimizers_test.py` and see the behavior.\r\n\r\n```\r\n# Steps to reproduce:\r\n\r\n\r\n # Show that tree_update returns slots in a tuple not list\r\n old_slots = opt_2.slots\r\n grad_tree = np.zeros_like(weight_tree)\r\n _, new_slots, _ = opt_2.tree_update(1, grad_tree, weight_tree, opt_2.slots, opt_2.opt_params)\r\n self.assertIsInstance(old_slots, list) # PASS\r\n self.assertIsInstance(opt_2.slots, list) # FAIL. it's a tuple\r\n self.assertIsInstance(new_slots, list) # FAIL. 
it's a tuple\r\n\r\n```\r\n\r\n```\r\n# Error logs:\r\n\r\n\r\nTypeError: true_fun and false_fun output must have same type structure, got PyTreeDef(tuple, [PyTreeDef(dict[['dyn']], [PyTreeDef(dict[['ff']], [PyTreeDef(dict[['dense0', 'dense1', 'dense2', 'dense_last']], [PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*])])])]),PyTreeDef(tuple, [PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*])]),PyTreeDef(dict[['gradients_l2', 'weights_l2']], [*,*])]) and PyTreeDef(tuple, [PyTreeDef(dict[['dyn']], [PyTreeDef(dict[['ff']], [PyTreeDef(dict[['dense0', 'dense1', 'dense2', 'dense_last']], [PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*])])])]),PyTreeDef(list, [PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*]),PyTreeDef(tuple, [*,*])]),PyTreeDef(dict[['gradients_l2', 'weights_l2']], [*,*])]).\r\n\r\n```\r\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2020 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Trax base optimizer class.\"\"\"\n\nfrom trax import fastmath\nfrom trax.fastmath import numpy as jnp\n\n\nclass Optimizer(object):\n \"\"\"Base class for optimizers that work hand in hand with Trax layers.\n\n To define an optimizer subclass, specify its behavior with respect to a\n single level/node in the network (e.g., a single dense layer):\n\n - `init`: how to create/initialize optimizer-internal weights (\"slots\")\n whose shape matches the node's weight shape.\n - `update`: how to use gradient information to update node weights and\n optimizer slots.\n\n The Trax runtime combines these node-local computations into weight updates\n and slot updates for the whole tree of layers in the model.\n \"\"\"\n\n def __init__(self, learning_rate=0.01, clip_grad_norm=None,\n **init_opt_params):\n \"\"\"Sets initial hyperparameter values for this optimizer.\n\n Takes initial optimizer parameters as keyword arguments. These values can\n be changed between training steps, e.g., for learning rate schedules.\n\n If you want your subclass to expose hyperparameters for gin configuration,\n override this constructor and use explicitly named keyword arguments. 
See\n `momentum.Momentum.__init__` for one such example.\n\n Args:\n learning_rate: The initial learning rate.\n clip_grad_norm: float; the value to which gradients will be clipped.\n **init_opt_params: Initial values of any additional optimizer parameters.\n \"\"\"\n init_opt_params['learning_rate'] = learning_rate\n self._init_opt_params = {\n name: jnp.array(value) for (name, value) in init_opt_params.items()\n }\n self._slots = None\n # Gradient clipping happens with respect to the norm of the whole gradient\n # tree, so it is not passed to single-slot updates, but done in this class\n # for the whole gradient tree.\n self._clip_grad_norm = clip_grad_norm\n\n def init(self, weights):\n \"\"\"Creates optimizer slots for the given parameters.\n\n Args:\n weights: Trainable weights for one layer. Optimizer slots typically match\n the data shape and type of the given layer weights.\n \"\"\"\n raise NotImplementedError\n\n def update(self, step, grads, weights, slots, opt_params):\n \"\"\"Computes one step's worth of updates.\n\n The update computes both new weights for the layer/node and new slot values\n for the optimizer.\n\n Args:\n step: Current step number in the training process.\n grads: Gradients for the weights of the sublayer.\n weights: Current weights for the sublayer.\n slots: Optimizer slots.\n opt_params: Optimizer hyperparameters (e.g. learning rate, momentum).\n\n Returns:\n Tuple of (new_weights, new_slots).\n \"\"\"\n raise NotImplementedError\n\n @property\n def slots(self):\n return self._slots\n\n @slots.setter\n def slots(self, slots):\n self._slots = slots\n\n @property\n def opt_params(self):\n return self._init_opt_params\n\n @opt_params.setter\n def opt_params(self, opt_params):\n self._init_opt_params = opt_params\n\n def tree_init(self, weight_tree):\n \"\"\"Assembles node-local initializations into full-tree initialization.\n\n Args:\n weight_tree: Weights for an entire model, in a tree that matches the\n model's layer structure.\n\n Returns:\n Tuple `(slots, opt_params)`, where `slots` are the initialized optimizer\n slot values and `opt_params` are optimizer hyperparameters (e.g.,\n learning rate, momentum).\n \"\"\"\n self._slots = [self.init(weight)\n for weight in fastmath.tree_flatten(weight_tree)]\n return (\n self._slots,\n self._init_opt_params,\n )\n\n def tree_update(self, step, grad_tree, weight_tree, slots, opt_params):\n \"\"\"Assembles node-local weight and slot updates for the full layer tree.\n\n Args:\n step: Current step number in the training process.\n grad_tree: Gradients for the entire model, in a tree that matches the\n model's layer structure.\n weight_tree: Current weights for the entire model, in a tree that matches\n the model's layer structure.\n slots: Optimizer slots.\n opt_params: Optimizer hyperparameters (e.g. 
learning rate, momentum).\n\n Returns:\n Tuple `(weights, slots)`, where `weights` are the optimizer-updated\n weights for the whole model (in a tree matching the model's layer\n structure) and `slots` are the updated optimizer slot values.\n \"\"\"\n grads_flat = fastmath.tree_flatten(grad_tree)\n grads_norm = self._l2_norm(grads_flat)\n if self._clip_grad_norm is not None:\n max_norm = self._clip_grad_norm\n grads_flat = [jnp.where(grads_norm < max_norm, # pylint: disable=g-complex-comprehension\n g,\n g * (max_norm / grads_norm))\n for g in grads_flat]\n weights_flat = fastmath.tree_flatten(weight_tree)\n weights_norm = self._l2_norm(weights_flat)\n updated_pairs = [\n self._update_and_check(step, grad, weight, slot, opt_params)\n for (grad, weight, slot) in zip(grads_flat, weights_flat, slots)\n ]\n new_weights_flat, self.slots = zip(*updated_pairs)\n new_weights, _ = fastmath.tree_unflatten(new_weights_flat, weight_tree)\n metrics = {'gradients_l2': grads_norm, 'weights_l2': weights_norm}\n return new_weights, self.slots, metrics\n\n def _l2_norm(self, flat_list):\n \"\"\"Returns the aggregate L2 norm of a list of tensors.\"\"\"\n if fastmath.is_backend(fastmath.Backend.JAX):\n norm = jnp.sqrt(sum(jnp.vdot(x, x) for x in flat_list))\n else: # TODO(lukaszkaiser): add vdot to TF-numpy\n norm = jnp.sqrt(sum(jnp.sum(x*x) for x in flat_list))\n return norm\n\n def _update_and_check(self, step, grads, weights, slots, opt_params):\n \"\"\"Updates a single weight array and checks types.\"\"\"\n new_weights, new_slots = self.update(\n step, grads, weights, slots, opt_params)\n if isinstance(weights, jnp.ndarray):\n if not isinstance(new_weights, jnp.ndarray):\n raise ValueError(\n f'New weight values should be of type jnp.ndarray or a subclass; '\n f'instead got {type(new_weights)}.')\n if new_weights.dtype != weights.dtype:\n raise ValueError(\n f'New weight values dtype ({new_weights.dtype}) does not match '\n f'the old one ({weights.dtype}).')\n return new_weights, new_slots\n\n\nclass SGD(Optimizer):\n \"\"\"Stochastic gradient descent (SGD) optimizer.\n\n A simple optimizer with no weights (\"slots\") of its own.\n \"\"\"\n\n def init(self, weights):\n return None\n\n def update(self, step, grads, weights, slots, opt_params):\n del step, slots\n lr = opt_params['learning_rate']\n new_weights = weights - (lr * grads).astype(weights.dtype)\n return new_weights, None\n\n\n# Utilities.\n\n\ndef l2_norm(tree):\n \"\"\"Compute the l2 norm of a pytree of arrays. 
Useful for weight decay.\"\"\"\n leaves = fastmath.tree_flatten(tree)\n return jnp.sqrt(sum(jnp.vdot(x, x) for x in leaves))\n\n\ndef clip_grads(grad_tree, max_norm):\n \"\"\"Clip gradients stored as a pytree of arrays to maximum norm `max_norm`.\"\"\"\n norm = l2_norm(grad_tree)\n normalize = lambda g: jnp.where(norm < max_norm, g, g * (max_norm / norm))\n return fastmath.nested_map(grad_tree, normalize)\n", "path": "trax/optimizers/base.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2020 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Trax base optimizer class.\"\"\"\n\nfrom trax import fastmath\nfrom trax.fastmath import numpy as jnp\n\n\nclass Optimizer(object):\n \"\"\"Base class for optimizers that work hand in hand with Trax layers.\n\n To define an optimizer subclass, specify its behavior with respect to a\n single level/node in the network (e.g., a single dense layer):\n\n - `init`: how to create/initialize optimizer-internal weights (\"slots\")\n whose shape matches the node's weight shape.\n - `update`: how to use gradient information to update node weights and\n optimizer slots.\n\n The Trax runtime combines these node-local computations into weight updates\n and slot updates for the whole tree of layers in the model.\n \"\"\"\n\n def __init__(self, learning_rate=0.01, clip_grad_norm=None,\n **init_opt_params):\n \"\"\"Sets initial hyperparameter values for this optimizer.\n\n Takes initial optimizer parameters as keyword arguments. These values can\n be changed between training steps, e.g., for learning rate schedules.\n\n If you want your subclass to expose hyperparameters for gin configuration,\n override this constructor and use explicitly named keyword arguments. See\n `momentum.Momentum.__init__` for one such example.\n\n Args:\n learning_rate: The initial learning rate.\n clip_grad_norm: float; the value to which gradients will be clipped.\n **init_opt_params: Initial values of any additional optimizer parameters.\n \"\"\"\n init_opt_params['learning_rate'] = learning_rate\n self._init_opt_params = {\n name: jnp.array(value) for (name, value) in init_opt_params.items()\n }\n self._slots = None\n # Gradient clipping happens with respect to the norm of the whole gradient\n # tree, so it is not passed to single-slot updates, but done in this class\n # for the whole gradient tree.\n self._clip_grad_norm = clip_grad_norm\n\n def init(self, weights):\n \"\"\"Creates optimizer slots for the given parameters.\n\n Args:\n weights: Trainable weights for one layer. 
Optimizer slots typically match\n the data shape and type of the given layer weights.\n \"\"\"\n raise NotImplementedError\n\n def update(self, step, grads, weights, slots, opt_params):\n \"\"\"Computes one step's worth of updates.\n\n The update computes both new weights for the layer/node and new slot values\n for the optimizer.\n\n Args:\n step: Current step number in the training process.\n grads: Gradients for the weights of the sublayer.\n weights: Current weights for the sublayer.\n slots: Optimizer slots.\n opt_params: Optimizer hyperparameters (e.g. learning rate, momentum).\n\n Returns:\n Tuple of (new_weights, new_slots).\n \"\"\"\n raise NotImplementedError\n\n @property\n def slots(self):\n return self._slots\n\n @slots.setter\n def slots(self, slots):\n self._slots = slots\n\n @property\n def opt_params(self):\n return self._init_opt_params\n\n @opt_params.setter\n def opt_params(self, opt_params):\n self._init_opt_params = opt_params\n\n def tree_init(self, weight_tree):\n \"\"\"Assembles node-local initializations into full-tree initialization.\n\n Args:\n weight_tree: Weights for an entire model, in a tree that matches the\n model's layer structure.\n\n Returns:\n Tuple `(slots, opt_params)`, where `slots` are the initialized optimizer\n slot values and `opt_params` are optimizer hyperparameters (e.g.,\n learning rate, momentum).\n \"\"\"\n self._slots = [self.init(weight)\n for weight in fastmath.tree_flatten(weight_tree)]\n return (\n self._slots,\n self._init_opt_params,\n )\n\n def tree_update(self, step, grad_tree, weight_tree, slots, opt_params):\n \"\"\"Assembles node-local weight and slot updates for the full layer tree.\n\n Args:\n step: Current step number in the training process.\n grad_tree: Gradients for the entire model, in a tree that matches the\n model's layer structure.\n weight_tree: Current weights for the entire model, in a tree that matches\n the model's layer structure.\n slots: Optimizer slots.\n opt_params: Optimizer hyperparameters (e.g. 
learning rate, momentum).\n\n Returns:\n Tuple `(weights, slots)`, where `weights` are the optimizer-updated\n weights for the whole model (in a tree matching the model's layer\n structure) and `slots` are the updated optimizer slot values.\n \"\"\"\n grads_flat = fastmath.tree_flatten(grad_tree)\n grads_norm = self._l2_norm(grads_flat)\n if self._clip_grad_norm is not None:\n max_norm = self._clip_grad_norm\n grads_flat = [jnp.where(grads_norm < max_norm, # pylint: disable=g-complex-comprehension\n g,\n g * (max_norm / grads_norm))\n for g in grads_flat]\n weights_flat = fastmath.tree_flatten(weight_tree)\n weights_norm = self._l2_norm(weights_flat)\n updated_pairs = [\n self._update_and_check(step, grad, weight, slot, opt_params)\n for (grad, weight, slot) in zip(grads_flat, weights_flat, slots)\n ]\n new_weights_flat, self.slots = map(list, zip(*updated_pairs))\n new_weights, _ = fastmath.tree_unflatten(new_weights_flat, weight_tree)\n metrics = {'gradients_l2': grads_norm, 'weights_l2': weights_norm}\n return new_weights, self.slots, metrics\n\n def _l2_norm(self, flat_list):\n \"\"\"Returns the aggregate L2 norm of a list of tensors.\"\"\"\n if fastmath.is_backend(fastmath.Backend.JAX):\n norm = jnp.sqrt(sum(jnp.vdot(x, x) for x in flat_list))\n else: # TODO(lukaszkaiser): add vdot to TF-numpy\n norm = jnp.sqrt(sum(jnp.sum(x*x) for x in flat_list))\n return norm\n\n def _update_and_check(self, step, grads, weights, slots, opt_params):\n \"\"\"Updates a single weight array and checks types.\"\"\"\n new_weights, new_slots = self.update(\n step, grads, weights, slots, opt_params)\n if isinstance(weights, jnp.ndarray):\n if not isinstance(new_weights, jnp.ndarray):\n raise ValueError(\n f'New weight values should be of type jnp.ndarray or a subclass; '\n f'instead got {type(new_weights)}.')\n if new_weights.dtype != weights.dtype:\n raise ValueError(\n f'New weight values dtype ({new_weights.dtype}) does not match '\n f'the old one ({weights.dtype}).')\n return new_weights, new_slots\n\n\nclass SGD(Optimizer):\n \"\"\"Stochastic gradient descent (SGD) optimizer.\n\n A simple optimizer with no weights (\"slots\") of its own.\n \"\"\"\n\n def init(self, weights):\n return None\n\n def update(self, step, grads, weights, slots, opt_params):\n del step, slots\n lr = opt_params['learning_rate']\n new_weights = weights - (lr * grads).astype(weights.dtype)\n return new_weights, None\n\n\n# Utilities.\n\n\ndef l2_norm(tree):\n \"\"\"Compute the l2 norm of a pytree of arrays. Useful for weight decay.\"\"\"\n leaves = fastmath.tree_flatten(tree)\n return jnp.sqrt(sum(jnp.vdot(x, x) for x in leaves))\n\n\ndef clip_grads(grad_tree, max_norm):\n \"\"\"Clip gradients stored as a pytree of arrays to maximum norm `max_norm`.\"\"\"\n norm = l2_norm(grad_tree)\n normalize = lambda g: jnp.where(norm < max_norm, g, g * (max_norm / norm))\n return fastmath.nested_map(grad_tree, normalize)\n", "path": "trax/optimizers/base.py"}]} | 3,584 | 175 |
gh_patches_debug_29513 | rasdani/github-patches | git_diff | TheAlgorithms__Python-10140 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Concatenate/consolidate all algorithms with different implementations
### Feature description
There are lots of algorithms with the same concept but different implementations/methods in different files. All these should be moved into one file
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strings/reverse_letters.py`
Content:
```
1 def reverse_letters(input_str: str) -> str:
2 """
3 Reverses letters in a given string without adjusting the position of the words
4 >>> reverse_letters('The cat in the hat')
5 'ehT tac ni eht tah'
6 >>> reverse_letters('The quick brown fox jumped over the lazy dog.')
7 'ehT kciuq nworb xof depmuj revo eht yzal .god'
8 >>> reverse_letters('Is this true?')
9 'sI siht ?eurt'
10 >>> reverse_letters("I love Python")
11 'I evol nohtyP'
12 """
13 return " ".join([word[::-1] for word in input_str.split()])
14
15
16 if __name__ == "__main__":
17 import doctest
18
19 doctest.testmod()
20
```
Path: `strings/reverse_long_words.py`
Content:
```
1 def reverse_long_words(sentence: str) -> str:
2 """
3 Reverse all words that are longer than 4 characters in a sentence.
4
5 >>> reverse_long_words("Hey wollef sroirraw")
6 'Hey fellow warriors'
7 >>> reverse_long_words("nohtyP is nohtyP")
8 'Python is Python'
9 >>> reverse_long_words("1 12 123 1234 54321 654321")
10 '1 12 123 1234 12345 123456'
11 """
12 return " ".join(
13 "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
14 )
15
16
17 if __name__ == "__main__":
18 import doctest
19
20 doctest.testmod()
21 print(reverse_long_words("Hey wollef sroirraw"))
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strings/reverse_letters.py b/strings/reverse_letters.py
--- a/strings/reverse_letters.py
+++ b/strings/reverse_letters.py
@@ -1,19 +1,24 @@
-def reverse_letters(input_str: str) -> str:
+def reverse_letters(sentence: str, length: int = 0) -> str:
"""
- Reverses letters in a given string without adjusting the position of the words
- >>> reverse_letters('The cat in the hat')
- 'ehT tac ni eht tah'
- >>> reverse_letters('The quick brown fox jumped over the lazy dog.')
- 'ehT kciuq nworb xof depmuj revo eht yzal .god'
- >>> reverse_letters('Is this true?')
- 'sI siht ?eurt'
- >>> reverse_letters("I love Python")
- 'I evol nohtyP'
+ Reverse all words that are longer than the given length of characters in a sentence.
+ If unspecified, length is taken as 0
+
+ >>> reverse_letters("Hey wollef sroirraw", 3)
+ 'Hey fellow warriors'
+ >>> reverse_letters("nohtyP is nohtyP", 2)
+ 'Python is Python'
+ >>> reverse_letters("1 12 123 1234 54321 654321", 0)
+ '1 21 321 4321 12345 123456'
+ >>> reverse_letters("racecar")
+ 'racecar'
"""
- return " ".join([word[::-1] for word in input_str.split()])
+ return " ".join(
+ "".join(word[::-1]) if len(word) > length else word for word in sentence.split()
+ )
if __name__ == "__main__":
import doctest
doctest.testmod()
+ print(reverse_letters("Hey wollef sroirraw"))
diff --git a/strings/reverse_long_words.py b/strings/reverse_long_words.py
deleted file mode 100644
--- a/strings/reverse_long_words.py
+++ /dev/null
@@ -1,21 +0,0 @@
-def reverse_long_words(sentence: str) -> str:
- """
- Reverse all words that are longer than 4 characters in a sentence.
-
- >>> reverse_long_words("Hey wollef sroirraw")
- 'Hey fellow warriors'
- >>> reverse_long_words("nohtyP is nohtyP")
- 'Python is Python'
- >>> reverse_long_words("1 12 123 1234 54321 654321")
- '1 12 123 1234 12345 123456'
- """
- return " ".join(
- "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
- )
-
-
-if __name__ == "__main__":
- import doctest
-
- doctest.testmod()
- print(reverse_long_words("Hey wollef sroirraw"))
| {"golden_diff": "diff --git a/strings/reverse_letters.py b/strings/reverse_letters.py\n--- a/strings/reverse_letters.py\n+++ b/strings/reverse_letters.py\n@@ -1,19 +1,24 @@\n-def reverse_letters(input_str: str) -> str:\n+def reverse_letters(sentence: str, length: int = 0) -> str:\n \"\"\"\n- Reverses letters in a given string without adjusting the position of the words\n- >>> reverse_letters('The cat in the hat')\n- 'ehT tac ni eht tah'\n- >>> reverse_letters('The quick brown fox jumped over the lazy dog.')\n- 'ehT kciuq nworb xof depmuj revo eht yzal .god'\n- >>> reverse_letters('Is this true?')\n- 'sI siht ?eurt'\n- >>> reverse_letters(\"I love Python\")\n- 'I evol nohtyP'\n+ Reverse all words that are longer than the given length of characters in a sentence.\n+ If unspecified, length is taken as 0\n+\n+ >>> reverse_letters(\"Hey wollef sroirraw\", 3)\n+ 'Hey fellow warriors'\n+ >>> reverse_letters(\"nohtyP is nohtyP\", 2)\n+ 'Python is Python'\n+ >>> reverse_letters(\"1 12 123 1234 54321 654321\", 0)\n+ '1 21 321 4321 12345 123456'\n+ >>> reverse_letters(\"racecar\")\n+ 'racecar'\n \"\"\"\n- return \" \".join([word[::-1] for word in input_str.split()])\n+ return \" \".join(\n+ \"\".join(word[::-1]) if len(word) > length else word for word in sentence.split()\n+ )\n \n \n if __name__ == \"__main__\":\n import doctest\n \n doctest.testmod()\n+ print(reverse_letters(\"Hey wollef sroirraw\"))\ndiff --git a/strings/reverse_long_words.py b/strings/reverse_long_words.py\ndeleted file mode 100644\n--- a/strings/reverse_long_words.py\n+++ /dev/null\n@@ -1,21 +0,0 @@\n-def reverse_long_words(sentence: str) -> str:\n- \"\"\"\n- Reverse all words that are longer than 4 characters in a sentence.\n-\n- >>> reverse_long_words(\"Hey wollef sroirraw\")\n- 'Hey fellow warriors'\n- >>> reverse_long_words(\"nohtyP is nohtyP\")\n- 'Python is Python'\n- >>> reverse_long_words(\"1 12 123 1234 54321 654321\")\n- '1 12 123 1234 12345 123456'\n- \"\"\"\n- return \" \".join(\n- \"\".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()\n- )\n-\n-\n-if __name__ == \"__main__\":\n- import doctest\n-\n- doctest.testmod()\n- print(reverse_long_words(\"Hey wollef sroirraw\"))\n", "issue": "Concatenate/consolidate all algorithms with different implementations\n### Feature description\n\nThere are lots of algorithms with the same concept but different implementations/methods in different files. 
All these should be moved into one file\n", "before_files": [{"content": "def reverse_letters(input_str: str) -> str:\n \"\"\"\n Reverses letters in a given string without adjusting the position of the words\n >>> reverse_letters('The cat in the hat')\n 'ehT tac ni eht tah'\n >>> reverse_letters('The quick brown fox jumped over the lazy dog.')\n 'ehT kciuq nworb xof depmuj revo eht yzal .god'\n >>> reverse_letters('Is this true?')\n 'sI siht ?eurt'\n >>> reverse_letters(\"I love Python\")\n 'I evol nohtyP'\n \"\"\"\n return \" \".join([word[::-1] for word in input_str.split()])\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n", "path": "strings/reverse_letters.py"}, {"content": "def reverse_long_words(sentence: str) -> str:\n \"\"\"\n Reverse all words that are longer than 4 characters in a sentence.\n\n >>> reverse_long_words(\"Hey wollef sroirraw\")\n 'Hey fellow warriors'\n >>> reverse_long_words(\"nohtyP is nohtyP\")\n 'Python is Python'\n >>> reverse_long_words(\"1 12 123 1234 54321 654321\")\n '1 12 123 1234 12345 123456'\n \"\"\"\n return \" \".join(\n \"\".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()\n )\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n print(reverse_long_words(\"Hey wollef sroirraw\"))\n", "path": "strings/reverse_long_words.py"}], "after_files": [{"content": "def reverse_letters(sentence: str, length: int = 0) -> str:\n \"\"\"\n Reverse all words that are longer than the given length of characters in a sentence.\n If unspecified, length is taken as 0\n\n >>> reverse_letters(\"Hey wollef sroirraw\", 3)\n 'Hey fellow warriors'\n >>> reverse_letters(\"nohtyP is nohtyP\", 2)\n 'Python is Python'\n >>> reverse_letters(\"1 12 123 1234 54321 654321\", 0)\n '1 21 321 4321 12345 123456'\n >>> reverse_letters(\"racecar\")\n 'racecar'\n \"\"\"\n return \" \".join(\n \"\".join(word[::-1]) if len(word) > length else word for word in sentence.split()\n )\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n print(reverse_letters(\"Hey wollef sroirraw\"))\n", "path": "strings/reverse_letters.py"}, {"content": null, "path": "strings/reverse_long_words.py"}]} | 761 | 732 |
gh_patches_debug_28763 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-2196 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DD_TAGS separator inconsistent with heroku-buildpack-datadog
https://github.com/DataDog/heroku-buildpack-datadog sets `DD_TAGS` separated by spaces.
However, dd-trace-py splits the tags by commas:
https://github.com/DataDog/dd-trace-py/blob/master/ddtrace/utils/formats.py#L87-L116
```
def parse_tags_str(tags_str):
"""Parse a string of tags typically provided via environment variables.
The expected string is of the form::
"key1:value1,key2:value2"
:param tags_str: A string of the above form to parse tags from.
:return: A dict containing the tags that were parsed.
"""
parsed_tags = {}
if not tags_str:
return parsed_tags
for tag in tags_str.split(","):
try:
key, value = tag.split(":", 1)
# Validate the tag
if key == "" or value == "" or value.endswith(":"):
raise ValueError
except ValueError:
log.error(
"Malformed tag in tag pair '%s' from tag string '%s'.",
tag,
tags_str,
)
else:
parsed_tags[key] = value
return parsed_tags
```
This results in all of the tags being set as the value of the first tag.
This looks to have been previously surfaced for the java tracer here:
https://github.com/DataDog/heroku-buildpack-datadog/issues/187
It was initially changed in the buildpack, but the change was reverted and dd-trace-java was updated to accommodate commas or spaces:
https://github.com/DataDog/dd-trace-java/pull/2011
### Which version of dd-trace-py are you using?
0.47.0
### How can we reproduce your problem?
Perform a trace with `DD_TAGS` set by https://github.com/DataDog/heroku-buildpack-datadog.
### What is the result that you get?
`dyno` tag contains all other tags.
### What is the result that you expected?
Tags to be properly separated.
--- END ISSUE ---
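A standalone sketch of the failure mode described above (plain Python; the tag values are invented for illustration):

```python
# With a space-separated DD_TAGS string, splitting on "," produces a single
# "tag", so everything after the first colon ends up as the first tag's value.
tags_str = "dyno:web.1 dynotype:web appname:myapp"  # space-separated, buildpack style

broken = {}
for tag in tags_str.split(","):          # only one element: the whole string
    key, value = tag.split(":", 1)
    broken[key] = value
print(broken)  # {'dyno': 'web.1 dynotype:web appname:myapp'}

fixed = dict(tag.split(":", 1) for tag in tags_str.split())  # split on whitespace
print(fixed)   # {'dyno': 'web.1', 'dynotype': 'web', 'appname': 'myapp'}
```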
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/utils/formats.py`
Content:
```
1 import logging
2 import os
3
4 from .deprecation import deprecation
5
6
7 log = logging.getLogger(__name__)
8
9
10 def get_env(*parts, **kwargs):
11 """Retrieves environment variables value for the given integration. It must be used
12 for consistency between integrations. The implementation is backward compatible
13 with legacy nomenclature:
14
15 * `DATADOG_` is a legacy prefix with lower priority
16 * `DD_` environment variables have the highest priority
17 * the environment variable is built concatenating `integration` and `variable`
18 arguments
19 * return `default` otherwise
20
21 :param parts: evironment variable parts that will be joined with ``_`` to generate the name
22 :type parts: :obj:`str`
23 :param kwargs: ``default`` is the only supported keyword argument which sets the default value
24 if no environment variable is found
25 :rtype: :obj:`str` | ``kwargs["default"]``
26 :returns: The string environment variable value or the value of ``kwargs["default"]`` if not found
27 """
28 default = kwargs.get("default")
29
30 key = "_".join(parts)
31 key = key.upper()
32 legacy_env = "DATADOG_{}".format(key)
33 env = "DD_{}".format(key)
34
35 value = os.getenv(env)
36 legacy = os.getenv(legacy_env)
37 if legacy:
38 # Deprecation: `DATADOG_` variables are deprecated
39 deprecation(
40 name="DATADOG_",
41 message="Use `DD_` prefix instead",
42 version="1.0.0",
43 )
44
45 value = value or legacy
46 return value if value else default
47
48
49 def deep_getattr(obj, attr_string, default=None):
50 """
51 Returns the attribute of `obj` at the dotted path given by `attr_string`
52 If no such attribute is reachable, returns `default`
53
54 >>> deep_getattr(cass, 'cluster')
55 <cassandra.cluster.Cluster object at 0xa20c350
56
57 >>> deep_getattr(cass, 'cluster.metadata.partitioner')
58 u'org.apache.cassandra.dht.Murmur3Partitioner'
59
60 >>> deep_getattr(cass, 'i.dont.exist', default='default')
61 'default'
62 """
63 attrs = attr_string.split(".")
64 for attr in attrs:
65 try:
66 obj = getattr(obj, attr)
67 except AttributeError:
68 return default
69
70 return obj
71
72
73 def asbool(value):
74 """Convert the given String to a boolean object.
75
76 Accepted values are `True` and `1`.
77 """
78 if value is None:
79 return False
80
81 if isinstance(value, bool):
82 return value
83
84 return value.lower() in ("true", "1")
85
86
87 def parse_tags_str(tags_str):
88 """Parse a string of tags typically provided via environment variables.
89
90 The expected string is of the form::
91 "key1:value1,key2:value2"
92
93 :param tags_str: A string of the above form to parse tags from.
94 :return: A dict containing the tags that were parsed.
95 """
96 parsed_tags = {}
97 if not tags_str:
98 return parsed_tags
99
100 for tag in tags_str.split(","):
101 try:
102 key, value = tag.split(":", 1)
103
104 # Validate the tag
105 if key == "" or value == "" or value.endswith(":"):
106 raise ValueError
107 except ValueError:
108 log.error(
109 "Malformed tag in tag pair '%s' from tag string '%s'.",
110 tag,
111 tags_str,
112 )
113 else:
114 parsed_tags[key] = value
115
116 return parsed_tags
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ddtrace/utils/formats.py b/ddtrace/utils/formats.py
--- a/ddtrace/utils/formats.py
+++ b/ddtrace/utils/formats.py
@@ -1,9 +1,13 @@
import logging
import os
+import re
from .deprecation import deprecation
+# Tags `key:value` must be separated by either comma or space
+_TAGS_NOT_SEPARATED = re.compile(r":[^,\s]+:")
+
log = logging.getLogger(__name__)
@@ -89,6 +93,7 @@
The expected string is of the form::
"key1:value1,key2:value2"
+ "key1:value1 key2:value2"
:param tags_str: A string of the above form to parse tags from.
:return: A dict containing the tags that were parsed.
@@ -97,7 +102,29 @@
if not tags_str:
return parsed_tags
- for tag in tags_str.split(","):
+ if _TAGS_NOT_SEPARATED.search(tags_str):
+ log.error("Malformed tag string with tags not separated by comma or space '%s'.", tags_str)
+ return parsed_tags
+
+ # Identify separator based on which successfully identifies the correct
+ # number of valid tags
+ numtagseps = tags_str.count(":")
+ for sep in [",", " "]:
+ if sum(":" in _ for _ in tags_str.split(sep)) == numtagseps:
+ break
+ else:
+ log.error(
+ (
+ "Failed to find separator for tag string: '%s'.\n"
+ "Tag strings must be comma or space separated:\n"
+ " key1:value1,key2:value2\n"
+ " key1:value1 key2:value2"
+ ),
+ tags_str,
+ )
+ return parsed_tags
+
+ for tag in tags_str.split(sep):
try:
key, value = tag.split(":", 1)
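A toy walk-through of the separator auto-detection introduced by this patch (values invented; not part of the library code):

```python
# The patch counts how many "key:value" pairs the string should contain (one
# per ":") and picks whichever separator, "," or " ", recovers that many.
tags_str = "key1:value1 key2:value2"

numtagseps = tags_str.count(":")                   # 2 expected tags
for sep in [",", " "]:
    if sum(":" in chunk for chunk in tags_str.split(sep)) == numtagseps:
        break
print(repr(sep))             # ' '  -- the space separator matches for this input
print(tags_str.split(sep))   # ['key1:value1', 'key2:value2']
```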
| {"golden_diff": "diff --git a/ddtrace/utils/formats.py b/ddtrace/utils/formats.py\n--- a/ddtrace/utils/formats.py\n+++ b/ddtrace/utils/formats.py\n@@ -1,9 +1,13 @@\n import logging\n import os\n+import re\n \n from .deprecation import deprecation\n \n \n+# Tags `key:value` must be separated by either comma or space\n+_TAGS_NOT_SEPARATED = re.compile(r\":[^,\\s]+:\")\n+\n log = logging.getLogger(__name__)\n \n \n@@ -89,6 +93,7 @@\n \n The expected string is of the form::\n \"key1:value1,key2:value2\"\n+ \"key1:value1 key2:value2\"\n \n :param tags_str: A string of the above form to parse tags from.\n :return: A dict containing the tags that were parsed.\n@@ -97,7 +102,29 @@\n if not tags_str:\n return parsed_tags\n \n- for tag in tags_str.split(\",\"):\n+ if _TAGS_NOT_SEPARATED.search(tags_str):\n+ log.error(\"Malformed tag string with tags not separated by comma or space '%s'.\", tags_str)\n+ return parsed_tags\n+\n+ # Identify separator based on which successfully identifies the correct\n+ # number of valid tags\n+ numtagseps = tags_str.count(\":\")\n+ for sep in [\",\", \" \"]:\n+ if sum(\":\" in _ for _ in tags_str.split(sep)) == numtagseps:\n+ break\n+ else:\n+ log.error(\n+ (\n+ \"Failed to find separator for tag string: '%s'.\\n\"\n+ \"Tag strings must be comma or space separated:\\n\"\n+ \" key1:value1,key2:value2\\n\"\n+ \" key1:value1 key2:value2\"\n+ ),\n+ tags_str,\n+ )\n+ return parsed_tags\n+\n+ for tag in tags_str.split(sep):\n try:\n key, value = tag.split(\":\", 1)\n", "issue": "DD_TAGS separator inconsistent with heroku-buildpack-datadog\nhttps://github.com/DataDog/heroku-buildpack-datadog sets `DD_TAGS` separated by spaces.\r\n\r\nHowever, dd-trace-py splits the tags by commas:\r\n\r\nhttps://github.com/DataDog/dd-trace-py/blob/master/ddtrace/utils/formats.py#L87-L116\r\n```\r\ndef parse_tags_str(tags_str):\r\n \"\"\"Parse a string of tags typically provided via environment variables.\r\n The expected string is of the form::\r\n \"key1:value1,key2:value2\"\r\n :param tags_str: A string of the above form to parse tags from.\r\n :return: A dict containing the tags that were parsed.\r\n \"\"\"\r\n parsed_tags = {}\r\n if not tags_str:\r\n return parsed_tags\r\n\r\n for tag in tags_str.split(\",\"):\r\n try:\r\n key, value = tag.split(\":\", 1)\r\n\r\n # Validate the tag\r\n if key == \"\" or value == \"\" or value.endswith(\":\"):\r\n raise ValueError\r\n except ValueError:\r\n log.error(\r\n \"Malformed tag in tag pair '%s' from tag string '%s'.\",\r\n tag,\r\n tags_str,\r\n )\r\n else:\r\n parsed_tags[key] = value\r\n\r\n return parsed_tags\r\n```\r\n\r\nThis results in all of the tags being set as the value of the first tag.\r\n\r\nThis looks to have been previously surfaced for the java tracer here:\r\nhttps://github.com/DataDog/heroku-buildpack-datadog/issues/187\r\n\r\nAnd was initially changed in the buildpack but was reverted and dd-trace-java updated to accommodate commas or spaces:\r\nhttps://github.com/DataDog/dd-trace-java/pull/2011\r\n\r\n### Which version of dd-trace-py are you using?\r\n0.47.0\r\n\r\n### How can we reproduce your problem?\r\nPerform a trace with `DD_TAGS` set by https://github.com/DataDog/heroku-buildpack-datadog.\r\n\r\n### What is the result that you get?\r\n`dyno` tag contains all other tags.\r\n\r\n### What is the result that you expected?\r\nTags to be properly separated.\r\n\n", "before_files": [{"content": "import logging\nimport os\n\nfrom .deprecation import deprecation\n\n\nlog = logging.getLogger(__name__)\n\n\ndef get_env(*parts, 
**kwargs):\n \"\"\"Retrieves environment variables value for the given integration. It must be used\n for consistency between integrations. The implementation is backward compatible\n with legacy nomenclature:\n\n * `DATADOG_` is a legacy prefix with lower priority\n * `DD_` environment variables have the highest priority\n * the environment variable is built concatenating `integration` and `variable`\n arguments\n * return `default` otherwise\n\n :param parts: evironment variable parts that will be joined with ``_`` to generate the name\n :type parts: :obj:`str`\n :param kwargs: ``default`` is the only supported keyword argument which sets the default value\n if no environment variable is found\n :rtype: :obj:`str` | ``kwargs[\"default\"]``\n :returns: The string environment variable value or the value of ``kwargs[\"default\"]`` if not found\n \"\"\"\n default = kwargs.get(\"default\")\n\n key = \"_\".join(parts)\n key = key.upper()\n legacy_env = \"DATADOG_{}\".format(key)\n env = \"DD_{}\".format(key)\n\n value = os.getenv(env)\n legacy = os.getenv(legacy_env)\n if legacy:\n # Deprecation: `DATADOG_` variables are deprecated\n deprecation(\n name=\"DATADOG_\",\n message=\"Use `DD_` prefix instead\",\n version=\"1.0.0\",\n )\n\n value = value or legacy\n return value if value else default\n\n\ndef deep_getattr(obj, attr_string, default=None):\n \"\"\"\n Returns the attribute of `obj` at the dotted path given by `attr_string`\n If no such attribute is reachable, returns `default`\n\n >>> deep_getattr(cass, 'cluster')\n <cassandra.cluster.Cluster object at 0xa20c350\n\n >>> deep_getattr(cass, 'cluster.metadata.partitioner')\n u'org.apache.cassandra.dht.Murmur3Partitioner'\n\n >>> deep_getattr(cass, 'i.dont.exist', default='default')\n 'default'\n \"\"\"\n attrs = attr_string.split(\".\")\n for attr in attrs:\n try:\n obj = getattr(obj, attr)\n except AttributeError:\n return default\n\n return obj\n\n\ndef asbool(value):\n \"\"\"Convert the given String to a boolean object.\n\n Accepted values are `True` and `1`.\n \"\"\"\n if value is None:\n return False\n\n if isinstance(value, bool):\n return value\n\n return value.lower() in (\"true\", \"1\")\n\n\ndef parse_tags_str(tags_str):\n \"\"\"Parse a string of tags typically provided via environment variables.\n\n The expected string is of the form::\n \"key1:value1,key2:value2\"\n\n :param tags_str: A string of the above form to parse tags from.\n :return: A dict containing the tags that were parsed.\n \"\"\"\n parsed_tags = {}\n if not tags_str:\n return parsed_tags\n\n for tag in tags_str.split(\",\"):\n try:\n key, value = tag.split(\":\", 1)\n\n # Validate the tag\n if key == \"\" or value == \"\" or value.endswith(\":\"):\n raise ValueError\n except ValueError:\n log.error(\n \"Malformed tag in tag pair '%s' from tag string '%s'.\",\n tag,\n tags_str,\n )\n else:\n parsed_tags[key] = value\n\n return parsed_tags\n", "path": "ddtrace/utils/formats.py"}], "after_files": [{"content": "import logging\nimport os\nimport re\n\nfrom .deprecation import deprecation\n\n\n# Tags `key:value` must be separated by either comma or space\n_TAGS_NOT_SEPARATED = re.compile(r\":[^,\\s]+:\")\n\nlog = logging.getLogger(__name__)\n\n\ndef get_env(*parts, **kwargs):\n \"\"\"Retrieves environment variables value for the given integration. It must be used\n for consistency between integrations. 
The implementation is backward compatible\n with legacy nomenclature:\n\n * `DATADOG_` is a legacy prefix with lower priority\n * `DD_` environment variables have the highest priority\n * the environment variable is built concatenating `integration` and `variable`\n arguments\n * return `default` otherwise\n\n :param parts: evironment variable parts that will be joined with ``_`` to generate the name\n :type parts: :obj:`str`\n :param kwargs: ``default`` is the only supported keyword argument which sets the default value\n if no environment variable is found\n :rtype: :obj:`str` | ``kwargs[\"default\"]``\n :returns: The string environment variable value or the value of ``kwargs[\"default\"]`` if not found\n \"\"\"\n default = kwargs.get(\"default\")\n\n key = \"_\".join(parts)\n key = key.upper()\n legacy_env = \"DATADOG_{}\".format(key)\n env = \"DD_{}\".format(key)\n\n value = os.getenv(env)\n legacy = os.getenv(legacy_env)\n if legacy:\n # Deprecation: `DATADOG_` variables are deprecated\n deprecation(\n name=\"DATADOG_\",\n message=\"Use `DD_` prefix instead\",\n version=\"1.0.0\",\n )\n\n value = value or legacy\n return value if value else default\n\n\ndef deep_getattr(obj, attr_string, default=None):\n \"\"\"\n Returns the attribute of `obj` at the dotted path given by `attr_string`\n If no such attribute is reachable, returns `default`\n\n >>> deep_getattr(cass, 'cluster')\n <cassandra.cluster.Cluster object at 0xa20c350\n\n >>> deep_getattr(cass, 'cluster.metadata.partitioner')\n u'org.apache.cassandra.dht.Murmur3Partitioner'\n\n >>> deep_getattr(cass, 'i.dont.exist', default='default')\n 'default'\n \"\"\"\n attrs = attr_string.split(\".\")\n for attr in attrs:\n try:\n obj = getattr(obj, attr)\n except AttributeError:\n return default\n\n return obj\n\n\ndef asbool(value):\n \"\"\"Convert the given String to a boolean object.\n\n Accepted values are `True` and `1`.\n \"\"\"\n if value is None:\n return False\n\n if isinstance(value, bool):\n return value\n\n return value.lower() in (\"true\", \"1\")\n\n\ndef parse_tags_str(tags_str):\n \"\"\"Parse a string of tags typically provided via environment variables.\n\n The expected string is of the form::\n \"key1:value1,key2:value2\"\n \"key1:value1 key2:value2\"\n\n :param tags_str: A string of the above form to parse tags from.\n :return: A dict containing the tags that were parsed.\n \"\"\"\n parsed_tags = {}\n if not tags_str:\n return parsed_tags\n\n if _TAGS_NOT_SEPARATED.search(tags_str):\n log.error(\"Malformed tag string with tags not separated by comma or space '%s'.\", tags_str)\n return parsed_tags\n\n # Identify separator based on which successfully identifies the correct\n # number of valid tags\n numtagseps = tags_str.count(\":\")\n for sep in [\",\", \" \"]:\n if sum(\":\" in _ for _ in tags_str.split(sep)) == numtagseps:\n break\n else:\n log.error(\n (\n \"Failed to find separator for tag string: '%s'.\\n\"\n \"Tag strings must be comma or space separated:\\n\"\n \" key1:value1,key2:value2\\n\"\n \" key1:value1 key2:value2\"\n ),\n tags_str,\n )\n return parsed_tags\n\n for tag in tags_str.split(sep):\n try:\n key, value = tag.split(\":\", 1)\n\n # Validate the tag\n if key == \"\" or value == \"\" or value.endswith(\":\"):\n raise ValueError\n except ValueError:\n log.error(\n \"Malformed tag in tag pair '%s' from tag string '%s'.\",\n tag,\n tags_str,\n )\n else:\n parsed_tags[key] = value\n\n return parsed_tags\n", "path": "ddtrace/utils/formats.py"}]} | 1,745 | 448 |
gh_patches_debug_21814 | rasdani/github-patches | git_diff | mne-tools__mne-python-6690 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: joblib error in spatio_temporal_cluster_1samp_test
Running `spatio_temporal_cluster_1samp_test` with `n_jobs=8` using the default stat_fun `ttest_1samp_no_p` from the current master, I'm getting the error "The task could not be sent to the workers as it is too large for `send_bytes`."
The matrix is not all that large, 33x545x20484, and the function runs fine if I remove subjects to reduce it to 23x545x20484 or if I set `n_jobs=1`. I continue to get the error after doing `mne.set_cache_dir('/tmp')`. The error message looks exactly like one reported in an old joblib issue: https://github.com/joblib/joblib/issues/344
Operator error is always a possibility, but nothing obvious stands out to me (but then again I am the operator). The complete command line and error message are below:
```python
T_obs, clusters, cluster_p_values, H0 = clu = \
    spatio_temporal_cluster_1samp_test(X1, connectivity=connectivity, n_permutations=256,
                                       n_jobs=8, threshold=t_threshold, check_disjoint=True,
                                       verbose='DEBUG')
```
Traceback (most recent call last):
File "/export/research/analysis/human/jhouck/shared/tools/anaconda/envs/mnedev/lib/python3.6/site-packages/joblib/externals/loky/backend/queues.py", line 156, in _feed
send_bytes(obj_)
File "/export/research/analysis/human/jhouck/shared/tools/anaconda/envs/mnedev/lib/python3.6/multiprocessing/connection.py", line 200, in send_bytes
self._send_bytes(m[offset:offset + size])
File "/export/research/analysis/human/jhouck/shared/tools/anaconda/envs/mnedev/lib/python3.6/multiprocessing/connection.py", line 393, in _send_bytes
header = struct.pack("!i", n)
struct.error: 'i' format requires -2147483648 <= number <= 2147483647
"""
The above exception was the direct cause of the following exception:
RuntimeError Traceback (most recent call last)
<ipython-input-31-b9583e70ef55> in <module>
2 spatio_temporal_cluster_1samp_test(X1a, connectivity=connectivity, n_permutations=1, n_jobs=8,
3 threshold=t_threshold, buffer_size=2000, check_disjoint=True,
----> 4 verbose='DEBUG')
</export/research/analysis/human/jhouck/shared/tools/mne-python/mne/externals/decorator.py:decorator-gen-186> in spatio_temporal_cluster_1samp_test(X, threshold, n_permutations, tail, stat_fun, connectivity, n_jobs, seed, max_step, spatial_exclude, step_down_p, t_power, out_type, check_disjoint, buffer_size, verbose)
/export/research/analysis/human/jhouck/shared/tools/mne-python/mne/utils/_logging.py in wrapper(*args, **kwargs)
86 # set it back if we get an exception
87 with use_log_level(verbose_level):
---> 88 return function(*args, **kwargs)
89 return function(*args, **kwargs)
90 return FunctionMaker.create(
/export/research/analysis/human/jhouck/shared/tools/mne-python/mne/stats/cluster_level.py in spatio_temporal_cluster_1samp_test(X, threshold, n_permutations, tail, stat_fun, connectivity, n_jobs, seed, max_step, spatial_exclude, step_down_p, t_power, out_type, check_disjoint, buffer_size, verbose)
1322 n_jobs=n_jobs, seed=seed, max_step=max_step, exclude=exclude,
1323 step_down_p=step_down_p, t_power=t_power, out_type=out_type,
-> 1324 check_disjoint=check_disjoint, buffer_size=buffer_size)
1325
1326
</export/research/analysis/human/jhouck/shared/tools/mne-python/mne/externals/decorator.py:decorator-gen-185> in permutation_cluster_1samp_test(X, threshold, n_permutations, tail, stat_fun, connectivity, verbose, n_jobs, seed, max_step, exclude, step_down_p, t_power, out_type, check_disjoint, buffer_size)
/export/research/analysis/human/jhouck/shared/tools/mne-python/mne/utils/_logging.py in wrapper(*args, **kwargs)
87 with use_log_level(verbose_level):
88 return function(*args, **kwargs)
---> 89 return function(*args, **kwargs)
90 return FunctionMaker.create(
91 function, 'return decfunc(%(signature)s)',
/export/research/analysis/human/jhouck/shared/tools/mne-python/mne/stats/cluster_level.py in permutation_cluster_1samp_test(X, threshold, n_permutations, tail, stat_fun, connectivity, verbose, n_jobs, seed, max_step, exclude, step_down_p, t_power, out_type, check_disjoint, buffer_size)
1202 max_step=max_step, exclude=exclude, step_down_p=step_down_p,
1203 t_power=t_power, out_type=out_type, check_disjoint=check_disjoint,
-> 1204 buffer_size=buffer_size)
1205
1206
/export/research/analysis/human/jhouck/shared/tools/mne-python/mne/stats/cluster_level.py in _permutation_cluster_test(***failed resolving arguments***)
883 t_power, order, sample_shape, buffer_size,
884 progress_bar.subset(idx))
--> 885 for idx, order in split_list(orders, n_jobs, idx=True))
886 # include original (true) ordering
887 if tail == -1: # up tail
/export/research/analysis/human/jhouck/shared/tools/anaconda/envs/mnedev/lib/python3.6/site-packages/joblib/parallel.py in __call__(self, iterable)
928
929 with self._backend.retrieval_context():
--> 930 self.retrieve()
931 # Make sure that we get a last message telling us we are done
932 elapsed_time = time.time() - self._start_time
/export/research/analysis/human/jhouck/shared/tools/anaconda/envs/mnedev/lib/python3.6/site-packages/joblib/parallel.py in retrieve(self)
831 try:
832 if getattr(self._backend, 'supports_timeout', False):
--> 833 self._output.extend(job.get(timeout=self.timeout))
834 else:
835 self._output.extend(job.get())
/export/research/analysis/human/jhouck/shared/tools/anaconda/envs/mnedev/lib/python3.6/site-packages/joblib/_parallel_backends.py in wrap_future_result(future, timeout)
519 AsyncResults.get from multiprocessing."""
520 try:
--> 521 return future.result(timeout=timeout)
522 except LokyTimeoutError:
523 raise TimeoutError()
/export/research/analysis/human/jhouck/shared/tools/anaconda/envs/mnedev/lib/python3.6/concurrent/futures/_base.py in result(self, timeout)
430 raise CancelledError()
431 elif self._state == FINISHED:
--> 432 return self.__get_result()
433 else:
434 raise TimeoutError()
/export/research/analysis/human/jhouck/shared/tools/anaconda/envs/mnedev/lib/python3.6/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
RuntimeError: The task could not be sent to the workers as it is too large for `send_bytes`.
--- END ISSUE ---
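For context, here is a minimal sketch of the memmap-caching workaround the report alludes to with `mne.set_cache_dir('/tmp')`. The cache directory, threshold value, and `buffer_size` are illustrative assumptions, not a verified fix for the `send_bytes` overflow; `set_memmap_min_size` is the configuration helper referenced in the `parallel_func` docstring below.

```python
import mne

# Assumed workaround sketch: let joblib hand large arrays to workers as
# memory-mapped files instead of pickling them through a multiprocessing pipe,
# which is where the ~2 GiB `send_bytes` limit in the traceback is hit.
mne.set_cache_dir('/dev/shm')     # joblib temp folder (MNE_CACHE_DIR)
mne.set_memmap_min_size('1M')     # memmap arrays larger than ~1 MB (MNE_MEMMAP_MIN_SIZE)

# The cluster test from the report would then be re-run unchanged, optionally
# with a smaller buffer_size to shrink each dispatched task, e.g.:
#   spatio_temporal_cluster_1samp_test(X1, connectivity=connectivity,
#                                      n_permutations=256, n_jobs=8,
#                                      threshold=t_threshold, buffer_size=1000,
#                                      check_disjoint=True)
```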
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mne/parallel.py`
Content:
```
1 """Parallel util function."""
2
3 # Author: Alexandre Gramfort <[email protected]>
4 #
5 # License: Simplified BSD
6
7 import logging
8 import os
9
10 from . import get_config
11 from .utils import logger, verbose, warn, ProgressBar
12 from .fixes import _get_args
13
14 if 'MNE_FORCE_SERIAL' in os.environ:
15 _force_serial = True
16 else:
17 _force_serial = None
18
19
20 @verbose
21 def parallel_func(func, n_jobs, max_nbytes='auto', pre_dispatch='n_jobs',
22 total=None, prefer=None, verbose=None):
23 """Return parallel instance with delayed function.
24
25 Util function to use joblib only if available
26
27 Parameters
28 ----------
29 func: callable
30 A function
31 n_jobs: int
32 Number of jobs to run in parallel
33 max_nbytes : int, str, or None
34 Threshold on the minimum size of arrays passed to the workers that
35 triggers automated memory mapping. Can be an int in Bytes,
36 or a human-readable string, e.g., '1M' for 1 megabyte.
37 Use None to disable memmaping of large arrays. Use 'auto' to
38 use the value set using mne.set_memmap_min_size.
39 pre_dispatch : int, or string, optional
40 See :class:`joblib.Parallel`.
41 total : int | None
42 If int, use a progress bar to display the progress of dispatched
43 jobs. This should only be used when directly iterating, not when
44 using ``split_list`` or :func:`np.array_split`.
45 If None (default), do not add a progress bar.
46 prefer : str | None
47 If str, can be "processes" or "threads". See :class:`joblib.Parallel`.
48 Ignored if the joblib version is too old to support this.
49
50 .. versionadded:: 0.18
51 %(verbose)s INFO or DEBUG
52 will print parallel status, others will not.
53
54 Returns
55 -------
56 parallel: instance of joblib.Parallel or list
57 The parallel object
58 my_func: callable
59 func if not parallel or delayed(func)
60 n_jobs: int
61 Number of jobs >= 0
62 """
63 should_print = (logger.level <= logging.INFO)
64 # for a single job, we don't need joblib
65 if n_jobs != 1:
66 try:
67 from joblib import Parallel, delayed
68 except ImportError:
69 try:
70 from sklearn.externals.joblib import Parallel, delayed
71 except ImportError:
72 warn('joblib not installed. Cannot run in parallel.')
73 n_jobs = 1
74 if n_jobs == 1:
75 n_jobs = 1
76 my_func = func
77 parallel = list
78 else:
79 # check if joblib is recent enough to support memmaping
80 p_args = _get_args(Parallel.__init__)
81 joblib_mmap = ('temp_folder' in p_args and 'max_nbytes' in p_args)
82
83 cache_dir = get_config('MNE_CACHE_DIR', None)
84 if isinstance(max_nbytes, str) and max_nbytes == 'auto':
85 max_nbytes = get_config('MNE_MEMMAP_MIN_SIZE', None)
86
87 if max_nbytes is not None:
88 if not joblib_mmap and cache_dir is not None:
89 warn('"MNE_CACHE_DIR" is set but a newer version of joblib is '
90 'needed to use the memmapping pool.')
91 if joblib_mmap and cache_dir is None:
92 logger.info(
93 'joblib supports memapping pool but "MNE_CACHE_DIR" '
94 'is not set in MNE-Python config. To enable it, use, '
95 'e.g., mne.set_cache_dir(\'/tmp/shm\'). This will '
96 'store temporary files under /dev/shm and can result '
97 'in large memory savings.')
98
99 # create keyword arguments for Parallel
100 kwargs = {'verbose': 5 if should_print and total is None else 0}
101 kwargs['pre_dispatch'] = pre_dispatch
102 if 'prefer' in p_args:
103 kwargs['prefer'] = prefer
104
105 if joblib_mmap:
106 if cache_dir is None:
107 max_nbytes = None # disable memmaping
108 kwargs['temp_folder'] = cache_dir
109 kwargs['max_nbytes'] = max_nbytes
110
111 n_jobs = check_n_jobs(n_jobs)
112 parallel = Parallel(n_jobs, **kwargs)
113 my_func = delayed(func)
114
115 if total is not None:
116 def parallel_progress(op_iter):
117 pb = ProgressBar(total, verbose_bool=should_print)
118 return parallel(pb(op_iter))
119 parallel_out = parallel_progress
120 else:
121 parallel_out = parallel
122 return parallel_out, my_func, n_jobs
123
124
125 def check_n_jobs(n_jobs, allow_cuda=False):
126 """Check n_jobs in particular for negative values.
127
128 Parameters
129 ----------
130 n_jobs : int
131 The number of jobs.
132 allow_cuda : bool
133 Allow n_jobs to be 'cuda'. Default: False.
134
135 Returns
136 -------
137 n_jobs : int
138 The checked number of jobs. Always positive (or 'cuda' if
139 applicable.)
140 """
141 if not isinstance(n_jobs, int):
142 if not allow_cuda:
143 raise ValueError('n_jobs must be an integer')
144 elif not isinstance(n_jobs, str) or n_jobs != 'cuda':
145 raise ValueError('n_jobs must be an integer, or "cuda"')
146 # else, we have n_jobs='cuda' and this is okay, so do nothing
147 elif _force_serial:
148 n_jobs = 1
149 logger.info('... MNE_FORCE_SERIAL set. Processing in forced '
150 'serial mode.')
151 elif n_jobs <= 0:
152 try:
153 import multiprocessing
154 n_cores = multiprocessing.cpu_count()
155 n_jobs = min(n_cores + n_jobs + 1, n_cores)
156 if n_jobs <= 0:
157 raise ValueError('If n_jobs has a negative value it must not '
158 'be less than the number of CPUs present. '
159 'You\'ve got %s CPUs' % n_cores)
160 except ImportError:
161 # only warn if they tried to use something other than 1 job
162 if n_jobs != 1:
163 warn('multiprocessing not installed. Cannot run in parallel.')
164 n_jobs = 1
165
166 return n_jobs
167
```
--- END FILES ---
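For reference, a small usage sketch of `parallel_func` as documented above. The worker function and its arguments are made up, but the three-value return and the `parallel(p_fun(...) for ...)` call pattern follow the docstring and the traceback in the issue.

```python
from mne.parallel import parallel_func

def _square(x):
    return x * x

# parallel_func returns (parallel, my_func, n_jobs) per the docstring above.
parallel, p_fun, n_jobs = parallel_func(_square, n_jobs=2, max_nbytes='1M')
out = parallel(p_fun(x) for x in range(10))  # [0, 1, 4, ..., 81]
```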
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mne/parallel.py b/mne/parallel.py
--- a/mne/parallel.py
+++ b/mne/parallel.py
@@ -109,7 +109,7 @@
kwargs['max_nbytes'] = max_nbytes
n_jobs = check_n_jobs(n_jobs)
- parallel = Parallel(n_jobs, **kwargs)
+ parallel = _check_wrapper(Parallel(n_jobs, **kwargs))
my_func = delayed(func)
if total is not None:
@@ -122,6 +122,22 @@
return parallel_out, my_func, n_jobs
+def _check_wrapper(fun):
+ def run(*args, **kwargs):
+ try:
+ return fun(*args, **kwargs)
+ except RuntimeError as err:
+ msg = str(err.args[0]) if err.args else ''
+ if msg.startswith('The task could not be sent to the workers'):
+ raise RuntimeError(
+ msg + ' Consider using joblib memmap caching to get '
+ 'around this problem. See mne.set_mmap_min_size, '
+ 'mne.set_cache_dir, and buffer_size parallel function '
+ 'arguments (if applicable).')
+ raise
+ return run
+
+
def check_n_jobs(n_jobs, allow_cuda=False):
"""Check n_jobs in particular for negative values.
| {"golden_diff": "diff --git a/mne/parallel.py b/mne/parallel.py\n--- a/mne/parallel.py\n+++ b/mne/parallel.py\n@@ -109,7 +109,7 @@\n kwargs['max_nbytes'] = max_nbytes\n \n n_jobs = check_n_jobs(n_jobs)\n- parallel = Parallel(n_jobs, **kwargs)\n+ parallel = _check_wrapper(Parallel(n_jobs, **kwargs))\n my_func = delayed(func)\n \n if total is not None:\n@@ -122,6 +122,22 @@\n return parallel_out, my_func, n_jobs\n \n \n+def _check_wrapper(fun):\n+ def run(*args, **kwargs):\n+ try:\n+ return fun(*args, **kwargs)\n+ except RuntimeError as err:\n+ msg = str(err.args[0]) if err.args else ''\n+ if msg.startswith('The task could not be sent to the workers'):\n+ raise RuntimeError(\n+ msg + ' Consider using joblib memmap caching to get '\n+ 'around this problem. See mne.set_mmap_min_size, '\n+ 'mne.set_cache_dir, and buffer_size parallel function '\n+ 'arguments (if applicable).')\n+ raise\n+ return run\n+\n+\n def check_n_jobs(n_jobs, allow_cuda=False):\n \"\"\"Check n_jobs in particular for negative values.\n", "issue": "BUG: joblib error in spatio_temporal_cluster_1samp_test\nRunning `spatio_temporal_cluster_1samp_test` with `n_jobs=8` using the default stat_fun `ttest_1samp_no_p` from the current master, I'm getting the error \"The task could not be sent to the workers as it is too large for `send_bytes`.\" \r\n\r\nThe matrix is not that all that large, 33x545x20484, and the function runs fine if I remove subjects to reduce it to 23x545x20484 or if I set `n_jobs=1`. I continue to get the error after doing `mne.set_cache_dir('/tmp')`. The error message looks exactly like one reported in an old joblib issue: https://github.com/joblib/joblib/issues/344 \r\n\r\nOperator error is always a possibility, but nothing obvious stands out to me (but then again I am the operator). 
Complete command line and error message is below:\r\n\r\n``T_obs, clusters, cluster_p_values, H0 = clu = \\\r\nspatio_temporal_cluster_1samp_test(X1, connectivity=connectivity, n_permutations=256, n_jobs=8, threshold=t_threshold, check_disjoint=True, verbose='DEBUG')`` \r\n\r\nTraceback (most recent call last):\r\n File \"/export/research/analysis/human/jhouck/shared/tools/anaconda/envs/mnedev/lib/python3.6/site-packages/joblib/externals/loky/backend/queues.py\", line 156, in _feed\r\n send_bytes(obj_)\r\n File \"/export/research/analysis/human/jhouck/shared/tools/anaconda/envs/mnedev/lib/python3.6/multiprocessing/connection.py\", line 200, in send_bytes\r\n self._send_bytes(m[offset:offset + size])\r\n File \"/export/research/analysis/human/jhouck/shared/tools/anaconda/envs/mnedev/lib/python3.6/multiprocessing/connection.py\", line 393, in _send_bytes\r\n header = struct.pack(\"!i\", n)\r\nstruct.error: 'i' format requires -2147483648 <= number <= 2147483647\r\n\"\"\"\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nRuntimeError Traceback (most recent call last)\r\n<ipython-input-31-b9583e70ef55> in <module>\r\n 2 spatio_temporal_cluster_1samp_test(X1a, connectivity=connectivity, n_permutations=1, n_jobs=8,\r\n 3 threshold=t_threshold, buffer_size=2000, check_disjoint=True,\r\n----> 4 verbose='DEBUG') \r\n\r\n</export/research/analysis/human/jhouck/shared/tools/mne-python/mne/externals/decorator.py:decorator-gen-186> in spatio_temporal_cluster_1samp_test(X, threshold, n_permutations, tail, stat_fun, connectivity, n_jobs, seed, max_step, spatial_exclude, step_down_p, t_power, out_type, check_disjoint, buffer_size, verbose)\r\n\r\n/export/research/analysis/human/jhouck/shared/tools/mne-python/mne/utils/_logging.py in wrapper(*args, **kwargs)\r\n 86 # set it back if we get an exception\r\n 87 with use_log_level(verbose_level):\r\n---> 88 return function(*args, **kwargs)\r\n 89 return function(*args, **kwargs)\r\n 90 return FunctionMaker.create(\r\n\r\n/export/research/analysis/human/jhouck/shared/tools/mne-python/mne/stats/cluster_level.py in spatio_temporal_cluster_1samp_test(X, threshold, n_permutations, tail, stat_fun, connectivity, n_jobs, seed, max_step, spatial_exclude, step_down_p, t_power, out_type, check_disjoint, buffer_size, verbose)\r\n 1322 n_jobs=n_jobs, seed=seed, max_step=max_step, exclude=exclude,\r\n 1323 step_down_p=step_down_p, t_power=t_power, out_type=out_type,\r\n-> 1324 check_disjoint=check_disjoint, buffer_size=buffer_size)\r\n 1325 \r\n 1326 \r\n\r\n</export/research/analysis/human/jhouck/shared/tools/mne-python/mne/externals/decorator.py:decorator-gen-185> in permutation_cluster_1samp_test(X, threshold, n_permutations, tail, stat_fun, connectivity, verbose, n_jobs, seed, max_step, exclude, step_down_p, t_power, out_type, check_disjoint, buffer_size)\r\n\r\n/export/research/analysis/human/jhouck/shared/tools/mne-python/mne/utils/_logging.py in wrapper(*args, **kwargs)\r\n 87 with use_log_level(verbose_level):\r\n 88 return function(*args, **kwargs)\r\n---> 89 return function(*args, **kwargs)\r\n 90 return FunctionMaker.create(\r\n 91 function, 'return decfunc(%(signature)s)',\r\n\r\n/export/research/analysis/human/jhouck/shared/tools/mne-python/mne/stats/cluster_level.py in permutation_cluster_1samp_test(X, threshold, n_permutations, tail, stat_fun, connectivity, verbose, n_jobs, seed, max_step, exclude, step_down_p, t_power, out_type, check_disjoint, buffer_size)\r\n 1202 max_step=max_step, exclude=exclude, 
step_down_p=step_down_p,\r\n 1203 t_power=t_power, out_type=out_type, check_disjoint=check_disjoint,\r\n-> 1204 buffer_size=buffer_size)\r\n 1205 \r\n 1206 \r\n\r\n/export/research/analysis/human/jhouck/shared/tools/mne-python/mne/stats/cluster_level.py in _permutation_cluster_test(***failed resolving arguments***)\r\n 883 t_power, order, sample_shape, buffer_size,\r\n 884 progress_bar.subset(idx))\r\n--> 885 for idx, order in split_list(orders, n_jobs, idx=True))\r\n 886 # include original (true) ordering\r\n 887 if tail == -1: # up tail\r\n\r\n/export/research/analysis/human/jhouck/shared/tools/anaconda/envs/mnedev/lib/python3.6/site-packages/joblib/parallel.py in __call__(self, iterable)\r\n 928 \r\n 929 with self._backend.retrieval_context():\r\n--> 930 self.retrieve()\r\n 931 # Make sure that we get a last message telling us we are done\r\n 932 elapsed_time = time.time() - self._start_time\r\n\r\n/export/research/analysis/human/jhouck/shared/tools/anaconda/envs/mnedev/lib/python3.6/site-packages/joblib/parallel.py in retrieve(self)\r\n 831 try:\r\n 832 if getattr(self._backend, 'supports_timeout', False):\r\n--> 833 self._output.extend(job.get(timeout=self.timeout))\r\n 834 else:\r\n 835 self._output.extend(job.get())\r\n\r\n/export/research/analysis/human/jhouck/shared/tools/anaconda/envs/mnedev/lib/python3.6/site-packages/joblib/_parallel_backends.py in wrap_future_result(future, timeout)\r\n 519 AsyncResults.get from multiprocessing.\"\"\"\r\n 520 try:\r\n--> 521 return future.result(timeout=timeout)\r\n 522 except LokyTimeoutError:\r\n 523 raise TimeoutError()\r\n\r\n/export/research/analysis/human/jhouck/shared/tools/anaconda/envs/mnedev/lib/python3.6/concurrent/futures/_base.py in result(self, timeout)\r\n 430 raise CancelledError()\r\n 431 elif self._state == FINISHED:\r\n--> 432 return self.__get_result()\r\n 433 else:\r\n 434 raise TimeoutError()\r\n\r\n/export/research/analysis/human/jhouck/shared/tools/anaconda/envs/mnedev/lib/python3.6/concurrent/futures/_base.py in __get_result(self)\r\n 382 def __get_result(self):\r\n 383 if self._exception:\r\n--> 384 raise self._exception\r\n 385 else:\r\n 386 return self._result\r\n\r\nRuntimeError: The task could not be sent to the workers as it is too large for `send_bytes`.\n", "before_files": [{"content": "\"\"\"Parallel util function.\"\"\"\n\n# Author: Alexandre Gramfort <[email protected]>\n#\n# License: Simplified BSD\n\nimport logging\nimport os\n\nfrom . import get_config\nfrom .utils import logger, verbose, warn, ProgressBar\nfrom .fixes import _get_args\n\nif 'MNE_FORCE_SERIAL' in os.environ:\n _force_serial = True\nelse:\n _force_serial = None\n\n\n@verbose\ndef parallel_func(func, n_jobs, max_nbytes='auto', pre_dispatch='n_jobs',\n total=None, prefer=None, verbose=None):\n \"\"\"Return parallel instance with delayed function.\n\n Util function to use joblib only if available\n\n Parameters\n ----------\n func: callable\n A function\n n_jobs: int\n Number of jobs to run in parallel\n max_nbytes : int, str, or None\n Threshold on the minimum size of arrays passed to the workers that\n triggers automated memory mapping. Can be an int in Bytes,\n or a human-readable string, e.g., '1M' for 1 megabyte.\n Use None to disable memmaping of large arrays. Use 'auto' to\n use the value set using mne.set_memmap_min_size.\n pre_dispatch : int, or string, optional\n See :class:`joblib.Parallel`.\n total : int | None\n If int, use a progress bar to display the progress of dispatched\n jobs. 
This should only be used when directly iterating, not when\n using ``split_list`` or :func:`np.array_split`.\n If None (default), do not add a progress bar.\n prefer : str | None\n If str, can be \"processes\" or \"threads\". See :class:`joblib.Parallel`.\n Ignored if the joblib version is too old to support this.\n\n .. versionadded:: 0.18\n %(verbose)s INFO or DEBUG\n will print parallel status, others will not.\n\n Returns\n -------\n parallel: instance of joblib.Parallel or list\n The parallel object\n my_func: callable\n func if not parallel or delayed(func)\n n_jobs: int\n Number of jobs >= 0\n \"\"\"\n should_print = (logger.level <= logging.INFO)\n # for a single job, we don't need joblib\n if n_jobs != 1:\n try:\n from joblib import Parallel, delayed\n except ImportError:\n try:\n from sklearn.externals.joblib import Parallel, delayed\n except ImportError:\n warn('joblib not installed. Cannot run in parallel.')\n n_jobs = 1\n if n_jobs == 1:\n n_jobs = 1\n my_func = func\n parallel = list\n else:\n # check if joblib is recent enough to support memmaping\n p_args = _get_args(Parallel.__init__)\n joblib_mmap = ('temp_folder' in p_args and 'max_nbytes' in p_args)\n\n cache_dir = get_config('MNE_CACHE_DIR', None)\n if isinstance(max_nbytes, str) and max_nbytes == 'auto':\n max_nbytes = get_config('MNE_MEMMAP_MIN_SIZE', None)\n\n if max_nbytes is not None:\n if not joblib_mmap and cache_dir is not None:\n warn('\"MNE_CACHE_DIR\" is set but a newer version of joblib is '\n 'needed to use the memmapping pool.')\n if joblib_mmap and cache_dir is None:\n logger.info(\n 'joblib supports memapping pool but \"MNE_CACHE_DIR\" '\n 'is not set in MNE-Python config. To enable it, use, '\n 'e.g., mne.set_cache_dir(\\'/tmp/shm\\'). This will '\n 'store temporary files under /dev/shm and can result '\n 'in large memory savings.')\n\n # create keyword arguments for Parallel\n kwargs = {'verbose': 5 if should_print and total is None else 0}\n kwargs['pre_dispatch'] = pre_dispatch\n if 'prefer' in p_args:\n kwargs['prefer'] = prefer\n\n if joblib_mmap:\n if cache_dir is None:\n max_nbytes = None # disable memmaping\n kwargs['temp_folder'] = cache_dir\n kwargs['max_nbytes'] = max_nbytes\n\n n_jobs = check_n_jobs(n_jobs)\n parallel = Parallel(n_jobs, **kwargs)\n my_func = delayed(func)\n\n if total is not None:\n def parallel_progress(op_iter):\n pb = ProgressBar(total, verbose_bool=should_print)\n return parallel(pb(op_iter))\n parallel_out = parallel_progress\n else:\n parallel_out = parallel\n return parallel_out, my_func, n_jobs\n\n\ndef check_n_jobs(n_jobs, allow_cuda=False):\n \"\"\"Check n_jobs in particular for negative values.\n\n Parameters\n ----------\n n_jobs : int\n The number of jobs.\n allow_cuda : bool\n Allow n_jobs to be 'cuda'. Default: False.\n\n Returns\n -------\n n_jobs : int\n The checked number of jobs. Always positive (or 'cuda' if\n applicable.)\n \"\"\"\n if not isinstance(n_jobs, int):\n if not allow_cuda:\n raise ValueError('n_jobs must be an integer')\n elif not isinstance(n_jobs, str) or n_jobs != 'cuda':\n raise ValueError('n_jobs must be an integer, or \"cuda\"')\n # else, we have n_jobs='cuda' and this is okay, so do nothing\n elif _force_serial:\n n_jobs = 1\n logger.info('... MNE_FORCE_SERIAL set. 
Processing in forced '\n 'serial mode.')\n elif n_jobs <= 0:\n try:\n import multiprocessing\n n_cores = multiprocessing.cpu_count()\n n_jobs = min(n_cores + n_jobs + 1, n_cores)\n if n_jobs <= 0:\n raise ValueError('If n_jobs has a negative value it must not '\n 'be less than the number of CPUs present. '\n 'You\\'ve got %s CPUs' % n_cores)\n except ImportError:\n # only warn if they tried to use something other than 1 job\n if n_jobs != 1:\n warn('multiprocessing not installed. Cannot run in parallel.')\n n_jobs = 1\n\n return n_jobs\n", "path": "mne/parallel.py"}], "after_files": [{"content": "\"\"\"Parallel util function.\"\"\"\n\n# Author: Alexandre Gramfort <[email protected]>\n#\n# License: Simplified BSD\n\nimport logging\nimport os\n\nfrom . import get_config\nfrom .utils import logger, verbose, warn, ProgressBar\nfrom .fixes import _get_args\n\nif 'MNE_FORCE_SERIAL' in os.environ:\n _force_serial = True\nelse:\n _force_serial = None\n\n\n@verbose\ndef parallel_func(func, n_jobs, max_nbytes='auto', pre_dispatch='n_jobs',\n total=None, prefer=None, verbose=None):\n \"\"\"Return parallel instance with delayed function.\n\n Util function to use joblib only if available\n\n Parameters\n ----------\n func: callable\n A function\n n_jobs: int\n Number of jobs to run in parallel\n max_nbytes : int, str, or None\n Threshold on the minimum size of arrays passed to the workers that\n triggers automated memory mapping. Can be an int in Bytes,\n or a human-readable string, e.g., '1M' for 1 megabyte.\n Use None to disable memmaping of large arrays. Use 'auto' to\n use the value set using mne.set_memmap_min_size.\n pre_dispatch : int, or string, optional\n See :class:`joblib.Parallel`.\n total : int | None\n If int, use a progress bar to display the progress of dispatched\n jobs. This should only be used when directly iterating, not when\n using ``split_list`` or :func:`np.array_split`.\n If None (default), do not add a progress bar.\n prefer : str | None\n If str, can be \"processes\" or \"threads\". See :class:`joblib.Parallel`.\n Ignored if the joblib version is too old to support this.\n\n .. versionadded:: 0.18\n %(verbose)s INFO or DEBUG\n will print parallel status, others will not.\n\n Returns\n -------\n parallel: instance of joblib.Parallel or list\n The parallel object\n my_func: callable\n func if not parallel or delayed(func)\n n_jobs: int\n Number of jobs >= 0\n \"\"\"\n should_print = (logger.level <= logging.INFO)\n # for a single job, we don't need joblib\n if n_jobs != 1:\n try:\n from joblib import Parallel, delayed\n except ImportError:\n try:\n from sklearn.externals.joblib import Parallel, delayed\n except ImportError:\n warn('joblib not installed. Cannot run in parallel.')\n n_jobs = 1\n if n_jobs == 1:\n n_jobs = 1\n my_func = func\n parallel = list\n else:\n # check if joblib is recent enough to support memmaping\n p_args = _get_args(Parallel.__init__)\n joblib_mmap = ('temp_folder' in p_args and 'max_nbytes' in p_args)\n\n cache_dir = get_config('MNE_CACHE_DIR', None)\n if isinstance(max_nbytes, str) and max_nbytes == 'auto':\n max_nbytes = get_config('MNE_MEMMAP_MIN_SIZE', None)\n\n if max_nbytes is not None:\n if not joblib_mmap and cache_dir is not None:\n warn('\"MNE_CACHE_DIR\" is set but a newer version of joblib is '\n 'needed to use the memmapping pool.')\n if joblib_mmap and cache_dir is None:\n logger.info(\n 'joblib supports memapping pool but \"MNE_CACHE_DIR\" '\n 'is not set in MNE-Python config. 
To enable it, use, '\n 'e.g., mne.set_cache_dir(\\'/tmp/shm\\'). This will '\n 'store temporary files under /dev/shm and can result '\n 'in large memory savings.')\n\n # create keyword arguments for Parallel\n kwargs = {'verbose': 5 if should_print and total is None else 0}\n kwargs['pre_dispatch'] = pre_dispatch\n if 'prefer' in p_args:\n kwargs['prefer'] = prefer\n\n if joblib_mmap:\n if cache_dir is None:\n max_nbytes = None # disable memmaping\n kwargs['temp_folder'] = cache_dir\n kwargs['max_nbytes'] = max_nbytes\n\n n_jobs = check_n_jobs(n_jobs)\n parallel = _check_wrapper(Parallel(n_jobs, **kwargs))\n my_func = delayed(func)\n\n if total is not None:\n def parallel_progress(op_iter):\n pb = ProgressBar(total, verbose_bool=should_print)\n return parallel(pb(op_iter))\n parallel_out = parallel_progress\n else:\n parallel_out = parallel\n return parallel_out, my_func, n_jobs\n\n\ndef _check_wrapper(fun):\n def run(*args, **kwargs):\n try:\n return fun(*args, **kwargs)\n except RuntimeError as err:\n msg = str(err.args[0]) if err.args else ''\n if msg.startswith('The task could not be sent to the workers'):\n raise RuntimeError(\n msg + ' Consider using joblib memmap caching to get '\n 'around this problem. See mne.set_mmap_min_size, '\n 'mne.set_cache_dir, and buffer_size parallel function '\n 'arguments (if applicable).')\n raise\n return run\n\n\ndef check_n_jobs(n_jobs, allow_cuda=False):\n \"\"\"Check n_jobs in particular for negative values.\n\n Parameters\n ----------\n n_jobs : int\n The number of jobs.\n allow_cuda : bool\n Allow n_jobs to be 'cuda'. Default: False.\n\n Returns\n -------\n n_jobs : int\n The checked number of jobs. Always positive (or 'cuda' if\n applicable.)\n \"\"\"\n if not isinstance(n_jobs, int):\n if not allow_cuda:\n raise ValueError('n_jobs must be an integer')\n elif not isinstance(n_jobs, str) or n_jobs != 'cuda':\n raise ValueError('n_jobs must be an integer, or \"cuda\"')\n # else, we have n_jobs='cuda' and this is okay, so do nothing\n elif _force_serial:\n n_jobs = 1\n logger.info('... MNE_FORCE_SERIAL set. Processing in forced '\n 'serial mode.')\n elif n_jobs <= 0:\n try:\n import multiprocessing\n n_cores = multiprocessing.cpu_count()\n n_jobs = min(n_cores + n_jobs + 1, n_cores)\n if n_jobs <= 0:\n raise ValueError('If n_jobs has a negative value it must not '\n 'be less than the number of CPUs present. '\n 'You\\'ve got %s CPUs' % n_cores)\n except ImportError:\n # only warn if they tried to use something other than 1 job\n if n_jobs != 1:\n warn('multiprocessing not installed. Cannot run in parallel.')\n n_jobs = 1\n\n return n_jobs\n", "path": "mne/parallel.py"}]} | 3,953 | 299 |
gh_patches_debug_38474 | rasdani/github-patches | git_diff | kubeflow__pipelines-2250 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
build_python_component does not work in KF 6.2
compiler.build_python_component uses the default namespace for pod creation. As of KF 6.2 the notebooks run under a user namespace (e.g. kubeflow-<username>) with an editor service account. This service account does not have access to the default kubeflow namespace by design.
The SDK needs to be updated to create the build pod in the user's namespace with the user's service account instead.
sample error:
`HTTP response body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"pods is forbidden: User \"system:serviceaccount:kubeflow-chavoshi:default-editor\" cannot create resource \"pods\" in API group \"\" in the namespace \"kubeflow\"","reason":"Forbidden","details":{"kind":"pods"},"code":403}
`
screen shot:
https://screenshot.googleplex.com/QotFfe0hgnW.png
--- END ISSUE ---
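For context, a rough sketch of the failing call pattern and the interim workaround of naming the notebook's own profile namespace explicitly. It assumes `build_python_component` is reached via `kfp.compiler` as the report implies; the component function, image path, and bucket are placeholders, and the namespace comes from the 403 message above.

```python
from kfp import compiler

def add(a: float, b: float) -> float:
    return a + b

# From an in-cluster profile notebook this fails as reported above: the kaniko
# build pod is created in the hard-coded "kubeflow" namespace, which the
# profile's default-editor service account cannot touch.
#   compiler.build_python_component(add, target_image='gcr.io/my-proj/add:v1',
#                                   staging_gcs_path='gs://my-bucket/build')

# Interim workaround sketch: pass the user's profile namespace explicitly.
add_op = compiler.build_python_component(
    add,
    target_image='gcr.io/my-proj/add:v1',      # placeholder image
    staging_gcs_path='gs://my-bucket/build',   # placeholder staging bucket
    namespace='kubeflow-chavoshi')             # the notebook's profile namespace
```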
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/kfp/containers/_component_builder.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import inspect
17 import re
18 import sys
19 import tempfile
20 import logging
21 import shutil
22 from collections import OrderedDict
23 from pathlib import Path
24 from typing import Callable
25 from ..components._components import _create_task_factory_from_component_spec
26 from ..components._python_op import _func_to_component_spec
27 from ..components._yaml_utils import dump_yaml
28 from ._container_builder import ContainerBuilder
29
30 class VersionedDependency(object):
31 """ DependencyVersion specifies the versions """
32 def __init__(self, name, version=None, min_version=None, max_version=None):
33 """ if version is specified, no need for min_version or max_version;
34 if both are specified, version is adopted """
35 self._name = name
36 if version is not None:
37 self._min_version = version
38 self._max_version = version
39 else:
40 self._min_version = min_version
41 self._max_version = max_version
42
43 @property
44 def name(self):
45 return self._name
46
47 @property
48 def min_version(self):
49 return self._min_version
50
51 @min_version.setter
52 def min_version(self, min_version):
53 self._min_version = min_version
54
55 def has_min_version(self):
56 return self._min_version != None
57
58 @property
59 def max_version(self):
60 return self._max_version
61
62 @max_version.setter
63 def max_version(self, max_version):
64 self._max_version = max_version
65
66 def has_max_version(self):
67 return self._max_version != None
68
69 def has_versions(self):
70 return (self.has_min_version()) or (self.has_max_version())
71
72 class DependencyHelper(object):
73 """ DependencyHelper manages software dependency information """
74 def __init__(self):
75 self._PYTHON_PACKAGE = 'PYTHON_PACKAGE'
76 self._dependency = {self._PYTHON_PACKAGE:OrderedDict()}
77
78 @property
79 def python_packages(self):
80 return self._dependency[self._PYTHON_PACKAGE]
81
82 def add_python_package(self, dependency, override=True):
83 """ add_single_python_package adds a dependency for the python package
84
85 Args:
86 name: package name
87 version: it could be a specific version(1.10.0), or a range(>=1.0,<=2.0)
88 if not specified, the default is resolved automatically by the pip system.
89 override: whether to override the version if already existing in the dependency.
90 """
91 if dependency.name in self.python_packages and not override:
92 return
93 self.python_packages[dependency.name] = dependency
94
95 def generate_pip_requirements(self, target_file):
96 """ write the python packages to a requirement file
97 the generated file follows the order of which the packages are added """
98 with open(target_file, 'w') as f:
99 for name, version in self.python_packages.items():
100 version = self.python_packages[name]
101 version_str = ''
102 if version.has_min_version():
103 version_str += ' >= ' + version.min_version + ','
104 if version.has_max_version():
105 version_str += ' <= ' + version.max_version + ','
106 f.write(name + version_str.rstrip(',') + '\n')
107
108 def _dependency_to_requirements(dependency=[], filename='requirements.txt'):
109 """
110 Generates a requirement file based on the dependency
111 Args:
112 dependency (list): a list of VersionedDependency, which includes the package name and versions
113 filename (str): requirement file name, default as requirements.txt
114 """
115 dependency_helper = DependencyHelper()
116 for version in dependency:
117 dependency_helper.add_python_package(version)
118 dependency_helper.generate_pip_requirements(filename)
119
120 def _generate_dockerfile(filename, base_image, python_version, requirement_filename=None, add_files=None):
121 """
122 generates dockerfiles
123 Args:
124 filename (str): target file name for the dockerfile.
125 base_image (str): the base image name.
126 python_version (str): choose python2 or python3
127 requirement_filename (str): requirement file name
128 add_files (Dict[str, str]): Map containing the files thats should be added to the container. add_files maps the build context relative source paths to the container destination paths.
129 """
130 if python_version not in ['python2', 'python3']:
131 raise ValueError('python_version has to be either python2 or python3')
132 with open(filename, 'w') as f:
133 f.write('FROM ' + base_image + '\n')
134 if python_version == 'python3':
135 f.write('RUN apt-get update -y && apt-get install --no-install-recommends -y -q python3 python3-pip python3-setuptools\n')
136 else:
137 f.write('RUN apt-get update -y && apt-get install --no-install-recommends -y -q python python-pip python-setuptools\n')
138 if requirement_filename is not None:
139 f.write('ADD ' + requirement_filename + ' /ml/requirements.txt\n')
140 if python_version == 'python3':
141 f.write('RUN pip3 install -r /ml/requirements.txt\n')
142 else:
143 f.write('RUN pip install -r /ml/requirements.txt\n')
144
145 for src_path, dst_path in (add_files or {}).items():
146 f.write('ADD ' + src_path + ' ' + dst_path + '\n')
147
148
149 def _configure_logger(logger):
150 """ _configure_logger configures the logger such that the info level logs
151 go to the stdout and the error(or above) level logs go to the stderr.
152 It is important for the Jupyter notebook log rendering """
153 if hasattr(_configure_logger, 'configured'):
154 # Skip the logger configuration the second time this function
155 # is called to avoid multiple streamhandlers bound to the logger.
156 return
157 setattr(_configure_logger, 'configured', 'true')
158 logger.setLevel(logging.INFO)
159 info_handler = logging.StreamHandler(stream=sys.stdout)
160 info_handler.addFilter(lambda record: record.levelno <= logging.INFO)
161 info_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(message)s', datefmt='%Y-%m-%d %H:%M:%S'))
162 error_handler = logging.StreamHandler(sys.stderr)
163 error_handler.addFilter(lambda record: record.levelno > logging.INFO)
164 error_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(message)s', datefmt='%Y-%m-%d %H:%M:%S'))
165 logger.addHandler(info_handler)
166 logger.addHandler(error_handler)
167
168
169 def build_python_component(component_func, target_image, base_image=None, dependency=[], staging_gcs_path=None, timeout=600, namespace='kubeflow', target_component_file=None, python_version='python3'):
170 """ build_component automatically builds a container image for the component_func
171 based on the base_image and pushes to the target_image.
172
173 Args:
174 component_func (python function): The python function to build components upon
175 base_image (str): Docker image to use as a base image
176 target_image (str): Full URI to push the target image
177 staging_gcs_path (str): GCS blob that can store temporary build files
178 target_image (str): target image path
179 timeout (int): the timeout for the image build(in secs), default is 600 seconds
180 namespace (str): the namespace within which to run the kubernetes kaniko job, default is "kubeflow"
181 dependency (list): a list of VersionedDependency, which includes the package name and versions, default is empty
182 python_version (str): choose python2 or python3, default is python3
183 Raises:
184 ValueError: The function is not decorated with python_component decorator or the python_version is neither python2 nor python3
185 """
186
187 _configure_logger(logging.getLogger())
188
189 if component_func is None:
190 raise ValueError('component_func must not be None')
191 if target_image is None:
192 raise ValueError('target_image must not be None')
193
194 if python_version not in ['python2', 'python3']:
195 raise ValueError('python_version has to be either python2 or python3')
196
197 if staging_gcs_path is None:
198 raise ValueError('staging_gcs_path must not be None')
199
200 if base_image is None:
201 base_image = getattr(component_func, '_component_base_image', None)
202 if base_image is None:
203 from ..components._python_op import get_default_base_image
204 base_image = get_default_base_image()
205 if isinstance(base_image, Callable):
206 base_image = base_image()
207
208 logging.info('Build an image that is based on ' +
209 base_image +
210 ' and push the image to ' +
211 target_image)
212
213 component_spec = _func_to_component_spec(component_func, base_image=base_image)
214 command_line_args = component_spec.implementation.container.command
215
216 dash_c_index = command_line_args.index('-c')
217 program_code_index = dash_c_index + 1
218 program_code = command_line_args[program_code_index]
219 program_rel_path = 'ml/main.py'
220 program_container_path = '/' + program_rel_path
221
222 # Replacing the inline code with calling a local program
223 # Before: python3 -u -c 'import sys ...' --param1 ...
224 # After: python3 -u main.py --param1 ...
225 command_line_args[program_code_index] = program_container_path
226 command_line_args.pop(dash_c_index)
227
228 if python_version == 'python2':
229 import warnings
230 warnings.warn('Python2 is not longer supported')
231 # Replacing the python interpreter
232 python_interpreter_index = command_line_args.index('python3')
233 command_line_args[python_interpreter_index] = python_version
234
235 arc_docker_filename = 'Dockerfile'
236 arc_requirement_filename = 'requirements.txt'
237
238 with tempfile.TemporaryDirectory() as local_build_dir:
239 # Write the program code to a file in the context directory
240 local_python_filepath = os.path.join(local_build_dir, program_rel_path)
241 os.makedirs(os.path.dirname(local_python_filepath), exist_ok=True)
242 with open(local_python_filepath, 'w') as f:
243 f.write(program_code)
244
245 # Generate the python package requirements file in the context directory
246 local_requirement_filepath = os.path.join(local_build_dir, arc_requirement_filename)
247 _dependency_to_requirements(dependency, local_requirement_filepath)
248
249 # Generate Dockerfile in the context directory
250 local_docker_filepath = os.path.join(local_build_dir, arc_docker_filename)
251 _generate_dockerfile(local_docker_filepath, base_image, python_version, arc_requirement_filename, add_files={program_rel_path: program_container_path})
252
253 logging.info('Building and pushing container image.')
254 container_builder = ContainerBuilder(staging_gcs_path, target_image, namespace)
255 image_name_with_digest = container_builder.build(local_build_dir, arc_docker_filename, target_image, timeout)
256
257 component_spec.implementation.container.image = image_name_with_digest
258
259 # Optionally writing the component definition to a local file for sharing
260 target_component_file = target_component_file or getattr(component_func, '_component_target_component_file', None)
261 if target_component_file:
262 component_text = dump_yaml(component_spec.to_dict())
263 Path(target_component_file).write_text(component_text)
264
265 task_factory_function = _create_task_factory_from_component_spec(component_spec)
266 return task_factory_function
267
268
269 def build_docker_image(staging_gcs_path, target_image, dockerfile_path, timeout=600, namespace='kubeflow'):
270 """ build_docker_image automatically builds a container image based on the specification in the dockerfile and
271 pushes to the target_image.
272
273 Args:
274 staging_gcs_path (str): GCS blob that can store temporary build files
275 target_image (str): gcr path to push the final image
276 dockerfile_path (str): local path to the dockerfile
277 timeout (int): the timeout for the image build(in secs), default is 600 seconds
278 namespace (str): the namespace within which to run the kubernetes kaniko job, default is "kubeflow"
279 """
280 _configure_logger(logging.getLogger())
281
282 with tempfile.TemporaryDirectory() as local_build_dir:
283 dockerfile_rel_path = 'Dockerfile'
284 dst_dockerfile_path = os.path.join(local_build_dir, dockerfile_rel_path)
285 shutil.copyfile(dockerfile_path, dst_dockerfile_path)
286
287 container_builder = ContainerBuilder(staging_gcs_path, target_image, namespace=namespace)
288 image_name_with_digest = container_builder.build(local_build_dir, dockerfile_rel_path, target_image, timeout)
289
290 logging.info('Build image complete.')
291 return image_name_with_digest
292
```
--- END FILES ---
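As an aside, the dependency helpers shown above can be exercised on their own. This is a minimal sketch using only classes and methods visible in the file; the package names and version bounds are arbitrary.

```python
from kfp.containers._component_builder import VersionedDependency, DependencyHelper

deps = [VersionedDependency('pandas', min_version='0.24.0', max_version='0.25.0'),
        VersionedDependency('scikit-learn', version='0.21.3')]

helper = DependencyHelper()
for dep in deps:
    helper.add_python_package(dep)
helper.generate_pip_requirements('requirements.txt')
# requirements.txt now contains lines such as:
#   pandas >= 0.24.0, <= 0.25.0
#   scikit-learn >= 0.21.3, <= 0.21.3
```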
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/kfp/containers/_component_builder.py b/sdk/python/kfp/containers/_component_builder.py
--- a/sdk/python/kfp/containers/_component_builder.py
+++ b/sdk/python/kfp/containers/_component_builder.py
@@ -166,7 +166,7 @@
logger.addHandler(error_handler)
-def build_python_component(component_func, target_image, base_image=None, dependency=[], staging_gcs_path=None, timeout=600, namespace='kubeflow', target_component_file=None, python_version='python3'):
+def build_python_component(component_func, target_image, base_image=None, dependency=[], staging_gcs_path=None, timeout=600, namespace=None, target_component_file=None, python_version='python3'):
""" build_component automatically builds a container image for the component_func
based on the base_image and pushes to the target_image.
@@ -177,7 +177,8 @@
staging_gcs_path (str): GCS blob that can store temporary build files
target_image (str): target image path
timeout (int): the timeout for the image build(in secs), default is 600 seconds
- namespace (str): the namespace within which to run the kubernetes kaniko job, default is "kubeflow"
+ namespace (str): the namespace within which to run the kubernetes kaniko job. If the
+ job is running on GKE and value is None the underlying functions will use the default namespace from GKE. .
dependency (list): a list of VersionedDependency, which includes the package name and versions, default is empty
python_version (str): choose python2 or python3, default is python3
Raises:
@@ -266,7 +267,7 @@
return task_factory_function
-def build_docker_image(staging_gcs_path, target_image, dockerfile_path, timeout=600, namespace='kubeflow'):
+def build_docker_image(staging_gcs_path, target_image, dockerfile_path, timeout=600, namespace=None):
""" build_docker_image automatically builds a container image based on the specification in the dockerfile and
pushes to the target_image.
@@ -275,7 +276,8 @@
target_image (str): gcr path to push the final image
dockerfile_path (str): local path to the dockerfile
timeout (int): the timeout for the image build(in secs), default is 600 seconds
- namespace (str): the namespace within which to run the kubernetes kaniko job, default is "kubeflow"
+ namespace (str): the namespace within which to run the kubernetes kaniko job. Default is None. If the
+ job is running on GKE and value is None the underlying functions will use the default namespace from GKE.
"""
_configure_logger(logging.getLogger())
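A brief usage note on the patched default, hedged because it relies on the underlying `ContainerBuilder` resolving the namespace when `None` is passed: with this change, the earlier sketch no longer needs an explicit namespace from a profile notebook.

```python
# Post-patch sketch: namespace defaults to None and is resolved by the builder,
# so the call from the profile notebook omits it entirely.
add_op = compiler.build_python_component(
    add,
    target_image='gcr.io/my-proj/add:v1',     # placeholder image
    staging_gcs_path='gs://my-bucket/build')  # placeholder staging bucket
```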
| {"golden_diff": "diff --git a/sdk/python/kfp/containers/_component_builder.py b/sdk/python/kfp/containers/_component_builder.py\n--- a/sdk/python/kfp/containers/_component_builder.py\n+++ b/sdk/python/kfp/containers/_component_builder.py\n@@ -166,7 +166,7 @@\n logger.addHandler(error_handler)\n \n \n-def build_python_component(component_func, target_image, base_image=None, dependency=[], staging_gcs_path=None, timeout=600, namespace='kubeflow', target_component_file=None, python_version='python3'):\n+def build_python_component(component_func, target_image, base_image=None, dependency=[], staging_gcs_path=None, timeout=600, namespace=None, target_component_file=None, python_version='python3'):\n \"\"\" build_component automatically builds a container image for the component_func\n based on the base_image and pushes to the target_image.\n \n@@ -177,7 +177,8 @@\n staging_gcs_path (str): GCS blob that can store temporary build files\n target_image (str): target image path\n timeout (int): the timeout for the image build(in secs), default is 600 seconds\n- namespace (str): the namespace within which to run the kubernetes kaniko job, default is \"kubeflow\"\n+ namespace (str): the namespace within which to run the kubernetes kaniko job. If the\n+ job is running on GKE and value is None the underlying functions will use the default namespace from GKE. .\n dependency (list): a list of VersionedDependency, which includes the package name and versions, default is empty\n python_version (str): choose python2 or python3, default is python3\n Raises:\n@@ -266,7 +267,7 @@\n return task_factory_function\n \n \n-def build_docker_image(staging_gcs_path, target_image, dockerfile_path, timeout=600, namespace='kubeflow'):\n+def build_docker_image(staging_gcs_path, target_image, dockerfile_path, timeout=600, namespace=None):\n \"\"\" build_docker_image automatically builds a container image based on the specification in the dockerfile and\n pushes to the target_image.\n \n@@ -275,7 +276,8 @@\n target_image (str): gcr path to push the final image\n dockerfile_path (str): local path to the dockerfile\n timeout (int): the timeout for the image build(in secs), default is 600 seconds\n- namespace (str): the namespace within which to run the kubernetes kaniko job, default is \"kubeflow\"\n+ namespace (str): the namespace within which to run the kubernetes kaniko job. Default is None. If the\n+ job is running on GKE and value is None the underlying functions will use the default namespace from GKE. \n \"\"\"\n _configure_logger(logging.getLogger())\n", "issue": "build_python_component does not work in KF 6.2 \ncompiler.build_python_component uses the default name space for pod creation. As of KF 6.2 the notebooks are running under a user name space( ex kubeflow-<username>) with editor service account. This service account does not have access to default kubeflow name space by design. \r\n\r\nSDK needs to be updated to use the user service account instead. 
\r\n\r\nsample error: \r\n\r\n`HTTP response body: {\"kind\":\"Status\",\"apiVersion\":\"v1\",\"metadata\":{},\"status\":\"Failure\",\"message\":\"pods is forbidden: User \\\"system:serviceaccount:kubeflow-chavoshi:default-editor\\\" cannot create resource \\\"pods\\\" in API group \\\"\\\" in the namespace \\\"kubeflow\\\"\",\"reason\":\"Forbidden\",\"details\":{\"kind\":\"pods\"},\"code\":403}\r\n`\r\nscreen shot: \r\nhttps://screenshot.googleplex.com/QotFfe0hgnW.png \n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport inspect\nimport re\nimport sys\nimport tempfile\nimport logging\nimport shutil\nfrom collections import OrderedDict\nfrom pathlib import Path\nfrom typing import Callable\nfrom ..components._components import _create_task_factory_from_component_spec\nfrom ..components._python_op import _func_to_component_spec\nfrom ..components._yaml_utils import dump_yaml\nfrom ._container_builder import ContainerBuilder\n\nclass VersionedDependency(object):\n \"\"\" DependencyVersion specifies the versions \"\"\"\n def __init__(self, name, version=None, min_version=None, max_version=None):\n \"\"\" if version is specified, no need for min_version or max_version;\n if both are specified, version is adopted \"\"\"\n self._name = name\n if version is not None:\n self._min_version = version\n self._max_version = version\n else:\n self._min_version = min_version\n self._max_version = max_version\n\n @property\n def name(self):\n return self._name\n\n @property\n def min_version(self):\n return self._min_version\n\n @min_version.setter\n def min_version(self, min_version):\n self._min_version = min_version\n\n def has_min_version(self):\n return self._min_version != None\n\n @property\n def max_version(self):\n return self._max_version\n\n @max_version.setter\n def max_version(self, max_version):\n self._max_version = max_version\n\n def has_max_version(self):\n return self._max_version != None\n\n def has_versions(self):\n return (self.has_min_version()) or (self.has_max_version())\n\nclass DependencyHelper(object):\n \"\"\" DependencyHelper manages software dependency information \"\"\"\n def __init__(self):\n self._PYTHON_PACKAGE = 'PYTHON_PACKAGE'\n self._dependency = {self._PYTHON_PACKAGE:OrderedDict()}\n\n @property\n def python_packages(self):\n return self._dependency[self._PYTHON_PACKAGE]\n\n def add_python_package(self, dependency, override=True):\n \"\"\" add_single_python_package adds a dependency for the python package\n\n Args:\n name: package name\n version: it could be a specific version(1.10.0), or a range(>=1.0,<=2.0)\n if not specified, the default is resolved automatically by the pip system.\n override: whether to override the version if already existing in the dependency.\n \"\"\"\n if dependency.name in self.python_packages and not override:\n return\n self.python_packages[dependency.name] = dependency\n\n def generate_pip_requirements(self, target_file):\n \"\"\" write the python packages to a 
requirement file\n the generated file follows the order of which the packages are added \"\"\"\n with open(target_file, 'w') as f:\n for name, version in self.python_packages.items():\n version = self.python_packages[name]\n version_str = ''\n if version.has_min_version():\n version_str += ' >= ' + version.min_version + ','\n if version.has_max_version():\n version_str += ' <= ' + version.max_version + ','\n f.write(name + version_str.rstrip(',') + '\\n')\n\ndef _dependency_to_requirements(dependency=[], filename='requirements.txt'):\n \"\"\"\n Generates a requirement file based on the dependency\n Args:\n dependency (list): a list of VersionedDependency, which includes the package name and versions\n filename (str): requirement file name, default as requirements.txt\n \"\"\"\n dependency_helper = DependencyHelper()\n for version in dependency:\n dependency_helper.add_python_package(version)\n dependency_helper.generate_pip_requirements(filename)\n\ndef _generate_dockerfile(filename, base_image, python_version, requirement_filename=None, add_files=None):\n \"\"\"\n generates dockerfiles\n Args:\n filename (str): target file name for the dockerfile.\n base_image (str): the base image name.\n python_version (str): choose python2 or python3\n requirement_filename (str): requirement file name\n add_files (Dict[str, str]): Map containing the files thats should be added to the container. add_files maps the build context relative source paths to the container destination paths.\n \"\"\"\n if python_version not in ['python2', 'python3']:\n raise ValueError('python_version has to be either python2 or python3')\n with open(filename, 'w') as f:\n f.write('FROM ' + base_image + '\\n')\n if python_version == 'python3':\n f.write('RUN apt-get update -y && apt-get install --no-install-recommends -y -q python3 python3-pip python3-setuptools\\n')\n else:\n f.write('RUN apt-get update -y && apt-get install --no-install-recommends -y -q python python-pip python-setuptools\\n')\n if requirement_filename is not None:\n f.write('ADD ' + requirement_filename + ' /ml/requirements.txt\\n')\n if python_version == 'python3':\n f.write('RUN pip3 install -r /ml/requirements.txt\\n')\n else:\n f.write('RUN pip install -r /ml/requirements.txt\\n')\n \n for src_path, dst_path in (add_files or {}).items(): \n f.write('ADD ' + src_path + ' ' + dst_path + '\\n')\n\n\ndef _configure_logger(logger):\n \"\"\" _configure_logger configures the logger such that the info level logs\n go to the stdout and the error(or above) level logs go to the stderr.\n It is important for the Jupyter notebook log rendering \"\"\"\n if hasattr(_configure_logger, 'configured'):\n # Skip the logger configuration the second time this function\n # is called to avoid multiple streamhandlers bound to the logger.\n return\n setattr(_configure_logger, 'configured', 'true')\n logger.setLevel(logging.INFO)\n info_handler = logging.StreamHandler(stream=sys.stdout)\n info_handler.addFilter(lambda record: record.levelno <= logging.INFO)\n info_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(message)s', datefmt='%Y-%m-%d %H:%M:%S'))\n error_handler = logging.StreamHandler(sys.stderr)\n error_handler.addFilter(lambda record: record.levelno > logging.INFO)\n error_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(message)s', datefmt='%Y-%m-%d %H:%M:%S'))\n logger.addHandler(info_handler)\n logger.addHandler(error_handler)\n\n\ndef build_python_component(component_func, target_image, base_image=None, dependency=[], 
staging_gcs_path=None, timeout=600, namespace='kubeflow', target_component_file=None, python_version='python3'):\n \"\"\" build_component automatically builds a container image for the component_func\n based on the base_image and pushes to the target_image.\n\n Args:\n component_func (python function): The python function to build components upon\n base_image (str): Docker image to use as a base image\n target_image (str): Full URI to push the target image\n staging_gcs_path (str): GCS blob that can store temporary build files\n target_image (str): target image path\n timeout (int): the timeout for the image build(in secs), default is 600 seconds\n namespace (str): the namespace within which to run the kubernetes kaniko job, default is \"kubeflow\"\n dependency (list): a list of VersionedDependency, which includes the package name and versions, default is empty\n python_version (str): choose python2 or python3, default is python3\n Raises:\n ValueError: The function is not decorated with python_component decorator or the python_version is neither python2 nor python3\n \"\"\"\n\n _configure_logger(logging.getLogger())\n\n if component_func is None:\n raise ValueError('component_func must not be None')\n if target_image is None:\n raise ValueError('target_image must not be None')\n\n if python_version not in ['python2', 'python3']:\n raise ValueError('python_version has to be either python2 or python3')\n\n if staging_gcs_path is None:\n raise ValueError('staging_gcs_path must not be None')\n\n if base_image is None:\n base_image = getattr(component_func, '_component_base_image', None)\n if base_image is None:\n from ..components._python_op import get_default_base_image\n base_image = get_default_base_image()\n if isinstance(base_image, Callable):\n base_image = base_image()\n\n logging.info('Build an image that is based on ' +\n base_image +\n ' and push the image to ' +\n target_image)\n\n component_spec = _func_to_component_spec(component_func, base_image=base_image)\n command_line_args = component_spec.implementation.container.command\n\n dash_c_index = command_line_args.index('-c')\n program_code_index = dash_c_index + 1\n program_code = command_line_args[program_code_index]\n program_rel_path = 'ml/main.py'\n program_container_path = '/' + program_rel_path\n\n # Replacing the inline code with calling a local program\n # Before: python3 -u -c 'import sys ...' 
--param1 ...\n # After: python3 -u main.py --param1 ...\n command_line_args[program_code_index] = program_container_path\n command_line_args.pop(dash_c_index)\n\n if python_version == 'python2':\n import warnings\n warnings.warn('Python2 is not longer supported')\n # Replacing the python interpreter\n python_interpreter_index = command_line_args.index('python3')\n command_line_args[python_interpreter_index] = python_version\n\n arc_docker_filename = 'Dockerfile'\n arc_requirement_filename = 'requirements.txt'\n\n with tempfile.TemporaryDirectory() as local_build_dir:\n # Write the program code to a file in the context directory\n local_python_filepath = os.path.join(local_build_dir, program_rel_path)\n os.makedirs(os.path.dirname(local_python_filepath), exist_ok=True)\n with open(local_python_filepath, 'w') as f:\n f.write(program_code)\n\n # Generate the python package requirements file in the context directory\n local_requirement_filepath = os.path.join(local_build_dir, arc_requirement_filename)\n _dependency_to_requirements(dependency, local_requirement_filepath)\n\n # Generate Dockerfile in the context directory\n local_docker_filepath = os.path.join(local_build_dir, arc_docker_filename)\n _generate_dockerfile(local_docker_filepath, base_image, python_version, arc_requirement_filename, add_files={program_rel_path: program_container_path})\n\n logging.info('Building and pushing container image.')\n container_builder = ContainerBuilder(staging_gcs_path, target_image, namespace)\n image_name_with_digest = container_builder.build(local_build_dir, arc_docker_filename, target_image, timeout)\n\n component_spec.implementation.container.image = image_name_with_digest\n\n # Optionally writing the component definition to a local file for sharing\n target_component_file = target_component_file or getattr(component_func, '_component_target_component_file', None)\n if target_component_file:\n component_text = dump_yaml(component_spec.to_dict())\n Path(target_component_file).write_text(component_text)\n\n task_factory_function = _create_task_factory_from_component_spec(component_spec)\n return task_factory_function\n\n\ndef build_docker_image(staging_gcs_path, target_image, dockerfile_path, timeout=600, namespace='kubeflow'):\n \"\"\" build_docker_image automatically builds a container image based on the specification in the dockerfile and\n pushes to the target_image.\n\n Args:\n staging_gcs_path (str): GCS blob that can store temporary build files\n target_image (str): gcr path to push the final image\n dockerfile_path (str): local path to the dockerfile\n timeout (int): the timeout for the image build(in secs), default is 600 seconds\n namespace (str): the namespace within which to run the kubernetes kaniko job, default is \"kubeflow\"\n \"\"\"\n _configure_logger(logging.getLogger())\n\n with tempfile.TemporaryDirectory() as local_build_dir:\n dockerfile_rel_path = 'Dockerfile'\n dst_dockerfile_path = os.path.join(local_build_dir, dockerfile_rel_path)\n shutil.copyfile(dockerfile_path, dst_dockerfile_path)\n\n container_builder = ContainerBuilder(staging_gcs_path, target_image, namespace=namespace)\n image_name_with_digest = container_builder.build(local_build_dir, dockerfile_rel_path, target_image, timeout)\n\n logging.info('Build image complete.')\n return image_name_with_digest\n", "path": "sdk/python/kfp/containers/_component_builder.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this 
file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport inspect\nimport re\nimport sys\nimport tempfile\nimport logging\nimport shutil\nfrom collections import OrderedDict\nfrom pathlib import Path\nfrom typing import Callable\nfrom ..components._components import _create_task_factory_from_component_spec\nfrom ..components._python_op import _func_to_component_spec\nfrom ..components._yaml_utils import dump_yaml\nfrom ._container_builder import ContainerBuilder\n\nclass VersionedDependency(object):\n \"\"\" DependencyVersion specifies the versions \"\"\"\n def __init__(self, name, version=None, min_version=None, max_version=None):\n \"\"\" if version is specified, no need for min_version or max_version;\n if both are specified, version is adopted \"\"\"\n self._name = name\n if version is not None:\n self._min_version = version\n self._max_version = version\n else:\n self._min_version = min_version\n self._max_version = max_version\n\n @property\n def name(self):\n return self._name\n\n @property\n def min_version(self):\n return self._min_version\n\n @min_version.setter\n def min_version(self, min_version):\n self._min_version = min_version\n\n def has_min_version(self):\n return self._min_version != None\n\n @property\n def max_version(self):\n return self._max_version\n\n @max_version.setter\n def max_version(self, max_version):\n self._max_version = max_version\n\n def has_max_version(self):\n return self._max_version != None\n\n def has_versions(self):\n return (self.has_min_version()) or (self.has_max_version())\n\nclass DependencyHelper(object):\n \"\"\" DependencyHelper manages software dependency information \"\"\"\n def __init__(self):\n self._PYTHON_PACKAGE = 'PYTHON_PACKAGE'\n self._dependency = {self._PYTHON_PACKAGE:OrderedDict()}\n\n @property\n def python_packages(self):\n return self._dependency[self._PYTHON_PACKAGE]\n\n def add_python_package(self, dependency, override=True):\n \"\"\" add_single_python_package adds a dependency for the python package\n\n Args:\n name: package name\n version: it could be a specific version(1.10.0), or a range(>=1.0,<=2.0)\n if not specified, the default is resolved automatically by the pip system.\n override: whether to override the version if already existing in the dependency.\n \"\"\"\n if dependency.name in self.python_packages and not override:\n return\n self.python_packages[dependency.name] = dependency\n\n def generate_pip_requirements(self, target_file):\n \"\"\" write the python packages to a requirement file\n the generated file follows the order of which the packages are added \"\"\"\n with open(target_file, 'w') as f:\n for name, version in self.python_packages.items():\n version = self.python_packages[name]\n version_str = ''\n if version.has_min_version():\n version_str += ' >= ' + version.min_version + ','\n if version.has_max_version():\n version_str += ' <= ' + version.max_version + ','\n f.write(name + version_str.rstrip(',') + '\\n')\n\ndef _dependency_to_requirements(dependency=[], filename='requirements.txt'):\n \"\"\"\n Generates a requirement file based on the dependency\n Args:\n dependency (list): 
a list of VersionedDependency, which includes the package name and versions\n filename (str): requirement file name, default as requirements.txt\n \"\"\"\n dependency_helper = DependencyHelper()\n for version in dependency:\n dependency_helper.add_python_package(version)\n dependency_helper.generate_pip_requirements(filename)\n\ndef _generate_dockerfile(filename, base_image, python_version, requirement_filename=None, add_files=None):\n \"\"\"\n generates dockerfiles\n Args:\n filename (str): target file name for the dockerfile.\n base_image (str): the base image name.\n python_version (str): choose python2 or python3\n requirement_filename (str): requirement file name\n add_files (Dict[str, str]): Map containing the files thats should be added to the container. add_files maps the build context relative source paths to the container destination paths.\n \"\"\"\n if python_version not in ['python2', 'python3']:\n raise ValueError('python_version has to be either python2 or python3')\n with open(filename, 'w') as f:\n f.write('FROM ' + base_image + '\\n')\n if python_version == 'python3':\n f.write('RUN apt-get update -y && apt-get install --no-install-recommends -y -q python3 python3-pip python3-setuptools\\n')\n else:\n f.write('RUN apt-get update -y && apt-get install --no-install-recommends -y -q python python-pip python-setuptools\\n')\n if requirement_filename is not None:\n f.write('ADD ' + requirement_filename + ' /ml/requirements.txt\\n')\n if python_version == 'python3':\n f.write('RUN pip3 install -r /ml/requirements.txt\\n')\n else:\n f.write('RUN pip install -r /ml/requirements.txt\\n')\n \n for src_path, dst_path in (add_files or {}).items(): \n f.write('ADD ' + src_path + ' ' + dst_path + '\\n')\n\n\ndef _configure_logger(logger):\n \"\"\" _configure_logger configures the logger such that the info level logs\n go to the stdout and the error(or above) level logs go to the stderr.\n It is important for the Jupyter notebook log rendering \"\"\"\n if hasattr(_configure_logger, 'configured'):\n # Skip the logger configuration the second time this function\n # is called to avoid multiple streamhandlers bound to the logger.\n return\n setattr(_configure_logger, 'configured', 'true')\n logger.setLevel(logging.INFO)\n info_handler = logging.StreamHandler(stream=sys.stdout)\n info_handler.addFilter(lambda record: record.levelno <= logging.INFO)\n info_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(message)s', datefmt='%Y-%m-%d %H:%M:%S'))\n error_handler = logging.StreamHandler(sys.stderr)\n error_handler.addFilter(lambda record: record.levelno > logging.INFO)\n error_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(message)s', datefmt='%Y-%m-%d %H:%M:%S'))\n logger.addHandler(info_handler)\n logger.addHandler(error_handler)\n\n\ndef build_python_component(component_func, target_image, base_image=None, dependency=[], staging_gcs_path=None, timeout=600, namespace=None, target_component_file=None, python_version='python3'):\n \"\"\" build_component automatically builds a container image for the component_func\n based on the base_image and pushes to the target_image.\n\n Args:\n component_func (python function): The python function to build components upon\n base_image (str): Docker image to use as a base image\n target_image (str): Full URI to push the target image\n staging_gcs_path (str): GCS blob that can store temporary build files\n target_image (str): target image path\n timeout (int): the timeout for the image build(in secs), default is 600 
seconds\n namespace (str): the namespace within which to run the kubernetes kaniko job. If the\n job is running on GKE and value is None the underlying functions will use the default namespace from GKE. .\n dependency (list): a list of VersionedDependency, which includes the package name and versions, default is empty\n python_version (str): choose python2 or python3, default is python3\n Raises:\n ValueError: The function is not decorated with python_component decorator or the python_version is neither python2 nor python3\n \"\"\"\n\n _configure_logger(logging.getLogger())\n\n if component_func is None:\n raise ValueError('component_func must not be None')\n if target_image is None:\n raise ValueError('target_image must not be None')\n\n if python_version not in ['python2', 'python3']:\n raise ValueError('python_version has to be either python2 or python3')\n\n if staging_gcs_path is None:\n raise ValueError('staging_gcs_path must not be None')\n\n if base_image is None:\n base_image = getattr(component_func, '_component_base_image', None)\n if base_image is None:\n from ..components._python_op import get_default_base_image\n base_image = get_default_base_image()\n if isinstance(base_image, Callable):\n base_image = base_image()\n\n logging.info('Build an image that is based on ' +\n base_image +\n ' and push the image to ' +\n target_image)\n\n component_spec = _func_to_component_spec(component_func, base_image=base_image)\n command_line_args = component_spec.implementation.container.command\n\n dash_c_index = command_line_args.index('-c')\n program_code_index = dash_c_index + 1\n program_code = command_line_args[program_code_index]\n program_rel_path = 'ml/main.py'\n program_container_path = '/' + program_rel_path\n\n # Replacing the inline code with calling a local program\n # Before: python3 -u -c 'import sys ...' 
--param1 ...\n # After: python3 -u main.py --param1 ...\n command_line_args[program_code_index] = program_container_path\n command_line_args.pop(dash_c_index)\n\n if python_version == 'python2':\n import warnings\n warnings.warn('Python2 is not longer supported')\n # Replacing the python interpreter\n python_interpreter_index = command_line_args.index('python3')\n command_line_args[python_interpreter_index] = python_version\n\n arc_docker_filename = 'Dockerfile'\n arc_requirement_filename = 'requirements.txt'\n\n with tempfile.TemporaryDirectory() as local_build_dir:\n # Write the program code to a file in the context directory\n local_python_filepath = os.path.join(local_build_dir, program_rel_path)\n os.makedirs(os.path.dirname(local_python_filepath), exist_ok=True)\n with open(local_python_filepath, 'w') as f:\n f.write(program_code)\n\n # Generate the python package requirements file in the context directory\n local_requirement_filepath = os.path.join(local_build_dir, arc_requirement_filename)\n _dependency_to_requirements(dependency, local_requirement_filepath)\n\n # Generate Dockerfile in the context directory\n local_docker_filepath = os.path.join(local_build_dir, arc_docker_filename)\n _generate_dockerfile(local_docker_filepath, base_image, python_version, arc_requirement_filename, add_files={program_rel_path: program_container_path})\n\n logging.info('Building and pushing container image.')\n container_builder = ContainerBuilder(staging_gcs_path, target_image, namespace)\n image_name_with_digest = container_builder.build(local_build_dir, arc_docker_filename, target_image, timeout)\n\n component_spec.implementation.container.image = image_name_with_digest\n\n # Optionally writing the component definition to a local file for sharing\n target_component_file = target_component_file or getattr(component_func, '_component_target_component_file', None)\n if target_component_file:\n component_text = dump_yaml(component_spec.to_dict())\n Path(target_component_file).write_text(component_text)\n\n task_factory_function = _create_task_factory_from_component_spec(component_spec)\n return task_factory_function\n\n\ndef build_docker_image(staging_gcs_path, target_image, dockerfile_path, timeout=600, namespace=None):\n \"\"\" build_docker_image automatically builds a container image based on the specification in the dockerfile and\n pushes to the target_image.\n\n Args:\n staging_gcs_path (str): GCS blob that can store temporary build files\n target_image (str): gcr path to push the final image\n dockerfile_path (str): local path to the dockerfile\n timeout (int): the timeout for the image build(in secs), default is 600 seconds\n namespace (str): the namespace within which to run the kubernetes kaniko job. Default is None. If the\n job is running on GKE and value is None the underlying functions will use the default namespace from GKE. \n \"\"\"\n _configure_logger(logging.getLogger())\n\n with tempfile.TemporaryDirectory() as local_build_dir:\n dockerfile_rel_path = 'Dockerfile'\n dst_dockerfile_path = os.path.join(local_build_dir, dockerfile_rel_path)\n shutil.copyfile(dockerfile_path, dst_dockerfile_path)\n\n container_builder = ContainerBuilder(staging_gcs_path, target_image, namespace=namespace)\n image_name_with_digest = container_builder.build(local_build_dir, dockerfile_rel_path, target_image, timeout)\n\n logging.info('Build image complete.')\n return image_name_with_digest\n", "path": "sdk/python/kfp/containers/_component_builder.py"}]} | 4,071 | 635 |
gh_patches_debug_33706 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-470 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Length limit check on Route53 TXT records doesn't allow multiple values
*cfn-lint version: (`cfn-lint --version`)* master
*Description of issue.*
The length limit check on TXT records is not quite right. Multiple values of up to 255 characters *are* allowed, separated by spaces.
This valid template is thus categorized as invalid:
```
$ cat test.yml
Resources:
Example:
Type: AWS::Route53::RecordSet
Properties:
HostedZoneId: abc123
Name: example.com.
Type: TXT
TTL: '14400'
ResourceRecords:
# 255 "a" characters within appropriate quotes, then a "b"
- '"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "b"'
$ cfn-lint test.yml
E3020 The length of the TXT record (261) exceeds the limit (255)
test.yml:9:7
```
Verified that the record is valid by creating an equivalent record in the Route 53 console.
--- END ISSUE ---
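To make the reported behaviour concrete before diving into the code, here is a small standalone check. It is an illustration only (the variable names are ours), and it uses the same pattern that the patch at the end of this record introduces as `REGEX_TXT`: one or more double-quoted chunks of up to 255 characters each, separated by spaces.
```python
import re

# One or more double-quoted strings of 1-255 characters, separated by spaces,
# mirroring the REGEX_TXT pattern added by the fix shown later in this record.
TXT_VALUE = re.compile(r'^("[^"]{1,255}" *)*"[^"]{1,255}"$')

# The record from the issue: 255 "a" characters in one quoted chunk, then "b".
multi_chunk = '"{}" "b"'.format('a' * 255)
# A single quoted chunk that exceeds the 255-character limit.
single_oversized = '"{}"'.format('a' * 256)

print(bool(TXT_VALUE.match(multi_chunk)))       # True  - valid multi-value record
print(bool(TXT_VALUE.match(single_oversized)))  # False - one chunk longer than 255
```
The existing `check_txt_record` shown below instead measures the length of the whole string, which is why the 261-character multi-value record from the issue is rejected even though each quoted chunk is within the limit.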
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/route53/RecordSet.py`
Content:
```
1 """
2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import re
18 from cfnlint import CloudFormationLintRule
19 from cfnlint import RuleMatch
20
21 from cfnlint.helpers import REGEX_IPV4, REGEX_IPV6, REGEX_ALPHANUMERIC
22
23 class RecordSet(CloudFormationLintRule):
24 """Check Route53 Recordset Configuration"""
25 id = 'E3020'
26 shortdesc = 'Validate Route53 RecordSets'
27 description = 'Check if all RecordSets are correctly configured'
28 source_url = 'https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html'
29 tags = ['resources', 'route53', 'record_set']
30
31 # https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html
32 VALID_RECORD_TYPES = [
33 'A',
34 'AAAA',
35 'CAA',
36 'CNAME',
37 'MX',
38 'NAPTR',
39 'NS',
40 'PTR',
41 'SOA'
42 'SPF',
43 'SRV',
44 'TXT'
45 ]
46
47 REGEX_CNAME = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])(.)$')
48
49 def check_a_record(self, path, recordset):
50 """Check A record Configuration"""
51 matches = []
52
53 resource_records = recordset.get('ResourceRecords')
54 for index, record in enumerate(resource_records):
55
56 if not isinstance(record, dict):
57 tree = path[:] + ['ResourceRecords', index]
58
59 # Check if a valid IPv4 address is specified
60 if not re.match(REGEX_IPV4, record):
61 message = 'A record ({}) is not a valid IPv4 address'
62 matches.append(RuleMatch(tree, message.format(record)))
63
64 return matches
65
66 def check_aaaa_record(self, path, recordset):
67 """Check AAAA record Configuration"""
68 matches = []
69
70 resource_records = recordset.get('ResourceRecords')
71 for index, record in enumerate(resource_records):
72
73 if not isinstance(record, dict):
74 tree = path[:] + ['ResourceRecords', index]
75
76 # Check if a valid IPv4 address is specified
77 if not re.match(REGEX_IPV6, record):
78 message = 'AAAA record ({}) is not a valid IPv6 address'
79 matches.append(RuleMatch(tree, message.format(record)))
80
81 return matches
82
83 def check_caa_record(self, path, recordset):
84 """Check CAA record Configuration"""
85 matches = []
86
87 resource_records = recordset.get('ResourceRecords')
88
89 for index, record in enumerate(resource_records):
90 tree = path[:] + ['ResourceRecords', index]
91
92 if not isinstance(record, dict):
93 # Split the record up to the mandatory settings (flags tag "value")
94 items = record.split(' ', 2)
95
96 # Check if the 3 settings are given.
97 if len(items) != 3:
98 message = 'CAA record must contain 3 settings (flags tag "value"), record contains {} settings.'
99 matches.append(RuleMatch(tree, message.format(len(items))))
100 else:
101 # Check the flag value
102 if not items[0].isdigit():
103 message = 'CAA record flag setting ({}) should be of type Integer.'
104 matches.append(RuleMatch(tree, message.format(items[0])))
105 else:
106 if int(items[0]) not in [0, 128]:
107 message = 'Invalid CAA record flag setting ({}) given, must be 0 or 128.'
108 matches.append(RuleMatch(tree, message.format(items[0])))
109
110 # Check the tag value
111 if not re.match(REGEX_ALPHANUMERIC, items[1]):
112 message = 'Invalid CAA record tag setting {}. Value has to be alphanumeric.'
113 matches.append(RuleMatch(tree, message.format(items[0])))
114
115 # Check the value
116 if not items[2].startswith('"') or not items[2].endswith('"'):
117 message = 'CAA record value setting has to be enclosed in double quotation marks (").'
118 matches.append(RuleMatch(tree, message))
119
120 return matches
121
122 def check_cname_record(self, path, recordset):
123 """Check CNAME record Configuration"""
124 matches = []
125
126 resource_records = recordset.get('ResourceRecords')
127 if len(resource_records) > 1:
128 message = 'A CNAME recordset can only contain 1 value'
129 matches.append(RuleMatch(path + ['ResourceRecords'], message))
130 else:
131 for index, record in enumerate(resource_records):
132 if not isinstance(record, dict):
133 tree = path[:] + ['ResourceRecords', index]
134 if (not re.match(self.REGEX_CNAME, record)
135 # ACM Route 53 validation uses invalid CNAMEs starting with `_`,
136 # special-case them rather than complicate the regex.
137 and not record.endswith('.acm-validations.aws.')):
138 message = 'CNAME record ({}) does not contain a valid domain name'
139 matches.append(RuleMatch(tree, message.format(record)))
140
141 return matches
142
143 def check_txt_record(self, path, recordset):
144 """Check TXT record Configuration"""
145 matches = []
146
147 # Check quotation of the records
148 resource_records = recordset.get('ResourceRecords')
149
150 for index, record in enumerate(resource_records):
151 tree = path[:] + ['ResourceRecords', index]
152
153 if not isinstance(record, dict):
154 if not record.startswith('"') or not record.endswith('"'):
155 message = 'TXT record ({}) has to be enclosed in double quotation marks (")'
156 matches.append(RuleMatch(tree, message.format(record)))
157 elif len(record) > 257: # 2 extra characters for start and end double quotation marks
158 message = 'The length of the TXT record ({}) exceeds the limit (255)'
159 matches.append(RuleMatch(tree, message.format(len(record))))
160
161 return matches
162
163 def check_recordset(self, path, recordset):
164 """Check record configuration"""
165
166 matches = []
167 recordset_type = recordset.get('Type')
168
169 # Skip Intrinsic functions
170 if not isinstance(recordset_type, dict):
171 if recordset_type not in self.VALID_RECORD_TYPES:
172 message = 'Invalid record type "{0}" specified'
173 matches.append(RuleMatch(path + ['Type'], message.format(recordset_type)))
174 elif not recordset.get('AliasTarget'):
175 # Record type specific checks
176 if recordset_type == 'A':
177 matches.extend(self.check_a_record(path, recordset))
178 elif recordset_type == 'AAAA':
179 matches.extend(self.check_aaaa_record(path, recordset))
180 elif recordset_type == 'CAA':
181 matches.extend(self.check_caa_record(path, recordset))
182 elif recordset_type == 'CNAME':
183 matches.extend(self.check_cname_record(path, recordset))
184 elif recordset_type == 'TXT':
185 matches.extend(self.check_txt_record(path, recordset))
186
187 return matches
188
189 def match(self, cfn):
190 """Check RecordSets and RecordSetGroups Properties"""
191
192 matches = []
193
194 recordsets = cfn.get_resources(['AWS::Route53::RecordSet'])
195
196 for name, recordset in recordsets.items():
197 path = ['Resources', name, 'Properties']
198
199 if isinstance(recordset, dict):
200 props = recordset.get('Properties')
201 if props:
202 matches.extend(self.check_recordset(path, props))
203
204 recordsetgroups = cfn.get_resource_properties(['AWS::Route53::RecordSetGroup', 'RecordSets'])
205
206 for recordsetgroup in recordsetgroups:
207 path = recordsetgroup['Path']
208 value = recordsetgroup['Value']
209 if isinstance(value, list):
210 for index, recordset in enumerate(value):
211 tree = path[:] + [index]
212 matches.extend(self.check_recordset(tree, recordset))
213
214 return matches
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/resources/route53/RecordSet.py b/src/cfnlint/rules/resources/route53/RecordSet.py
--- a/src/cfnlint/rules/resources/route53/RecordSet.py
+++ b/src/cfnlint/rules/resources/route53/RecordSet.py
@@ -17,9 +17,9 @@
import re
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
-
from cfnlint.helpers import REGEX_IPV4, REGEX_IPV6, REGEX_ALPHANUMERIC
+
class RecordSet(CloudFormationLintRule):
"""Check Route53 Recordset Configuration"""
id = 'E3020'
@@ -45,6 +45,7 @@
]
REGEX_CNAME = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])(.)$')
+ REGEX_TXT = re.compile(r'^("[^"]{1,255}" *)*"[^"]{1,255}"$')
def check_a_record(self, path, recordset):
"""Check A record Configuration"""
@@ -150,13 +151,15 @@
for index, record in enumerate(resource_records):
tree = path[:] + ['ResourceRecords', index]
- if not isinstance(record, dict):
- if not record.startswith('"') or not record.endswith('"'):
- message = 'TXT record ({}) has to be enclosed in double quotation marks (")'
- matches.append(RuleMatch(tree, message.format(record)))
- elif len(record) > 257: # 2 extra characters for start and end double quotation marks
- message = 'The length of the TXT record ({}) exceeds the limit (255)'
- matches.append(RuleMatch(tree, message.format(len(record))))
+ if not isinstance(record, dict) and not re.match(self.REGEX_TXT, record):
+ message = 'TXT record is not structured as one or more items up to 255 characters ' \
+ 'enclosed in double quotation marks at {0}'
+ matches.append(RuleMatch(
+ tree,
+ (
+ message.format('/'.join(map(str, tree)))
+ ),
+ ))
return matches
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/route53/RecordSet.py b/src/cfnlint/rules/resources/route53/RecordSet.py\n--- a/src/cfnlint/rules/resources/route53/RecordSet.py\n+++ b/src/cfnlint/rules/resources/route53/RecordSet.py\n@@ -17,9 +17,9 @@\n import re\n from cfnlint import CloudFormationLintRule\n from cfnlint import RuleMatch\n-\n from cfnlint.helpers import REGEX_IPV4, REGEX_IPV6, REGEX_ALPHANUMERIC\n \n+\n class RecordSet(CloudFormationLintRule):\n \"\"\"Check Route53 Recordset Configuration\"\"\"\n id = 'E3020'\n@@ -45,6 +45,7 @@\n ]\n \n REGEX_CNAME = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9])(.)$')\n+ REGEX_TXT = re.compile(r'^(\"[^\"]{1,255}\" *)*\"[^\"]{1,255}\"$')\n \n def check_a_record(self, path, recordset):\n \"\"\"Check A record Configuration\"\"\"\n@@ -150,13 +151,15 @@\n for index, record in enumerate(resource_records):\n tree = path[:] + ['ResourceRecords', index]\n \n- if not isinstance(record, dict):\n- if not record.startswith('\"') or not record.endswith('\"'):\n- message = 'TXT record ({}) has to be enclosed in double quotation marks (\")'\n- matches.append(RuleMatch(tree, message.format(record)))\n- elif len(record) > 257: # 2 extra characters for start and end double quotation marks\n- message = 'The length of the TXT record ({}) exceeds the limit (255)'\n- matches.append(RuleMatch(tree, message.format(len(record))))\n+ if not isinstance(record, dict) and not re.match(self.REGEX_TXT, record):\n+ message = 'TXT record is not structured as one or more items up to 255 characters ' \\\n+ 'enclosed in double quotation marks at {0}'\n+ matches.append(RuleMatch(\n+ tree,\n+ (\n+ message.format('/'.join(map(str, tree)))\n+ ),\n+ ))\n \n return matches\n", "issue": "Length limit check on Route53 TXT records doesn't allow multiple values\n*cfn-lint version: (`cfn-lint --version`)* master\r\n\r\n*Description of issue.*\r\n\r\nThe length limit check on TXT records is not quite right. Multiple values of up to 255 characters *are* allowed, separated by spaces.\r\n\r\nThis valid template is thus categorized as invalid:\r\n\r\n```\r\n$ cat test.yml\r\nResources:\r\n Example:\r\n Type: AWS::Route53::RecordSet\r\n Properties:\r\n HostedZoneId: abc123\r\n Name: example.com.\r\n Type: TXT\r\n TTL: '14400'\r\n ResourceRecords:\r\n # 255 \"a\" characters within appropriate quotes, then a \"b\"\r\n - '\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\" \"b\"'\r\n$ cfn-lint test.yml\r\nE3020 The length of the TXT record (261) exceeds the limit (255)\r\ntest.yml:9:7\r\n```\r\n\r\nVerified it's valid by creating an equivalent record on the console.\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\nfrom cfnlint.helpers import REGEX_IPV4, REGEX_IPV6, REGEX_ALPHANUMERIC\n\nclass RecordSet(CloudFormationLintRule):\n \"\"\"Check Route53 Recordset Configuration\"\"\"\n id = 'E3020'\n shortdesc = 'Validate Route53 RecordSets'\n description = 'Check if all RecordSets are correctly configured'\n source_url = 'https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html'\n tags = ['resources', 'route53', 'record_set']\n\n # https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html\n VALID_RECORD_TYPES = [\n 'A',\n 'AAAA',\n 'CAA',\n 'CNAME',\n 'MX',\n 'NAPTR',\n 'NS',\n 'PTR',\n 'SOA'\n 'SPF',\n 'SRV',\n 'TXT'\n ]\n\n REGEX_CNAME = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9])(.)$')\n\n def check_a_record(self, path, recordset):\n \"\"\"Check A record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n for index, record in enumerate(resource_records):\n\n if not isinstance(record, dict):\n tree = path[:] + ['ResourceRecords', index]\n\n # Check if a valid IPv4 address is specified\n if not re.match(REGEX_IPV4, record):\n message = 'A record ({}) is not a valid IPv4 address'\n matches.append(RuleMatch(tree, message.format(record)))\n\n return matches\n\n def check_aaaa_record(self, path, recordset):\n \"\"\"Check AAAA record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n for index, record in enumerate(resource_records):\n\n if not isinstance(record, dict):\n tree = path[:] + ['ResourceRecords', index]\n\n # Check if a valid IPv4 address is specified\n if not re.match(REGEX_IPV6, record):\n message = 'AAAA record ({}) is not a valid IPv6 address'\n matches.append(RuleMatch(tree, message.format(record)))\n\n return matches\n\n def check_caa_record(self, path, recordset):\n \"\"\"Check CAA record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n\n for index, record in enumerate(resource_records):\n tree = path[:] + ['ResourceRecords', index]\n\n if not isinstance(record, dict):\n # Split the record up to the mandatory settings (flags tag \"value\")\n items = record.split(' ', 2)\n\n # Check if the 3 settings are given.\n if len(items) != 3:\n message = 'CAA record must contain 3 settings (flags tag \"value\"), record contains {} settings.'\n matches.append(RuleMatch(tree, message.format(len(items))))\n else:\n # Check the flag value\n if not 
items[0].isdigit():\n message = 'CAA record flag setting ({}) should be of type Integer.'\n matches.append(RuleMatch(tree, message.format(items[0])))\n else:\n if int(items[0]) not in [0, 128]:\n message = 'Invalid CAA record flag setting ({}) given, must be 0 or 128.'\n matches.append(RuleMatch(tree, message.format(items[0])))\n\n # Check the tag value\n if not re.match(REGEX_ALPHANUMERIC, items[1]):\n message = 'Invalid CAA record tag setting {}. Value has to be alphanumeric.'\n matches.append(RuleMatch(tree, message.format(items[0])))\n\n # Check the value\n if not items[2].startswith('\"') or not items[2].endswith('\"'):\n message = 'CAA record value setting has to be enclosed in double quotation marks (\").'\n matches.append(RuleMatch(tree, message))\n\n return matches\n\n def check_cname_record(self, path, recordset):\n \"\"\"Check CNAME record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n if len(resource_records) > 1:\n message = 'A CNAME recordset can only contain 1 value'\n matches.append(RuleMatch(path + ['ResourceRecords'], message))\n else:\n for index, record in enumerate(resource_records):\n if not isinstance(record, dict):\n tree = path[:] + ['ResourceRecords', index]\n if (not re.match(self.REGEX_CNAME, record)\n # ACM Route 53 validation uses invalid CNAMEs starting with `_`,\n # special-case them rather than complicate the regex.\n and not record.endswith('.acm-validations.aws.')):\n message = 'CNAME record ({}) does not contain a valid domain name'\n matches.append(RuleMatch(tree, message.format(record)))\n\n return matches\n\n def check_txt_record(self, path, recordset):\n \"\"\"Check TXT record Configuration\"\"\"\n matches = []\n\n # Check quotation of the records\n resource_records = recordset.get('ResourceRecords')\n\n for index, record in enumerate(resource_records):\n tree = path[:] + ['ResourceRecords', index]\n\n if not isinstance(record, dict):\n if not record.startswith('\"') or not record.endswith('\"'):\n message = 'TXT record ({}) has to be enclosed in double quotation marks (\")'\n matches.append(RuleMatch(tree, message.format(record)))\n elif len(record) > 257: # 2 extra characters for start and end double quotation marks\n message = 'The length of the TXT record ({}) exceeds the limit (255)'\n matches.append(RuleMatch(tree, message.format(len(record))))\n\n return matches\n\n def check_recordset(self, path, recordset):\n \"\"\"Check record configuration\"\"\"\n\n matches = []\n recordset_type = recordset.get('Type')\n\n # Skip Intrinsic functions\n if not isinstance(recordset_type, dict):\n if recordset_type not in self.VALID_RECORD_TYPES:\n message = 'Invalid record type \"{0}\" specified'\n matches.append(RuleMatch(path + ['Type'], message.format(recordset_type)))\n elif not recordset.get('AliasTarget'):\n # Record type specific checks\n if recordset_type == 'A':\n matches.extend(self.check_a_record(path, recordset))\n elif recordset_type == 'AAAA':\n matches.extend(self.check_aaaa_record(path, recordset))\n elif recordset_type == 'CAA':\n matches.extend(self.check_caa_record(path, recordset))\n elif recordset_type == 'CNAME':\n matches.extend(self.check_cname_record(path, recordset))\n elif recordset_type == 'TXT':\n matches.extend(self.check_txt_record(path, recordset))\n\n return matches\n\n def match(self, cfn):\n \"\"\"Check RecordSets and RecordSetGroups Properties\"\"\"\n\n matches = []\n\n recordsets = cfn.get_resources(['AWS::Route53::RecordSet'])\n\n for name, recordset in recordsets.items():\n 
path = ['Resources', name, 'Properties']\n\n if isinstance(recordset, dict):\n props = recordset.get('Properties')\n if props:\n matches.extend(self.check_recordset(path, props))\n\n recordsetgroups = cfn.get_resource_properties(['AWS::Route53::RecordSetGroup', 'RecordSets'])\n\n for recordsetgroup in recordsetgroups:\n path = recordsetgroup['Path']\n value = recordsetgroup['Value']\n if isinstance(value, list):\n for index, recordset in enumerate(value):\n tree = path[:] + [index]\n matches.extend(self.check_recordset(tree, recordset))\n\n return matches\n", "path": "src/cfnlint/rules/resources/route53/RecordSet.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\nfrom cfnlint.helpers import REGEX_IPV4, REGEX_IPV6, REGEX_ALPHANUMERIC\n\n\nclass RecordSet(CloudFormationLintRule):\n \"\"\"Check Route53 Recordset Configuration\"\"\"\n id = 'E3020'\n shortdesc = 'Validate Route53 RecordSets'\n description = 'Check if all RecordSets are correctly configured'\n source_url = 'https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html'\n tags = ['resources', 'route53', 'record_set']\n\n # https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html\n VALID_RECORD_TYPES = [\n 'A',\n 'AAAA',\n 'CAA',\n 'CNAME',\n 'MX',\n 'NAPTR',\n 'NS',\n 'PTR',\n 'SOA'\n 'SPF',\n 'SRV',\n 'TXT'\n ]\n\n REGEX_CNAME = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9])(.)$')\n REGEX_TXT = re.compile(r'^(\"[^\"]{1,255}\" *)*\"[^\"]{1,255}\"$')\n\n def check_a_record(self, path, recordset):\n \"\"\"Check A record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n for index, record in enumerate(resource_records):\n\n if not isinstance(record, dict):\n tree = path[:] + ['ResourceRecords', index]\n\n # Check if a valid IPv4 address is specified\n if not re.match(REGEX_IPV4, record):\n message = 'A record ({}) is not a valid IPv4 address'\n matches.append(RuleMatch(tree, message.format(record)))\n\n return matches\n\n def check_aaaa_record(self, path, recordset):\n \"\"\"Check AAAA record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n for index, record in enumerate(resource_records):\n\n if not isinstance(record, dict):\n tree = path[:] + ['ResourceRecords', index]\n\n # Check if a valid IPv4 address is specified\n if not re.match(REGEX_IPV6, record):\n message = 'AAAA record ({}) is not a 
valid IPv6 address'\n matches.append(RuleMatch(tree, message.format(record)))\n\n return matches\n\n def check_caa_record(self, path, recordset):\n \"\"\"Check CAA record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n\n for index, record in enumerate(resource_records):\n tree = path[:] + ['ResourceRecords', index]\n\n if not isinstance(record, dict):\n # Split the record up to the mandatory settings (flags tag \"value\")\n items = record.split(' ', 2)\n\n # Check if the 3 settings are given.\n if len(items) != 3:\n message = 'CAA record must contain 3 settings (flags tag \"value\"), record contains {} settings.'\n matches.append(RuleMatch(tree, message.format(len(items))))\n else:\n # Check the flag value\n if not items[0].isdigit():\n message = 'CAA record flag setting ({}) should be of type Integer.'\n matches.append(RuleMatch(tree, message.format(items[0])))\n else:\n if int(items[0]) not in [0, 128]:\n message = 'Invalid CAA record flag setting ({}) given, must be 0 or 128.'\n matches.append(RuleMatch(tree, message.format(items[0])))\n\n # Check the tag value\n if not re.match(REGEX_ALPHANUMERIC, items[1]):\n message = 'Invalid CAA record tag setting {}. Value has to be alphanumeric.'\n matches.append(RuleMatch(tree, message.format(items[0])))\n\n # Check the value\n if not items[2].startswith('\"') or not items[2].endswith('\"'):\n message = 'CAA record value setting has to be enclosed in double quotation marks (\").'\n matches.append(RuleMatch(tree, message))\n\n return matches\n\n def check_cname_record(self, path, recordset):\n \"\"\"Check CNAME record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n if len(resource_records) > 1:\n message = 'A CNAME recordset can only contain 1 value'\n matches.append(RuleMatch(path + ['ResourceRecords'], message))\n else:\n for index, record in enumerate(resource_records):\n if not isinstance(record, dict):\n tree = path[:] + ['ResourceRecords', index]\n if (not re.match(self.REGEX_CNAME, record)\n # ACM Route 53 validation uses invalid CNAMEs starting with `_`,\n # special-case them rather than complicate the regex.\n and not record.endswith('.acm-validations.aws.')):\n message = 'CNAME record ({}) does not contain a valid domain name'\n matches.append(RuleMatch(tree, message.format(record)))\n\n return matches\n\n def check_txt_record(self, path, recordset):\n \"\"\"Check TXT record Configuration\"\"\"\n matches = []\n\n # Check quotation of the records\n resource_records = recordset.get('ResourceRecords')\n\n for index, record in enumerate(resource_records):\n tree = path[:] + ['ResourceRecords', index]\n\n if not isinstance(record, dict) and not re.match(self.REGEX_TXT, record):\n message = 'TXT record is not structured as one or more items up to 255 characters ' \\\n 'enclosed in double quotation marks at {0}'\n matches.append(RuleMatch(\n tree,\n (\n message.format('/'.join(map(str, tree)))\n ),\n ))\n\n return matches\n\n def check_recordset(self, path, recordset):\n \"\"\"Check record configuration\"\"\"\n\n matches = []\n recordset_type = recordset.get('Type')\n\n # Skip Intrinsic functions\n if not isinstance(recordset_type, dict):\n if recordset_type not in self.VALID_RECORD_TYPES:\n message = 'Invalid record type \"{0}\" specified'\n matches.append(RuleMatch(path + ['Type'], message.format(recordset_type)))\n elif not recordset.get('AliasTarget'):\n # Record type specific checks\n if recordset_type == 'A':\n 
matches.extend(self.check_a_record(path, recordset))\n elif recordset_type == 'AAAA':\n matches.extend(self.check_aaaa_record(path, recordset))\n elif recordset_type == 'CAA':\n matches.extend(self.check_caa_record(path, recordset))\n elif recordset_type == 'CNAME':\n matches.extend(self.check_cname_record(path, recordset))\n elif recordset_type == 'TXT':\n matches.extend(self.check_txt_record(path, recordset))\n\n return matches\n\n def match(self, cfn):\n \"\"\"Check RecordSets and RecordSetGroups Properties\"\"\"\n\n matches = []\n\n recordsets = cfn.get_resources(['AWS::Route53::RecordSet'])\n\n for name, recordset in recordsets.items():\n path = ['Resources', name, 'Properties']\n\n if isinstance(recordset, dict):\n props = recordset.get('Properties')\n if props:\n matches.extend(self.check_recordset(path, props))\n\n recordsetgroups = cfn.get_resource_properties(['AWS::Route53::RecordSetGroup', 'RecordSets'])\n\n for recordsetgroup in recordsetgroups:\n path = recordsetgroup['Path']\n value = recordsetgroup['Value']\n if isinstance(value, list):\n for index, recordset in enumerate(value):\n tree = path[:] + [index]\n matches.extend(self.check_recordset(tree, recordset))\n\n return matches\n", "path": "src/cfnlint/rules/resources/route53/RecordSet.py"}]} | 3,007 | 553 |
gh_patches_debug_12437 | rasdani/github-patches | git_diff | elastic__ecs-2248 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Generator does not add "ignore_above" property for field type: flattened
**Description of the problem including expected versus actual behavior**:
The generator does not write the `ignore_above` property for fields of type `flattened`.
**Steps to reproduce**:
Create a field in a field set that uses the `flattened` type together with the `ignore_above` key:
```yaml
- name: enrichment_data
level: custom
type: flattened
description: >-
Flattened object of enrichment data.
ignore_above: 1024
```
1. Created field set ecs file
2. Added field set to subset.yml
3. Generated artifacts
4. Examine generated component template with flattened field.
**Provide logs (if relevant)**:
Generated component template
```json
{
"_meta": {
"ecs_version": "8.8.0"
},
"template": {
"mappings": {
"properties": {
"enrichment_data": {
"type": "flattened"
}
}
}
}
}
```
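For contrast, the mapping the reporter expects for this field would carry the setting through. The snippet below is assembled from the field definition above as an illustration; it is not actual generator output:
```json
{
  "_meta": {
    "ecs_version": "8.8.0"
  },
  "template": {
    "mappings": {
      "properties": {
        "enrichment_data": {
          "type": "flattened",
          "ignore_above": 1024
        }
      }
    }
  }
}
```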
**Any additional context:**
This key is necessary so Elasticsearch does not try to index fields with large values.
Note: Adding the property after the component template is generated works as a temporary workaround.
--- END ISSUE ---
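One natural place for such a change is the `entry_for` helper in `scripts/generators/es_template.py`, shown further below, which builds the per-field mapping entry. The sketch that follows is a hypothetical adjustment for illustration; it is not the project's confirmed fix, and the real function handles several other field types and options.
```python
def entry_for(field: dict) -> dict:
    """Sketch of a possible adjustment; the real function handles more cases."""
    field_entry = {'type': field['type']}
    # Hypothetical addition: keep ignore_above for flattened fields so the
    # generated component template preserves the setting from the schema file.
    if field['type'] == 'flattened' and 'ignore_above' in field:
        field_entry['ignore_above'] = field['ignore_above']
    # ... remaining type-specific handling as in the existing entry_for ...
    return field_entry


# The field definition from the issue would then map to:
# {'type': 'flattened', 'ignore_above': 1024}
print(entry_for({'name': 'enrichment_data', 'type': 'flattened', 'ignore_above': 1024}))
```
Whether an inline copy like this or the shared `ecs_helpers.dict_copy_existing_keys` helper used elsewhere in the file is preferable is a design choice for the actual patch.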
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/generators/es_template.py`
Content:
```
1 # Licensed to Elasticsearch B.V. under one or more contributor
2 # license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright
4 # ownership. Elasticsearch B.V. licenses this file to you under
5 # the Apache License, Version 2.0 (the "License"); you may
6 # not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17
18 import json
19 import sys
20 from typing import (
21 Dict,
22 List,
23 Optional,
24 Union
25 )
26
27 from os.path import join
28
29 from generators import ecs_helpers
30 from _types import (
31 Field,
32 FieldNestedEntry,
33 )
34
35 # Composable Template
36
37
38 def generate(
39 ecs_nested: Dict[str, FieldNestedEntry],
40 ecs_version: str,
41 out_dir: str,
42 mapping_settings_file: str,
43 template_settings_file: str
44 ) -> None:
45 """This generates all artifacts for the composable template approach"""
46 all_component_templates(ecs_nested, ecs_version, out_dir)
47 component_names = component_name_convention(ecs_version, ecs_nested)
48 save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file, template_settings_file)
49
50
51 def save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file, template_settings_file):
52 mappings_section = mapping_settings(mapping_settings_file)
53 template = template_settings(ecs_version, mappings_section, template_settings_file, component_names=component_names)
54
55 filename = join(out_dir, "elasticsearch/composable/template.json")
56 save_json(filename, template)
57
58
59 def all_component_templates(
60 ecs_nested: Dict[str, FieldNestedEntry],
61 ecs_version: str,
62 out_dir: str
63 ) -> None:
64 """Generate one component template per field set"""
65 component_dir: str = join(out_dir, 'elasticsearch/composable/component')
66 ecs_helpers.make_dirs(component_dir)
67
68 for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():
69 field_mappings = {}
70 for (flat_name, field) in fieldset['fields'].items():
71 name_parts = flat_name.split('.')
72 dict_add_nested(field_mappings, name_parts, entry_for(field))
73
74 save_component_template(fieldset_name, field['level'], ecs_version, component_dir, field_mappings)
75
76
77 def save_component_template(
78 template_name: str,
79 field_level: str,
80 ecs_version: str,
81 out_dir: str,
82 field_mappings: Dict
83 ) -> None:
84 filename: str = join(out_dir, template_name) + ".json"
85 reference_url: str = "https://www.elastic.co/guide/en/ecs/current/ecs-{}.html".format(template_name)
86
87 template: Dict = {
88 'template': {'mappings': {'properties': field_mappings}},
89 '_meta': {
90 'ecs_version': ecs_version,
91 }
92 }
93
94 """Only generate a documentation link for ECS fields"""
95 if (field_level != 'custom'):
96 template['_meta']['documentation'] = reference_url
97
98 save_json(filename, template)
99
100
101 def component_name_convention(
102 ecs_version: str,
103 ecs_nested: Dict[str, FieldNestedEntry]
104 ) -> List[str]:
105 version: str = ecs_version.replace('+', '-')
106 names: List[str] = []
107 for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():
108 names.append("ecs_{}_{}".format(version, fieldset_name.lower()))
109 return names
110
111
112 def candidate_components(ecs_nested: Dict[str, FieldNestedEntry]) -> Dict[str, FieldNestedEntry]:
113 """Returns same structure as ecs_nested, but skips all field sets with reusable.top_level: False"""
114 components: Dict[str, FieldNestedEntry] = {}
115 for (fieldset_name, fieldset) in ecs_nested.items():
116 if fieldset.get('reusable', None):
117 if not fieldset['reusable']['top_level']:
118 continue
119 components[fieldset_name] = fieldset
120 return components
121
122
123 # Legacy template
124
125
126 def generate_legacy(
127 ecs_flat: Dict[str, Field],
128 ecs_version: str,
129 out_dir: str,
130 mapping_settings_file: str,
131 template_settings_file: str
132 ) -> None:
133 """Generate the legacy index template"""
134 field_mappings = {}
135 for flat_name in sorted(ecs_flat):
136 field = ecs_flat[flat_name]
137 name_parts = flat_name.split('.')
138 dict_add_nested(field_mappings, name_parts, entry_for(field))
139
140 mappings_section: Dict = mapping_settings(mapping_settings_file)
141 mappings_section['properties'] = field_mappings
142
143 generate_legacy_template_version(ecs_version, mappings_section, out_dir, template_settings_file)
144
145
146 def generate_legacy_template_version(
147 ecs_version: str,
148 mappings_section: Dict,
149 out_dir: str,
150 template_settings_file: str
151 ) -> None:
152 ecs_helpers.make_dirs(join(out_dir, 'elasticsearch', "legacy"))
153 template: Dict = template_settings(ecs_version, mappings_section, template_settings_file, is_legacy=True)
154
155 filename: str = join(out_dir, "elasticsearch/legacy/template.json")
156 save_json(filename, template)
157
158
159 # Common helpers
160
161
162 def dict_add_nested(
163 dct: Dict,
164 name_parts: List[str],
165 value: Dict
166 ) -> None:
167 current_nesting: str = name_parts[0]
168 rest_name_parts: List[str] = name_parts[1:]
169 if len(rest_name_parts) > 0:
170 dct.setdefault(current_nesting, {})
171 dct[current_nesting].setdefault('properties', {})
172
173 dict_add_nested(
174 dct[current_nesting]['properties'],
175 rest_name_parts,
176 value)
177
178 else:
179 if current_nesting in dct and 'type' in value and 'object' == value['type']:
180 return
181 dct[current_nesting] = value
182
183
184 def entry_for(field: Field) -> Dict:
185 field_entry: Dict = {'type': field['type']}
186 try:
187 if field['type'] == 'object' or field['type'] == 'nested':
188 if 'enabled' in field and not field['enabled']:
189 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['enabled'])
190 # the index field is only valid for field types that are not object and nested
191 elif 'index' in field and not field['index']:
192 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['index', 'doc_values'])
193
194 if field['type'] == 'keyword':
195 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['ignore_above'])
196 elif field['type'] == 'constant_keyword':
197 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['value'])
198 elif field['type'] == 'text':
199 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['norms'])
200 elif field['type'] == 'alias':
201 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['path'])
202 elif field['type'] == 'scaled_float':
203 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['scaling_factor'])
204
205 if 'multi_fields' in field:
206 field_entry['fields'] = {}
207 for mf in field['multi_fields']:
208 mf_type = mf['type']
209 mf_entry = {'type': mf_type}
210 if mf_type == 'keyword':
211 ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['normalizer', 'ignore_above'])
212 elif mf_type == 'text':
213 ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['norms', 'analyzer'])
214 if 'parameters' in mf:
215 mf_entry.update(mf['parameters'])
216 field_entry['fields'][mf['name']] = mf_entry
217
218 if 'parameters' in field:
219 field_entry.update(field['parameters'])
220
221 except KeyError as ex:
222 print("Exception {} occurred for field {}".format(ex, field))
223 raise ex
224 return field_entry
225
226
227 def mapping_settings(mapping_settings_file: str) -> Dict:
228 if mapping_settings_file:
229 with open(mapping_settings_file) as f:
230 mappings = json.load(f)
231 else:
232 mappings = default_mapping_settings()
233 return mappings
234
235
236 def template_settings(
237 ecs_version: str,
238 mappings_section: Dict,
239 template_settings_file: Union[str, None],
240 is_legacy: Optional[bool] = False,
241 component_names: Optional[List[str]] = None
242 ) -> Dict:
243 if template_settings_file:
244 with open(template_settings_file) as f:
245 template = json.load(f)
246 else:
247 if is_legacy:
248 template = default_legacy_template_settings(ecs_version)
249 else:
250 template = default_template_settings(ecs_version)
251
252 finalize_template(template, ecs_version, is_legacy, mappings_section, component_names)
253
254 return template
255
256
257 def finalize_template(
258 template: Dict,
259 ecs_version: str,
260 is_legacy: bool,
261 mappings_section: Dict,
262 component_names: List[str]
263 ) -> None:
264 if is_legacy:
265 if mappings_section:
266 template['mappings'] = mappings_section
267
268 # _meta can't be at template root in legacy templates, so moving back to mappings section
269 # if present
270 if '_meta' in template:
271 mappings_section['_meta'] = template.pop('_meta')
272
273 else:
274 template['template']['mappings'] = mappings_section
275 template['composed_of'] = component_names
276 template['_meta'] = {
277 "ecs_version": ecs_version,
278 "description": "Sample composable template that includes all ECS fields"
279 }
280
281
282 def save_json(file: str, data: Dict) -> None:
283 open_mode = "wb"
284 if sys.version_info >= (3, 0):
285 open_mode = "w"
286 with open(file, open_mode) as jsonfile:
287 json.dump(data, jsonfile, indent=2, sort_keys=True)
288 jsonfile.write('\n')
289
290
291 def default_template_settings(ecs_version: str) -> Dict:
292 return {
293 "index_patterns": ["try-ecs-*"],
294 "_meta": {
295 "ecs_version": ecs_version,
296 "description": "Sample composable template that includes all ECS fields"
297 },
298 "priority": 1, # Very low, as this is a sample template
299 "template": {
300 "settings": {
301 "index": {
302 "codec": "best_compression",
303 "mapping": {
304 "total_fields": {
305 "limit": 2000
306 }
307 }
308 }
309 },
310 }
311 }
312
313
314 def default_legacy_template_settings(ecs_version: str) -> Dict:
315 return {
316 "index_patterns": ["try-ecs-*"],
317 "_meta": {"version": ecs_version},
318 "order": 1,
319 "settings": {
320 "index": {
321 "mapping": {
322 "total_fields": {
323 "limit": 10000
324 }
325 },
326 "refresh_interval": "5s"
327 }
328 }
329 }
330
331
332 def default_mapping_settings() -> Dict:
333 return {
334 "date_detection": False,
335 "dynamic_templates": [
336 {
337 "strings_as_keyword": {
338 "mapping": {
339 "ignore_above": 1024,
340 "type": "keyword"
341 },
342 "match_mapping_type": "string"
343 }
344 }
345 ]
346 }
347
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/generators/es_template.py b/scripts/generators/es_template.py
--- a/scripts/generators/es_template.py
+++ b/scripts/generators/es_template.py
@@ -191,7 +191,7 @@
elif 'index' in field and not field['index']:
ecs_helpers.dict_copy_existing_keys(field, field_entry, ['index', 'doc_values'])
- if field['type'] == 'keyword':
+ if field['type'] == 'keyword' or field['type'] == 'flattened':
ecs_helpers.dict_copy_existing_keys(field, field_entry, ['ignore_above'])
elif field['type'] == 'constant_keyword':
ecs_helpers.dict_copy_existing_keys(field, field_entry, ['value'])
| {"golden_diff": "diff --git a/scripts/generators/es_template.py b/scripts/generators/es_template.py\n--- a/scripts/generators/es_template.py\n+++ b/scripts/generators/es_template.py\n@@ -191,7 +191,7 @@\n elif 'index' in field and not field['index']:\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['index', 'doc_values'])\n \n- if field['type'] == 'keyword':\n+ if field['type'] == 'keyword' or field['type'] == 'flattened':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['ignore_above'])\n elif field['type'] == 'constant_keyword':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['value'])\n", "issue": "Generator does not add \"ignore_above\" property for field type: flattened\n<!--\r\nGitHub is reserved for bug reports and feature requests; it is not the place\r\nfor general questions. If you have a general question, please visit the\r\ndiscuss forums: https://discuss.elastic.co/tag/ecs-elastic-common-schema.\r\n\r\nPlease fill in the following details to help us reproduce the bug:\r\n-->\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\nGenerator does not write the property: `ignore_above` for field type: `flattened`\r\n\r\n\r\n**Steps to reproduce**:\r\n\r\nCreate a field in a field set that is flattened with the ignore above key\r\n\r\n```yaml\r\n\r\n - name: enrichment_data\r\n level: custom\r\n type: flattened\r\n description: >-\r\n Flattened object of enrichment data.\r\n ignore_above: 1024\r\n\r\n```\r\n\r\nPlease include a *minimal* but *complete* recreation of the problem.\r\n\r\n1. Created field set ecs file\r\n2. Added field set to subset.yml\r\n3. Generated artifacts\r\n4. Examine generated component template with flattened field.\r\n\r\n**Provide logs (if relevant)**:\r\n\r\nGenerated component template\r\n\r\n```json\r\n{\r\n \"_meta\": {\r\n \"ecs_version\": \"8.8.0\"\r\n },\r\n \"template\": {\r\n \"mappings\": {\r\n \"properties\": {\r\n \"enrichment_data\": {\r\n \"type\": \"flattened\"\r\n }\r\n }\r\n }\r\n }\r\n}\r\n\r\n```\r\n\r\n**Any additional context:**\r\nThis key is necessary so elasticsearch does not try to index fields with large values. \r\n\r\nNote: Adding the property after the component template is generated works as a temporary workaround. \n", "before_files": [{"content": "# Licensed to Elasticsearch B.V. under one or more contributor\n# license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright\n# ownership. Elasticsearch B.V. licenses this file to you under\n# the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# \thttp://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport json\nimport sys\nfrom typing import (\n Dict,\n List,\n Optional,\n Union\n)\n\nfrom os.path import join\n\nfrom generators import ecs_helpers\nfrom _types import (\n Field,\n FieldNestedEntry,\n)\n\n# Composable Template\n\n\ndef generate(\n ecs_nested: Dict[str, FieldNestedEntry],\n ecs_version: str,\n out_dir: str,\n mapping_settings_file: str,\n template_settings_file: str\n) -> None:\n \"\"\"This generates all artifacts for the composable template approach\"\"\"\n all_component_templates(ecs_nested, ecs_version, out_dir)\n component_names = component_name_convention(ecs_version, ecs_nested)\n save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file, template_settings_file)\n\n\ndef save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file, template_settings_file):\n mappings_section = mapping_settings(mapping_settings_file)\n template = template_settings(ecs_version, mappings_section, template_settings_file, component_names=component_names)\n\n filename = join(out_dir, \"elasticsearch/composable/template.json\")\n save_json(filename, template)\n\n\ndef all_component_templates(\n ecs_nested: Dict[str, FieldNestedEntry],\n ecs_version: str,\n out_dir: str\n) -> None:\n \"\"\"Generate one component template per field set\"\"\"\n component_dir: str = join(out_dir, 'elasticsearch/composable/component')\n ecs_helpers.make_dirs(component_dir)\n\n for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():\n field_mappings = {}\n for (flat_name, field) in fieldset['fields'].items():\n name_parts = flat_name.split('.')\n dict_add_nested(field_mappings, name_parts, entry_for(field))\n\n save_component_template(fieldset_name, field['level'], ecs_version, component_dir, field_mappings)\n\n\ndef save_component_template(\n template_name: str,\n field_level: str,\n ecs_version: str,\n out_dir: str,\n field_mappings: Dict\n) -> None:\n filename: str = join(out_dir, template_name) + \".json\"\n reference_url: str = \"https://www.elastic.co/guide/en/ecs/current/ecs-{}.html\".format(template_name)\n\n template: Dict = {\n 'template': {'mappings': {'properties': field_mappings}},\n '_meta': {\n 'ecs_version': ecs_version,\n }\n }\n\n \"\"\"Only generate a documentation link for ECS fields\"\"\"\n if (field_level != 'custom'):\n template['_meta']['documentation'] = reference_url\n\n save_json(filename, template)\n\n\ndef component_name_convention(\n ecs_version: str,\n ecs_nested: Dict[str, FieldNestedEntry]\n) -> List[str]:\n version: str = ecs_version.replace('+', '-')\n names: List[str] = []\n for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():\n names.append(\"ecs_{}_{}\".format(version, fieldset_name.lower()))\n return names\n\n\ndef candidate_components(ecs_nested: Dict[str, FieldNestedEntry]) -> Dict[str, FieldNestedEntry]:\n \"\"\"Returns same structure as ecs_nested, but skips all field sets with reusable.top_level: False\"\"\"\n components: Dict[str, FieldNestedEntry] = {}\n for (fieldset_name, fieldset) in ecs_nested.items():\n if fieldset.get('reusable', None):\n if not fieldset['reusable']['top_level']:\n continue\n components[fieldset_name] = fieldset\n return components\n\n\n# Legacy template\n\n\ndef generate_legacy(\n ecs_flat: Dict[str, Field],\n ecs_version: str,\n out_dir: str,\n mapping_settings_file: str,\n template_settings_file: str\n) -> None:\n \"\"\"Generate the legacy 
index template\"\"\"\n field_mappings = {}\n for flat_name in sorted(ecs_flat):\n field = ecs_flat[flat_name]\n name_parts = flat_name.split('.')\n dict_add_nested(field_mappings, name_parts, entry_for(field))\n\n mappings_section: Dict = mapping_settings(mapping_settings_file)\n mappings_section['properties'] = field_mappings\n\n generate_legacy_template_version(ecs_version, mappings_section, out_dir, template_settings_file)\n\n\ndef generate_legacy_template_version(\n ecs_version: str,\n mappings_section: Dict,\n out_dir: str,\n template_settings_file: str\n) -> None:\n ecs_helpers.make_dirs(join(out_dir, 'elasticsearch', \"legacy\"))\n template: Dict = template_settings(ecs_version, mappings_section, template_settings_file, is_legacy=True)\n\n filename: str = join(out_dir, \"elasticsearch/legacy/template.json\")\n save_json(filename, template)\n\n\n# Common helpers\n\n\ndef dict_add_nested(\n dct: Dict,\n name_parts: List[str],\n value: Dict\n) -> None:\n current_nesting: str = name_parts[0]\n rest_name_parts: List[str] = name_parts[1:]\n if len(rest_name_parts) > 0:\n dct.setdefault(current_nesting, {})\n dct[current_nesting].setdefault('properties', {})\n\n dict_add_nested(\n dct[current_nesting]['properties'],\n rest_name_parts,\n value)\n\n else:\n if current_nesting in dct and 'type' in value and 'object' == value['type']:\n return\n dct[current_nesting] = value\n\n\ndef entry_for(field: Field) -> Dict:\n field_entry: Dict = {'type': field['type']}\n try:\n if field['type'] == 'object' or field['type'] == 'nested':\n if 'enabled' in field and not field['enabled']:\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['enabled'])\n # the index field is only valid for field types that are not object and nested\n elif 'index' in field and not field['index']:\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['index', 'doc_values'])\n\n if field['type'] == 'keyword':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['ignore_above'])\n elif field['type'] == 'constant_keyword':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['value'])\n elif field['type'] == 'text':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['norms'])\n elif field['type'] == 'alias':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['path'])\n elif field['type'] == 'scaled_float':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['scaling_factor'])\n\n if 'multi_fields' in field:\n field_entry['fields'] = {}\n for mf in field['multi_fields']:\n mf_type = mf['type']\n mf_entry = {'type': mf_type}\n if mf_type == 'keyword':\n ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['normalizer', 'ignore_above'])\n elif mf_type == 'text':\n ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['norms', 'analyzer'])\n if 'parameters' in mf:\n mf_entry.update(mf['parameters'])\n field_entry['fields'][mf['name']] = mf_entry\n\n if 'parameters' in field:\n field_entry.update(field['parameters'])\n\n except KeyError as ex:\n print(\"Exception {} occurred for field {}\".format(ex, field))\n raise ex\n return field_entry\n\n\ndef mapping_settings(mapping_settings_file: str) -> Dict:\n if mapping_settings_file:\n with open(mapping_settings_file) as f:\n mappings = json.load(f)\n else:\n mappings = default_mapping_settings()\n return mappings\n\n\ndef template_settings(\n ecs_version: str,\n mappings_section: Dict,\n template_settings_file: Union[str, None],\n is_legacy: Optional[bool] = False,\n component_names: Optional[List[str]] = None\n) -> Dict:\n if 
template_settings_file:\n with open(template_settings_file) as f:\n template = json.load(f)\n else:\n if is_legacy:\n template = default_legacy_template_settings(ecs_version)\n else:\n template = default_template_settings(ecs_version)\n\n finalize_template(template, ecs_version, is_legacy, mappings_section, component_names)\n\n return template\n\n\ndef finalize_template(\n template: Dict,\n ecs_version: str,\n is_legacy: bool,\n mappings_section: Dict,\n component_names: List[str]\n) -> None:\n if is_legacy:\n if mappings_section:\n template['mappings'] = mappings_section\n\n # _meta can't be at template root in legacy templates, so moving back to mappings section\n # if present\n if '_meta' in template:\n mappings_section['_meta'] = template.pop('_meta')\n\n else:\n template['template']['mappings'] = mappings_section\n template['composed_of'] = component_names\n template['_meta'] = {\n \"ecs_version\": ecs_version,\n \"description\": \"Sample composable template that includes all ECS fields\"\n }\n\n\ndef save_json(file: str, data: Dict) -> None:\n open_mode = \"wb\"\n if sys.version_info >= (3, 0):\n open_mode = \"w\"\n with open(file, open_mode) as jsonfile:\n json.dump(data, jsonfile, indent=2, sort_keys=True)\n jsonfile.write('\\n')\n\n\ndef default_template_settings(ecs_version: str) -> Dict:\n return {\n \"index_patterns\": [\"try-ecs-*\"],\n \"_meta\": {\n \"ecs_version\": ecs_version,\n \"description\": \"Sample composable template that includes all ECS fields\"\n },\n \"priority\": 1, # Very low, as this is a sample template\n \"template\": {\n \"settings\": {\n \"index\": {\n \"codec\": \"best_compression\",\n \"mapping\": {\n \"total_fields\": {\n \"limit\": 2000\n }\n }\n }\n },\n }\n }\n\n\ndef default_legacy_template_settings(ecs_version: str) -> Dict:\n return {\n \"index_patterns\": [\"try-ecs-*\"],\n \"_meta\": {\"version\": ecs_version},\n \"order\": 1,\n \"settings\": {\n \"index\": {\n \"mapping\": {\n \"total_fields\": {\n \"limit\": 10000\n }\n },\n \"refresh_interval\": \"5s\"\n }\n }\n }\n\n\ndef default_mapping_settings() -> Dict:\n return {\n \"date_detection\": False,\n \"dynamic_templates\": [\n {\n \"strings_as_keyword\": {\n \"mapping\": {\n \"ignore_above\": 1024,\n \"type\": \"keyword\"\n },\n \"match_mapping_type\": \"string\"\n }\n }\n ]\n }\n", "path": "scripts/generators/es_template.py"}], "after_files": [{"content": "# Licensed to Elasticsearch B.V. under one or more contributor\n# license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright\n# ownership. Elasticsearch B.V. licenses this file to you under\n# the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# \thttp://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport json\nimport sys\nfrom typing import (\n Dict,\n List,\n Optional,\n Union\n)\n\nfrom os.path import join\n\nfrom generators import ecs_helpers\nfrom _types import (\n Field,\n FieldNestedEntry,\n)\n\n# Composable Template\n\n\ndef generate(\n ecs_nested: Dict[str, FieldNestedEntry],\n ecs_version: str,\n out_dir: str,\n mapping_settings_file: str,\n template_settings_file: str\n) -> None:\n \"\"\"This generates all artifacts for the composable template approach\"\"\"\n all_component_templates(ecs_nested, ecs_version, out_dir)\n component_names = component_name_convention(ecs_version, ecs_nested)\n save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file, template_settings_file)\n\n\ndef save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file, template_settings_file):\n mappings_section = mapping_settings(mapping_settings_file)\n template = template_settings(ecs_version, mappings_section, template_settings_file, component_names=component_names)\n\n filename = join(out_dir, \"elasticsearch/composable/template.json\")\n save_json(filename, template)\n\n\ndef all_component_templates(\n ecs_nested: Dict[str, FieldNestedEntry],\n ecs_version: str,\n out_dir: str\n) -> None:\n \"\"\"Generate one component template per field set\"\"\"\n component_dir: str = join(out_dir, 'elasticsearch/composable/component')\n ecs_helpers.make_dirs(component_dir)\n\n for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():\n field_mappings = {}\n for (flat_name, field) in fieldset['fields'].items():\n name_parts = flat_name.split('.')\n dict_add_nested(field_mappings, name_parts, entry_for(field))\n\n save_component_template(fieldset_name, field['level'], ecs_version, component_dir, field_mappings)\n\n\ndef save_component_template(\n template_name: str,\n field_level: str,\n ecs_version: str,\n out_dir: str,\n field_mappings: Dict\n) -> None:\n filename: str = join(out_dir, template_name) + \".json\"\n reference_url: str = \"https://www.elastic.co/guide/en/ecs/current/ecs-{}.html\".format(template_name)\n\n template: Dict = {\n 'template': {'mappings': {'properties': field_mappings}},\n '_meta': {\n 'ecs_version': ecs_version,\n }\n }\n\n \"\"\"Only generate a documentation link for ECS fields\"\"\"\n if (field_level != 'custom'):\n template['_meta']['documentation'] = reference_url\n\n save_json(filename, template)\n\n\ndef component_name_convention(\n ecs_version: str,\n ecs_nested: Dict[str, FieldNestedEntry]\n) -> List[str]:\n version: str = ecs_version.replace('+', '-')\n names: List[str] = []\n for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():\n names.append(\"ecs_{}_{}\".format(version, fieldset_name.lower()))\n return names\n\n\ndef candidate_components(ecs_nested: Dict[str, FieldNestedEntry]) -> Dict[str, FieldNestedEntry]:\n \"\"\"Returns same structure as ecs_nested, but skips all field sets with reusable.top_level: False\"\"\"\n components: Dict[str, FieldNestedEntry] = {}\n for (fieldset_name, fieldset) in ecs_nested.items():\n if fieldset.get('reusable', None):\n if not fieldset['reusable']['top_level']:\n continue\n components[fieldset_name] = fieldset\n return components\n\n\n# Legacy template\n\n\ndef generate_legacy(\n ecs_flat: Dict[str, Field],\n ecs_version: str,\n out_dir: str,\n mapping_settings_file: str,\n template_settings_file: str\n) -> None:\n \"\"\"Generate the legacy 
index template\"\"\"\n field_mappings = {}\n for flat_name in sorted(ecs_flat):\n field = ecs_flat[flat_name]\n name_parts = flat_name.split('.')\n dict_add_nested(field_mappings, name_parts, entry_for(field))\n\n mappings_section: Dict = mapping_settings(mapping_settings_file)\n mappings_section['properties'] = field_mappings\n\n generate_legacy_template_version(ecs_version, mappings_section, out_dir, template_settings_file)\n\n\ndef generate_legacy_template_version(\n ecs_version: str,\n mappings_section: Dict,\n out_dir: str,\n template_settings_file: str\n) -> None:\n ecs_helpers.make_dirs(join(out_dir, 'elasticsearch', \"legacy\"))\n template: Dict = template_settings(ecs_version, mappings_section, template_settings_file, is_legacy=True)\n\n filename: str = join(out_dir, \"elasticsearch/legacy/template.json\")\n save_json(filename, template)\n\n\n# Common helpers\n\n\ndef dict_add_nested(\n dct: Dict,\n name_parts: List[str],\n value: Dict\n) -> None:\n current_nesting: str = name_parts[0]\n rest_name_parts: List[str] = name_parts[1:]\n if len(rest_name_parts) > 0:\n dct.setdefault(current_nesting, {})\n dct[current_nesting].setdefault('properties', {})\n\n dict_add_nested(\n dct[current_nesting]['properties'],\n rest_name_parts,\n value)\n\n else:\n if current_nesting in dct and 'type' in value and 'object' == value['type']:\n return\n dct[current_nesting] = value\n\n\ndef entry_for(field: Field) -> Dict:\n field_entry: Dict = {'type': field['type']}\n try:\n if field['type'] == 'object' or field['type'] == 'nested':\n if 'enabled' in field and not field['enabled']:\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['enabled'])\n # the index field is only valid for field types that are not object and nested\n elif 'index' in field and not field['index']:\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['index', 'doc_values'])\n\n if field['type'] == 'keyword' or field['type'] == 'flattened':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['ignore_above'])\n elif field['type'] == 'constant_keyword':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['value'])\n elif field['type'] == 'text':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['norms'])\n elif field['type'] == 'alias':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['path'])\n elif field['type'] == 'scaled_float':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['scaling_factor'])\n\n if 'multi_fields' in field:\n field_entry['fields'] = {}\n for mf in field['multi_fields']:\n mf_type = mf['type']\n mf_entry = {'type': mf_type}\n if mf_type == 'keyword':\n ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['normalizer', 'ignore_above'])\n elif mf_type == 'text':\n ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['norms', 'analyzer'])\n if 'parameters' in mf:\n mf_entry.update(mf['parameters'])\n field_entry['fields'][mf['name']] = mf_entry\n\n if 'parameters' in field:\n field_entry.update(field['parameters'])\n\n except KeyError as ex:\n print(\"Exception {} occurred for field {}\".format(ex, field))\n raise ex\n return field_entry\n\n\ndef mapping_settings(mapping_settings_file: str) -> Dict:\n if mapping_settings_file:\n with open(mapping_settings_file) as f:\n mappings = json.load(f)\n else:\n mappings = default_mapping_settings()\n return mappings\n\n\ndef template_settings(\n ecs_version: str,\n mappings_section: Dict,\n template_settings_file: Union[str, None],\n is_legacy: Optional[bool] = False,\n component_names: Optional[List[str]] = None\n) 
-> Dict:\n if template_settings_file:\n with open(template_settings_file) as f:\n template = json.load(f)\n else:\n if is_legacy:\n template = default_legacy_template_settings(ecs_version)\n else:\n template = default_template_settings(ecs_version)\n\n finalize_template(template, ecs_version, is_legacy, mappings_section, component_names)\n\n return template\n\n\ndef finalize_template(\n template: Dict,\n ecs_version: str,\n is_legacy: bool,\n mappings_section: Dict,\n component_names: List[str]\n) -> None:\n if is_legacy:\n if mappings_section:\n template['mappings'] = mappings_section\n\n # _meta can't be at template root in legacy templates, so moving back to mappings section\n # if present\n if '_meta' in template:\n mappings_section['_meta'] = template.pop('_meta')\n\n else:\n template['template']['mappings'] = mappings_section\n template['composed_of'] = component_names\n template['_meta'] = {\n \"ecs_version\": ecs_version,\n \"description\": \"Sample composable template that includes all ECS fields\"\n }\n\n\ndef save_json(file: str, data: Dict) -> None:\n open_mode = \"wb\"\n if sys.version_info >= (3, 0):\n open_mode = \"w\"\n with open(file, open_mode) as jsonfile:\n json.dump(data, jsonfile, indent=2, sort_keys=True)\n jsonfile.write('\\n')\n\n\ndef default_template_settings(ecs_version: str) -> Dict:\n return {\n \"index_patterns\": [\"try-ecs-*\"],\n \"_meta\": {\n \"ecs_version\": ecs_version,\n \"description\": \"Sample composable template that includes all ECS fields\"\n },\n \"priority\": 1, # Very low, as this is a sample template\n \"template\": {\n \"settings\": {\n \"index\": {\n \"codec\": \"best_compression\",\n \"mapping\": {\n \"total_fields\": {\n \"limit\": 2000\n }\n }\n }\n },\n }\n }\n\n\ndef default_legacy_template_settings(ecs_version: str) -> Dict:\n return {\n \"index_patterns\": [\"try-ecs-*\"],\n \"_meta\": {\"version\": ecs_version},\n \"order\": 1,\n \"settings\": {\n \"index\": {\n \"mapping\": {\n \"total_fields\": {\n \"limit\": 10000\n }\n },\n \"refresh_interval\": \"5s\"\n }\n }\n }\n\n\ndef default_mapping_settings() -> Dict:\n return {\n \"date_detection\": False,\n \"dynamic_templates\": [\n {\n \"strings_as_keyword\": {\n \"mapping\": {\n \"ignore_above\": 1024,\n \"type\": \"keyword\"\n },\n \"match_mapping_type\": \"string\"\n }\n }\n ]\n }\n", "path": "scripts/generators/es_template.py"}]} | 4,050 | 162 |
gh_patches_debug_11364 | rasdani/github-patches | git_diff | praw-dev__praw-1259 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PRAW raises prawcore.exceptions.BadRequest when attempting to call send_removal_message()
## Issue Description
Any time I try to use `send_removal_message()`, PRAW gives me a 400 error. Looking at the official Reddit API, it appears the endpoints for that function have been removed.
**Attempt:**
```python
PS C:\Users\Michael\nobackup\Code\Projects\myproject> py
Python 3.7.0 (v3.7.0:1bf9cc5093, Jun 27 2018, 04:06:47) [MSC v.1914 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license" for more information.
>>> import praw
>>> reddit = praw.Reddit(user_agent='[my user agent]')
>>> sub = reddit.comment('[a comment I have the ability to moderate]')
>>> sub
Comment(id='[same as before]')
>>> sub.body
'Test comment'
>>> sub.mod
<praw.models.reddit.comment.CommentModeration object at 0x00F20EF0>
>>> sub.mod.send_removal_message
<bound method ThingModerationMixin.send_removal_message of <praw.models.reddit.comment.CommentModeration object at 0x00F20EF0>>
>>> sub.mod.send_removal_message('hi')
{"item_id": ["t1_ew39cg7"], "message": "hi", "title": "ignored", "type": "public"} # I added this for debugging
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\Michael\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\models\reddit\mixins\__init__.py", line 174, in send_removal_message
return self.thing._reddit.post(url, data={"json": dumps(data)}) or None
File "C:\Users\Michael\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\reddit.py", line 523, in post
"POST", path, data=data or {}, files=files, params=params
File "C:\Users\Michael\AppData\Local\Programs\Python\Python37-32\lib\site-packages\praw\reddit.py", line 577, in request
method, path, data=data, files=files, params=params
File "C:\Users\Michael\AppData\Local\Programs\Python\Python37-32\lib\site-packages\prawcore\sessions.py", line 185, in request
params=params, url=url)
File "C:\Users\Michael\AppData\Local\Programs\Python\Python37-32\lib\site-packages\prawcore\sessions.py", line 130, in _request_with_retries
raise self.STATUS_EXCEPTIONS[response.status_code](response)
prawcore.exceptions.BadRequest: received 400 HTTP response
>>>
```
## System Information
- PRAW Version: 6.3.1
- Python Version: 3.7.0
- Operating System: Windows 10 x64
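The documentation patch further below notes that the item has to be removed before a removal message can be sent; a minimal sketch of that order of calls (credentials and the comment ID are placeholders, assuming a script-type app with password auth):

```python
import praw

# Placeholder credentials for a script-type app; substitute real values.
reddit = praw.Reddit(
    client_id="CLIENT_ID",
    client_secret="CLIENT_SECRET",
    username="MOD_USERNAME",
    password="PASSWORD",
    user_agent="removal-message-example by u/MOD_USERNAME",
)

comment = reddit.comment("ew39cg7")  # comment ID taken from the traceback above
comment.mod.remove()                 # removal must happen before the message
comment.mod.send_removal_message("hi", type="public")
```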
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `praw/models/reddit/mixins/__init__.py`
Content:
```
1 """Package providing reddit class mixins."""
2 from json import dumps
3
4 from ....const import API_PATH
5 from .editable import EditableMixin
6 from .fullname import FullnameMixin
7 from .gildable import GildableMixin
8 from .inboxable import InboxableMixin
9 from .inboxtoggleable import InboxToggleableMixin
10 from .messageable import MessageableMixin
11 from .replyable import ReplyableMixin
12 from .reportable import ReportableMixin
13 from .savable import SavableMixin
14 from .votable import VotableMixin
15
16
17 class ThingModerationMixin:
18 """Provides moderation methods for Comments and Submissions."""
19
20 REMOVAL_MESSAGE_API = None
21
22 def _add_removal_reason(self, mod_note="", reason_id=None):
23 """Add a removal reason for a Comment or Submission.
24
25 :param mod_note: A message for the other moderators.
26 :param reason_id: The removal reason ID.
27
28 It is necessary to first call :meth:`~.remove` on the
29 :class:`~.Comment` or :class:`~.Submission`.
30
31 If ``reason_id`` is not specified, ``mod_note`` cannot be blank.
32
33 """
34 if not reason_id and not mod_note:
35 raise ValueError(
36 "mod_note cannot be blank if reason_id is not specified"
37 )
38 # Only the first element of the item_id list is used.
39 data = {
40 "item_ids": [self.thing.fullname],
41 "mod_note": mod_note,
42 "reason_id": reason_id,
43 }
44 self.thing._reddit.post(
45 API_PATH["removal_reasons"], data={"json": dumps(data)}
46 )
47
48 def approve(self):
49 """Approve a :class:`~.Comment` or :class:`~.Submission`.
50
51 Approving a comment or submission reverts a removal, resets the report
52 counter, adds a green check mark indicator (only visible to other
53 moderators) on the website view, and sets the ``approved_by`` attribute
54 to the authenticated user.
55
56 Example usage:
57
58 .. code-block:: python
59
60 # approve a comment:
61 comment = reddit.comment('dkk4qjd')
62 comment.mod.approve()
63 # approve a submission:
64 submission = reddit.submission(id='5or86n')
65 submission.mod.approve()
66
67 """
68 self.thing._reddit.post(
69 API_PATH["approve"], data={"id": self.thing.fullname}
70 )
71
72 def distinguish(self, how="yes", sticky=False):
73 """Distinguish a :class:`~.Comment` or :class:`~.Submission`.
74
75 :param how: One of 'yes', 'no', 'admin', 'special'. 'yes' adds a
76 moderator level distinguish. 'no' removes any distinction. 'admin'
77 and 'special' require special user privileges to use.
78 :param sticky: Comment is stickied if ``True``, placing it at the top
79 of the comment page regardless of score. If thing is not a
80 top-level comment, this parameter is silently ignored.
81
82 Example usage:
83
84 .. code-block:: python
85
86 # distinguish and sticky a comment:
87 comment = reddit.comment('dkk4qjd')
88 comment.mod.distinguish(how='yes', sticky=True)
89 # undistinguish a submission:
90 submission = reddit.submission(id='5or86n')
91 submission.mod.distinguish(how='no')
92
93 See also :meth:`~.undistinguish`
94
95 """
96 data = {"how": how, "id": self.thing.fullname}
97 if sticky and getattr(self.thing, "is_root", False):
98 data["sticky"] = True
99 self.thing._reddit.post(API_PATH["distinguish"], data=data)
100
101 def ignore_reports(self):
102 """Ignore future reports on a :class:`~.Comment` or :class:`~.Submission`.
103
104 Calling this method will prevent future reports on this Comment or
105 Submission from both triggering notifications and appearing in the
106 various moderation listings. The report count will still increment on
107 the Comment or Submission.
108
109 Example usage:
110
111 .. code-block:: python
112
113 # ignore future reports on a comment:
114 comment = reddit.comment('dkk4qjd')
115 comment.mod.ignore_reports()
116 # ignore future reports on a submission:
117 submission = reddit.submission(id='5or86n')
118 submission.mod.ignore_reports()
119
120 See also :meth:`~.unignore_reports`
121
122 """
123 self.thing._reddit.post(
124 API_PATH["ignore_reports"], data={"id": self.thing.fullname}
125 )
126
127 def lock(self):
128 """Lock a :class:`~.Comment` or :class:`~.Submission`.
129
130 Example usage:
131
132 .. code-block:: python
133
134 # lock a comment:
135 comment = reddit.comment('dkk4qjd')
136 comment.mod.lock()
137 # lock a submission:
138 submission = reddit.submission(id='5or86n')
139 submission.mod.lock()
140
141 See also :meth:`~.unlock`
142
143 """
144 self.thing._reddit.post(
145 API_PATH["lock"], data={"id": self.thing.fullname}
146 )
147
148 def remove(self, spam=False, mod_note="", reason_id=None):
149 """Remove a :class:`~.Comment` or :class:`~.Submission`.
150
151 :param mod_note: A message for the other moderators.
152 :param spam: When True, use the removal to help train the Subreddit's
153 spam filter (default: False).
154 :param reason_id: The removal reason ID.
155
156 If either ``reason_id`` or ``mod_note`` are provided, a second API
157 call is made to add the removal reason.
158
159 Example usage:
160
161 .. code-block:: python
162
163 # remove a comment and mark as spam:
164 comment = reddit.comment('dkk4qjd')
165 comment.mod.remove(spam=True)
166 # remove a submission
167 submission = reddit.submission(id='5or86n')
168 submission.mod.remove()
169 # remove a submission with a removal reason
170 reason = reddit.subreddit.mod.removal_reasons["110ni21zo23ql"]
171 submission = reddit.submission(id="5or86n")
172 submission.mod.remove(reason_id=reason.id)
173
174 """
175 data = {"id": self.thing.fullname, "spam": bool(spam)}
176 self.thing._reddit.post(API_PATH["remove"], data=data)
177 if any([reason_id, mod_note]):
178 self._add_removal_reason(mod_note, reason_id)
179
180 def send_removal_message(
181 self,
182 message,
183 title="ignored",
184 type="public", # pylint: disable=redefined-builtin
185 ):
186 """Send a removal message for a :class:`~.Comment` or :class:`~.Submission`.
187
188 Reddit adds human-readable information about the object to the message.
189
190 :param type: One of 'public', 'private', 'private_exposed'.
191 'public' leaves a stickied comment on the post.
192 'private' sends a Modmail message with hidden username.
193 'private_exposed' sends a Modmail message without hidden username.
194 :param title: The short reason given in the message.
195 (Ignored if type is 'public'.)
196 :param message: The body of the message.
197
198 If ``type`` is 'public', the new :class:`~.Comment` is returned.
199 """
200 # The API endpoint used to send removal messages is different
201 # for posts and comments, so the derived classes specify which one.
202 if self.REMOVAL_MESSAGE_API is None:
203 raise NotImplementedError("ThingModerationMixin must be extended.")
204 url = API_PATH[self.REMOVAL_MESSAGE_API]
205
206 # Only the first element of the item_id list is used.
207 data = {
208 "item_id": [self.thing.fullname],
209 "message": message,
210 "title": title,
211 "type": type,
212 }
213
214 return self.thing._reddit.post(url, data={"json": dumps(data)}) or None
215
216 def undistinguish(self):
217 """Remove mod, admin, or special distinguishing from an object.
218
219 Also unstickies the object if applicable.
220
221 Example usage:
222
223 .. code-block:: python
224
225 # undistinguish a comment:
226 comment = reddit.comment('dkk4qjd')
227 comment.mod.undistinguish()
228 # undistinguish a submission:
229 submission = reddit.submission(id='5or86n')
230 submission.mod.undistinguish()
231
232 See also :meth:`~.distinguish`
233
234 """
235 self.distinguish(how="no")
236
237 def unignore_reports(self):
238 """Resume receiving future reports on a Comment or Submission.
239
240 Future reports on this :class:`~.Comment` or :class:`~.Submission`
241 will cause notifications, and appear in the various moderation
242 listings.
243
244 Example usage:
245
246 .. code-block:: python
247
248 # accept future reports on a comment:
249 comment = reddit.comment('dkk4qjd')
250 comment.mod.unignore_reports()
251 # accept future reports on a submission:
252 submission = reddit.submission(id='5or86n')
253 submission.mod.unignore_reports()
254
255 See also :meth:`~.ignore_reports`
256
257 """
258 self.thing._reddit.post(
259 API_PATH["unignore_reports"], data={"id": self.thing.fullname}
260 )
261
262 def unlock(self):
263 """Unlock a :class:`~.Comment` or :class:`~.Submission`.
264
265 Example usage:
266
267 # unlock a comment:
268 comment = reddit.comment('dkk4qjd')
269 comment.mod.unlock()
270 # unlock a submission:
271 submission = reddit.submission(id='5or86n')
272 submission.mod.unlock()
273
274 See also :meth:`~.lock`
275
276 """
277 self.thing._reddit.post(
278 API_PATH["unlock"], data={"id": self.thing.fullname}
279 )
280
281
282 class UserContentMixin(
283 EditableMixin,
284 GildableMixin,
285 InboxToggleableMixin,
286 ReplyableMixin,
287 ReportableMixin,
288 SavableMixin,
289 VotableMixin,
290 ):
291 """A convenience mixin that applies to both Comments and Submissions."""
292
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/praw/models/reddit/mixins/__init__.py b/praw/models/reddit/mixins/__init__.py
--- a/praw/models/reddit/mixins/__init__.py
+++ b/praw/models/reddit/mixins/__init__.py
@@ -184,6 +184,11 @@
):
"""Send a removal message for a :class:`~.Comment` or :class:`~.Submission`.
+ .. warning:: The object has to be removed before giving it a removal
+ reason. Remove the object with :meth:`.remove`. Trying to add a
+ removal reason without removing the object will result in
+ ``prawcore.exceptions.BadRequest`` being thrown.
+
Reddit adds human-readable information about the object to the message.
:param type: One of 'public', 'private', 'private_exposed'.
| {"golden_diff": "diff --git a/praw/models/reddit/mixins/__init__.py b/praw/models/reddit/mixins/__init__.py\n--- a/praw/models/reddit/mixins/__init__.py\n+++ b/praw/models/reddit/mixins/__init__.py\n@@ -184,6 +184,11 @@\n ):\n \"\"\"Send a removal message for a :class:`~.Comment` or :class:`~.Submission`.\n \n+ .. warning:: The object has to be removed before giving it a removal\n+ reason. Remove the object with :meth:`.remove`. Trying to add a\n+ removal reason without removing the object will result in\n+ ``prawcore.exceptions.BadRequest`` being thrown.\n+\n Reddit adds human-readable information about the object to the message.\n \n :param type: One of 'public', 'private', 'private_exposed'.\n", "issue": "PRAW raises prawcore.exceptions.BadRequest when attempting to call send_removal_message()\n## Issue Description\r\n\r\nAny time I try to use `send_removal_message()`, PRAW gives me a 400 error. Looking at the official Reddit API, it appears the endpoints for that function have been removed.\r\n\r\n**Attempt:**\r\n```python\r\nPS C:\\Users\\Michael\\nobackup\\Code\\Projects\\myproject> py\r\nPython 3.7.0 (v3.7.0:1bf9cc5093, Jun 27 2018, 04:06:47) [MSC v.1914 32 bit (Intel)] on win32\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import praw\r\n>>> reddit = praw.Reddit(user_agent='[my user agent]')\r\n>>> sub = reddit.comment('[a comment I have the ability to moderate]')\r\n>>> sub\r\nComment(id='[same as before]')\r\n>>> sub.body\r\n'Test comment'\r\n>>> sub.mod\r\n<praw.models.reddit.comment.CommentModeration object at 0x00F20EF0>\r\n>>> sub.mod.send_removal_message\r\n<bound method ThingModerationMixin.send_removal_message of <praw.models.reddit.comment.CommentModeration object at 0x00F20EF0>>\r\n>>> sub.mod.send_removal_message('hi')\r\n{\"item_id\": [\"t1_ew39cg7\"], \"message\": \"hi\", \"title\": \"ignored\", \"type\": \"public\"} # I added this for debugging\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python37-32\\lib\\site-packages\\praw\\models\\reddit\\mixins\\__init__.py\", line 174, in send_removal_message\r\n return self.thing._reddit.post(url, data={\"json\": dumps(data)}) or None\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python37-32\\lib\\site-packages\\praw\\reddit.py\", line 523, in post\r\n \"POST\", path, data=data or {}, files=files, params=params\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python37-32\\lib\\site-packages\\praw\\reddit.py\", line 577, in request\r\n method, path, data=data, files=files, params=params\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python37-32\\lib\\site-packages\\prawcore\\sessions.py\", line 185, in request\r\n params=params, url=url)\r\n File \"C:\\Users\\Michael\\AppData\\Local\\Programs\\Python\\Python37-32\\lib\\site-packages\\prawcore\\sessions.py\", line 130, in _request_with_retries\r\n raise self.STATUS_EXCEPTIONS[response.status_code](response)\r\nprawcore.exceptions.BadRequest: received 400 HTTP response\r\n>>>\r\n```\r\n\r\n## System Information\r\n\r\n- PRAW Version: 6.3.1\r\n- Python Version: 3.7.0\r\n- Operating System: Windows 10 x64\r\n\n", "before_files": [{"content": "\"\"\"Package providing reddit class mixins.\"\"\"\nfrom json import dumps\n\nfrom ....const import API_PATH\nfrom .editable import EditableMixin\nfrom .fullname import FullnameMixin\nfrom .gildable import GildableMixin\nfrom .inboxable import 
InboxableMixin\nfrom .inboxtoggleable import InboxToggleableMixin\nfrom .messageable import MessageableMixin\nfrom .replyable import ReplyableMixin\nfrom .reportable import ReportableMixin\nfrom .savable import SavableMixin\nfrom .votable import VotableMixin\n\n\nclass ThingModerationMixin:\n \"\"\"Provides moderation methods for Comments and Submissions.\"\"\"\n\n REMOVAL_MESSAGE_API = None\n\n def _add_removal_reason(self, mod_note=\"\", reason_id=None):\n \"\"\"Add a removal reason for a Comment or Submission.\n\n :param mod_note: A message for the other moderators.\n :param reason_id: The removal reason ID.\n\n It is necessary to first call :meth:`~.remove` on the\n :class:`~.Comment` or :class:`~.Submission`.\n\n If ``reason_id`` is not specified, ``mod_note`` cannot be blank.\n\n \"\"\"\n if not reason_id and not mod_note:\n raise ValueError(\n \"mod_note cannot be blank if reason_id is not specified\"\n )\n # Only the first element of the item_id list is used.\n data = {\n \"item_ids\": [self.thing.fullname],\n \"mod_note\": mod_note,\n \"reason_id\": reason_id,\n }\n self.thing._reddit.post(\n API_PATH[\"removal_reasons\"], data={\"json\": dumps(data)}\n )\n\n def approve(self):\n \"\"\"Approve a :class:`~.Comment` or :class:`~.Submission`.\n\n Approving a comment or submission reverts a removal, resets the report\n counter, adds a green check mark indicator (only visible to other\n moderators) on the website view, and sets the ``approved_by`` attribute\n to the authenticated user.\n\n Example usage:\n\n .. code-block:: python\n\n # approve a comment:\n comment = reddit.comment('dkk4qjd')\n comment.mod.approve()\n # approve a submission:\n submission = reddit.submission(id='5or86n')\n submission.mod.approve()\n\n \"\"\"\n self.thing._reddit.post(\n API_PATH[\"approve\"], data={\"id\": self.thing.fullname}\n )\n\n def distinguish(self, how=\"yes\", sticky=False):\n \"\"\"Distinguish a :class:`~.Comment` or :class:`~.Submission`.\n\n :param how: One of 'yes', 'no', 'admin', 'special'. 'yes' adds a\n moderator level distinguish. 'no' removes any distinction. 'admin'\n and 'special' require special user privileges to use.\n :param sticky: Comment is stickied if ``True``, placing it at the top\n of the comment page regardless of score. If thing is not a\n top-level comment, this parameter is silently ignored.\n\n Example usage:\n\n .. code-block:: python\n\n # distinguish and sticky a comment:\n comment = reddit.comment('dkk4qjd')\n comment.mod.distinguish(how='yes', sticky=True)\n # undistinguish a submission:\n submission = reddit.submission(id='5or86n')\n submission.mod.distinguish(how='no')\n\n See also :meth:`~.undistinguish`\n\n \"\"\"\n data = {\"how\": how, \"id\": self.thing.fullname}\n if sticky and getattr(self.thing, \"is_root\", False):\n data[\"sticky\"] = True\n self.thing._reddit.post(API_PATH[\"distinguish\"], data=data)\n\n def ignore_reports(self):\n \"\"\"Ignore future reports on a :class:`~.Comment` or :class:`~.Submission`.\n\n Calling this method will prevent future reports on this Comment or\n Submission from both triggering notifications and appearing in the\n various moderation listings. The report count will still increment on\n the Comment or Submission.\n\n Example usage:\n\n .. 
code-block:: python\n\n # ignore future reports on a comment:\n comment = reddit.comment('dkk4qjd')\n comment.mod.ignore_reports()\n # ignore future reports on a submission:\n submission = reddit.submission(id='5or86n')\n submission.mod.ignore_reports()\n\n See also :meth:`~.unignore_reports`\n\n \"\"\"\n self.thing._reddit.post(\n API_PATH[\"ignore_reports\"], data={\"id\": self.thing.fullname}\n )\n\n def lock(self):\n \"\"\"Lock a :class:`~.Comment` or :class:`~.Submission`.\n\n Example usage:\n\n .. code-block:: python\n\n # lock a comment:\n comment = reddit.comment('dkk4qjd')\n comment.mod.lock()\n # lock a submission:\n submission = reddit.submission(id='5or86n')\n submission.mod.lock()\n\n See also :meth:`~.unlock`\n\n \"\"\"\n self.thing._reddit.post(\n API_PATH[\"lock\"], data={\"id\": self.thing.fullname}\n )\n\n def remove(self, spam=False, mod_note=\"\", reason_id=None):\n \"\"\"Remove a :class:`~.Comment` or :class:`~.Submission`.\n\n :param mod_note: A message for the other moderators.\n :param spam: When True, use the removal to help train the Subreddit's\n spam filter (default: False).\n :param reason_id: The removal reason ID.\n\n If either ``reason_id`` or ``mod_note`` are provided, a second API\n call is made to add the removal reason.\n\n Example usage:\n\n .. code-block:: python\n\n # remove a comment and mark as spam:\n comment = reddit.comment('dkk4qjd')\n comment.mod.remove(spam=True)\n # remove a submission\n submission = reddit.submission(id='5or86n')\n submission.mod.remove()\n # remove a submission with a removal reason\n reason = reddit.subreddit.mod.removal_reasons[\"110ni21zo23ql\"]\n submission = reddit.submission(id=\"5or86n\")\n submission.mod.remove(reason_id=reason.id)\n\n \"\"\"\n data = {\"id\": self.thing.fullname, \"spam\": bool(spam)}\n self.thing._reddit.post(API_PATH[\"remove\"], data=data)\n if any([reason_id, mod_note]):\n self._add_removal_reason(mod_note, reason_id)\n\n def send_removal_message(\n self,\n message,\n title=\"ignored\",\n type=\"public\", # pylint: disable=redefined-builtin\n ):\n \"\"\"Send a removal message for a :class:`~.Comment` or :class:`~.Submission`.\n\n Reddit adds human-readable information about the object to the message.\n\n :param type: One of 'public', 'private', 'private_exposed'.\n 'public' leaves a stickied comment on the post.\n 'private' sends a Modmail message with hidden username.\n 'private_exposed' sends a Modmail message without hidden username.\n :param title: The short reason given in the message.\n (Ignored if type is 'public'.)\n :param message: The body of the message.\n\n If ``type`` is 'public', the new :class:`~.Comment` is returned.\n \"\"\"\n # The API endpoint used to send removal messages is different\n # for posts and comments, so the derived classes specify which one.\n if self.REMOVAL_MESSAGE_API is None:\n raise NotImplementedError(\"ThingModerationMixin must be extended.\")\n url = API_PATH[self.REMOVAL_MESSAGE_API]\n\n # Only the first element of the item_id list is used.\n data = {\n \"item_id\": [self.thing.fullname],\n \"message\": message,\n \"title\": title,\n \"type\": type,\n }\n\n return self.thing._reddit.post(url, data={\"json\": dumps(data)}) or None\n\n def undistinguish(self):\n \"\"\"Remove mod, admin, or special distinguishing from an object.\n\n Also unstickies the object if applicable.\n\n Example usage:\n\n .. 
code-block:: python\n\n # undistinguish a comment:\n comment = reddit.comment('dkk4qjd')\n comment.mod.undistinguish()\n # undistinguish a submission:\n submission = reddit.submission(id='5or86n')\n submission.mod.undistinguish()\n\n See also :meth:`~.distinguish`\n\n \"\"\"\n self.distinguish(how=\"no\")\n\n def unignore_reports(self):\n \"\"\"Resume receiving future reports on a Comment or Submission.\n\n Future reports on this :class:`~.Comment` or :class:`~.Submission`\n will cause notifications, and appear in the various moderation\n listings.\n\n Example usage:\n\n .. code-block:: python\n\n # accept future reports on a comment:\n comment = reddit.comment('dkk4qjd')\n comment.mod.unignore_reports()\n # accept future reports on a submission:\n submission = reddit.submission(id='5or86n')\n submission.mod.unignore_reports()\n\n See also :meth:`~.ignore_reports`\n\n \"\"\"\n self.thing._reddit.post(\n API_PATH[\"unignore_reports\"], data={\"id\": self.thing.fullname}\n )\n\n def unlock(self):\n \"\"\"Unlock a :class:`~.Comment` or :class:`~.Submission`.\n\n Example usage:\n\n # unlock a comment:\n comment = reddit.comment('dkk4qjd')\n comment.mod.unlock()\n # unlock a submission:\n submission = reddit.submission(id='5or86n')\n submission.mod.unlock()\n\n See also :meth:`~.lock`\n\n \"\"\"\n self.thing._reddit.post(\n API_PATH[\"unlock\"], data={\"id\": self.thing.fullname}\n )\n\n\nclass UserContentMixin(\n EditableMixin,\n GildableMixin,\n InboxToggleableMixin,\n ReplyableMixin,\n ReportableMixin,\n SavableMixin,\n VotableMixin,\n):\n \"\"\"A convenience mixin that applies to both Comments and Submissions.\"\"\"\n", "path": "praw/models/reddit/mixins/__init__.py"}], "after_files": [{"content": "\"\"\"Package providing reddit class mixins.\"\"\"\nfrom json import dumps\nfrom ....const import API_PATH\nfrom .editable import EditableMixin\nfrom .fullname import FullnameMixin\nfrom .gildable import GildableMixin\nfrom .inboxable import InboxableMixin\nfrom .inboxtoggleable import InboxToggleableMixin\nfrom .messageable import MessageableMixin\nfrom .replyable import ReplyableMixin\nfrom .reportable import ReportableMixin\nfrom .savable import SavableMixin\nfrom .votable import VotableMixin\n\n\nclass ThingModerationMixin:\n \"\"\"Provides moderation methods for Comments and Submissions.\"\"\"\n\n REMOVAL_MESSAGE_API = None\n\n def _add_removal_reason(self, mod_note=\"\", reason_id=None):\n \"\"\"Add a removal reason for a Comment or Submission.\n\n :param mod_note: A message for the other moderators.\n :param reason_id: The removal reason ID.\n\n It is necessary to first call :meth:`~.remove` on the\n :class:`~.Comment` or :class:`~.Submission`.\n\n If ``reason_id`` is not specified, ``mod_note`` cannot be blank.\n\n \"\"\"\n if not reason_id and not mod_note:\n raise ValueError(\n \"mod_note cannot be blank if reason_id is not specified\"\n )\n # Only the first element of the item_id list is used.\n data = {\n \"item_ids\": [self.thing.fullname],\n \"mod_note\": mod_note,\n \"reason_id\": reason_id,\n }\n self.thing._reddit.post(\n API_PATH[\"removal_reasons\"], data={\"json\": dumps(data)}\n )\n\n def approve(self):\n \"\"\"Approve a :class:`~.Comment` or :class:`~.Submission`.\n\n Approving a comment or submission reverts a removal, resets the report\n counter, adds a green check mark indicator (only visible to other\n moderators) on the website view, and sets the ``approved_by`` attribute\n to the authenticated user.\n\n Example usage:\n\n .. 
code-block:: python\n\n # approve a comment:\n comment = reddit.comment('dkk4qjd')\n comment.mod.approve()\n # approve a submission:\n submission = reddit.submission(id='5or86n')\n submission.mod.approve()\n\n \"\"\"\n self.thing._reddit.post(\n API_PATH[\"approve\"], data={\"id\": self.thing.fullname}\n )\n\n def distinguish(self, how=\"yes\", sticky=False):\n \"\"\"Distinguish a :class:`~.Comment` or :class:`~.Submission`.\n\n :param how: One of 'yes', 'no', 'admin', 'special'. 'yes' adds a\n moderator level distinguish. 'no' removes any distinction. 'admin'\n and 'special' require special user privileges to use.\n :param sticky: Comment is stickied if ``True``, placing it at the top\n of the comment page regardless of score. If thing is not a\n top-level comment, this parameter is silently ignored.\n\n Example usage:\n\n .. code-block:: python\n\n # distinguish and sticky a comment:\n comment = reddit.comment('dkk4qjd')\n comment.mod.distinguish(how='yes', sticky=True)\n # undistinguish a submission:\n submission = reddit.submission(id='5or86n')\n submission.mod.distinguish(how='no')\n\n See also :meth:`~.undistinguish`\n\n \"\"\"\n data = {\"how\": how, \"id\": self.thing.fullname}\n if sticky and getattr(self.thing, \"is_root\", False):\n data[\"sticky\"] = True\n self.thing._reddit.post(API_PATH[\"distinguish\"], data=data)\n\n def ignore_reports(self):\n \"\"\"Ignore future reports on a :class:`~.Comment` or :class:`~.Submission`.\n\n Calling this method will prevent future reports on this Comment or\n Submission from both triggering notifications and appearing in the\n various moderation listings. The report count will still increment on\n the Comment or Submission.\n\n Example usage:\n\n .. code-block:: python\n\n # ignore future reports on a comment:\n comment = reddit.comment('dkk4qjd')\n comment.mod.ignore_reports()\n # ignore future reports on a submission:\n submission = reddit.submission(id='5or86n')\n submission.mod.ignore_reports()\n\n See also :meth:`~.unignore_reports`\n\n \"\"\"\n self.thing._reddit.post(\n API_PATH[\"ignore_reports\"], data={\"id\": self.thing.fullname}\n )\n\n def lock(self):\n \"\"\"Lock a :class:`~.Comment` or :class:`~.Submission`.\n\n Example usage:\n\n .. code-block:: python\n\n # lock a comment:\n comment = reddit.comment('dkk4qjd')\n comment.mod.lock()\n # lock a submission:\n submission = reddit.submission(id='5or86n')\n submission.mod.lock()\n\n See also :meth:`~.unlock`\n\n \"\"\"\n self.thing._reddit.post(\n API_PATH[\"lock\"], data={\"id\": self.thing.fullname}\n )\n\n def remove(self, spam=False, mod_note=\"\", reason_id=None):\n \"\"\"Remove a :class:`~.Comment` or :class:`~.Submission`.\n\n :param mod_note: A message for the other moderators.\n :param spam: When True, use the removal to help train the Subreddit's\n spam filter (default: False).\n :param reason_id: The removal reason ID.\n\n If either ``reason_id`` or ``mod_note`` are provided, a second API\n call is made to add the removal reason.\n\n Example usage:\n\n .. 
code-block:: python\n\n # remove a comment and mark as spam:\n comment = reddit.comment('dkk4qjd')\n comment.mod.remove(spam=True)\n # remove a submission\n submission = reddit.submission(id='5or86n')\n submission.mod.remove()\n # remove a submission with a removal reason\n reason = reddit.subreddit.mod.removal_reasons[\"110ni21zo23ql\"]\n submission = reddit.submission(id=\"5or86n\")\n submission.mod.remove(reason_id=reason.id)\n\n \"\"\"\n data = {\"id\": self.thing.fullname, \"spam\": bool(spam)}\n self.thing._reddit.post(API_PATH[\"remove\"], data=data)\n if any([reason_id, mod_note]):\n self._add_removal_reason(mod_note, reason_id)\n\n def send_removal_message(\n self,\n message,\n title=\"ignored\",\n type=\"public\", # pylint: disable=redefined-builtin\n ):\n \"\"\"Send a removal message for a :class:`~.Comment` or :class:`~.Submission`.\n\n .. warning:: The object has to be removed before giving it a removal\n reason. Remove the object with :meth:`.remove`. Trying to add a\n removal reason without removing the object will result in\n ``prawcore.exceptions.BadRequest`` being thrown.\n\n Reddit adds human-readable information about the object to the message.\n\n :param type: One of 'public', 'private', 'private_exposed'.\n 'public' leaves a stickied comment on the post.\n 'private' sends a Modmail message with hidden username.\n 'private_exposed' sends a Modmail message without hidden username.\n :param title: The short reason given in the message.\n (Ignored if type is 'public'.)\n :param message: The body of the message.\n\n If ``type`` is 'public', the new :class:`~.Comment` is returned.\n \"\"\"\n # The API endpoint used to send removal messages is different\n # for posts and comments, so the derived classes specify which one.\n if self.REMOVAL_MESSAGE_API is None:\n raise NotImplementedError(\"ThingModerationMixin must be extended.\")\n url = API_PATH[self.REMOVAL_MESSAGE_API]\n\n # Only the first element of the item_id list is used.\n data = {\n \"item_id\": [self.thing.fullname],\n \"message\": message,\n \"title\": title,\n \"type\": type,\n }\n\n return self.thing._reddit.post(url, data={\"json\": dumps(data)}) or None\n\n def undistinguish(self):\n \"\"\"Remove mod, admin, or special distinguishing from an object.\n\n Also unstickies the object if applicable.\n\n Example usage:\n\n .. code-block:: python\n\n # undistinguish a comment:\n comment = reddit.comment('dkk4qjd')\n comment.mod.undistinguish()\n # undistinguish a submission:\n submission = reddit.submission(id='5or86n')\n submission.mod.undistinguish()\n\n See also :meth:`~.distinguish`\n\n \"\"\"\n self.distinguish(how=\"no\")\n\n def unignore_reports(self):\n \"\"\"Resume receiving future reports on a Comment or Submission.\n\n Future reports on this :class:`~.Comment` or :class:`~.Submission`\n will cause notifications, and appear in the various moderation\n listings.\n\n Example usage:\n\n .. 
code-block:: python\n\n # accept future reports on a comment:\n comment = reddit.comment('dkk4qjd')\n comment.mod.unignore_reports()\n # accept future reports on a submission:\n submission = reddit.submission(id='5or86n')\n submission.mod.unignore_reports()\n\n See also :meth:`~.ignore_reports`\n\n \"\"\"\n self.thing._reddit.post(\n API_PATH[\"unignore_reports\"], data={\"id\": self.thing.fullname}\n )\n\n def unlock(self):\n \"\"\"Unlock a :class:`~.Comment` or :class:`~.Submission`.\n\n Example usage:\n\n # unlock a comment:\n comment = reddit.comment('dkk4qjd')\n comment.mod.unlock()\n # unlock a submission:\n submission = reddit.submission(id='5or86n')\n submission.mod.unlock()\n\n See also :meth:`~.lock`\n\n \"\"\"\n self.thing._reddit.post(\n API_PATH[\"unlock\"], data={\"id\": self.thing.fullname}\n )\n\n\nclass UserContentMixin(\n EditableMixin,\n GildableMixin,\n InboxToggleableMixin,\n ReplyableMixin,\n ReportableMixin,\n SavableMixin,\n VotableMixin,\n):\n \"\"\"A convenience mixin that applies to both Comments and Submissions.\"\"\"\n", "path": "praw/models/reddit/mixins/__init__.py"}]} | 3,979 | 192 |
gh_patches_debug_38116 | rasdani/github-patches | git_diff | sktime__sktime-4439 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[ENH] `SARIMAX` - implement `predict_interval` or `predict_quantiles` (for `statsmodels` `SARIMAX` interface)
From Discord, Huseyin Aytug:
Hi, when I want to produce confidence intervals with `SARIMAX`, I get this error:
`NotImplementedError: SARIMAX does not have the capability to return quantile predictions. If you think this estimator should have the capability, please open an issue on sktime.`
However, it should be possible according to the documentation http://www.sktime.net/en/latest/api_reference/auto_generated/sktime.forecasting.sarimax.SARIMAX.html#sktime.forecasting.sarimax.SARIMAX.predict_interval
--- END ISSUE ---
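For context, a minimal sketch that should reproduce the reported error (the dataset, model order, and forecasting horizon below are illustrative assumptions, not taken from the report):
```python
# Hypothetical reproduction sketch: before the fix, the last call raises
# NotImplementedError because the "capability:pred_int" tag is not set on SARIMAX.
from sktime.datasets import load_airline
from sktime.forecasting.sarimax import SARIMAX

y = load_airline()
forecaster = SARIMAX(order=(1, 0, 0), seasonal_order=(1, 0, 0, 12))
forecaster.fit(y)
forecaster.predict_interval(fh=[1, 2, 3], coverage=0.9)  # raises pre-fix
```
The error comes from the forecaster's missing prediction-interval capability rather than from `statsmodels`, which does expose interval forecasts through `get_prediction().conf_int()`.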
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sktime/forecasting/sarimax.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # !/usr/bin/env python3 -u
3 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
4 """Implements SARIMAX."""
5
6 __all__ = ["SARIMAX"]
7 __author__ = ["TNTran92"]
8
9 from sktime.forecasting.base.adapters import _StatsModelsAdapter
10
11
12 class SARIMAX(_StatsModelsAdapter):
13 """SARIMAX forecaster.
14
15 Direct interface for `statsmodels.tsa.api.SARIMAX`.
16
17 Parameters
18 ----------
19 order : iterable or iterable of iterables, optional, default=(1,0,0)
20 The (p,d,q) order of the model for the number of AR parameters,
21 differences, and MA parameters. `d` must be an integer
22 indicating the integration order of the process, while
23 `p` and `q` may either be an integers indicating the AR and MA
24 orders (so that all lags up to those orders are included) or else
25 iterables giving specific AR and / or MA lags to include. Default is
26 an AR(1) model: (1,0,0).
27 seasonal_order : iterable, optional, default=(0,0,0,0)
28 The (P,D,Q,s) order of the seasonal component of the model for the
29 AR parameters, differences, MA parameters, and periodicity.
30 `D` must be an integer indicating the integration order of the process,
31 while `P` and `Q` may either be an integers indicating the AR and MA
32 orders (so that all lags up to those orders are included) or else
33 iterables giving specific AR and / or MA lags to include. `s` is an
34 integer giving the periodicity (number of periods in season), often it
35 is 4 for quarterly data or 12 for monthly data. Default is no seasonal
36 effect.
37 trend : str{'n','c','t','ct'} or iterable, optional, default="c"
38 Parameter controlling the deterministic trend polynomial :math:`A(t)`.
39 Can be specified as a string where 'c' indicates a constant (i.e. a
40 degree zero component of the trend polynomial), 't' indicates a
41 linear trend with time, and 'ct' is both. Can also be specified as an
42 iterable defining the non-zero polynomial exponents to include, in
43 increasing order. For example, `[1,1,0,1]` denotes
44 :math:`a + bt + ct^3`. Default is to not include a trend component.
45 measurement_error : bool, optional, default=False
46 Whether or not to assume the endogenous observations `endog` were
47 measured with error.
48 time_varying_regression : bool, optional, default=False
49 Used when an explanatory variables, `exog`, are provided
50 to select whether or not coefficients on the exogenous regressors are
51 allowed to vary over time.
52 mle_regression : bool, optional, default=True
53 Whether or not to use estimate the regression coefficients for the
54 exogenous variables as part of maximum likelihood estimation or through
55 the Kalman filter (i.e. recursive least squares). If
56 `time_varying_regression` is True, this must be set to False.
57 simple_differencing : bool, optional, default=False
58 Whether or not to use partially conditional maximum likelihood
59 estimation. If True, differencing is performed prior to estimation,
60 which discards the first :math:`s D + d` initial rows but results in a
61 smaller state-space formulation. See the Notes section for important
62 details about interpreting results when this option is used. If False,
63 the full SARIMAX model is put in state-space form so that all
64 datapoints can be used in estimation.
65 enforce_stationarity : bool, optional, default=True
66 Whether or not to transform the AR parameters to enforce stationarity
67 in the autoregressive component of the model.
68 enforce_invertibility : bool, optional, default=True
69 Whether or not to transform the MA parameters to enforce invertibility
70 in the moving average component of the model.
71 hamilton_representation : bool, optional, default=False
72 Whether or not to use the Hamilton representation of an ARMA process
73 (if True) or the Harvey representation (if False).
74 concentrate_scale : bool, optional, default=False
75 Whether or not to concentrate the scale (variance of the error term)
76 out of the likelihood. This reduces the number of parameters estimated
77 by maximum likelihood by one, but standard errors will then not
78 be available for the scale parameter.
79 trend_offset : int, optional, default=1
80 The offset at which to start time trend values. Default is 1, so that
81 if `trend='t'` the trend is equal to 1, 2, ..., nobs. Typically is only
82 set when the model created by extending a previous dataset.
83 use_exact_diffuse : bool, optional, default=False
84 Whether or not to use exact diffuse initialization for non-stationary
85 states. Default is False (in which case approximate diffuse
86 initialization is used).
87 random_state : int, RandomState instance or None, optional ,
88 default=None – If int, random_state is the seed used by the random
89 number generator; If RandomState instance, random_state is the random
90 number generator; If None, the random number generator is the
91 RandomState instance used by np.random.
92
93 See Also
94 --------
95 ARIMA
96 AutoARIMA
97 StatsForecastAutoARIMA
98
99 References
100 ----------
101 .. [1] Hyndman, Rob J., and George Athanasopoulos. Forecasting: principles
102 and practice. OTexts, 2014.
103
104 Examples
105 --------
106 >>> from sktime.datasets import load_airline
107 >>> from sktime.forecasting.sarimax import SARIMAX
108 >>> y = load_airline()
109 >>> forecaster = SARIMAX(
110 ... order=(1, 0, 0), trend="t", seasonal_order=(1, 0, 0, 6)) # doctest: +SKIP
111 ... )
112 >>> forecaster.fit(y) # doctest: +SKIP
113 SARIMAX(...)
114 >>> y_pred = forecaster.predict(fh=y.index) # doctest: +SKIP
115 """
116
117 _tags = {
118 "ignores-exogeneous-X": False,
119 }
120
121 def __init__(
122 self,
123 order=(1, 0, 0),
124 seasonal_order=(0, 0, 0, 0),
125 trend="c",
126 measurement_error=False,
127 time_varying_regression=False,
128 mle_regression=True,
129 simple_differencing=False,
130 enforce_stationarity=True,
131 enforce_invertibility=True,
132 hamilton_representation=False,
133 concentrate_scale=False,
134 trend_offset=1,
135 use_exact_diffuse=False,
136 dates=None,
137 freq=None,
138 missing="none",
139 validate_specification=True,
140 random_state=None,
141 ):
142
143 self.order = order
144 self.seasonal_order = seasonal_order
145 self.trend = trend
146 self.measurement_error = measurement_error
147 self.time_varying_regression = time_varying_regression
148 self.mle_regression = mle_regression
149 self.simple_differencing = simple_differencing
150 self.enforce_stationarity = enforce_stationarity
151 self.enforce_invertibility = enforce_invertibility
152 self.hamilton_representation = hamilton_representation
153 self.concentrate_scale = concentrate_scale
154 self.trend_offset = trend_offset
155 self.use_exact_diffuse = use_exact_diffuse
156 self.dates = dates
157 self.freq = freq
158 self.missing = missing
159 self.validate_specification = validate_specification
160
161 super().__init__(random_state=random_state)
162
163 def _fit_forecaster(self, y, X=None):
164 from statsmodels.tsa.api import SARIMAX as _SARIMAX
165
166 self._forecaster = _SARIMAX(
167 endog=y,
168 exog=X,
169 order=self.order,
170 seasonal_order=self.seasonal_order,
171 trend=self.trend,
172 measurement_error=self.measurement_error,
173 time_varying_regression=self.time_varying_regression,
174 mle_regression=self.mle_regression,
175 simple_differencing=self.simple_differencing,
176 enforce_stationarity=self.enforce_stationarity,
177 enforce_invertibility=self.enforce_invertibility,
178 hamilton_representation=self.hamilton_representation,
179 concentrate_scale=self.concentrate_scale,
180 trend_offset=self.trend_offset,
181 use_exact_diffuse=self.use_exact_diffuse,
182 dates=self.dates,
183 freq=self.freq,
184 missing=self.missing,
185 validate_specification=self.validate_specification,
186 )
187 self._fitted_forecaster = self._forecaster.fit()
188
189 def summary(self):
190 """Get a summary of the fitted forecaster.
191
192 This is the same as the implementation in statsmodels:
193 https://www.statsmodels.org/dev/examples/notebooks/generated/statespace_structural_harvey_jaeger.html
194 """
195 return self._fitted_forecaster.summary()
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sktime/forecasting/sarimax.py b/sktime/forecasting/sarimax.py
--- a/sktime/forecasting/sarimax.py
+++ b/sktime/forecasting/sarimax.py
@@ -4,7 +4,9 @@
"""Implements SARIMAX."""
__all__ = ["SARIMAX"]
-__author__ = ["TNTran92"]
+__author__ = ["TNTran92", "yarnabrina"]
+
+import pandas as pd
from sktime.forecasting.base.adapters import _StatsModelsAdapter
@@ -116,6 +118,7 @@
_tags = {
"ignores-exogeneous-X": False,
+ "capability:pred_int": True,
}
def __init__(
@@ -193,3 +196,59 @@
https://www.statsmodels.org/dev/examples/notebooks/generated/statespace_structural_harvey_jaeger.html
"""
return self._fitted_forecaster.summary()
+
+ def _predict_interval(self, fh, X=None, coverage=0.95):
+ """Compute/return prediction interval forecasts.
+
+ private _predict_interval containing the core logic,
+ called from predict_interval and default _predict_quantiles
+
+ Parameters
+ ----------
+ fh : guaranteed to be ForecastingHorizon
+ The forecasting horizon with the steps ahead to to predict.
+ X : optional (default=None)
+ guaranteed to be of a type in self.get_tag("X_inner_mtype")
+ Exogeneous time series to predict from.
+ coverage : float or list of float, optional (default=0.95)
+ nominal coverage(s) of predictive interval(s)
+
+ Returns
+ -------
+ pred_int : pd.DataFrame
+ Column has multi-index: first level is variable name from y in fit,
+ second level coverage fractions for which intervals were computed.
+ in the same order as in input `coverage`.
+ Third level is string "lower" or "upper", for lower/upper interval end.
+ Row index is fh, with additional (upper) levels equal to instance levels,
+ from y seen in fit, if y_inner_mtype is Panel or Hierarchical.
+ Entries are forecasts of lower/upper interval end,
+ for var in col index, at nominal coverage in second col index,
+ lower/upper depending on third col index, for the row index.
+ Upper/lower interval end forecasts are equivalent to
+ quantile forecasts at alpha = 0.5 - c/2, 0.5 + c/2 for c in coverage.
+ """
+ start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]
+ valid_indices = fh.to_absolute(self.cutoff).to_pandas()
+
+ prediction_results = self._fitted_forecaster.get_prediction(
+ start=start, end=end, exog=X
+ )
+
+ columns = pd.MultiIndex.from_product(
+ [["Coverage"], coverage, ["lower", "upper"]]
+ )
+ pred_int = pd.DataFrame(index=valid_indices, columns=columns)
+
+ for c in coverage:
+ pred_statsmodels = prediction_results.conf_int(alpha=(1 - c))
+ pred_statsmodels.columns = ["lower", "upper"]
+
+ pred_int[("Coverage", c, "lower")] = pred_statsmodels.loc[
+ valid_indices, "lower"
+ ]
+ pred_int[("Coverage", c, "upper")] = pred_statsmodels.loc[
+ valid_indices, "upper"
+ ]
+
+ return pred_int
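For reference, a hedged usage sketch once the patch above is applied (data and coverage levels are illustrative):
```python
# Usage sketch after the patch (illustrative data); columns of the returned
# frames are a MultiIndex over coverage levels and lower/upper bounds, as
# described in the docstring added above.
from sktime.datasets import load_airline
from sktime.forecasting.sarimax import SARIMAX

y = load_airline()
forecaster = SARIMAX(order=(1, 0, 0), seasonal_order=(1, 0, 0, 12))
forecaster.fit(y)

intervals = forecaster.predict_interval(fh=[1, 2, 3], coverage=[0.8, 0.95])
quantiles = forecaster.predict_quantiles(fh=[1, 2, 3], alpha=[0.1, 0.9])
print(intervals)
print(quantiles)
```
Because `_predict_interval` also backs the default `_predict_quantiles`, both public methods become available with this single addition.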
| {"golden_diff": "diff --git a/sktime/forecasting/sarimax.py b/sktime/forecasting/sarimax.py\n--- a/sktime/forecasting/sarimax.py\n+++ b/sktime/forecasting/sarimax.py\n@@ -4,7 +4,9 @@\n \"\"\"Implements SARIMAX.\"\"\"\n \n __all__ = [\"SARIMAX\"]\n-__author__ = [\"TNTran92\"]\n+__author__ = [\"TNTran92\", \"yarnabrina\"]\n+\n+import pandas as pd\n \n from sktime.forecasting.base.adapters import _StatsModelsAdapter\n \n@@ -116,6 +118,7 @@\n \n _tags = {\n \"ignores-exogeneous-X\": False,\n+ \"capability:pred_int\": True,\n }\n \n def __init__(\n@@ -193,3 +196,59 @@\n https://www.statsmodels.org/dev/examples/notebooks/generated/statespace_structural_harvey_jaeger.html\n \"\"\"\n return self._fitted_forecaster.summary()\n+\n+ def _predict_interval(self, fh, X=None, coverage=0.95):\n+ \"\"\"Compute/return prediction interval forecasts.\n+\n+ private _predict_interval containing the core logic,\n+ called from predict_interval and default _predict_quantiles\n+\n+ Parameters\n+ ----------\n+ fh : guaranteed to be ForecastingHorizon\n+ The forecasting horizon with the steps ahead to to predict.\n+ X : optional (default=None)\n+ guaranteed to be of a type in self.get_tag(\"X_inner_mtype\")\n+ Exogeneous time series to predict from.\n+ coverage : float or list of float, optional (default=0.95)\n+ nominal coverage(s) of predictive interval(s)\n+\n+ Returns\n+ -------\n+ pred_int : pd.DataFrame\n+ Column has multi-index: first level is variable name from y in fit,\n+ second level coverage fractions for which intervals were computed.\n+ in the same order as in input `coverage`.\n+ Third level is string \"lower\" or \"upper\", for lower/upper interval end.\n+ Row index is fh, with additional (upper) levels equal to instance levels,\n+ from y seen in fit, if y_inner_mtype is Panel or Hierarchical.\n+ Entries are forecasts of lower/upper interval end,\n+ for var in col index, at nominal coverage in second col index,\n+ lower/upper depending on third col index, for the row index.\n+ Upper/lower interval end forecasts are equivalent to\n+ quantile forecasts at alpha = 0.5 - c/2, 0.5 + c/2 for c in coverage.\n+ \"\"\"\n+ start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]\n+ valid_indices = fh.to_absolute(self.cutoff).to_pandas()\n+\n+ prediction_results = self._fitted_forecaster.get_prediction(\n+ start=start, end=end, exog=X\n+ )\n+\n+ columns = pd.MultiIndex.from_product(\n+ [[\"Coverage\"], coverage, [\"lower\", \"upper\"]]\n+ )\n+ pred_int = pd.DataFrame(index=valid_indices, columns=columns)\n+\n+ for c in coverage:\n+ pred_statsmodels = prediction_results.conf_int(alpha=(1 - c))\n+ pred_statsmodels.columns = [\"lower\", \"upper\"]\n+\n+ pred_int[(\"Coverage\", c, \"lower\")] = pred_statsmodels.loc[\n+ valid_indices, \"lower\"\n+ ]\n+ pred_int[(\"Coverage\", c, \"upper\")] = pred_statsmodels.loc[\n+ valid_indices, \"upper\"\n+ ]\n+\n+ return pred_int\n", "issue": "[ENH] `SARIMAX` - implement `predict_interval` or `predict_quantiles` (for `statsmodels` `SARIMAX` interface)\nFrom discord, Huseyin Aytug:\r\n\r\nhi when I want to produce confidence intervals with `SARIMAX`, I get this error:\r\n\r\n`NotImplementedError: SARIMAX does not have the capability to return quantile predictions. 
If you think this estimator should have the capability, please open an issue on sktime.`\r\n\r\nHowever, it should be possible according to the documentation http://www.sktime.net/en/latest/api_reference/auto_generated/sktime.forecasting.sarimax.SARIMAX.html#sktime.forecasting.sarimax.SARIMAX.predict_interval\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# !/usr/bin/env python3 -u\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Implements SARIMAX.\"\"\"\n\n__all__ = [\"SARIMAX\"]\n__author__ = [\"TNTran92\"]\n\nfrom sktime.forecasting.base.adapters import _StatsModelsAdapter\n\n\nclass SARIMAX(_StatsModelsAdapter):\n \"\"\"SARIMAX forecaster.\n\n Direct interface for `statsmodels.tsa.api.SARIMAX`.\n\n Parameters\n ----------\n order : iterable or iterable of iterables, optional, default=(1,0,0)\n The (p,d,q) order of the model for the number of AR parameters,\n differences, and MA parameters. `d` must be an integer\n indicating the integration order of the process, while\n `p` and `q` may either be an integers indicating the AR and MA\n orders (so that all lags up to those orders are included) or else\n iterables giving specific AR and / or MA lags to include. Default is\n an AR(1) model: (1,0,0).\n seasonal_order : iterable, optional, default=(0,0,0,0)\n The (P,D,Q,s) order of the seasonal component of the model for the\n AR parameters, differences, MA parameters, and periodicity.\n `D` must be an integer indicating the integration order of the process,\n while `P` and `Q` may either be an integers indicating the AR and MA\n orders (so that all lags up to those orders are included) or else\n iterables giving specific AR and / or MA lags to include. `s` is an\n integer giving the periodicity (number of periods in season), often it\n is 4 for quarterly data or 12 for monthly data. Default is no seasonal\n effect.\n trend : str{'n','c','t','ct'} or iterable, optional, default=\"c\"\n Parameter controlling the deterministic trend polynomial :math:`A(t)`.\n Can be specified as a string where 'c' indicates a constant (i.e. a\n degree zero component of the trend polynomial), 't' indicates a\n linear trend with time, and 'ct' is both. Can also be specified as an\n iterable defining the non-zero polynomial exponents to include, in\n increasing order. For example, `[1,1,0,1]` denotes\n :math:`a + bt + ct^3`. Default is to not include a trend component.\n measurement_error : bool, optional, default=False\n Whether or not to assume the endogenous observations `endog` were\n measured with error.\n time_varying_regression : bool, optional, default=False\n Used when an explanatory variables, `exog`, are provided\n to select whether or not coefficients on the exogenous regressors are\n allowed to vary over time.\n mle_regression : bool, optional, default=True\n Whether or not to use estimate the regression coefficients for the\n exogenous variables as part of maximum likelihood estimation or through\n the Kalman filter (i.e. recursive least squares). If\n `time_varying_regression` is True, this must be set to False.\n simple_differencing : bool, optional, default=False\n Whether or not to use partially conditional maximum likelihood\n estimation. If True, differencing is performed prior to estimation,\n which discards the first :math:`s D + d` initial rows but results in a\n smaller state-space formulation. See the Notes section for important\n details about interpreting results when this option is used. 
If False,\n the full SARIMAX model is put in state-space form so that all\n datapoints can be used in estimation.\n enforce_stationarity : bool, optional, default=True\n Whether or not to transform the AR parameters to enforce stationarity\n in the autoregressive component of the model.\n enforce_invertibility : bool, optional, default=True\n Whether or not to transform the MA parameters to enforce invertibility\n in the moving average component of the model.\n hamilton_representation : bool, optional, default=False\n Whether or not to use the Hamilton representation of an ARMA process\n (if True) or the Harvey representation (if False).\n concentrate_scale : bool, optional, default=False\n Whether or not to concentrate the scale (variance of the error term)\n out of the likelihood. This reduces the number of parameters estimated\n by maximum likelihood by one, but standard errors will then not\n be available for the scale parameter.\n trend_offset : int, optional, default=1\n The offset at which to start time trend values. Default is 1, so that\n if `trend='t'` the trend is equal to 1, 2, ..., nobs. Typically is only\n set when the model created by extending a previous dataset.\n use_exact_diffuse : bool, optional, default=False\n Whether or not to use exact diffuse initialization for non-stationary\n states. Default is False (in which case approximate diffuse\n initialization is used).\n random_state : int, RandomState instance or None, optional ,\n default=None \u2013 If int, random_state is the seed used by the random\n number generator; If RandomState instance, random_state is the random\n number generator; If None, the random number generator is the\n RandomState instance used by np.random.\n\n See Also\n --------\n ARIMA\n AutoARIMA\n StatsForecastAutoARIMA\n\n References\n ----------\n .. [1] Hyndman, Rob J., and George Athanasopoulos. Forecasting: principles\n and practice. OTexts, 2014.\n\n Examples\n --------\n >>> from sktime.datasets import load_airline\n >>> from sktime.forecasting.sarimax import SARIMAX\n >>> y = load_airline()\n >>> forecaster = SARIMAX(\n ... order=(1, 0, 0), trend=\"t\", seasonal_order=(1, 0, 0, 6)) # doctest: +SKIP\n ... 
)\n >>> forecaster.fit(y) # doctest: +SKIP\n SARIMAX(...)\n >>> y_pred = forecaster.predict(fh=y.index) # doctest: +SKIP\n \"\"\"\n\n _tags = {\n \"ignores-exogeneous-X\": False,\n }\n\n def __init__(\n self,\n order=(1, 0, 0),\n seasonal_order=(0, 0, 0, 0),\n trend=\"c\",\n measurement_error=False,\n time_varying_regression=False,\n mle_regression=True,\n simple_differencing=False,\n enforce_stationarity=True,\n enforce_invertibility=True,\n hamilton_representation=False,\n concentrate_scale=False,\n trend_offset=1,\n use_exact_diffuse=False,\n dates=None,\n freq=None,\n missing=\"none\",\n validate_specification=True,\n random_state=None,\n ):\n\n self.order = order\n self.seasonal_order = seasonal_order\n self.trend = trend\n self.measurement_error = measurement_error\n self.time_varying_regression = time_varying_regression\n self.mle_regression = mle_regression\n self.simple_differencing = simple_differencing\n self.enforce_stationarity = enforce_stationarity\n self.enforce_invertibility = enforce_invertibility\n self.hamilton_representation = hamilton_representation\n self.concentrate_scale = concentrate_scale\n self.trend_offset = trend_offset\n self.use_exact_diffuse = use_exact_diffuse\n self.dates = dates\n self.freq = freq\n self.missing = missing\n self.validate_specification = validate_specification\n\n super().__init__(random_state=random_state)\n\n def _fit_forecaster(self, y, X=None):\n from statsmodels.tsa.api import SARIMAX as _SARIMAX\n\n self._forecaster = _SARIMAX(\n endog=y,\n exog=X,\n order=self.order,\n seasonal_order=self.seasonal_order,\n trend=self.trend,\n measurement_error=self.measurement_error,\n time_varying_regression=self.time_varying_regression,\n mle_regression=self.mle_regression,\n simple_differencing=self.simple_differencing,\n enforce_stationarity=self.enforce_stationarity,\n enforce_invertibility=self.enforce_invertibility,\n hamilton_representation=self.hamilton_representation,\n concentrate_scale=self.concentrate_scale,\n trend_offset=self.trend_offset,\n use_exact_diffuse=self.use_exact_diffuse,\n dates=self.dates,\n freq=self.freq,\n missing=self.missing,\n validate_specification=self.validate_specification,\n )\n self._fitted_forecaster = self._forecaster.fit()\n\n def summary(self):\n \"\"\"Get a summary of the fitted forecaster.\n\n This is the same as the implementation in statsmodels:\n https://www.statsmodels.org/dev/examples/notebooks/generated/statespace_structural_harvey_jaeger.html\n \"\"\"\n return self._fitted_forecaster.summary()\n", "path": "sktime/forecasting/sarimax.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# !/usr/bin/env python3 -u\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Implements SARIMAX.\"\"\"\n\n__all__ = [\"SARIMAX\"]\n__author__ = [\"TNTran92\", \"yarnabrina\"]\n\nimport pandas as pd\n\nfrom sktime.forecasting.base.adapters import _StatsModelsAdapter\n\n\nclass SARIMAX(_StatsModelsAdapter):\n \"\"\"SARIMAX forecaster.\n\n Direct interface for `statsmodels.tsa.api.SARIMAX`.\n\n Parameters\n ----------\n order : iterable or iterable of iterables, optional, default=(1,0,0)\n The (p,d,q) order of the model for the number of AR parameters,\n differences, and MA parameters. `d` must be an integer\n indicating the integration order of the process, while\n `p` and `q` may either be an integers indicating the AR and MA\n orders (so that all lags up to those orders are included) or else\n iterables giving specific AR and / or MA lags to include. 
Default is\n an AR(1) model: (1,0,0).\n seasonal_order : iterable, optional, default=(0,0,0,0)\n The (P,D,Q,s) order of the seasonal component of the model for the\n AR parameters, differences, MA parameters, and periodicity.\n `D` must be an integer indicating the integration order of the process,\n while `P` and `Q` may either be an integers indicating the AR and MA\n orders (so that all lags up to those orders are included) or else\n iterables giving specific AR and / or MA lags to include. `s` is an\n integer giving the periodicity (number of periods in season), often it\n is 4 for quarterly data or 12 for monthly data. Default is no seasonal\n effect.\n trend : str{'n','c','t','ct'} or iterable, optional, default=\"c\"\n Parameter controlling the deterministic trend polynomial :math:`A(t)`.\n Can be specified as a string where 'c' indicates a constant (i.e. a\n degree zero component of the trend polynomial), 't' indicates a\n linear trend with time, and 'ct' is both. Can also be specified as an\n iterable defining the non-zero polynomial exponents to include, in\n increasing order. For example, `[1,1,0,1]` denotes\n :math:`a + bt + ct^3`. Default is to not include a trend component.\n measurement_error : bool, optional, default=False\n Whether or not to assume the endogenous observations `endog` were\n measured with error.\n time_varying_regression : bool, optional, default=False\n Used when an explanatory variables, `exog`, are provided\n to select whether or not coefficients on the exogenous regressors are\n allowed to vary over time.\n mle_regression : bool, optional, default=True\n Whether or not to use estimate the regression coefficients for the\n exogenous variables as part of maximum likelihood estimation or through\n the Kalman filter (i.e. recursive least squares). If\n `time_varying_regression` is True, this must be set to False.\n simple_differencing : bool, optional, default=False\n Whether or not to use partially conditional maximum likelihood\n estimation. If True, differencing is performed prior to estimation,\n which discards the first :math:`s D + d` initial rows but results in a\n smaller state-space formulation. See the Notes section for important\n details about interpreting results when this option is used. If False,\n the full SARIMAX model is put in state-space form so that all\n datapoints can be used in estimation.\n enforce_stationarity : bool, optional, default=True\n Whether or not to transform the AR parameters to enforce stationarity\n in the autoregressive component of the model.\n enforce_invertibility : bool, optional, default=True\n Whether or not to transform the MA parameters to enforce invertibility\n in the moving average component of the model.\n hamilton_representation : bool, optional, default=False\n Whether or not to use the Hamilton representation of an ARMA process\n (if True) or the Harvey representation (if False).\n concentrate_scale : bool, optional, default=False\n Whether or not to concentrate the scale (variance of the error term)\n out of the likelihood. This reduces the number of parameters estimated\n by maximum likelihood by one, but standard errors will then not\n be available for the scale parameter.\n trend_offset : int, optional, default=1\n The offset at which to start time trend values. Default is 1, so that\n if `trend='t'` the trend is equal to 1, 2, ..., nobs. 
Typically is only\n set when the model created by extending a previous dataset.\n use_exact_diffuse : bool, optional, default=False\n Whether or not to use exact diffuse initialization for non-stationary\n states. Default is False (in which case approximate diffuse\n initialization is used).\n random_state : int, RandomState instance or None, optional ,\n default=None \u2013 If int, random_state is the seed used by the random\n number generator; If RandomState instance, random_state is the random\n number generator; If None, the random number generator is the\n RandomState instance used by np.random.\n\n See Also\n --------\n ARIMA\n AutoARIMA\n StatsForecastAutoARIMA\n\n References\n ----------\n .. [1] Hyndman, Rob J., and George Athanasopoulos. Forecasting: principles\n and practice. OTexts, 2014.\n\n Examples\n --------\n >>> from sktime.datasets import load_airline\n >>> from sktime.forecasting.sarimax import SARIMAX\n >>> y = load_airline()\n >>> forecaster = SARIMAX(\n ... order=(1, 0, 0), trend=\"t\", seasonal_order=(1, 0, 0, 6)) # doctest: +SKIP\n ... )\n >>> forecaster.fit(y) # doctest: +SKIP\n SARIMAX(...)\n >>> y_pred = forecaster.predict(fh=y.index) # doctest: +SKIP\n \"\"\"\n\n _tags = {\n \"ignores-exogeneous-X\": False,\n \"capability:pred_int\": True,\n }\n\n def __init__(\n self,\n order=(1, 0, 0),\n seasonal_order=(0, 0, 0, 0),\n trend=\"c\",\n measurement_error=False,\n time_varying_regression=False,\n mle_regression=True,\n simple_differencing=False,\n enforce_stationarity=True,\n enforce_invertibility=True,\n hamilton_representation=False,\n concentrate_scale=False,\n trend_offset=1,\n use_exact_diffuse=False,\n dates=None,\n freq=None,\n missing=\"none\",\n validate_specification=True,\n random_state=None,\n ):\n\n self.order = order\n self.seasonal_order = seasonal_order\n self.trend = trend\n self.measurement_error = measurement_error\n self.time_varying_regression = time_varying_regression\n self.mle_regression = mle_regression\n self.simple_differencing = simple_differencing\n self.enforce_stationarity = enforce_stationarity\n self.enforce_invertibility = enforce_invertibility\n self.hamilton_representation = hamilton_representation\n self.concentrate_scale = concentrate_scale\n self.trend_offset = trend_offset\n self.use_exact_diffuse = use_exact_diffuse\n self.dates = dates\n self.freq = freq\n self.missing = missing\n self.validate_specification = validate_specification\n\n super().__init__(random_state=random_state)\n\n def _fit_forecaster(self, y, X=None):\n from statsmodels.tsa.api import SARIMAX as _SARIMAX\n\n self._forecaster = _SARIMAX(\n endog=y,\n exog=X,\n order=self.order,\n seasonal_order=self.seasonal_order,\n trend=self.trend,\n measurement_error=self.measurement_error,\n time_varying_regression=self.time_varying_regression,\n mle_regression=self.mle_regression,\n simple_differencing=self.simple_differencing,\n enforce_stationarity=self.enforce_stationarity,\n enforce_invertibility=self.enforce_invertibility,\n hamilton_representation=self.hamilton_representation,\n concentrate_scale=self.concentrate_scale,\n trend_offset=self.trend_offset,\n use_exact_diffuse=self.use_exact_diffuse,\n dates=self.dates,\n freq=self.freq,\n missing=self.missing,\n validate_specification=self.validate_specification,\n )\n self._fitted_forecaster = self._forecaster.fit()\n\n def summary(self):\n \"\"\"Get a summary of the fitted forecaster.\n\n This is the same as the implementation in statsmodels:\n 
https://www.statsmodels.org/dev/examples/notebooks/generated/statespace_structural_harvey_jaeger.html\n \"\"\"\n return self._fitted_forecaster.summary()\n\n def _predict_interval(self, fh, X=None, coverage=0.95):\n \"\"\"Compute/return prediction interval forecasts.\n\n private _predict_interval containing the core logic,\n called from predict_interval and default _predict_quantiles\n\n Parameters\n ----------\n fh : guaranteed to be ForecastingHorizon\n The forecasting horizon with the steps ahead to to predict.\n X : optional (default=None)\n guaranteed to be of a type in self.get_tag(\"X_inner_mtype\")\n Exogeneous time series to predict from.\n coverage : float or list of float, optional (default=0.95)\n nominal coverage(s) of predictive interval(s)\n\n Returns\n -------\n pred_int : pd.DataFrame\n Column has multi-index: first level is variable name from y in fit,\n second level coverage fractions for which intervals were computed.\n in the same order as in input `coverage`.\n Third level is string \"lower\" or \"upper\", for lower/upper interval end.\n Row index is fh, with additional (upper) levels equal to instance levels,\n from y seen in fit, if y_inner_mtype is Panel or Hierarchical.\n Entries are forecasts of lower/upper interval end,\n for var in col index, at nominal coverage in second col index,\n lower/upper depending on third col index, for the row index.\n Upper/lower interval end forecasts are equivalent to\n quantile forecasts at alpha = 0.5 - c/2, 0.5 + c/2 for c in coverage.\n \"\"\"\n start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]\n valid_indices = fh.to_absolute(self.cutoff).to_pandas()\n\n prediction_results = self._fitted_forecaster.get_prediction(\n start=start, end=end, exog=X\n )\n\n columns = pd.MultiIndex.from_product(\n [[\"Coverage\"], coverage, [\"lower\", \"upper\"]]\n )\n pred_int = pd.DataFrame(index=valid_indices, columns=columns)\n\n for c in coverage:\n pred_statsmodels = prediction_results.conf_int(alpha=(1 - c))\n pred_statsmodels.columns = [\"lower\", \"upper\"]\n\n pred_int[(\"Coverage\", c, \"lower\")] = pred_statsmodels.loc[\n valid_indices, \"lower\"\n ]\n pred_int[(\"Coverage\", c, \"upper\")] = pred_statsmodels.loc[\n valid_indices, \"upper\"\n ]\n\n return pred_int\n", "path": "sktime/forecasting/sarimax.py"}]} | 2,896 | 811 |
gh_patches_debug_10993 | rasdani/github-patches | git_diff | ansible__awx-13854 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug Report: awx.awx.export all does not work.
### Please confirm the following
- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.
### Bug Summary
Using the `all` option does not work as noted in the [example](https://github.com/ansible/awx/blob/devel/awx_collection/plugins/modules/export.py#L111).
It should export everything from that endpoint.
I tested this with both the recent pip release of awxkit and one built from dev.
I also tested with the CLI directly, and it worked using the command:
```
awx export --conf.host https://controller --conf.username admin --conf.password 123 --conf.insecure --users --users > users.json
```
### AWX version
22.0.0
### Select the relevant components
- [ ] UI
- [ ] API
- [ ] Docs
- [X] Collection
- [ ] CLI
- [ ] Other
### Installation method
kubernetes
### Modifications
no
### Ansible version
2.14.4
### Operating system
Fedora
### Web browser
_No response_
### Steps to reproduce
Run the following playbook
```
---
- name: Export Workflow
hosts: localhost
connection: local
gather_facts: false
collections:
- ansible.controller
environment:
CONTROLLER_HOST: controller.nas
CONTROLLER_USERNAME: admin
CONTROLLER_PASSWORD: secret123
CONTROLLER_VERIFY_SSL: False
tasks:
- name: Export all users
awx.awx.export:
users: 'all'
register: export_results
- name: Export users to file
copy:
content: "{{ export_results | to_nice_yaml( width=50, explicit_start=True, explicit_end=True) }}"
dest: usersa.yaml
...
```
### Expected results
```
---
assets:
users:
- username: AWX-Collection-tests-controller_role-user
first_name: Joe
last_name: User
email: [email protected]
is_superuser: false
is_system_auditor: false
password: controller_password
related:
roles:
- name: Execute
type: role
content_object:
organization:
name: Default
type: organization
name: test-role-workflow
type: workflow_job_template
natural_key:
username: AWX-Collection-tests-controller_role-user
type: user
- username: AWX-Collection-tests-controller_role-user-ClPzjxrRTIwcOCvO
first_name: Joe
last_name: User
email: [email protected]
is_superuser: false
is_system_auditor: false
password: controller_password
related:
roles: []
natural_key:
username: AWX-Collection-tests-controller_role-user-ClPzjxrRTIwcOCvO
type: user
changed: false
failed: false
...
```
### Actual results
```
---
assets:
users: []
changed: false
failed: false
...
```
### Additional information
I asked @TheRealHaoLiu to test as well to make sure it was not a local problem.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awx_collection/plugins/modules/export.py`
Content:
```
1 #!/usr/bin/python
2 # coding: utf-8 -*-
3
4 # (c) 2017, John Westcott IV <[email protected]>
5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
6
7 from __future__ import absolute_import, division, print_function
8
9 __metaclass__ = type
10
11
12 ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}
13
14
15 DOCUMENTATION = '''
16 ---
17 module: export
18 author: "John Westcott IV (@john-westcott-iv)"
19 version_added: "3.7.0"
20 short_description: export resources from Automation Platform Controller.
21 description:
22 - Export assets from Automation Platform Controller.
23 options:
24 all:
25 description:
26 - Export all assets
27 type: bool
28 default: 'False'
29 organizations:
30 description:
31 - organization names to export
32 type: list
33 elements: str
34 users:
35 description:
36 - user names to export
37 type: list
38 elements: str
39 teams:
40 description:
41 - team names to export
42 type: list
43 elements: str
44 credential_types:
45 description:
46 - credential type names to export
47 type: list
48 elements: str
49 credentials:
50 description:
51 - credential names to export
52 type: list
53 elements: str
54 execution_environments:
55 description:
56 - execution environment names to export
57 type: list
58 elements: str
59 notification_templates:
60 description:
61 - notification template names to export
62 type: list
63 elements: str
64 inventory_sources:
65 description:
66 - inventory soruces to export
67 type: list
68 elements: str
69 inventory:
70 description:
71 - inventory names to export
72 type: list
73 elements: str
74 projects:
75 description:
76 - project names to export
77 type: list
78 elements: str
79 job_templates:
80 description:
81 - job template names to export
82 type: list
83 elements: str
84 workflow_job_templates:
85 description:
86 - workflow names to export
87 type: list
88 elements: str
89 applications:
90 description:
91 - OAuth2 application names to export
92 type: list
93 elements: str
94 schedules:
95 description:
96 - schedule names to export
97 type: list
98 elements: str
99 requirements:
100 - "awxkit >= 9.3.0"
101 notes:
102 - Specifying a name of "all" for any asset type will export all items of that asset type.
103 extends_documentation_fragment: awx.awx.auth
104 '''
105
106 EXAMPLES = '''
107 - name: Export all assets
108 export:
109 all: True
110
111 - name: Export all inventories
112 export:
113 inventory: 'all'
114
115 - name: Export a job template named "My Template" and all Credentials
116 export:
117 job_templates: "My Template"
118 credential: 'all'
119
120 - name: Export a list of inventories
121 export:
122 inventory: ['My Inventory 1', 'My Inventory 2']
123 '''
124
125 import logging
126 from ansible.module_utils.six.moves import StringIO
127 from ..module_utils.awxkit import ControllerAWXKitModule
128
129 try:
130 from awxkit.api.pages.api import EXPORTABLE_RESOURCES
131
132 HAS_EXPORTABLE_RESOURCES = True
133 except ImportError:
134 HAS_EXPORTABLE_RESOURCES = False
135
136
137 def main():
138 argument_spec = dict(
139 all=dict(type='bool', default=False),
140 )
141
142 # We are not going to raise an error here because the __init__ method of ControllerAWXKitModule will do that for us
143 if HAS_EXPORTABLE_RESOURCES:
144 for resource in EXPORTABLE_RESOURCES:
145 argument_spec[resource] = dict(type='list', elements='str')
146
147 module = ControllerAWXKitModule(argument_spec=argument_spec)
148
149 if not HAS_EXPORTABLE_RESOURCES:
150 module.fail_json(msg="Your version of awxkit does not have import/export")
151
152 # The export process will never change the AWX system
153 module.json_output['changed'] = False
154
155 # The exporter code currently works like the following:
156 # Empty string == all assets of that type
157 # Non-Empty string = just one asset of that type (by name or ID)
158 # Asset type not present or None = skip asset type (unless everything is None, then export all)
159 # Here we are going to setup a dict of values to export
160 export_args = {}
161 for resource in EXPORTABLE_RESOURCES:
162 if module.params.get('all') or module.params.get(resource) == 'all':
163 # If we are exporting everything or we got the keyword "all" we pass in an empty string for this asset type
164 export_args[resource] = ''
165 else:
166 # Otherwise we take either the string or None (if the parameter was not passed) to get one or no items
167 export_args[resource] = module.params.get(resource)
168
169 # Currently the export process does not return anything on error
170 # It simply just logs to Python's logger
171 # Set up a log gobbler to get error messages from export_assets
172 log_capture_string = StringIO()
173 ch = logging.StreamHandler(log_capture_string)
174 for logger_name in ['awxkit.api.pages.api', 'awxkit.api.pages.page']:
175 logger = logging.getLogger(logger_name)
176 logger.setLevel(logging.ERROR)
177 ch.setLevel(logging.ERROR)
178
179 logger.addHandler(ch)
180 log_contents = ''
181
182 # Run the export process
183 try:
184 module.json_output['assets'] = module.get_api_v2_object().export_assets(**export_args)
185 module.exit_json(**module.json_output)
186 except Exception as e:
187 module.fail_json(msg="Failed to export assets {0}".format(e))
188 finally:
189 # Finally, consume the logs in case there were any errors and die if there were
190 log_contents = log_capture_string.getvalue()
191 log_capture_string.close()
192 if log_contents != '':
193 module.fail_json(msg=log_contents)
194
195
196 if __name__ == '__main__':
197 main()
198
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awx_collection/plugins/modules/export.py b/awx_collection/plugins/modules/export.py
--- a/awx_collection/plugins/modules/export.py
+++ b/awx_collection/plugins/modules/export.py
@@ -159,7 +159,7 @@
# Here we are going to setup a dict of values to export
export_args = {}
for resource in EXPORTABLE_RESOURCES:
- if module.params.get('all') or module.params.get(resource) == 'all':
+ if module.params.get('all') or module.params.get(resource) == ['all']:
# If we are exporting everything or we got the keyword "all" we pass in an empty string for this asset type
export_args[resource] = ''
else:
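The one-line change works because every resource option is declared with `type='list'`, so `module.params.get(resource)` returns a list such as `['all']`, and the old comparison against the bare string `'all'` could never match. A plain-Python illustration (hypothetical params dict, not the actual AnsibleModule machinery):
```python
# Illustration of the failing comparison; the real values come from
# AnsibleModule's list-typed argument_spec, simulated here by hand.
params = {"all": False, "users": ["all"]}

resource = "users"
print(params.get(resource) == "all")    # False -> pre-patch code never hits the "export all" branch
print(params.get(resource) == ["all"])  # True  -> patched comparison selects "export all"
```
Playbooks that pass `users: 'all'` keep working, since AnsibleModule coerces a scalar into a single-element list for list-typed options.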
| {"golden_diff": "diff --git a/awx_collection/plugins/modules/export.py b/awx_collection/plugins/modules/export.py\n--- a/awx_collection/plugins/modules/export.py\n+++ b/awx_collection/plugins/modules/export.py\n@@ -159,7 +159,7 @@\n # Here we are going to setup a dict of values to export\n export_args = {}\n for resource in EXPORTABLE_RESOURCES:\n- if module.params.get('all') or module.params.get(resource) == 'all':\n+ if module.params.get('all') or module.params.get(resource) == ['all']:\n # If we are exporting everything or we got the keyword \"all\" we pass in an empty string for this asset type\n export_args[resource] = ''\n else:\n", "issue": " Bug Report: awx.awx.export all does not work.\n### Please confirm the following\r\n\r\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\r\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\r\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\r\n\r\n### Bug Summary\r\n\r\nusing the all option does not work as noted in the [example](https://github.com/ansible/awx/blob/devel/awx_collection/plugins/modules/export.py#L111)\r\n\r\nIt should export everything from that endpoint\r\n\r\nI tested this with both the recent pip release of awxkit, and one compiled from dev.\r\n\r\nI also tested with the cli directly and it worked using the command\r\n```\r\nawx export --conf.host https://controller --conf.username admin --conf.password 123 --conf.insecure --users --users > users.json\r\n```\r\n\r\n### AWX version\r\n\r\n22.0.0\r\n\r\n### Select the relevant components\r\n\r\n- [ ] UI\r\n- [ ] API\r\n- [ ] Docs\r\n- [X] Collection\r\n- [ ] CLI\r\n- [ ] Other\r\n\r\n### Installation method\r\n\r\nkubernetes\r\n\r\n### Modifications\r\n\r\nno\r\n\r\n### Ansible version\r\n\r\n2.14.4\r\n\r\n### Operating system\r\n\r\nFedora\r\n\r\n### Web browser\r\n\r\n_No response_\r\n\r\n### Steps to reproduce\r\n\r\nRun the following playbook\r\n```\r\n---\r\n- name: Export Workflow\r\n hosts: localhost\r\n connection: local\r\n gather_facts: false\r\n collections:\r\n - ansible.controller\r\n environment:\r\n CONTROLLER_HOST: controller.nas\r\n CONTROLLER_USERNAME: admin\r\n CONTROLLER_PASSWORD: secret123\r\n CONTROLLER_VERIFY_SSL: False\r\n\r\n tasks:\r\n - name: Export all users\r\n awx.awx.export:\r\n users: 'all'\r\n register: export_results\r\n\r\n - name: Export users to file\r\n copy:\r\n content: \"{{ export_results | to_nice_yaml( width=50, explicit_start=True, explicit_end=True) }}\"\r\n dest: usersa.yaml\r\n...\r\n```\r\n### Expected results\r\n```\r\n---\r\nassets:\r\n users:\r\n - username: AWX-Collection-tests-controller_role-user\r\n first_name: Joe\r\n last_name: User\r\n email: [email protected]\r\n is_superuser: false\r\n is_system_auditor: false\r\n password: controller_password\r\n related:\r\n roles:\r\n - name: Execute\r\n type: role\r\n content_object:\r\n organization:\r\n name: Default\r\n type: organization\r\n name: test-role-workflow\r\n type: workflow_job_template\r\n natural_key:\r\n username: AWX-Collection-tests-controller_role-user\r\n type: user\r\n - username: AWX-Collection-tests-controller_role-user-ClPzjxrRTIwcOCvO\r\n first_name: Joe\r\n last_name: User\r\n email: [email protected]\r\n is_superuser: false\r\n is_system_auditor: false\r\n password: controller_password\r\n related:\r\n roles: []\r\n natural_key:\r\n username: 
AWX-Collection-tests-controller_role-user-ClPzjxrRTIwcOCvO\r\n type: user\r\nchanged: false\r\nfailed: false\r\n...\r\n```\r\n\r\n### Actual results\r\n```\r\n---\r\nassets:\r\n users: []\r\nchanged: false\r\nfailed: false\r\n...\r\n```\r\n### Additional information\r\n\r\nI asked @TheRealHaoLiu to test as well to make sure it was not a local problem. \n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2017, John Westcott IV <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: export\nauthor: \"John Westcott IV (@john-westcott-iv)\"\nversion_added: \"3.7.0\"\nshort_description: export resources from Automation Platform Controller.\ndescription:\n - Export assets from Automation Platform Controller.\noptions:\n all:\n description:\n - Export all assets\n type: bool\n default: 'False'\n organizations:\n description:\n - organization names to export\n type: list\n elements: str\n users:\n description:\n - user names to export\n type: list\n elements: str\n teams:\n description:\n - team names to export\n type: list\n elements: str\n credential_types:\n description:\n - credential type names to export\n type: list\n elements: str\n credentials:\n description:\n - credential names to export\n type: list\n elements: str\n execution_environments:\n description:\n - execution environment names to export\n type: list\n elements: str\n notification_templates:\n description:\n - notification template names to export\n type: list\n elements: str\n inventory_sources:\n description:\n - inventory soruces to export\n type: list\n elements: str\n inventory:\n description:\n - inventory names to export\n type: list\n elements: str\n projects:\n description:\n - project names to export\n type: list\n elements: str\n job_templates:\n description:\n - job template names to export\n type: list\n elements: str\n workflow_job_templates:\n description:\n - workflow names to export\n type: list\n elements: str\n applications:\n description:\n - OAuth2 application names to export\n type: list\n elements: str\n schedules:\n description:\n - schedule names to export\n type: list\n elements: str\nrequirements:\n - \"awxkit >= 9.3.0\"\nnotes:\n - Specifying a name of \"all\" for any asset type will export all items of that asset type.\nextends_documentation_fragment: awx.awx.auth\n'''\n\nEXAMPLES = '''\n- name: Export all assets\n export:\n all: True\n\n- name: Export all inventories\n export:\n inventory: 'all'\n\n- name: Export a job template named \"My Template\" and all Credentials\n export:\n job_templates: \"My Template\"\n credential: 'all'\n\n- name: Export a list of inventories\n export:\n inventory: ['My Inventory 1', 'My Inventory 2']\n'''\n\nimport logging\nfrom ansible.module_utils.six.moves import StringIO\nfrom ..module_utils.awxkit import ControllerAWXKitModule\n\ntry:\n from awxkit.api.pages.api import EXPORTABLE_RESOURCES\n\n HAS_EXPORTABLE_RESOURCES = True\nexcept ImportError:\n HAS_EXPORTABLE_RESOURCES = False\n\n\ndef main():\n argument_spec = dict(\n all=dict(type='bool', default=False),\n )\n\n # We are not going to raise an error here because the __init__ method of ControllerAWXKitModule will do that for us\n if HAS_EXPORTABLE_RESOURCES:\n for resource in 
EXPORTABLE_RESOURCES:\n argument_spec[resource] = dict(type='list', elements='str')\n\n module = ControllerAWXKitModule(argument_spec=argument_spec)\n\n if not HAS_EXPORTABLE_RESOURCES:\n module.fail_json(msg=\"Your version of awxkit does not have import/export\")\n\n # The export process will never change the AWX system\n module.json_output['changed'] = False\n\n # The exporter code currently works like the following:\n # Empty string == all assets of that type\n # Non-Empty string = just one asset of that type (by name or ID)\n # Asset type not present or None = skip asset type (unless everything is None, then export all)\n # Here we are going to setup a dict of values to export\n export_args = {}\n for resource in EXPORTABLE_RESOURCES:\n if module.params.get('all') or module.params.get(resource) == 'all':\n # If we are exporting everything or we got the keyword \"all\" we pass in an empty string for this asset type\n export_args[resource] = ''\n else:\n # Otherwise we take either the string or None (if the parameter was not passed) to get one or no items\n export_args[resource] = module.params.get(resource)\n\n # Currently the export process does not return anything on error\n # It simply just logs to Python's logger\n # Set up a log gobbler to get error messages from export_assets\n log_capture_string = StringIO()\n ch = logging.StreamHandler(log_capture_string)\n for logger_name in ['awxkit.api.pages.api', 'awxkit.api.pages.page']:\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.ERROR)\n ch.setLevel(logging.ERROR)\n\n logger.addHandler(ch)\n log_contents = ''\n\n # Run the export process\n try:\n module.json_output['assets'] = module.get_api_v2_object().export_assets(**export_args)\n module.exit_json(**module.json_output)\n except Exception as e:\n module.fail_json(msg=\"Failed to export assets {0}\".format(e))\n finally:\n # Finally, consume the logs in case there were any errors and die if there were\n log_contents = log_capture_string.getvalue()\n log_capture_string.close()\n if log_contents != '':\n module.fail_json(msg=log_contents)\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/export.py"}], "after_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2017, John Westcott IV <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: export\nauthor: \"John Westcott IV (@john-westcott-iv)\"\nversion_added: \"3.7.0\"\nshort_description: export resources from Automation Platform Controller.\ndescription:\n - Export assets from Automation Platform Controller.\noptions:\n all:\n description:\n - Export all assets\n type: bool\n default: 'False'\n organizations:\n description:\n - organization names to export\n type: list\n elements: str\n users:\n description:\n - user names to export\n type: list\n elements: str\n teams:\n description:\n - team names to export\n type: list\n elements: str\n credential_types:\n description:\n - credential type names to export\n type: list\n elements: str\n credentials:\n description:\n - credential names to export\n type: list\n elements: str\n execution_environments:\n description:\n - execution environment names to export\n type: list\n elements: str\n notification_templates:\n description:\n 
- notification template names to export\n type: list\n elements: str\n inventory_sources:\n description:\n - inventory soruces to export\n type: list\n elements: str\n inventory:\n description:\n - inventory names to export\n type: list\n elements: str\n projects:\n description:\n - project names to export\n type: list\n elements: str\n job_templates:\n description:\n - job template names to export\n type: list\n elements: str\n workflow_job_templates:\n description:\n - workflow names to export\n type: list\n elements: str\n applications:\n description:\n - OAuth2 application names to export\n type: list\n elements: str\n schedules:\n description:\n - schedule names to export\n type: list\n elements: str\nrequirements:\n - \"awxkit >= 9.3.0\"\nnotes:\n - Specifying a name of \"all\" for any asset type will export all items of that asset type.\nextends_documentation_fragment: awx.awx.auth\n'''\n\nEXAMPLES = '''\n- name: Export all assets\n export:\n all: True\n\n- name: Export all inventories\n export:\n inventory: 'all'\n\n- name: Export a job template named \"My Template\" and all Credentials\n export:\n job_templates: \"My Template\"\n credential: 'all'\n\n- name: Export a list of inventories\n export:\n inventory: ['My Inventory 1', 'My Inventory 2']\n'''\n\nimport logging\nfrom ansible.module_utils.six.moves import StringIO\nfrom ..module_utils.awxkit import ControllerAWXKitModule\n\ntry:\n from awxkit.api.pages.api import EXPORTABLE_RESOURCES\n\n HAS_EXPORTABLE_RESOURCES = True\nexcept ImportError:\n HAS_EXPORTABLE_RESOURCES = False\n\n\ndef main():\n argument_spec = dict(\n all=dict(type='bool', default=False),\n )\n\n # We are not going to raise an error here because the __init__ method of ControllerAWXKitModule will do that for us\n if HAS_EXPORTABLE_RESOURCES:\n for resource in EXPORTABLE_RESOURCES:\n argument_spec[resource] = dict(type='list', elements='str')\n\n module = ControllerAWXKitModule(argument_spec=argument_spec)\n\n if not HAS_EXPORTABLE_RESOURCES:\n module.fail_json(msg=\"Your version of awxkit does not have import/export\")\n\n # The export process will never change the AWX system\n module.json_output['changed'] = False\n\n # The exporter code currently works like the following:\n # Empty string == all assets of that type\n # Non-Empty string = just one asset of that type (by name or ID)\n # Asset type not present or None = skip asset type (unless everything is None, then export all)\n # Here we are going to setup a dict of values to export\n export_args = {}\n for resource in EXPORTABLE_RESOURCES:\n if module.params.get('all') or module.params.get(resource) == ['all']:\n # If we are exporting everything or we got the keyword \"all\" we pass in an empty string for this asset type\n export_args[resource] = ''\n else:\n # Otherwise we take either the string or None (if the parameter was not passed) to get one or no items\n export_args[resource] = module.params.get(resource)\n\n # Currently the export process does not return anything on error\n # It simply just logs to Python's logger\n # Set up a log gobbler to get error messages from export_assets\n log_capture_string = StringIO()\n ch = logging.StreamHandler(log_capture_string)\n for logger_name in ['awxkit.api.pages.api', 'awxkit.api.pages.page']:\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.ERROR)\n ch.setLevel(logging.ERROR)\n\n logger.addHandler(ch)\n log_contents = ''\n\n # Run the export process\n try:\n module.json_output['assets'] = 
module.get_api_v2_object().export_assets(**export_args)\n module.exit_json(**module.json_output)\n except Exception as e:\n module.fail_json(msg=\"Failed to export assets {0}\".format(e))\n finally:\n # Finally, consume the logs in case there were any errors and die if there were\n log_contents = log_capture_string.getvalue()\n log_capture_string.close()\n if log_contents != '':\n module.fail_json(msg=log_contents)\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/export.py"}]} | 2,852 | 161 |
gh_patches_debug_47917 | rasdani/github-patches | git_diff | mindsdb__mindsdb-1620 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add option to list tables in MySQL integration :bookmark_tabs:
When users create a connection to the MySQL database, it will be useful to show them tips with a list of tables. To be able to do this, we need a new method `get_tables_list` implemented in the MySQL integration class.
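
For illustration only, here is a minimal sketch of what such a method might look like. It assumes the integration's existing `_query` helper (shown in `mysql.py` below) is available and that its dictionary cursor returns one dict per row; the method name simply follows the issue's suggestion and is not an existing API.

```python
# Hypothetical addition to the MySQL class in mindsdb/integrations/mysql/mysql.py
def get_tables_list(self):
    # Reuse the existing _query helper; its dictionary=True cursor returns
    # one dict per row, e.g. {"Tables_in_<database>": "<table_name>"}.
    return self._query("SHOW TABLES;")
```

A caller could then surface these rows as connection tips, for example by extracting the single value from each row dict.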
## Steps :male_detective: :female_detective:
- Fork the MindsDB repo
- Add new implementation in https://github.com/mindsdb/mindsdb/blob/stable/mindsdb/integrations/mysql/mysql.py#L51
- Make a PR to staging branch
## Additional rewards :1st_place_medal:
Each code PR brings :three: points for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear: . For more info, check out https://mindsdb.com/hacktoberfest/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mindsdb/integrations/mysql/mysql.py`
Content:
```
1 import os
2 import shutil
3 import tempfile
4
5 from contextlib import closing
6 import mysql.connector
7
8 from lightwood.api import dtype
9 from mindsdb.integrations.base import Integration
10 from mindsdb.utilities.log import log
11
12
13 class MySQLConnectionChecker:
14 def __init__(self, **kwargs):
15 self.host = kwargs.get('host')
16 self.port = kwargs.get('port')
17 self.user = kwargs.get('user')
18 self.password = kwargs.get('password')
19 self.ssl = kwargs.get('ssl')
20 self.ssl_ca = kwargs.get('ssl_ca')
21 self.ssl_cert = kwargs.get('ssl_cert')
22 self.ssl_key = kwargs.get('ssl_key')
23
24 def _get_connnection(self):
25 config = {
26 "host": self.host,
27 "port": self.port,
28 "user": self.user,
29 "password": self.password
30 }
31 if self.ssl is True:
32 config['client_flags'] = [mysql.connector.constants.ClientFlag.SSL]
33 if self.ssl_ca is not None:
34 config["ssl_ca"] = self.ssl_ca
35 if self.ssl_cert is not None:
36 config["ssl_cert"] = self.ssl_cert
37 if self.ssl_key is not None:
38 config["ssl_key"] = self.ssl_key
39 return mysql.connector.connect(**config)
40
41 def check_connection(self):
42 try:
43 con = self._get_connnection()
44 with closing(con) as con:
45 connected = con.is_connected()
46 except Exception:
47 connected = False
48 return connected
49
50
51 class MySQL(Integration, MySQLConnectionChecker):
52 def __init__(self, config, name, db_info):
53 super().__init__(config, name)
54 self.user = db_info.get('user')
55 self.password = db_info.get('password')
56 self.host = db_info.get('host')
57 self.port = db_info.get('port')
58 self.ssl = db_info.get('ssl')
59 self.ssl_ca = db_info.get('ssl_ca')
60 self.ssl_cert = db_info.get('ssl_cert')
61 self.ssl_key = db_info.get('ssl_key')
62
63 def _to_mysql_table(self, dtype_dict, predicted_cols, columns):
64 subtype_map = {
65 dtype.integer: 'int',
66 dtype.float: 'double',
67 dtype.binary: 'bool',
68 dtype.date: 'Date',
69 dtype.datetime: 'Datetime',
70 dtype.binary: 'VARCHAR(500)',
71 dtype.categorical: 'VARCHAR(500)',
72 dtype.tags: 'VARCHAR(500)',
73 dtype.image: 'VARCHAR(500)',
74 dtype.video: 'VARCHAR(500)',
75 dtype.audio: 'VARCHAR(500)',
76 dtype.short_text: 'VARCHAR(500)',
77 dtype.rich_text: 'VARCHAR(500)',
78 dtype.array: 'VARCHAR(500)'
79 }
80
81 column_declaration = []
82 for name in columns:
83 try:
84 col_subtype = dtype_dict[name]
85 new_type = subtype_map[col_subtype]
86 column_declaration.append(f' `{name}` {new_type} ')
87 if name in predicted_cols:
88 column_declaration.append(f' `{name}_original` {new_type} ')
89 except Exception as e:
90 log.error(f'Error: can not determine mysql data type for column {name}: {e}')
91
92 return column_declaration
93
94 def _escape_table_name(self, name):
95 return '`' + name.replace('`', '``') + '`'
96
97 def _query(self, query):
98 con = self._get_connnection()
99 with closing(con) as con:
100 cur = con.cursor(dictionary=True, buffered=True)
101 cur.execute(query)
102 res = True
103 try:
104 res = cur.fetchall()
105 except Exception:
106 pass
107 con.commit()
108
109 return res
110
111 def _get_connect_string(self, table):
112 user = f"{self.config['api']['mysql']['user']}_{self.name}"
113 password = self.config['api']['mysql']['password']
114 host = self.config['api']['mysql']['host']
115 port = self.config['api']['mysql']['port']
116
117 if password is None or password == '':
118 connect = f'mysql://{user}@{host}:{port}/mindsdb/{table}'
119 else:
120 connect = f'mysql://{user}:{password}@{host}:{port}/mindsdb/{table}'
121
122 return connect
123
124 def setup(self):
125 self._query(f'DROP DATABASE IF EXISTS {self.mindsdb_database}')
126 self._query(f'CREATE DATABASE IF NOT EXISTS {self.mindsdb_database}')
127
128 connect = self._get_connect_string('predictors')
129
130 q = f"""
131 CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.predictors (
132 name VARCHAR(500),
133 status VARCHAR(500),
134 accuracy VARCHAR(500),
135 predict VARCHAR(500),
136 select_data_query VARCHAR(500),
137 external_datasource VARCHAR(500),
138 training_options VARCHAR(500),
139 key name_key (name)
140 ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';
141 """
142 self._query(q)
143
144 connect = self._get_connect_string('commands')
145
146 q = f"""
147 CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.commands (
148 command VARCHAR(500),
149 key command_key (command)
150 ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';
151 """
152 self._query(q)
153
154 def register_predictors(self, model_data_arr):
155 for model_meta in model_data_arr:
156 name = model_meta['name']
157 predict = model_meta['predict']
158 if not isinstance(predict, list):
159 predict = [predict]
160 columns_sql = ','.join(self._to_mysql_table(
161 model_meta['dtype_dict'],
162 predict,
163 list(model_meta['dtype_dict'].keys())
164 ))
165 columns_sql += ',`when_data` varchar(500)'
166 columns_sql += ',`select_data_query` varchar(500)'
167 columns_sql += ',`external_datasource` varchar(500)'
168 for col in predict:
169 columns_sql += f',`{col}_confidence` double'
170 if model_meta['dtype_dict'][col] in (dtype.integer, dtype.float):
171 columns_sql += f',`{col}_min` double'
172 columns_sql += f',`{col}_max` double'
173 columns_sql += f',`{col}_explain` varchar(500)'
174
175 connect = self._get_connect_string(name)
176
177 self.unregister_predictor(name)
178 q = f"""
179 CREATE TABLE {self.mindsdb_database}.{self._escape_table_name(name)} (
180 {columns_sql},
181 index when_data_index (when_data),
182 index select_data_query_index (select_data_query),
183 index external_datasource_index (external_datasource)
184 ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';
185 """
186 self._query(q)
187
188 def unregister_predictor(self, name):
189 q = f"""
190 drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};
191 """
192 self._query(q)
193
194 def get_row_count(self, query):
195 q = f"""
196 SELECT COUNT(*) as count
197 FROM ({query}) as query;"""
198 result = self._query(q)
199 return result[0]['count']
200
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mindsdb/integrations/mysql/mysql.py b/mindsdb/integrations/mysql/mysql.py
--- a/mindsdb/integrations/mysql/mysql.py
+++ b/mindsdb/integrations/mysql/mysql.py
@@ -197,3 +197,10 @@
FROM ({query}) as query;"""
result = self._query(q)
return result[0]['count']
+
+ def get_tables_list(self):
+ q= f"""
+ SHOW TABLES;
+ """
+ result = self._query(q)
+ return result
| {"golden_diff": "diff --git a/mindsdb/integrations/mysql/mysql.py b/mindsdb/integrations/mysql/mysql.py\n--- a/mindsdb/integrations/mysql/mysql.py\n+++ b/mindsdb/integrations/mysql/mysql.py\n@@ -197,3 +197,10 @@\n FROM ({query}) as query;\"\"\"\n result = self._query(q)\n return result[0]['count']\n+ \n+ def get_tables_list(self):\n+ q= f\"\"\"\n+ SHOW TABLES;\n+ \"\"\"\n+ result = self._query(q)\n+ return result\n", "issue": "Add option to list tables in MySQL integration :bookmark_tabs: \nWhen users create a connection to the MySQL database it will be useful to show them tips with a list of tables. To be able to do this we need a new method `get_tables_list` implemented in the MySQL integration class.\r\n\r\n## Steps :male_detective: :female_detective: \r\n\r\n- Frok MindsDB repo\r\n- Add new implementation in https://github.com/mindsdb/mindsdb/blob/stable/mindsdb/integrations/mysql/mysql.py#L51\r\n- Make a PR to staging branch\r\n\r\n## Additional rewards :1st_place_medal: \r\n\r\nEach code PR brings :three: point for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear: . For more info check out https://mindsdb.com/hacktoberfest/\n", "before_files": [{"content": "import os\nimport shutil\nimport tempfile\n\nfrom contextlib import closing\nimport mysql.connector\n\nfrom lightwood.api import dtype\nfrom mindsdb.integrations.base import Integration\nfrom mindsdb.utilities.log import log\n\n\nclass MySQLConnectionChecker:\n def __init__(self, **kwargs):\n self.host = kwargs.get('host')\n self.port = kwargs.get('port')\n self.user = kwargs.get('user')\n self.password = kwargs.get('password')\n self.ssl = kwargs.get('ssl')\n self.ssl_ca = kwargs.get('ssl_ca')\n self.ssl_cert = kwargs.get('ssl_cert')\n self.ssl_key = kwargs.get('ssl_key')\n\n def _get_connnection(self):\n config = {\n \"host\": self.host,\n \"port\": self.port,\n \"user\": self.user,\n \"password\": self.password\n }\n if self.ssl is True:\n config['client_flags'] = [mysql.connector.constants.ClientFlag.SSL]\n if self.ssl_ca is not None:\n config[\"ssl_ca\"] = self.ssl_ca\n if self.ssl_cert is not None:\n config[\"ssl_cert\"] = self.ssl_cert\n if self.ssl_key is not None:\n config[\"ssl_key\"] = self.ssl_key\n return mysql.connector.connect(**config)\n\n def check_connection(self):\n try:\n con = self._get_connnection()\n with closing(con) as con:\n connected = con.is_connected()\n except Exception:\n connected = False\n return connected\n\n\nclass MySQL(Integration, MySQLConnectionChecker):\n def __init__(self, config, name, db_info):\n super().__init__(config, name)\n self.user = db_info.get('user')\n self.password = db_info.get('password')\n self.host = db_info.get('host')\n self.port = db_info.get('port')\n self.ssl = db_info.get('ssl')\n self.ssl_ca = db_info.get('ssl_ca')\n self.ssl_cert = db_info.get('ssl_cert')\n self.ssl_key = db_info.get('ssl_key')\n\n def _to_mysql_table(self, dtype_dict, predicted_cols, columns):\n subtype_map = {\n dtype.integer: 'int',\n dtype.float: 'double',\n dtype.binary: 'bool',\n dtype.date: 'Date',\n dtype.datetime: 'Datetime',\n dtype.binary: 'VARCHAR(500)',\n dtype.categorical: 'VARCHAR(500)',\n dtype.tags: 'VARCHAR(500)',\n dtype.image: 'VARCHAR(500)',\n dtype.video: 'VARCHAR(500)',\n dtype.audio: 'VARCHAR(500)',\n dtype.short_text: 'VARCHAR(500)',\n dtype.rich_text: 'VARCHAR(500)',\n dtype.array: 'VARCHAR(500)'\n }\n\n column_declaration = []\n for name in columns:\n try:\n col_subtype = dtype_dict[name]\n 
new_type = subtype_map[col_subtype]\n column_declaration.append(f' `{name}` {new_type} ')\n if name in predicted_cols:\n column_declaration.append(f' `{name}_original` {new_type} ')\n except Exception as e:\n log.error(f'Error: can not determine mysql data type for column {name}: {e}')\n\n return column_declaration\n\n def _escape_table_name(self, name):\n return '`' + name.replace('`', '``') + '`'\n\n def _query(self, query):\n con = self._get_connnection()\n with closing(con) as con:\n cur = con.cursor(dictionary=True, buffered=True)\n cur.execute(query)\n res = True\n try:\n res = cur.fetchall()\n except Exception:\n pass\n con.commit()\n\n return res\n\n def _get_connect_string(self, table):\n user = f\"{self.config['api']['mysql']['user']}_{self.name}\"\n password = self.config['api']['mysql']['password']\n host = self.config['api']['mysql']['host']\n port = self.config['api']['mysql']['port']\n\n if password is None or password == '':\n connect = f'mysql://{user}@{host}:{port}/mindsdb/{table}'\n else:\n connect = f'mysql://{user}:{password}@{host}:{port}/mindsdb/{table}'\n\n return connect\n\n def setup(self):\n self._query(f'DROP DATABASE IF EXISTS {self.mindsdb_database}')\n self._query(f'CREATE DATABASE IF NOT EXISTS {self.mindsdb_database}')\n\n connect = self._get_connect_string('predictors')\n\n q = f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.predictors (\n name VARCHAR(500),\n status VARCHAR(500),\n accuracy VARCHAR(500),\n predict VARCHAR(500),\n select_data_query VARCHAR(500),\n external_datasource VARCHAR(500),\n training_options VARCHAR(500),\n key name_key (name)\n ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n connect = self._get_connect_string('commands')\n\n q = f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.commands (\n command VARCHAR(500),\n key command_key (command)\n ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n def register_predictors(self, model_data_arr):\n for model_meta in model_data_arr:\n name = model_meta['name']\n predict = model_meta['predict']\n if not isinstance(predict, list):\n predict = [predict]\n columns_sql = ','.join(self._to_mysql_table(\n model_meta['dtype_dict'],\n predict,\n list(model_meta['dtype_dict'].keys())\n ))\n columns_sql += ',`when_data` varchar(500)'\n columns_sql += ',`select_data_query` varchar(500)'\n columns_sql += ',`external_datasource` varchar(500)'\n for col in predict:\n columns_sql += f',`{col}_confidence` double'\n if model_meta['dtype_dict'][col] in (dtype.integer, dtype.float):\n columns_sql += f',`{col}_min` double'\n columns_sql += f',`{col}_max` double'\n columns_sql += f',`{col}_explain` varchar(500)'\n\n connect = self._get_connect_string(name)\n\n self.unregister_predictor(name)\n q = f\"\"\"\n CREATE TABLE {self.mindsdb_database}.{self._escape_table_name(name)} (\n {columns_sql},\n index when_data_index (when_data),\n index select_data_query_index (select_data_query),\n index external_datasource_index (external_datasource)\n ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n def unregister_predictor(self, name):\n q = f\"\"\"\n drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};\n \"\"\"\n self._query(q)\n\n def get_row_count(self, query):\n q = f\"\"\" \n SELECT COUNT(*) as count\n FROM ({query}) as query;\"\"\"\n result = self._query(q)\n return result[0]['count']\n", "path": "mindsdb/integrations/mysql/mysql.py"}], "after_files": 
[{"content": "import os\nimport shutil\nimport tempfile\n\nfrom contextlib import closing\nimport mysql.connector\n\nfrom lightwood.api import dtype\nfrom mindsdb.integrations.base import Integration\nfrom mindsdb.utilities.log import log\n\n\nclass MySQLConnectionChecker:\n def __init__(self, **kwargs):\n self.host = kwargs.get('host')\n self.port = kwargs.get('port')\n self.user = kwargs.get('user')\n self.password = kwargs.get('password')\n self.ssl = kwargs.get('ssl')\n self.ssl_ca = kwargs.get('ssl_ca')\n self.ssl_cert = kwargs.get('ssl_cert')\n self.ssl_key = kwargs.get('ssl_key')\n\n def _get_connnection(self):\n config = {\n \"host\": self.host,\n \"port\": self.port,\n \"user\": self.user,\n \"password\": self.password\n }\n if self.ssl is True:\n config['client_flags'] = [mysql.connector.constants.ClientFlag.SSL]\n if self.ssl_ca is not None:\n config[\"ssl_ca\"] = self.ssl_ca\n if self.ssl_cert is not None:\n config[\"ssl_cert\"] = self.ssl_cert\n if self.ssl_key is not None:\n config[\"ssl_key\"] = self.ssl_key\n return mysql.connector.connect(**config)\n\n def check_connection(self):\n try:\n con = self._get_connnection()\n with closing(con) as con:\n connected = con.is_connected()\n except Exception:\n connected = False\n return connected\n\n\nclass MySQL(Integration, MySQLConnectionChecker):\n def __init__(self, config, name, db_info):\n super().__init__(config, name)\n self.user = db_info.get('user')\n self.password = db_info.get('password')\n self.host = db_info.get('host')\n self.port = db_info.get('port')\n self.ssl = db_info.get('ssl')\n self.ssl_ca = db_info.get('ssl_ca')\n self.ssl_cert = db_info.get('ssl_cert')\n self.ssl_key = db_info.get('ssl_key')\n\n def _to_mysql_table(self, dtype_dict, predicted_cols, columns):\n subtype_map = {\n dtype.integer: 'int',\n dtype.float: 'double',\n dtype.binary: 'bool',\n dtype.date: 'Date',\n dtype.datetime: 'Datetime',\n dtype.binary: 'VARCHAR(500)',\n dtype.categorical: 'VARCHAR(500)',\n dtype.tags: 'VARCHAR(500)',\n dtype.image: 'VARCHAR(500)',\n dtype.video: 'VARCHAR(500)',\n dtype.audio: 'VARCHAR(500)',\n dtype.short_text: 'VARCHAR(500)',\n dtype.rich_text: 'VARCHAR(500)',\n dtype.array: 'VARCHAR(500)'\n }\n\n column_declaration = []\n for name in columns:\n try:\n col_subtype = dtype_dict[name]\n new_type = subtype_map[col_subtype]\n column_declaration.append(f' `{name}` {new_type} ')\n if name in predicted_cols:\n column_declaration.append(f' `{name}_original` {new_type} ')\n except Exception as e:\n log.error(f'Error: can not determine mysql data type for column {name}: {e}')\n\n return column_declaration\n\n def _escape_table_name(self, name):\n return '`' + name.replace('`', '``') + '`'\n\n def _query(self, query):\n con = self._get_connnection()\n with closing(con) as con:\n cur = con.cursor(dictionary=True, buffered=True)\n cur.execute(query)\n res = True\n try:\n res = cur.fetchall()\n except Exception:\n pass\n con.commit()\n\n return res\n\n def _get_connect_string(self, table):\n user = f\"{self.config['api']['mysql']['user']}_{self.name}\"\n password = self.config['api']['mysql']['password']\n host = self.config['api']['mysql']['host']\n port = self.config['api']['mysql']['port']\n\n if password is None or password == '':\n connect = f'mysql://{user}@{host}:{port}/mindsdb/{table}'\n else:\n connect = f'mysql://{user}:{password}@{host}:{port}/mindsdb/{table}'\n\n return connect\n\n def setup(self):\n self._query(f'DROP DATABASE IF EXISTS {self.mindsdb_database}')\n self._query(f'CREATE DATABASE IF NOT EXISTS 
{self.mindsdb_database}')\n\n connect = self._get_connect_string('predictors')\n\n q = f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.predictors (\n name VARCHAR(500),\n status VARCHAR(500),\n accuracy VARCHAR(500),\n predict VARCHAR(500),\n select_data_query VARCHAR(500),\n external_datasource VARCHAR(500),\n training_options VARCHAR(500),\n key name_key (name)\n ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n connect = self._get_connect_string('commands')\n\n q = f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.commands (\n command VARCHAR(500),\n key command_key (command)\n ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n def register_predictors(self, model_data_arr):\n for model_meta in model_data_arr:\n name = model_meta['name']\n predict = model_meta['predict']\n if not isinstance(predict, list):\n predict = [predict]\n columns_sql = ','.join(self._to_mysql_table(\n model_meta['dtype_dict'],\n predict,\n list(model_meta['dtype_dict'].keys())\n ))\n columns_sql += ',`when_data` varchar(500)'\n columns_sql += ',`select_data_query` varchar(500)'\n columns_sql += ',`external_datasource` varchar(500)'\n for col in predict:\n columns_sql += f',`{col}_confidence` double'\n if model_meta['dtype_dict'][col] in (dtype.integer, dtype.float):\n columns_sql += f',`{col}_min` double'\n columns_sql += f',`{col}_max` double'\n columns_sql += f',`{col}_explain` varchar(500)'\n\n connect = self._get_connect_string(name)\n\n self.unregister_predictor(name)\n q = f\"\"\"\n CREATE TABLE {self.mindsdb_database}.{self._escape_table_name(name)} (\n {columns_sql},\n index when_data_index (when_data),\n index select_data_query_index (select_data_query),\n index external_datasource_index (external_datasource)\n ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n def unregister_predictor(self, name):\n q = f\"\"\"\n drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};\n \"\"\"\n self._query(q)\n\n def get_row_count(self, query):\n q = f\"\"\" \n SELECT COUNT(*) as count\n FROM ({query}) as query;\"\"\"\n result = self._query(q)\n return result[0]['count']\n \n def get_tables_list(self):\n q= f\"\"\"\n SHOW TABLES;\n \"\"\"\n result = self._query(q)\n return result\n", "path": "mindsdb/integrations/mysql/mysql.py"}]} | 2,523 | 127 |
gh_patches_debug_4148 | rasdani/github-patches | git_diff | psf__black-4128 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Changes to files happening quickly after running `black` are not detected
**Describe the bug**
It seems `black` does not detect that a file has been changed and needs to be re-checked, if the change happens quickly after `black` has been run on the file.
I'm talking about this feature: [Ignoring unmodified files](https://black.readthedocs.io/en/stable/usage_and_configuration/file_collection_and_discovery.html#ignoring-unmodified-files)
**To Reproduce**
I'm using the following shell script to reproduce the issue:
```bash
#! /usr/bin/env bash
set -eux
echo 'print (1)' > file.py
black file.py
echo 'print ()' > file.py
black file.py
cat file.py
```
The script overwrites the file and then runs `black`, doing this twice in a row. In the second run, `black` should reformat `print ()` to `print()`, but this does not happen:
```
$ ./bug.sh
+ echo 'print (1)'
+ black file.py
reformatted file.py
All done! ✨ 🍰 ✨
1 file reformatted.
+ echo 'print ()'
+ black file.py
All done! ✨ 🍰 ✨
1 file left unchanged.
+ cat file.py
print ()
```
Even running it manually after a few seconds does not fix the issue, but removing the cache directory does:
```
$ black file.py
All done! ✨ 🍰 ✨
1 file left unchanged.
$ cat file.py
print ()
$ rm -r ~/Library/Caches/black/
$ black file.py
reformatted file.py
All done! ✨ 🍰 ✨
1 file reformatted.
$ cat file.py
print()
```
**Expected behavior**
I think black should not get confused by changes to files that happen quickly after it has formatted a file. The file should be checked again if it is possible that its content has changed without also changing its timestamp.
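
To make the failure mode concrete, here is a hedged sketch of the stricter check being argued for: compare the cached mtime at full precision rather than truncated to whole seconds, and fall back to hashing the contents when the mtime differs. The helper is illustrative only and is not Black's actual API, although its fields mirror the `FileData` tuple in `src/black/cache.py` below.

```python
import hashlib
from pathlib import Path


def is_changed(path: Path, cached_mtime: float, cached_size: int, cached_hash: str) -> bool:
    st = path.stat()
    if st.st_size != cached_size:
        return True
    # Full-precision comparison: two writes within the same second usually
    # still produce different float mtimes, so the hash check is triggered.
    if st.st_mtime != cached_mtime:
        return hashlib.sha256(path.read_bytes()).hexdigest() != cached_hash
    return False
```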
**Environment**
- Black's version:
```
$ black --version
black, 23.12.0 (compiled: yes)
Python (CPython) 3.11.6
```
- OS and Python version: macOS 12.7.1, Python 3.11.6 installed via Homebrew.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/black/cache.py`
Content:
```
1 """Caching of formatted files with feature-based invalidation."""
2
3 import hashlib
4 import os
5 import pickle
6 import sys
7 import tempfile
8 from dataclasses import dataclass, field
9 from pathlib import Path
10 from typing import Dict, Iterable, NamedTuple, Set, Tuple
11
12 from platformdirs import user_cache_dir
13
14 from _black_version import version as __version__
15 from black.mode import Mode
16
17 if sys.version_info >= (3, 11):
18 from typing import Self
19 else:
20 from typing_extensions import Self
21
22
23 class FileData(NamedTuple):
24 st_mtime: float
25 st_size: int
26 hash: str
27
28
29 def get_cache_dir() -> Path:
30 """Get the cache directory used by black.
31
32 Users can customize this directory on all systems using `BLACK_CACHE_DIR`
33 environment variable. By default, the cache directory is the user cache directory
34 under the black application.
35
36 This result is immediately set to a constant `black.cache.CACHE_DIR` as to avoid
37 repeated calls.
38 """
39 # NOTE: Function mostly exists as a clean way to test getting the cache directory.
40 default_cache_dir = user_cache_dir("black")
41 cache_dir = Path(os.environ.get("BLACK_CACHE_DIR", default_cache_dir))
42 cache_dir = cache_dir / __version__
43 return cache_dir
44
45
46 CACHE_DIR = get_cache_dir()
47
48
49 def get_cache_file(mode: Mode) -> Path:
50 return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle"
51
52
53 @dataclass
54 class Cache:
55 mode: Mode
56 cache_file: Path
57 file_data: Dict[str, FileData] = field(default_factory=dict)
58
59 @classmethod
60 def read(cls, mode: Mode) -> Self:
61 """Read the cache if it exists and is well formed.
62
63 If it is not well formed, the call to write later should
64 resolve the issue.
65 """
66 cache_file = get_cache_file(mode)
67 if not cache_file.exists():
68 return cls(mode, cache_file)
69
70 with cache_file.open("rb") as fobj:
71 try:
72 data: Dict[str, Tuple[float, int, str]] = pickle.load(fobj)
73 file_data = {k: FileData(*v) for k, v in data.items()}
74 except (pickle.UnpicklingError, ValueError, IndexError):
75 return cls(mode, cache_file)
76
77 return cls(mode, cache_file, file_data)
78
79 @staticmethod
80 def hash_digest(path: Path) -> str:
81 """Return hash digest for path."""
82
83 data = path.read_bytes()
84 return hashlib.sha256(data).hexdigest()
85
86 @staticmethod
87 def get_file_data(path: Path) -> FileData:
88 """Return file data for path."""
89
90 stat = path.stat()
91 hash = Cache.hash_digest(path)
92 return FileData(stat.st_mtime, stat.st_size, hash)
93
94 def is_changed(self, source: Path) -> bool:
95 """Check if source has changed compared to cached version."""
96 res_src = source.resolve()
97 old = self.file_data.get(str(res_src))
98 if old is None:
99 return True
100
101 st = res_src.stat()
102 if st.st_size != old.st_size:
103 return True
104 if int(st.st_mtime) != int(old.st_mtime):
105 new_hash = Cache.hash_digest(res_src)
106 if new_hash != old.hash:
107 return True
108 return False
109
110 def filtered_cached(self, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
111 """Split an iterable of paths in `sources` into two sets.
112
113 The first contains paths of files that modified on disk or are not in the
114 cache. The other contains paths to non-modified files.
115 """
116 changed: Set[Path] = set()
117 done: Set[Path] = set()
118 for src in sources:
119 if self.is_changed(src):
120 changed.add(src)
121 else:
122 done.add(src)
123 return changed, done
124
125 def write(self, sources: Iterable[Path]) -> None:
126 """Update the cache file data and write a new cache file."""
127 self.file_data.update(
128 **{str(src.resolve()): Cache.get_file_data(src) for src in sources}
129 )
130 try:
131 CACHE_DIR.mkdir(parents=True, exist_ok=True)
132 with tempfile.NamedTemporaryFile(
133 dir=str(self.cache_file.parent), delete=False
134 ) as f:
135 # We store raw tuples in the cache because pickling NamedTuples
136 # doesn't work with mypyc on Python 3.8, and because it's faster.
137 data: Dict[str, Tuple[float, int, str]] = {
138 k: (*v,) for k, v in self.file_data.items()
139 }
140 pickle.dump(data, f, protocol=4)
141 os.replace(f.name, self.cache_file)
142 except OSError:
143 pass
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/black/cache.py b/src/black/cache.py
--- a/src/black/cache.py
+++ b/src/black/cache.py
@@ -101,7 +101,7 @@
st = res_src.stat()
if st.st_size != old.st_size:
return True
- if int(st.st_mtime) != int(old.st_mtime):
+ if st.st_mtime != old.st_mtime:
new_hash = Cache.hash_digest(res_src)
if new_hash != old.hash:
return True
| {"golden_diff": "diff --git a/src/black/cache.py b/src/black/cache.py\n--- a/src/black/cache.py\n+++ b/src/black/cache.py\n@@ -101,7 +101,7 @@\n st = res_src.stat()\n if st.st_size != old.st_size:\n return True\n- if int(st.st_mtime) != int(old.st_mtime):\n+ if st.st_mtime != old.st_mtime:\n new_hash = Cache.hash_digest(res_src)\n if new_hash != old.hash:\n return True\n", "issue": "Changes to files happing quickly after running `black` are not detected\n**Describe the bug**\r\n\r\nIt seems `black` does not detect that a file has been changed and needs to be re-checked, if the change happens quickly after `black` has been run on the file.\r\n\r\nI'm talking about this feature: [Ignoring unmodified files](https://black.readthedocs.io/en/stable/usage_and_configuration/file_collection_and_discovery.html#ignoring-unmodified-files)\r\n\r\n**To Reproduce**\r\n\r\nI'm using the following shell script to reproduce the issue:\r\n\r\n```bash\r\n#! /usr/bin/env bash\r\n\r\nset -eux\r\n\r\necho 'print (1)' > file.py\r\nblack file.py\r\necho 'print ()' > file.py\r\nblack file.py\r\ncat file.py\r\n```\r\n\r\nThe script writes overwrites the file and then runs `black`, doing this twice in a row. In the second run, `black` should reformat `print ()` to `print()`, but this does not happen:\r\n\r\n```\r\n$ ./bug.sh \r\n+ echo 'print (1)'\r\n+ black file.py\r\nreformatted file.py\r\n\r\nAll done! \u2728 \ud83c\udf70 \u2728\r\n1 file reformatted.\r\n+ echo 'print ()'\r\n+ black file.py\r\nAll done! \u2728 \ud83c\udf70 \u2728\r\n1 file left unchanged.\r\n+ cat file.py\r\nprint ()\r\n```\r\n\r\nEven running it manually after a few seconds does not fix the issue, but removing the cache directory does:\r\n\r\n```\r\n$ black file.py\r\nAll done! \u2728 \ud83c\udf70 \u2728\r\n1 file left unchanged.\r\n$ cat file.py\r\nprint ()\r\n$ rm -r ~/Library/Caches/black/\r\n$ black file.py\r\nreformatted file.py\r\n\r\nAll done! \u2728 \ud83c\udf70 \u2728\r\n1 file reformatted.\r\n$ cat file.py\r\nprint()\r\n```\r\n\r\n**Expected behavior**\r\n\r\nI think black should not get confused by changes to files that happen quickly after it has formatted a file. The file should be checked again if it is possible that its content has changed without also changing its timestamp.\r\n\r\n**Environment**\r\n\r\n- Black's version:\r\n\r\n ```\r\n $ black --version\r\n black, 23.12.0 (compiled: yes)\r\n Python (CPython) 3.11.6\r\n ```\r\n\r\n- OS and Python version: macOS 12.7.1, Python 3.11.6 installed via Homebrew.\n", "before_files": [{"content": "\"\"\"Caching of formatted files with feature-based invalidation.\"\"\"\n\nimport hashlib\nimport os\nimport pickle\nimport sys\nimport tempfile\nfrom dataclasses import dataclass, field\nfrom pathlib import Path\nfrom typing import Dict, Iterable, NamedTuple, Set, Tuple\n\nfrom platformdirs import user_cache_dir\n\nfrom _black_version import version as __version__\nfrom black.mode import Mode\n\nif sys.version_info >= (3, 11):\n from typing import Self\nelse:\n from typing_extensions import Self\n\n\nclass FileData(NamedTuple):\n st_mtime: float\n st_size: int\n hash: str\n\n\ndef get_cache_dir() -> Path:\n \"\"\"Get the cache directory used by black.\n\n Users can customize this directory on all systems using `BLACK_CACHE_DIR`\n environment variable. 
By default, the cache directory is the user cache directory\n under the black application.\n\n This result is immediately set to a constant `black.cache.CACHE_DIR` as to avoid\n repeated calls.\n \"\"\"\n # NOTE: Function mostly exists as a clean way to test getting the cache directory.\n default_cache_dir = user_cache_dir(\"black\")\n cache_dir = Path(os.environ.get(\"BLACK_CACHE_DIR\", default_cache_dir))\n cache_dir = cache_dir / __version__\n return cache_dir\n\n\nCACHE_DIR = get_cache_dir()\n\n\ndef get_cache_file(mode: Mode) -> Path:\n return CACHE_DIR / f\"cache.{mode.get_cache_key()}.pickle\"\n\n\n@dataclass\nclass Cache:\n mode: Mode\n cache_file: Path\n file_data: Dict[str, FileData] = field(default_factory=dict)\n\n @classmethod\n def read(cls, mode: Mode) -> Self:\n \"\"\"Read the cache if it exists and is well formed.\n\n If it is not well formed, the call to write later should\n resolve the issue.\n \"\"\"\n cache_file = get_cache_file(mode)\n if not cache_file.exists():\n return cls(mode, cache_file)\n\n with cache_file.open(\"rb\") as fobj:\n try:\n data: Dict[str, Tuple[float, int, str]] = pickle.load(fobj)\n file_data = {k: FileData(*v) for k, v in data.items()}\n except (pickle.UnpicklingError, ValueError, IndexError):\n return cls(mode, cache_file)\n\n return cls(mode, cache_file, file_data)\n\n @staticmethod\n def hash_digest(path: Path) -> str:\n \"\"\"Return hash digest for path.\"\"\"\n\n data = path.read_bytes()\n return hashlib.sha256(data).hexdigest()\n\n @staticmethod\n def get_file_data(path: Path) -> FileData:\n \"\"\"Return file data for path.\"\"\"\n\n stat = path.stat()\n hash = Cache.hash_digest(path)\n return FileData(stat.st_mtime, stat.st_size, hash)\n\n def is_changed(self, source: Path) -> bool:\n \"\"\"Check if source has changed compared to cached version.\"\"\"\n res_src = source.resolve()\n old = self.file_data.get(str(res_src))\n if old is None:\n return True\n\n st = res_src.stat()\n if st.st_size != old.st_size:\n return True\n if int(st.st_mtime) != int(old.st_mtime):\n new_hash = Cache.hash_digest(res_src)\n if new_hash != old.hash:\n return True\n return False\n\n def filtered_cached(self, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:\n \"\"\"Split an iterable of paths in `sources` into two sets.\n\n The first contains paths of files that modified on disk or are not in the\n cache. 
The other contains paths to non-modified files.\n \"\"\"\n changed: Set[Path] = set()\n done: Set[Path] = set()\n for src in sources:\n if self.is_changed(src):\n changed.add(src)\n else:\n done.add(src)\n return changed, done\n\n def write(self, sources: Iterable[Path]) -> None:\n \"\"\"Update the cache file data and write a new cache file.\"\"\"\n self.file_data.update(\n **{str(src.resolve()): Cache.get_file_data(src) for src in sources}\n )\n try:\n CACHE_DIR.mkdir(parents=True, exist_ok=True)\n with tempfile.NamedTemporaryFile(\n dir=str(self.cache_file.parent), delete=False\n ) as f:\n # We store raw tuples in the cache because pickling NamedTuples\n # doesn't work with mypyc on Python 3.8, and because it's faster.\n data: Dict[str, Tuple[float, int, str]] = {\n k: (*v,) for k, v in self.file_data.items()\n }\n pickle.dump(data, f, protocol=4)\n os.replace(f.name, self.cache_file)\n except OSError:\n pass\n", "path": "src/black/cache.py"}], "after_files": [{"content": "\"\"\"Caching of formatted files with feature-based invalidation.\"\"\"\n\nimport hashlib\nimport os\nimport pickle\nimport sys\nimport tempfile\nfrom dataclasses import dataclass, field\nfrom pathlib import Path\nfrom typing import Dict, Iterable, NamedTuple, Set, Tuple\n\nfrom platformdirs import user_cache_dir\n\nfrom _black_version import version as __version__\nfrom black.mode import Mode\n\nif sys.version_info >= (3, 11):\n from typing import Self\nelse:\n from typing_extensions import Self\n\n\nclass FileData(NamedTuple):\n st_mtime: float\n st_size: int\n hash: str\n\n\ndef get_cache_dir() -> Path:\n \"\"\"Get the cache directory used by black.\n\n Users can customize this directory on all systems using `BLACK_CACHE_DIR`\n environment variable. By default, the cache directory is the user cache directory\n under the black application.\n\n This result is immediately set to a constant `black.cache.CACHE_DIR` as to avoid\n repeated calls.\n \"\"\"\n # NOTE: Function mostly exists as a clean way to test getting the cache directory.\n default_cache_dir = user_cache_dir(\"black\")\n cache_dir = Path(os.environ.get(\"BLACK_CACHE_DIR\", default_cache_dir))\n cache_dir = cache_dir / __version__\n return cache_dir\n\n\nCACHE_DIR = get_cache_dir()\n\n\ndef get_cache_file(mode: Mode) -> Path:\n return CACHE_DIR / f\"cache.{mode.get_cache_key()}.pickle\"\n\n\n@dataclass\nclass Cache:\n mode: Mode\n cache_file: Path\n file_data: Dict[str, FileData] = field(default_factory=dict)\n\n @classmethod\n def read(cls, mode: Mode) -> Self:\n \"\"\"Read the cache if it exists and is well formed.\n\n If it is not well formed, the call to write later should\n resolve the issue.\n \"\"\"\n cache_file = get_cache_file(mode)\n if not cache_file.exists():\n return cls(mode, cache_file)\n\n with cache_file.open(\"rb\") as fobj:\n try:\n data: Dict[str, Tuple[float, int, str]] = pickle.load(fobj)\n file_data = {k: FileData(*v) for k, v in data.items()}\n except (pickle.UnpicklingError, ValueError, IndexError):\n return cls(mode, cache_file)\n\n return cls(mode, cache_file, file_data)\n\n @staticmethod\n def hash_digest(path: Path) -> str:\n \"\"\"Return hash digest for path.\"\"\"\n\n data = path.read_bytes()\n return hashlib.sha256(data).hexdigest()\n\n @staticmethod\n def get_file_data(path: Path) -> FileData:\n \"\"\"Return file data for path.\"\"\"\n\n stat = path.stat()\n hash = Cache.hash_digest(path)\n return FileData(stat.st_mtime, stat.st_size, hash)\n\n def is_changed(self, source: Path) -> bool:\n \"\"\"Check if source has changed 
compared to cached version.\"\"\"\n res_src = source.resolve()\n old = self.file_data.get(str(res_src))\n if old is None:\n return True\n\n st = res_src.stat()\n if st.st_size != old.st_size:\n return True\n if st.st_mtime != old.st_mtime:\n new_hash = Cache.hash_digest(res_src)\n if new_hash != old.hash:\n return True\n return False\n\n def filtered_cached(self, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:\n \"\"\"Split an iterable of paths in `sources` into two sets.\n\n The first contains paths of files that modified on disk or are not in the\n cache. The other contains paths to non-modified files.\n \"\"\"\n changed: Set[Path] = set()\n done: Set[Path] = set()\n for src in sources:\n if self.is_changed(src):\n changed.add(src)\n else:\n done.add(src)\n return changed, done\n\n def write(self, sources: Iterable[Path]) -> None:\n \"\"\"Update the cache file data and write a new cache file.\"\"\"\n self.file_data.update(\n **{str(src.resolve()): Cache.get_file_data(src) for src in sources}\n )\n try:\n CACHE_DIR.mkdir(parents=True, exist_ok=True)\n with tempfile.NamedTemporaryFile(\n dir=str(self.cache_file.parent), delete=False\n ) as f:\n # We store raw tuples in the cache because pickling NamedTuples\n # doesn't work with mypyc on Python 3.8, and because it's faster.\n data: Dict[str, Tuple[float, int, str]] = {\n k: (*v,) for k, v in self.file_data.items()\n }\n pickle.dump(data, f, protocol=4)\n os.replace(f.name, self.cache_file)\n except OSError:\n pass\n", "path": "src/black/cache.py"}]} | 2,138 | 110 |
gh_patches_debug_2180 | rasdani/github-patches | git_diff | GPflow__GPflow-2052 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing Reference to Manipulating Kernels Page
# Documentation/tutorial notebooks
In the [Kernel Design page](https://gpflow.github.io/GPflow/2.7.0/notebooks/tailor/kernel_design.html), there is a missing reference to the "Manipulating Kernels" notebook at the end. This notebook seems accessible from the old documentation, up to [2.6.4](https://gpflow.github.io/GPflow/2.6.4/notebooks/advanced/kernels.html).
It seems this page was removed for some reason. Maybe it was considered unnecessary, as some of the information is given in the getting started page, but I disagree. I believe it gives a more comprehensive review of the available kernel implementations, so it would be nice to have it back.
As a side note, for some reason the 2.6.4 documentation insists on the dark theme for me, but I like the light theme better. Is there an option to change this? I am forced to clear the cookies to get a light background.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/sphinx/notebooks/tailor/kernel_design.pct.py`
Content:
```
1 # ---
2 # jupyter:
3 # jupytext:
4 # formats: ipynb,.pct.py:percent
5 # text_representation:
6 # extension: .py
7 # format_name: percent
8 # format_version: '1.3'
9 # jupytext_version: 1.3.3
10 # kernelspec:
11 # display_name: Python 3
12 # language: python
13 # name: python3
14 # ---
15
16 # %% [markdown]
17 # # Kernel Design
18 #
19 # It's easy to make new kernels in GPflow. To demonstrate, we'll have a look at the Brownian motion kernel, whose function is
20 # \begin{equation}
21 # k(x, x') = \sigma^2 \text{min}(x, x')
22 # \end{equation}
23 # where $\sigma^2$ is a variance parameter.
24
25 # %%
26 import matplotlib.pyplot as plt
27 import numpy as np
28 import tensorflow as tf
29
30 import gpflow
31 from gpflow.utilities import positive, print_summary
32
33 plt.style.use("ggplot")
34 # %matplotlib inline
35
36 # %% [markdown]
37 # To make this new kernel class, we inherit from the base class `gpflow.kernels.Kernel` and implement the three functions below. **NOTE:** Depending on the kernel to be implemented, other classes can be more adequate. For example, if the kernel to be implemented is isotropic stationary, you can immediately subclass `gpflow.kernels.IsotropicStationary` (at which point you
38 # only have to override `K_r` or `K_r2`; see the `IsotropicStationary` class docstring). Stationary but anisotropic kernels should subclass `gpflow.kernels.AnisotropicStationary` and override `K_d`.
39 #
40 # #### `__init__`
41 # In this simple example, the constructor takes no argument (though it could, if that was convenient, for example to pass in an initial value for `variance`). It *must* call the constructor of the superclass with appropriate arguments. Brownian motion is only defined in one dimension, and we'll assume that the `active_dims` are `[0]`, for simplicity.
42 #
43 # We've added a parameter to the kernel using the `Parameter` class. Using this class lets the parameter be used in computing the kernel function, and it will automatically be recognised for optimization (or MCMC). Here, the variance parameter is initialized at 1, and constrained to be positive.
44 #
45 # #### `K`
46 # This is where you implement the kernel function itself. This takes two arguments, `X` and `X2`. By convention, we make the second argument optional (it defaults to `None`).
47 #
48 # Inside `K`, all the computation must be done with TensorFlow - here we've used `tf.minimum`. When GPflow executes the `K` function, `X` and `X2` will be TensorFlow tensors, and parameters such as `self.variance` behave like TensorFlow tensors as well.
49 #
50 # #### `K_diag`
51 # This convenience function allows GPflow to save memory at predict time. It's simply the diagonal of the `K` function, in the case where `X2` is `None`. It must return a one-dimensional vector, so we use TensorFlow's reshape command.
52
53 # %%
54 class Brownian(gpflow.kernels.Kernel):
55 def __init__(self):
56 super().__init__(active_dims=[0])
57 self.variance = gpflow.Parameter(1.0, transform=positive())
58
59 def K(self, X, X2=None):
60 if X2 is None:
61 X2 = X
62 return self.variance * tf.minimum(
63 X, tf.transpose(X2)
64 ) # this returns a 2D tensor
65
66 def K_diag(self, X):
67 return self.variance * tf.reshape(X, (-1,)) # this returns a 1D tensor
68
69
70 k_brownian = Brownian()
71 print_summary(k_brownian, fmt="notebook")
72
73 # %% [markdown]
74 # We can now evaluate our new kernel function and draw samples from a Gaussian process with this covariance:
75
76 # %%
77 np.random.seed(23) # for reproducibility
78
79
80 def plotkernelsample(k, ax, xmin=0, xmax=3):
81 xx = np.linspace(xmin, xmax, 300)[:, None]
82 K = k(xx)
83 ax.plot(xx, np.random.multivariate_normal(np.zeros(300), K, 5).T)
84 ax.set_title("Samples " + k.__class__.__name__)
85
86
87 def plotkernelfunction(k, ax, xmin=0, xmax=3, other=0):
88 xx = np.linspace(xmin, xmax, 100)[:, None]
89 ax.plot(xx, k(xx, np.zeros((1, 1)) + other))
90 ax.set_title(k.__class__.__name__ + " k(x, %.1f)" % other)
91
92
93 f, axes = plt.subplots(1, 2, figsize=(12, 4), sharex=True)
94 plotkernelfunction(k_brownian, axes[0], other=2.0)
95 plotkernelsample(k_brownian, axes[1])
96
97 # %% [markdown]
98 # ## Using the kernel in a model
99 #
100 # Because we've inherited from the `Kernel` base class, this new kernel has all the properties needed to be used in GPflow. It also has some convenience features such as allowing the user to call
101 #
102 # `k(X, X2)`
103 #
104 # which computes the kernel matrix.
105 #
106 # To show that this kernel works, let's use it inside GP regression. We'll see that Brownian motion has quite interesting properties. To add a little flexibility, we'll add a `Constant` kernel to our `Brownian` kernel, and the `GPR` class will handle the noise.
107
108 # %%
109 np.random.seed(42)
110 X = np.random.rand(5, 1)
111 Y = np.sin(X * 6) + np.random.randn(*X.shape) * 0.001
112
113 k1 = Brownian()
114 k2 = gpflow.kernels.Constant()
115 k = k1 + k2
116
117 m = gpflow.models.GPR((X, Y), kernel=k)
118 # m.likelihood.variance.assign(1e-6)
119
120 opt = gpflow.optimizers.Scipy()
121 opt.minimize(m.training_loss, variables=m.trainable_variables)
122 print_summary(m, fmt="notebook")
123
124 xx = np.linspace(0, 1.1, 100).reshape(100, 1)
125 mean, var = m.predict_y(xx)
126 plt.plot(X, Y, "kx", mew=2)
127 (line,) = plt.plot(xx, mean, lw=2)
128 _ = plt.fill_between(
129 xx[:, 0],
130 mean[:, 0] - 2 * np.sqrt(var[:, 0]),
131 mean[:, 0] + 2 * np.sqrt(var[:, 0]),
132 color=line.get_color(),
133 alpha=0.2,
134 )
135
136 # %% [markdown]
137 # ## See also
138 #
139 # For more details on how to manipulate existing kernels (or the one you just created!), we refer to the [Manipulating kernels](../advanced/kernels.ipynb) notebook.
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/doc/sphinx/notebooks/tailor/kernel_design.pct.py b/doc/sphinx/notebooks/tailor/kernel_design.pct.py
--- a/doc/sphinx/notebooks/tailor/kernel_design.pct.py
+++ b/doc/sphinx/notebooks/tailor/kernel_design.pct.py
@@ -136,4 +136,4 @@
# %% [markdown]
# ## See also
#
-# For more details on how to manipulate existing kernels (or the one you just created!), we refer to the [Manipulating kernels](../advanced/kernels.ipynb) notebook.
+# For more details on how to manipulate existing kernels (or the one you just created!), please refer to the [kernels](../getting_started/kernels.ipynb) notebook.
| {"golden_diff": "diff --git a/doc/sphinx/notebooks/tailor/kernel_design.pct.py b/doc/sphinx/notebooks/tailor/kernel_design.pct.py\n--- a/doc/sphinx/notebooks/tailor/kernel_design.pct.py\n+++ b/doc/sphinx/notebooks/tailor/kernel_design.pct.py\n@@ -136,4 +136,4 @@\n # %% [markdown]\n # ## See also\n #\n-# For more details on how to manipulate existing kernels (or the one you just created!), we refer to the [Manipulating kernels](../advanced/kernels.ipynb) notebook.\n+# For more details on how to manipulate existing kernels (or the one you just created!), please refer to the [kernels](../getting_started/kernels.ipynb) notebook.\n", "issue": "Missing Reference to Manipulating Kernels Page\n# Documentation/tutorial notebooks\r\n\r\nIn the [Kernel Design page](https://gpflow.github.io/GPflow/2.7.0/notebooks/tailor/kernel_design.html), there is a missing reference to the \"Manipulating Kernels\" notebook at the end. This notebook seems accessible from the old documentations, up to [2.6.4](https://gpflow.github.io/GPflow/2.6.4/notebooks/advanced/kernels.html).\r\n\r\nIt seems for some reason, this page was removed. Maybe it was considered unnecessary, as some information is given in the getting started page but I disagree. I believe it gives a more comprehensive review of the available kernel implementations, so it would be nice to have it back.\r\n\r\nAs a side note, for some reason 2.6.4 documentation insist on having dark theme for me, but I like the light theme better. Is there an option to change this? I am forced to clean the cookies to get a light background.\n", "before_files": [{"content": "# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,.pct.py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.3.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# # Kernel Design\n#\n# It's easy to make new kernels in GPflow. To demonstrate, we'll have a look at the Brownian motion kernel, whose function is\n# \\begin{equation}\n# k(x, x') = \\sigma^2 \\text{min}(x, x')\n# \\end{equation}\n# where $\\sigma^2$ is a variance parameter.\n\n# %%\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\nimport gpflow\nfrom gpflow.utilities import positive, print_summary\n\nplt.style.use(\"ggplot\")\n# %matplotlib inline\n\n# %% [markdown]\n# To make this new kernel class, we inherit from the base class `gpflow.kernels.Kernel` and implement the three functions below. **NOTE:** Depending on the kernel to be implemented, other classes can be more adequate. For example, if the kernel to be implemented is isotropic stationary, you can immediately subclass `gpflow.kernels.IsotropicStationary` (at which point you\n# only have to override `K_r` or `K_r2`; see the `IsotropicStationary` class docstring). Stationary but anisotropic kernels should subclass `gpflow.kernels.AnisotropicStationary` and override `K_d`.\n#\n# #### `__init__`\n# In this simple example, the constructor takes no argument (though it could, if that was convenient, for example to pass in an initial value for `variance`). It *must* call the constructor of the superclass with appropriate arguments. Brownian motion is only defined in one dimension, and we'll assume that the `active_dims` are `[0]`, for simplicity.\n#\n# We've added a parameter to the kernel using the `Parameter` class. 
Using this class lets the parameter be used in computing the kernel function, and it will automatically be recognised for optimization (or MCMC). Here, the variance parameter is initialized at 1, and constrained to be positive.\n#\n# #### `K`\n# This is where you implement the kernel function itself. This takes two arguments, `X` and `X2`. By convention, we make the second argument optional (it defaults to `None`).\n#\n# Inside `K`, all the computation must be done with TensorFlow - here we've used `tf.minimum`. When GPflow executes the `K` function, `X` and `X2` will be TensorFlow tensors, and parameters such as `self.variance` behave like TensorFlow tensors as well.\n#\n# #### `K_diag`\n# This convenience function allows GPflow to save memory at predict time. It's simply the diagonal of the `K` function, in the case where `X2` is `None`. It must return a one-dimensional vector, so we use TensorFlow's reshape command.\n\n# %%\nclass Brownian(gpflow.kernels.Kernel):\n def __init__(self):\n super().__init__(active_dims=[0])\n self.variance = gpflow.Parameter(1.0, transform=positive())\n\n def K(self, X, X2=None):\n if X2 is None:\n X2 = X\n return self.variance * tf.minimum(\n X, tf.transpose(X2)\n ) # this returns a 2D tensor\n\n def K_diag(self, X):\n return self.variance * tf.reshape(X, (-1,)) # this returns a 1D tensor\n\n\nk_brownian = Brownian()\nprint_summary(k_brownian, fmt=\"notebook\")\n\n# %% [markdown]\n# We can now evaluate our new kernel function and draw samples from a Gaussian process with this covariance:\n\n# %%\nnp.random.seed(23) # for reproducibility\n\n\ndef plotkernelsample(k, ax, xmin=0, xmax=3):\n xx = np.linspace(xmin, xmax, 300)[:, None]\n K = k(xx)\n ax.plot(xx, np.random.multivariate_normal(np.zeros(300), K, 5).T)\n ax.set_title(\"Samples \" + k.__class__.__name__)\n\n\ndef plotkernelfunction(k, ax, xmin=0, xmax=3, other=0):\n xx = np.linspace(xmin, xmax, 100)[:, None]\n ax.plot(xx, k(xx, np.zeros((1, 1)) + other))\n ax.set_title(k.__class__.__name__ + \" k(x, %.1f)\" % other)\n\n\nf, axes = plt.subplots(1, 2, figsize=(12, 4), sharex=True)\nplotkernelfunction(k_brownian, axes[0], other=2.0)\nplotkernelsample(k_brownian, axes[1])\n\n# %% [markdown]\n# ## Using the kernel in a model\n#\n# Because we've inherited from the `Kernel` base class, this new kernel has all the properties needed to be used in GPflow. It also has some convenience features such as allowing the user to call\n#\n# `k(X, X2)`\n#\n# which computes the kernel matrix.\n#\n# To show that this kernel works, let's use it inside GP regression. We'll see that Brownian motion has quite interesting properties. 
To add a little flexibility, we'll add a `Constant` kernel to our `Brownian` kernel, and the `GPR` class will handle the noise.\n\n# %%\nnp.random.seed(42)\nX = np.random.rand(5, 1)\nY = np.sin(X * 6) + np.random.randn(*X.shape) * 0.001\n\nk1 = Brownian()\nk2 = gpflow.kernels.Constant()\nk = k1 + k2\n\nm = gpflow.models.GPR((X, Y), kernel=k)\n# m.likelihood.variance.assign(1e-6)\n\nopt = gpflow.optimizers.Scipy()\nopt.minimize(m.training_loss, variables=m.trainable_variables)\nprint_summary(m, fmt=\"notebook\")\n\nxx = np.linspace(0, 1.1, 100).reshape(100, 1)\nmean, var = m.predict_y(xx)\nplt.plot(X, Y, \"kx\", mew=2)\n(line,) = plt.plot(xx, mean, lw=2)\n_ = plt.fill_between(\n xx[:, 0],\n mean[:, 0] - 2 * np.sqrt(var[:, 0]),\n mean[:, 0] + 2 * np.sqrt(var[:, 0]),\n color=line.get_color(),\n alpha=0.2,\n)\n\n# %% [markdown]\n# ## See also\n#\n# For more details on how to manipulate existing kernels (or the one you just created!), we refer to the [Manipulating kernels](../advanced/kernels.ipynb) notebook.\n", "path": "doc/sphinx/notebooks/tailor/kernel_design.pct.py"}], "after_files": [{"content": "# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,.pct.py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.3.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# # Kernel Design\n#\n# It's easy to make new kernels in GPflow. To demonstrate, we'll have a look at the Brownian motion kernel, whose function is\n# \\begin{equation}\n# k(x, x') = \\sigma^2 \\text{min}(x, x')\n# \\end{equation}\n# where $\\sigma^2$ is a variance parameter.\n\n# %%\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\nimport gpflow\nfrom gpflow.utilities import positive, print_summary\n\nplt.style.use(\"ggplot\")\n# %matplotlib inline\n\n# %% [markdown]\n# To make this new kernel class, we inherit from the base class `gpflow.kernels.Kernel` and implement the three functions below. **NOTE:** Depending on the kernel to be implemented, other classes can be more adequate. For example, if the kernel to be implemented is isotropic stationary, you can immediately subclass `gpflow.kernels.IsotropicStationary` (at which point you\n# only have to override `K_r` or `K_r2`; see the `IsotropicStationary` class docstring). Stationary but anisotropic kernels should subclass `gpflow.kernels.AnisotropicStationary` and override `K_d`.\n#\n# #### `__init__`\n# In this simple example, the constructor takes no argument (though it could, if that was convenient, for example to pass in an initial value for `variance`). It *must* call the constructor of the superclass with appropriate arguments. Brownian motion is only defined in one dimension, and we'll assume that the `active_dims` are `[0]`, for simplicity.\n#\n# We've added a parameter to the kernel using the `Parameter` class. Using this class lets the parameter be used in computing the kernel function, and it will automatically be recognised for optimization (or MCMC). Here, the variance parameter is initialized at 1, and constrained to be positive.\n#\n# #### `K`\n# This is where you implement the kernel function itself. This takes two arguments, `X` and `X2`. By convention, we make the second argument optional (it defaults to `None`).\n#\n# Inside `K`, all the computation must be done with TensorFlow - here we've used `tf.minimum`. 
When GPflow executes the `K` function, `X` and `X2` will be TensorFlow tensors, and parameters such as `self.variance` behave like TensorFlow tensors as well.\n#\n# #### `K_diag`\n# This convenience function allows GPflow to save memory at predict time. It's simply the diagonal of the `K` function, in the case where `X2` is `None`. It must return a one-dimensional vector, so we use TensorFlow's reshape command.\n\n# %%\nclass Brownian(gpflow.kernels.Kernel):\n def __init__(self):\n super().__init__(active_dims=[0])\n self.variance = gpflow.Parameter(1.0, transform=positive())\n\n def K(self, X, X2=None):\n if X2 is None:\n X2 = X\n return self.variance * tf.minimum(\n X, tf.transpose(X2)\n ) # this returns a 2D tensor\n\n def K_diag(self, X):\n return self.variance * tf.reshape(X, (-1,)) # this returns a 1D tensor\n\n\nk_brownian = Brownian()\nprint_summary(k_brownian, fmt=\"notebook\")\n\n# %% [markdown]\n# We can now evaluate our new kernel function and draw samples from a Gaussian process with this covariance:\n\n# %%\nnp.random.seed(23) # for reproducibility\n\n\ndef plotkernelsample(k, ax, xmin=0, xmax=3):\n xx = np.linspace(xmin, xmax, 300)[:, None]\n K = k(xx)\n ax.plot(xx, np.random.multivariate_normal(np.zeros(300), K, 5).T)\n ax.set_title(\"Samples \" + k.__class__.__name__)\n\n\ndef plotkernelfunction(k, ax, xmin=0, xmax=3, other=0):\n xx = np.linspace(xmin, xmax, 100)[:, None]\n ax.plot(xx, k(xx, np.zeros((1, 1)) + other))\n ax.set_title(k.__class__.__name__ + \" k(x, %.1f)\" % other)\n\n\nf, axes = plt.subplots(1, 2, figsize=(12, 4), sharex=True)\nplotkernelfunction(k_brownian, axes[0], other=2.0)\nplotkernelsample(k_brownian, axes[1])\n\n# %% [markdown]\n# ## Using the kernel in a model\n#\n# Because we've inherited from the `Kernel` base class, this new kernel has all the properties needed to be used in GPflow. It also has some convenience features such as allowing the user to call\n#\n# `k(X, X2)`\n#\n# which computes the kernel matrix.\n#\n# To show that this kernel works, let's use it inside GP regression. We'll see that Brownian motion has quite interesting properties. To add a little flexibility, we'll add a `Constant` kernel to our `Brownian` kernel, and the `GPR` class will handle the noise.\n\n# %%\nnp.random.seed(42)\nX = np.random.rand(5, 1)\nY = np.sin(X * 6) + np.random.randn(*X.shape) * 0.001\n\nk1 = Brownian()\nk2 = gpflow.kernels.Constant()\nk = k1 + k2\n\nm = gpflow.models.GPR((X, Y), kernel=k)\n# m.likelihood.variance.assign(1e-6)\n\nopt = gpflow.optimizers.Scipy()\nopt.minimize(m.training_loss, variables=m.trainable_variables)\nprint_summary(m, fmt=\"notebook\")\n\nxx = np.linspace(0, 1.1, 100).reshape(100, 1)\nmean, var = m.predict_y(xx)\nplt.plot(X, Y, \"kx\", mew=2)\n(line,) = plt.plot(xx, mean, lw=2)\n_ = plt.fill_between(\n xx[:, 0],\n mean[:, 0] - 2 * np.sqrt(var[:, 0]),\n mean[:, 0] + 2 * np.sqrt(var[:, 0]),\n color=line.get_color(),\n alpha=0.2,\n)\n\n# %% [markdown]\n# ## See also\n#\n# For more details on how to manipulate existing kernels (or the one you just created!), please refer to the [kernels](../getting_started/kernels.ipynb) notebook.\n", "path": "doc/sphinx/notebooks/tailor/kernel_design.pct.py"}]} | 2,350 | 168 |
gh_patches_debug_40630 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-6617 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
docs.opnfv.org is 'Not Found.'
## Details
The entire documentation site for OPNFV is missing. The most recent builds have succeeded, and as far as I know DNS hasn't changed recently.
* Read the Docs project URL: opnfvdocsdemo.readthedocs.io
* Build URL (if applicable): https://readthedocs.org/projects/opnfvdocsdemo/builds/
* Read the Docs username (if applicable):
## Expected Result
Going to https://docs.opnfv.org/ returns the documentation site.
## Actual Result
```
curl -i -L https://opnfvdocsdemo.readthedocs.io/
HTTP/1.1 302 Found
Content-Type: text/html; charset=utf-8
Location: http://docs.opnfv.org/en/stable-hunter/
Server: nginx
X-Frame-Options: DENY
x-content-type-options: nosniff
x-xss-protection: 1; mode=block
X-Served: Django-Proxito
X-Deity: web04
Strict-Transport-Security: max-age=31536000; includeSubDomains
Date: Wed, 29 Jan 2020 23:13:29 GMT
Content-Length: 0
HTTP/1.1 301 Moved Permanently
Server: CloudFront
Date: Wed, 29 Jan 2020 23:13:29 GMT
Content-Type: text/html
Content-Length: 183
Connection: keep-alive
Location: https://docs.opnfv.org/en/stable-hunter/
X-Cache: Redirect from cloudfront
Via: 1.1 5ab5dc09da67e3ea794ec8a82992cc89.cloudfront.net (CloudFront)
X-Amz-Cf-Pop: HIO50-C1
X-Amz-Cf-Id: 0_rJ9aN8nFAFm6M9VPcWPWHa7B8QOaSW1_Y3Llttz31ZTaK03cTaYQ==
HTTP/2 404
content-type: text/html; charset=utf-8
content-length: 10
server: nginx
x-frame-options: DENY
x-content-type-options: nosniff
x-xss-protection: 1; mode=block
x-served: Proxito-404
x-deity: web03
strict-transport-security: max-age=0
date: Wed, 29 Jan 2020 23:13:30 GMT
x-cache: Miss from cloudfront
via: 1.1 1b0911478686968732f973d6e5e31d11.cloudfront.net (CloudFront)
x-amz-cf-pop: HIO50-C1
x-amz-cf-id: sRmKIeU3LyXtKb93316GUwkxqiChktuq227k3nhDcOPqU-78E7JFTA==
Not Found.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/proxito/middleware.py`
Content:
```
1 """
2 Middleware for Proxito.
3
4 This is used to take the request and map the host to the proper project slug.
5
6 Additional processing is done to get the project from the URL in the ``views.py`` as well.
7 """
8 import logging
9
10 from django.conf import settings
11 from django.shortcuts import render
12 from django.utils.deprecation import MiddlewareMixin
13
14 from readthedocs.projects.models import Domain
15
16 log = logging.getLogger(__name__) # noqa
17
18
19 def map_host_to_project_slug(request):
20 """
21 Take the request and map the host to the proper project slug.
22
23 We check, in order:
24
25 * The ``HTTP_X_RTD_SLUG`` host header for explicit Project mapping
26 - This sets ``request.rtdheader`` True
27 * The ``PUBLIC_DOMAIN`` where we can use the subdomain as the project name
28 - This sets ``request.subdomain`` True
29 * The hostname without port information, which maps to ``Domain`` objects
30 - This sets ``request.cname`` True
31 """
32
33 host = request.get_host().lower().split(':')[0]
34 public_domain = settings.PUBLIC_DOMAIN.lower().split(':')[0]
35 host_parts = host.split('.')
36 public_domain_parts = public_domain.split('.')
37
38 project_slug = None
39
40 # Explicit Project slug being passed in
41 if 'HTTP_X_RTD_SLUG' in request.META:
42 project_slug = request.META['HTTP_X_RTD_SLUG'].lower()
43 request.rtdheader = True
44 log.info('Setting project based on X_RTD_SLUG header: %s' % project_slug)
45
46 elif public_domain in host or host == 'proxito':
47 # Serve from the PUBLIC_DOMAIN, ensuring it looks like `foo.PUBLIC_DOMAIN`
48 if public_domain_parts == host_parts[1:]:
49 project_slug = host_parts[0]
50 request.subdomain = True
51 log.debug('Proxito Public Domain: host=%s', host)
52 else:
53 # TODO: This can catch some possibly valid domains (docs.readthedocs.io.com) for example
54 # But these feel like they might be phishing, etc. so let's block them for now.
55 log.warning('Weird variation on our hostname: host=%s', host)
56 return render(
57 request, 'core/dns-404.html', context={'host': host}, status=400
58 )
59
60 # Serve CNAMEs
61 else:
62 domain = Domain.objects.filter(domain=host).first()
63 if domain:
64 project_slug = domain.project.slug
65 request.cname = True
66 log.debug('Proxito CNAME: host=%s', host)
67 else:
68 # Some person is CNAMEing to us without configuring a domain - 404.
69 log.debug('CNAME 404: host=%s', host)
70 return render(
71 request, 'core/dns-404.html', context={'host': host}, status=404
72 )
73
74 log.debug('Proxito Project: slug=%s', project_slug)
75 return project_slug
76
77
78 class ProxitoMiddleware(MiddlewareMixin):
79
80 """The actual middleware we'll be using in prod."""
81
82 def process_request(self, request): # noqa
83 if any([not settings.USE_SUBDOMAIN, 'localhost' in request.get_host(),
84 'testserver' in request.get_host()]):
85 log.debug('Not processing Proxito middleware')
86 return None
87
88 ret = map_host_to_project_slug(request)
89
90 # Handle returning a response
91 if hasattr(ret, 'status_code'):
92 return ret
93
94 # Otherwise set the slug on the request
95 request.host_project_slug = request.slug = ret
96
97 return None
98
```
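To make the resolution order described in the docstring above concrete, here is a small, hypothetical sketch (not part of the files under review) that exercises `map_host_to_project_slug` with Django's test `RequestFactory`. It assumes a configured Django settings module with `PUBLIC_DOMAIN = "readthedocs.io"` and a populated database; the host names and slugs are illustrative only.

```python
# Hypothetical illustration only -- not part of the files under review.
# Assumes DJANGO_SETTINGS_MODULE is set and PUBLIC_DOMAIN = "readthedocs.io".
from django.test import RequestFactory

from readthedocs.proxito.middleware import map_host_to_project_slug

factory = RequestFactory()

# Public-domain case: "pip.readthedocs.io" resolves to the "pip" slug and
# sets request.subdomain = True.
request = factory.get("/", HTTP_HOST="pip.readthedocs.io")
print(map_host_to_project_slug(request))

# Custom-domain (CNAME) case: the host is looked up in the Domain table.
# If no Domain row matches, the function returns a rendered
# "core/dns-404.html" HttpResponse instead of a slug.
request = factory.get("/", HTTP_HOST="docs.opnfv.org")
print(map_host_to_project_slug(request))
```

Note that the patch further down in this record tightens the first branch of this function so that an `X-RTD-Slug` header (seen as `HTTP_X_RTD_SLUG` in `request.META`) is only honoured when it names an existing `Project`.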
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/proxito/middleware.py b/readthedocs/proxito/middleware.py
--- a/readthedocs/proxito/middleware.py
+++ b/readthedocs/proxito/middleware.py
@@ -11,7 +11,7 @@
from django.shortcuts import render
from django.utils.deprecation import MiddlewareMixin
-from readthedocs.projects.models import Domain
+from readthedocs.projects.models import Domain, Project
log = logging.getLogger(__name__) # noqa
@@ -40,39 +40,38 @@
# Explicit Project slug being passed in
if 'HTTP_X_RTD_SLUG' in request.META:
project_slug = request.META['HTTP_X_RTD_SLUG'].lower()
- request.rtdheader = True
- log.info('Setting project based on X_RTD_SLUG header: %s' % project_slug)
+ if Project.objects.filter(slug=project_slug).exists():
+ request.rtdheader = True
+ log.info('Setting project based on X_RTD_SLUG header: %s', project_slug)
+ return project_slug
- elif public_domain in host or host == 'proxito':
+ if public_domain in host or host == 'proxito':
# Serve from the PUBLIC_DOMAIN, ensuring it looks like `foo.PUBLIC_DOMAIN`
if public_domain_parts == host_parts[1:]:
project_slug = host_parts[0]
request.subdomain = True
log.debug('Proxito Public Domain: host=%s', host)
- else:
- # TODO: This can catch some possibly valid domains (docs.readthedocs.io.com) for example
- # But these feel like they might be phishing, etc. so let's block them for now.
- log.warning('Weird variation on our hostname: host=%s', host)
- return render(
- request, 'core/dns-404.html', context={'host': host}, status=400
- )
+ return project_slug
+ # TODO: This can catch some possibly valid domains (docs.readthedocs.io.com) for example
+ # But these feel like they might be phishing, etc. so let's block them for now.
+ log.warning('Weird variation on our hostname: host=%s', host)
+ return render(
+ request, 'core/dns-404.html', context={'host': host}, status=400
+ )
# Serve CNAMEs
- else:
- domain = Domain.objects.filter(domain=host).first()
- if domain:
- project_slug = domain.project.slug
- request.cname = True
- log.debug('Proxito CNAME: host=%s', host)
- else:
- # Some person is CNAMEing to us without configuring a domain - 404.
- log.debug('CNAME 404: host=%s', host)
- return render(
- request, 'core/dns-404.html', context={'host': host}, status=404
- )
-
- log.debug('Proxito Project: slug=%s', project_slug)
- return project_slug
+ domain = Domain.objects.filter(domain=host).first()
+ if domain:
+ project_slug = domain.project.slug
+ request.cname = True
+ log.debug('Proxito CNAME: host=%s', host)
+ return project_slug
+
+ # Some person is CNAMEing to us without configuring a domain - 404.
+ log.debug('CNAME 404: host=%s', host)
+ return render(
+ request, 'core/dns-404.html', context={'host': host}, status=404
+ )
class ProxitoMiddleware(MiddlewareMixin):
@@ -91,6 +90,8 @@
if hasattr(ret, 'status_code'):
return ret
+ log.debug('Proxito Project: slug=%s', ret)
+
# Otherwise set the slug on the request
request.host_project_slug = request.slug = ret
| {"golden_diff": "diff --git a/readthedocs/proxito/middleware.py b/readthedocs/proxito/middleware.py\n--- a/readthedocs/proxito/middleware.py\n+++ b/readthedocs/proxito/middleware.py\n@@ -11,7 +11,7 @@\n from django.shortcuts import render\n from django.utils.deprecation import MiddlewareMixin\n \n-from readthedocs.projects.models import Domain\n+from readthedocs.projects.models import Domain, Project\n \n log = logging.getLogger(__name__) # noqa\n \n@@ -40,39 +40,38 @@\n # Explicit Project slug being passed in\n if 'HTTP_X_RTD_SLUG' in request.META:\n project_slug = request.META['HTTP_X_RTD_SLUG'].lower()\n- request.rtdheader = True\n- log.info('Setting project based on X_RTD_SLUG header: %s' % project_slug)\n+ if Project.objects.filter(slug=project_slug).exists():\n+ request.rtdheader = True\n+ log.info('Setting project based on X_RTD_SLUG header: %s', project_slug)\n+ return project_slug\n \n- elif public_domain in host or host == 'proxito':\n+ if public_domain in host or host == 'proxito':\n # Serve from the PUBLIC_DOMAIN, ensuring it looks like `foo.PUBLIC_DOMAIN`\n if public_domain_parts == host_parts[1:]:\n project_slug = host_parts[0]\n request.subdomain = True\n log.debug('Proxito Public Domain: host=%s', host)\n- else:\n- # TODO: This can catch some possibly valid domains (docs.readthedocs.io.com) for example\n- # But these feel like they might be phishing, etc. so let's block them for now.\n- log.warning('Weird variation on our hostname: host=%s', host)\n- return render(\n- request, 'core/dns-404.html', context={'host': host}, status=400\n- )\n+ return project_slug\n+ # TODO: This can catch some possibly valid domains (docs.readthedocs.io.com) for example\n+ # But these feel like they might be phishing, etc. so let's block them for now.\n+ log.warning('Weird variation on our hostname: host=%s', host)\n+ return render(\n+ request, 'core/dns-404.html', context={'host': host}, status=400\n+ )\n \n # Serve CNAMEs\n- else:\n- domain = Domain.objects.filter(domain=host).first()\n- if domain:\n- project_slug = domain.project.slug\n- request.cname = True\n- log.debug('Proxito CNAME: host=%s', host)\n- else:\n- # Some person is CNAMEing to us without configuring a domain - 404.\n- log.debug('CNAME 404: host=%s', host)\n- return render(\n- request, 'core/dns-404.html', context={'host': host}, status=404\n- )\n-\n- log.debug('Proxito Project: slug=%s', project_slug)\n- return project_slug\n+ domain = Domain.objects.filter(domain=host).first()\n+ if domain:\n+ project_slug = domain.project.slug\n+ request.cname = True\n+ log.debug('Proxito CNAME: host=%s', host)\n+ return project_slug\n+\n+ # Some person is CNAMEing to us without configuring a domain - 404.\n+ log.debug('CNAME 404: host=%s', host)\n+ return render(\n+ request, 'core/dns-404.html', context={'host': host}, status=404\n+ )\n \n \n class ProxitoMiddleware(MiddlewareMixin):\n@@ -91,6 +90,8 @@\n if hasattr(ret, 'status_code'):\n return ret\n \n+ log.debug('Proxito Project: slug=%s', ret)\n+\n # Otherwise set the slug on the request\n request.host_project_slug = request.slug = ret\n", "issue": "docs.opnfv.org is 'Not Found.'\n## Details\r\n\r\nThe entire documentation site for OPNFV is missing. 
The most recent builds have succeeded, and as far as I know DNS hasn't changed recently.\r\n\r\n* Read the Docs project URL: opnfvdocsdemo.readthedocs.io\r\n* Build URL (if applicable): https://readthedocs.org/projects/opnfvdocsdemo/builds/\r\n* Read the Docs username (if applicable):\r\n\r\n## Expected Result\r\n\r\nGoing to https://docs.opnfv.org/ returns the documentation site.\r\n\r\n## Actual Result\r\n```\r\ncurl -i -L https://opnfvdocsdemo.readthedocs.io/\r\nHTTP/1.1 302 Found\r\nContent-Type: text/html; charset=utf-8\r\nLocation: http://docs.opnfv.org/en/stable-hunter/\r\nServer: nginx\r\nX-Frame-Options: DENY\r\nx-content-type-options: nosniff\r\nx-xss-protection: 1; mode=block\r\nX-Served: Django-Proxito\r\nX-Deity: web04\r\nStrict-Transport-Security: max-age=31536000; includeSubDomains\r\nDate: Wed, 29 Jan 2020 23:13:29 GMT\r\nContent-Length: 0\r\n\r\nHTTP/1.1 301 Moved Permanently\r\nServer: CloudFront\r\nDate: Wed, 29 Jan 2020 23:13:29 GMT\r\nContent-Type: text/html\r\nContent-Length: 183\r\nConnection: keep-alive\r\nLocation: https://docs.opnfv.org/en/stable-hunter/\r\nX-Cache: Redirect from cloudfront\r\nVia: 1.1 5ab5dc09da67e3ea794ec8a82992cc89.cloudfront.net (CloudFront)\r\nX-Amz-Cf-Pop: HIO50-C1\r\nX-Amz-Cf-Id: 0_rJ9aN8nFAFm6M9VPcWPWHa7B8QOaSW1_Y3Llttz31ZTaK03cTaYQ==\r\n\r\nHTTP/2 404 \r\ncontent-type: text/html; charset=utf-8\r\ncontent-length: 10\r\nserver: nginx\r\nx-frame-options: DENY\r\nx-content-type-options: nosniff\r\nx-xss-protection: 1; mode=block\r\nx-served: Proxito-404\r\nx-deity: web03\r\nstrict-transport-security: max-age=0\r\ndate: Wed, 29 Jan 2020 23:13:30 GMT\r\nx-cache: Miss from cloudfront\r\nvia: 1.1 1b0911478686968732f973d6e5e31d11.cloudfront.net (CloudFront)\r\nx-amz-cf-pop: HIO50-C1\r\nx-amz-cf-id: sRmKIeU3LyXtKb93316GUwkxqiChktuq227k3nhDcOPqU-78E7JFTA==\r\n\r\nNot Found.\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nMiddleware for Proxito.\n\nThis is used to take the request and map the host to the proper project slug.\n\nAdditional processing is done to get the project from the URL in the ``views.py`` as well.\n\"\"\"\nimport logging\n\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.utils.deprecation import MiddlewareMixin\n\nfrom readthedocs.projects.models import Domain\n\nlog = logging.getLogger(__name__) # noqa\n\n\ndef map_host_to_project_slug(request):\n \"\"\"\n Take the request and map the host to the proper project slug.\n\n We check, in order:\n\n * The ``HTTP_X_RTD_SLUG`` host header for explicit Project mapping\n - This sets ``request.rtdheader`` True\n * The ``PUBLIC_DOMAIN`` where we can use the subdomain as the project name\n - This sets ``request.subdomain`` True\n * The hostname without port information, which maps to ``Domain`` objects\n - This sets ``request.cname`` True\n \"\"\"\n\n host = request.get_host().lower().split(':')[0]\n public_domain = settings.PUBLIC_DOMAIN.lower().split(':')[0]\n host_parts = host.split('.')\n public_domain_parts = public_domain.split('.')\n\n project_slug = None\n\n # Explicit Project slug being passed in\n if 'HTTP_X_RTD_SLUG' in request.META:\n project_slug = request.META['HTTP_X_RTD_SLUG'].lower()\n request.rtdheader = True\n log.info('Setting project based on X_RTD_SLUG header: %s' % project_slug)\n\n elif public_domain in host or host == 'proxito':\n # Serve from the PUBLIC_DOMAIN, ensuring it looks like `foo.PUBLIC_DOMAIN`\n if public_domain_parts == host_parts[1:]:\n project_slug = host_parts[0]\n request.subdomain = True\n log.debug('Proxito 
Public Domain: host=%s', host)\n else:\n # TODO: This can catch some possibly valid domains (docs.readthedocs.io.com) for example\n # But these feel like they might be phishing, etc. so let's block them for now.\n log.warning('Weird variation on our hostname: host=%s', host)\n return render(\n request, 'core/dns-404.html', context={'host': host}, status=400\n )\n\n # Serve CNAMEs\n else:\n domain = Domain.objects.filter(domain=host).first()\n if domain:\n project_slug = domain.project.slug\n request.cname = True\n log.debug('Proxito CNAME: host=%s', host)\n else:\n # Some person is CNAMEing to us without configuring a domain - 404.\n log.debug('CNAME 404: host=%s', host)\n return render(\n request, 'core/dns-404.html', context={'host': host}, status=404\n )\n\n log.debug('Proxito Project: slug=%s', project_slug)\n return project_slug\n\n\nclass ProxitoMiddleware(MiddlewareMixin):\n\n \"\"\"The actual middleware we'll be using in prod.\"\"\"\n\n def process_request(self, request): # noqa\n if any([not settings.USE_SUBDOMAIN, 'localhost' in request.get_host(),\n 'testserver' in request.get_host()]):\n log.debug('Not processing Proxito middleware')\n return None\n\n ret = map_host_to_project_slug(request)\n\n # Handle returning a response\n if hasattr(ret, 'status_code'):\n return ret\n\n # Otherwise set the slug on the request\n request.host_project_slug = request.slug = ret\n\n return None\n", "path": "readthedocs/proxito/middleware.py"}], "after_files": [{"content": "\"\"\"\nMiddleware for Proxito.\n\nThis is used to take the request and map the host to the proper project slug.\n\nAdditional processing is done to get the project from the URL in the ``views.py`` as well.\n\"\"\"\nimport logging\n\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.utils.deprecation import MiddlewareMixin\n\nfrom readthedocs.projects.models import Domain, Project\n\nlog = logging.getLogger(__name__) # noqa\n\n\ndef map_host_to_project_slug(request):\n \"\"\"\n Take the request and map the host to the proper project slug.\n\n We check, in order:\n\n * The ``HTTP_X_RTD_SLUG`` host header for explicit Project mapping\n - This sets ``request.rtdheader`` True\n * The ``PUBLIC_DOMAIN`` where we can use the subdomain as the project name\n - This sets ``request.subdomain`` True\n * The hostname without port information, which maps to ``Domain`` objects\n - This sets ``request.cname`` True\n \"\"\"\n\n host = request.get_host().lower().split(':')[0]\n public_domain = settings.PUBLIC_DOMAIN.lower().split(':')[0]\n host_parts = host.split('.')\n public_domain_parts = public_domain.split('.')\n\n project_slug = None\n\n # Explicit Project slug being passed in\n if 'HTTP_X_RTD_SLUG' in request.META:\n project_slug = request.META['HTTP_X_RTD_SLUG'].lower()\n if Project.objects.filter(slug=project_slug).exists():\n request.rtdheader = True\n log.info('Setting project based on X_RTD_SLUG header: %s', project_slug)\n return project_slug\n\n if public_domain in host or host == 'proxito':\n # Serve from the PUBLIC_DOMAIN, ensuring it looks like `foo.PUBLIC_DOMAIN`\n if public_domain_parts == host_parts[1:]:\n project_slug = host_parts[0]\n request.subdomain = True\n log.debug('Proxito Public Domain: host=%s', host)\n return project_slug\n # TODO: This can catch some possibly valid domains (docs.readthedocs.io.com) for example\n # But these feel like they might be phishing, etc. 
so let's block them for now.\n log.warning('Weird variation on our hostname: host=%s', host)\n return render(\n request, 'core/dns-404.html', context={'host': host}, status=400\n )\n\n # Serve CNAMEs\n domain = Domain.objects.filter(domain=host).first()\n if domain:\n project_slug = domain.project.slug\n request.cname = True\n log.debug('Proxito CNAME: host=%s', host)\n return project_slug\n\n # Some person is CNAMEing to us without configuring a domain - 404.\n log.debug('CNAME 404: host=%s', host)\n return render(\n request, 'core/dns-404.html', context={'host': host}, status=404\n )\n\n\nclass ProxitoMiddleware(MiddlewareMixin):\n\n \"\"\"The actual middleware we'll be using in prod.\"\"\"\n\n def process_request(self, request): # noqa\n if any([not settings.USE_SUBDOMAIN, 'localhost' in request.get_host(),\n 'testserver' in request.get_host()]):\n log.debug('Not processing Proxito middleware')\n return None\n\n ret = map_host_to_project_slug(request)\n\n # Handle returning a response\n if hasattr(ret, 'status_code'):\n return ret\n\n log.debug('Proxito Project: slug=%s', ret)\n\n # Otherwise set the slug on the request\n request.host_project_slug = request.slug = ret\n\n return None\n", "path": "readthedocs/proxito/middleware.py"}]} | 1,955 | 897 |
gh_patches_debug_42841 | rasdani/github-patches | git_diff | learningequality__kolibri-6868 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
names of CSV files should be internationalized
### Observed behavior
The names of files when exporting and downloading from the facility data page are all in English:
> *(screenshot of the English-only export filenames omitted)*

ref: https://github.com/learningequality/kolibri/pull/6835
### Expected behavior
Names of files should be in the user's currently selected language. This is the same behavior as currently exists in Coach CSV export.
### User-facing consequences
The meaning of the file names is not understandable in the user's preferred language
### Context
0.13.2
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/plugins/facility/views.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import print_function
3 from __future__ import unicode_literals
4
5 import io
6 import os
7 from datetime import datetime
8
9 from django.http import Http404
10 from django.http.response import FileResponse
11 from django.utils.decorators import method_decorator
12 from django.views.generic.base import TemplateView
13
14 from kolibri.core.decorators import cache_no_user_data
15 from kolibri.utils import conf
16
17
18 @method_decorator(cache_no_user_data, name="dispatch")
19 class FacilityManagementView(TemplateView):
20 template_name = "facility_management.html"
21
22
23 def download_csv_file(request, filename):
24 filepath = os.path.join(conf.KOLIBRI_HOME, "temp", filename)
25
26 # if the file does not exist on disk, return a 404
27 if filepath is None or not os.path.exists(filepath):
28 raise Http404("Creation of users export file has failed")
29
30 # generate a file response
31 response = FileResponse(io.open(filepath, "rb"))
32 # set the content-type by guessing from the filename
33 response["Content-Type"] = "text/csv"
34
35 # set the content-disposition as attachment to force download
36 response["Content-Disposition"] = "attachment; filename=users_{}.csv".format(
37 datetime.now().strftime("%Y%m%d_%H%M%S")
38 )
39
40 # set the content-length to the file size
41 response["Content-Length"] = os.path.getsize(filepath)
42
43 return response
44
```
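A quick way to see why the exported filename above is never localized: the `users_` prefix in `download_csv_file` is a plain string literal, so the name comes out the same in every interface language. A minimal, standalone sketch of that string construction (hypothetical, plain Python):

```python
# Minimal sketch of the filename produced by download_csv_file() above.
# The "users_" prefix is hard-coded English; only the timestamp varies.
from datetime import datetime

filename = "users_{}.csv".format(datetime.now().strftime("%Y%m%d_%H%M%S"))
print(filename)  # e.g. users_20200129_231330.csv
```

The patch further down in this record replaces this literal with a `pgettext`-translated, slugified string so the prefix follows the request's language.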
Path: `kolibri/core/logger/csv_export.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import csv
4 import io
5 import json
6 import logging
7 import math
8 import os
9 import sys
10 from collections import OrderedDict
11
12 from django.core.cache import cache
13 from django.http import Http404
14 from django.http import HttpResponse
15 from django.http.response import FileResponse
16
17 from .models import ContentSessionLog
18 from .models import ContentSummaryLog
19 from kolibri.core.content.models import ChannelMetadata
20 from kolibri.core.content.models import ContentNode
21 from kolibri.utils import conf
22
23
24 logger = logging.getLogger(__name__)
25
26
27 def cache_channel_name(obj):
28 channel_id = obj["channel_id"]
29 key = "{id}_ChannelMetadata_name".format(id=channel_id)
30 channel_name = cache.get(key)
31 if channel_name is None:
32 try:
33 channel_name = ChannelMetadata.objects.get(id=channel_id)
34 except ChannelMetadata.DoesNotExist:
35 channel_name = ""
36 cache.set(key, channel_name, 60 * 10)
37 return channel_name
38
39
40 def cache_content_title(obj):
41 content_id = obj["content_id"]
42 key = "{id}_ContentNode_title".format(id=content_id)
43 title = cache.get(key)
44 if title is None:
45 node = ContentNode.objects.filter(content_id=content_id).first()
46 if node:
47 title = node.title
48 else:
49 title = ""
50 cache.set(key, title, 60 * 10)
51 return title
52
53
54 mappings = {
55 "channel_name": cache_channel_name,
56 "content_title": cache_content_title,
57 "time_spent": lambda x: "{:.1f}".format(round(x["time_spent"], 1)),
58 "progress": lambda x: "{:.4f}".format(math.floor(x["progress"] * 10000.0) / 10000),
59 }
60
61 labels = OrderedDict(
62 (
63 ("user__facility__name", "Facility name"),
64 ("user__username", "Username"),
65 ("channel_id", "Channel id"),
66 ("channel_name", "Channel name"),
67 ("content_id", "Content id"),
68 ("content_title", "Content title"),
69 ("start_timestamp", "Time of first interaction"),
70 ("end_timestamp", "Time of last interaction"),
71 ("completion_timestamp", "Time of completion"),
72 ("time_spent", "Time Spent (sec)"),
73 ("progress", "Progress (0-1)"),
74 ("kind", "Content kind"),
75 )
76 )
77
78
79 def map_object(obj):
80 mapped_obj = {}
81 for header, label in labels.items():
82 if header in mappings:
83 mapped_obj[label] = mappings[header](obj)
84 elif header in obj:
85 mapped_obj[label] = obj[header]
86 return mapped_obj
87
88
89 classes_info = {
90 "session": {
91 "queryset": ContentSessionLog.objects.all(),
92 "filename": "content_session_logs.csv",
93 "db_columns": (
94 "user__username",
95 "user__facility__name",
96 "channel_id",
97 "content_id",
98 "start_timestamp",
99 "end_timestamp",
100 "time_spent",
101 "progress",
102 "kind",
103 ),
104 },
105 "summary": {
106 "queryset": ContentSummaryLog.objects.all(),
107 "filename": "content_summary_logs.csv",
108 "db_columns": (
109 "user__username",
110 "user__facility__name",
111 "content_id",
112 "channel_id",
113 "start_timestamp",
114 "end_timestamp",
115 "completion_timestamp",
116 "time_spent",
117 "progress",
118 "kind",
119 ),
120 },
121 }
122
123
124 def csv_file_generator(log_type, filepath, overwrite=False):
125 if log_type not in ("summary", "session"):
126 raise ValueError(
127 "Impossible to create a csv export file for {}".format(log_type)
128 )
129
130 log_info = classes_info[log_type]
131
132 if not overwrite and os.path.exists(filepath):
133 raise ValueError("{} already exists".format(filepath))
134 queryset = log_info["queryset"]
135
136 # Exclude completion timestamp for the sessionlog CSV
137 header_labels = tuple(
138 label
139 for label in labels.values()
140 if log_type == "summary" or label != "completion_timestamp"
141 )
142
143 if sys.version_info[0] < 3:
144 csv_file = io.open(filepath, "wb")
145 else:
146 csv_file = io.open(filepath, "w", newline="")
147
148 with csv_file as f:
149 writer = csv.DictWriter(f, header_labels)
150 logger.info("Creating csv file {filename}".format(filename=filepath))
151 writer.writeheader()
152 for item in queryset.select_related("user", "user__facility").values(
153 *log_info["db_columns"]
154 ):
155 writer.writerow(map_object(item))
156 yield
157
158
159 def exported_logs_info(request):
160 """
161 Get the last modification timestamp of the summary logs exported
162
163 :returns: An object with the files informatin
164 """
165
166 logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export")
167 csv_statuses = {}
168 csv_export_filenames = {
169 "session": "content_session_logs.csv",
170 "summary": "content_summary_logs.csv",
171 }
172 for log_type in csv_export_filenames.keys():
173 log_path = os.path.join(logs_dir, csv_export_filenames[log_type])
174 if os.path.exists(log_path):
175 csv_statuses[log_type] = os.path.getmtime(log_path)
176 else:
177 csv_statuses[log_type] = None
178
179 return HttpResponse(json.dumps(csv_statuses), content_type="application/json")
180
181
182 def download_csv_file(request, log_type):
183 csv_export_filenames = {
184 "session": "content_session_logs.csv",
185 "summary": "content_summary_logs.csv",
186 }
187 if log_type in csv_export_filenames.keys():
188 filepath = os.path.join(
189 conf.KOLIBRI_HOME, "log_export", csv_export_filenames[log_type]
190 )
191 else:
192 filepath = None
193
194 # if the file does not exist on disk, return a 404
195 if filepath is None or not os.path.exists(filepath):
196 raise Http404("There is no csv export file for {} available".format(log_type))
197
198 # generate a file response
199 response = FileResponse(io.open(filepath, "rb"))
200 # set the content-type by guessing from the filename
201 response["Content-Type"] = "text/csv"
202
203 # set the content-disposition as attachment to force download
204 response["Content-Disposition"] = "attachment; filename={}".format(
205 csv_export_filenames[log_type]
206 )
207
208 # set the content-length to the file size
209 response["Content-Length"] = os.path.getsize(filepath)
210
211 return response
212
```
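The log-export filenames above (`content_session_logs.csv` and `content_summary_logs.csv`) are plain literals as well. The patch further down localizes them with Django's `pgettext` plus `slugify`; a condensed, hypothetical sketch of that pattern (assuming Django settings and translations are configured) is:

```python
# Hypothetical sketch of the localization pattern used in the patch below.
# The msgid mirrors the English default, so untranslated locales keep the
# current filename unchanged.
from django.template.defaultfilters import slugify
from django.utils import translation
from django.utils.translation import pgettext

# Normally the locale comes from the request via get_language_from_request();
# "es" is just an arbitrary example here.
translation.activate("es")

filename = (
    slugify(
        pgettext(
            "Default name for the exported CSV file with content session logs.",
            "content_session_logs",
        )
    )
    + ".csv"
).replace("-", "_")

translation.deactivate()
print(filename)  # content_session_logs.csv when no translation exists
```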
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/core/logger/csv_export.py b/kolibri/core/logger/csv_export.py
--- a/kolibri/core/logger/csv_export.py
+++ b/kolibri/core/logger/csv_export.py
@@ -13,6 +13,10 @@
from django.http import Http404
from django.http import HttpResponse
from django.http.response import FileResponse
+from django.template.defaultfilters import slugify
+from django.utils import translation
+from django.utils.translation import get_language_from_request
+from django.utils.translation import pgettext
from .models import ContentSessionLog
from .models import ContentSummaryLog
@@ -180,6 +184,29 @@
def download_csv_file(request, log_type):
+ locale = get_language_from_request(request)
+ translation.activate(locale)
+
+ csv_translated_filenames = {
+ "session": (
+ slugify(
+ pgettext(
+ "Default name for the exported CSV file with content session logs. Please keep the underscores between words in the translation",
+ "content_session_logs",
+ )
+ )
+ + ".csv"
+ ).replace("-", "_"),
+ "summary": (
+ slugify(
+ pgettext(
+ "Default name for the exported CSV file with content summary logs. Please keep the underscores between words in the translation",
+ "content_summary_logs",
+ )
+ )
+ + ".csv"
+ ).replace("-", "_"),
+ }
csv_export_filenames = {
"session": "content_session_logs.csv",
"summary": "content_summary_logs.csv",
@@ -202,8 +229,9 @@
# set the content-disposition as attachment to force download
response["Content-Disposition"] = "attachment; filename={}".format(
- csv_export_filenames[log_type]
+ str(csv_translated_filenames[log_type])
)
+ translation.deactivate()
# set the content-length to the file size
response["Content-Length"] = os.path.getsize(filepath)
diff --git a/kolibri/plugins/facility/views.py b/kolibri/plugins/facility/views.py
--- a/kolibri/plugins/facility/views.py
+++ b/kolibri/plugins/facility/views.py
@@ -8,7 +8,11 @@
from django.http import Http404
from django.http.response import FileResponse
+from django.template.defaultfilters import slugify
+from django.utils import translation
from django.utils.decorators import method_decorator
+from django.utils.translation import get_language_from_request
+from django.utils.translation import pgettext
from django.views.generic.base import TemplateView
from kolibri.core.decorators import cache_no_user_data
@@ -21,6 +25,8 @@
def download_csv_file(request, filename):
+ locale = get_language_from_request(request)
+ translation.activate(locale)
filepath = os.path.join(conf.KOLIBRI_HOME, "temp", filename)
# if the file does not exist on disk, return a 404
@@ -33,11 +39,21 @@
response["Content-Type"] = "text/csv"
# set the content-disposition as attachment to force download
- response["Content-Disposition"] = "attachment; filename=users_{}.csv".format(
- datetime.now().strftime("%Y%m%d_%H%M%S")
+ exported_filename = (
+ slugify(
+ pgettext(
+ "Default name for the exported CSV file of facility user data. Please keep the underscore between words in the translation",
+ "users_{}",
+ ).format(datetime.now().strftime("%Y%m%d_%H%M%S"))
+ ).replace("-", "_")
+ + ".csv"
+ )
+ response["Content-Disposition"] = "attachment; filename={}".format(
+ str(exported_filename)
)
# set the content-length to the file size
response["Content-Length"] = os.path.getsize(filepath)
+ translation.deactivate()
return response
| {"golden_diff": "diff --git a/kolibri/core/logger/csv_export.py b/kolibri/core/logger/csv_export.py\n--- a/kolibri/core/logger/csv_export.py\n+++ b/kolibri/core/logger/csv_export.py\n@@ -13,6 +13,10 @@\n from django.http import Http404\n from django.http import HttpResponse\n from django.http.response import FileResponse\n+from django.template.defaultfilters import slugify\n+from django.utils import translation\n+from django.utils.translation import get_language_from_request\n+from django.utils.translation import pgettext\n \n from .models import ContentSessionLog\n from .models import ContentSummaryLog\n@@ -180,6 +184,29 @@\n \n \n def download_csv_file(request, log_type):\n+ locale = get_language_from_request(request)\n+ translation.activate(locale)\n+\n+ csv_translated_filenames = {\n+ \"session\": (\n+ slugify(\n+ pgettext(\n+ \"Default name for the exported CSV file with content session logs. Please keep the underscores between words in the translation\",\n+ \"content_session_logs\",\n+ )\n+ )\n+ + \".csv\"\n+ ).replace(\"-\", \"_\"),\n+ \"summary\": (\n+ slugify(\n+ pgettext(\n+ \"Default name for the exported CSV file with content summary logs. Please keep the underscores between words in the translation\",\n+ \"content_summary_logs\",\n+ )\n+ )\n+ + \".csv\"\n+ ).replace(\"-\", \"_\"),\n+ }\n csv_export_filenames = {\n \"session\": \"content_session_logs.csv\",\n \"summary\": \"content_summary_logs.csv\",\n@@ -202,8 +229,9 @@\n \n # set the content-disposition as attachment to force download\n response[\"Content-Disposition\"] = \"attachment; filename={}\".format(\n- csv_export_filenames[log_type]\n+ str(csv_translated_filenames[log_type])\n )\n+ translation.deactivate()\n \n # set the content-length to the file size\n response[\"Content-Length\"] = os.path.getsize(filepath)\ndiff --git a/kolibri/plugins/facility/views.py b/kolibri/plugins/facility/views.py\n--- a/kolibri/plugins/facility/views.py\n+++ b/kolibri/plugins/facility/views.py\n@@ -8,7 +8,11 @@\n \n from django.http import Http404\n from django.http.response import FileResponse\n+from django.template.defaultfilters import slugify\n+from django.utils import translation\n from django.utils.decorators import method_decorator\n+from django.utils.translation import get_language_from_request\n+from django.utils.translation import pgettext\n from django.views.generic.base import TemplateView\n \n from kolibri.core.decorators import cache_no_user_data\n@@ -21,6 +25,8 @@\n \n \n def download_csv_file(request, filename):\n+ locale = get_language_from_request(request)\n+ translation.activate(locale)\n filepath = os.path.join(conf.KOLIBRI_HOME, \"temp\", filename)\n \n # if the file does not exist on disk, return a 404\n@@ -33,11 +39,21 @@\n response[\"Content-Type\"] = \"text/csv\"\n \n # set the content-disposition as attachment to force download\n- response[\"Content-Disposition\"] = \"attachment; filename=users_{}.csv\".format(\n- datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n+ exported_filename = (\n+ slugify(\n+ pgettext(\n+ \"Default name for the exported CSV file of facility user data. 
Please keep the underscore between words in the translation\",\n+ \"users_{}\",\n+ ).format(datetime.now().strftime(\"%Y%m%d_%H%M%S\"))\n+ ).replace(\"-\", \"_\")\n+ + \".csv\"\n+ )\n+ response[\"Content-Disposition\"] = \"attachment; filename={}\".format(\n+ str(exported_filename)\n )\n \n # set the content-length to the file size\n response[\"Content-Length\"] = os.path.getsize(filepath)\n+ translation.deactivate()\n \n return response\n", "issue": "names of CSV files should be internationalized\n\r\n### Observed behavior\r\n\r\nThe names of files when exporting and downloading from the facility data page are all in English:\r\n\r\n> \r\n\r\n\r\n\r\nref: https://github.com/learningequality/kolibri/pull/6835\r\n\r\n### Expected behavior\r\n\r\nNames of files should be in the user's currently selected language. This is the same behavior as currently exists in Coach CSV export.\r\n\r\n### User-facing consequences\r\n\r\nfile name meaning is not understandable in the user's preferred language\r\n\r\n### Context\r\n\r\n0.13.2\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport io\nimport os\nfrom datetime import datetime\n\nfrom django.http import Http404\nfrom django.http.response import FileResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic.base import TemplateView\n\nfrom kolibri.core.decorators import cache_no_user_data\nfrom kolibri.utils import conf\n\n\n@method_decorator(cache_no_user_data, name=\"dispatch\")\nclass FacilityManagementView(TemplateView):\n template_name = \"facility_management.html\"\n\n\ndef download_csv_file(request, filename):\n filepath = os.path.join(conf.KOLIBRI_HOME, \"temp\", filename)\n\n # if the file does not exist on disk, return a 404\n if filepath is None or not os.path.exists(filepath):\n raise Http404(\"Creation of users export file has failed\")\n\n # generate a file response\n response = FileResponse(io.open(filepath, \"rb\"))\n # set the content-type by guessing from the filename\n response[\"Content-Type\"] = \"text/csv\"\n\n # set the content-disposition as attachment to force download\n response[\"Content-Disposition\"] = \"attachment; filename=users_{}.csv\".format(\n datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n )\n\n # set the content-length to the file size\n response[\"Content-Length\"] = os.path.getsize(filepath)\n\n return response\n", "path": "kolibri/plugins/facility/views.py"}, {"content": "from __future__ import unicode_literals\n\nimport csv\nimport io\nimport json\nimport logging\nimport math\nimport os\nimport sys\nfrom collections import OrderedDict\n\nfrom django.core.cache import cache\nfrom django.http import Http404\nfrom django.http import HttpResponse\nfrom django.http.response import FileResponse\n\nfrom .models import ContentSessionLog\nfrom .models import ContentSummaryLog\nfrom kolibri.core.content.models import ChannelMetadata\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.utils import conf\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef cache_channel_name(obj):\n channel_id = obj[\"channel_id\"]\n key = \"{id}_ChannelMetadata_name\".format(id=channel_id)\n channel_name = cache.get(key)\n if channel_name is None:\n try:\n channel_name = ChannelMetadata.objects.get(id=channel_id)\n except ChannelMetadata.DoesNotExist:\n channel_name = \"\"\n cache.set(key, channel_name, 60 * 10)\n return channel_name\n\n\ndef cache_content_title(obj):\n content_id = 
obj[\"content_id\"]\n key = \"{id}_ContentNode_title\".format(id=content_id)\n title = cache.get(key)\n if title is None:\n node = ContentNode.objects.filter(content_id=content_id).first()\n if node:\n title = node.title\n else:\n title = \"\"\n cache.set(key, title, 60 * 10)\n return title\n\n\nmappings = {\n \"channel_name\": cache_channel_name,\n \"content_title\": cache_content_title,\n \"time_spent\": lambda x: \"{:.1f}\".format(round(x[\"time_spent\"], 1)),\n \"progress\": lambda x: \"{:.4f}\".format(math.floor(x[\"progress\"] * 10000.0) / 10000),\n}\n\nlabels = OrderedDict(\n (\n (\"user__facility__name\", \"Facility name\"),\n (\"user__username\", \"Username\"),\n (\"channel_id\", \"Channel id\"),\n (\"channel_name\", \"Channel name\"),\n (\"content_id\", \"Content id\"),\n (\"content_title\", \"Content title\"),\n (\"start_timestamp\", \"Time of first interaction\"),\n (\"end_timestamp\", \"Time of last interaction\"),\n (\"completion_timestamp\", \"Time of completion\"),\n (\"time_spent\", \"Time Spent (sec)\"),\n (\"progress\", \"Progress (0-1)\"),\n (\"kind\", \"Content kind\"),\n )\n)\n\n\ndef map_object(obj):\n mapped_obj = {}\n for header, label in labels.items():\n if header in mappings:\n mapped_obj[label] = mappings[header](obj)\n elif header in obj:\n mapped_obj[label] = obj[header]\n return mapped_obj\n\n\nclasses_info = {\n \"session\": {\n \"queryset\": ContentSessionLog.objects.all(),\n \"filename\": \"content_session_logs.csv\",\n \"db_columns\": (\n \"user__username\",\n \"user__facility__name\",\n \"channel_id\",\n \"content_id\",\n \"start_timestamp\",\n \"end_timestamp\",\n \"time_spent\",\n \"progress\",\n \"kind\",\n ),\n },\n \"summary\": {\n \"queryset\": ContentSummaryLog.objects.all(),\n \"filename\": \"content_summary_logs.csv\",\n \"db_columns\": (\n \"user__username\",\n \"user__facility__name\",\n \"content_id\",\n \"channel_id\",\n \"start_timestamp\",\n \"end_timestamp\",\n \"completion_timestamp\",\n \"time_spent\",\n \"progress\",\n \"kind\",\n ),\n },\n}\n\n\ndef csv_file_generator(log_type, filepath, overwrite=False):\n if log_type not in (\"summary\", \"session\"):\n raise ValueError(\n \"Impossible to create a csv export file for {}\".format(log_type)\n )\n\n log_info = classes_info[log_type]\n\n if not overwrite and os.path.exists(filepath):\n raise ValueError(\"{} already exists\".format(filepath))\n queryset = log_info[\"queryset\"]\n\n # Exclude completion timestamp for the sessionlog CSV\n header_labels = tuple(\n label\n for label in labels.values()\n if log_type == \"summary\" or label != \"completion_timestamp\"\n )\n\n if sys.version_info[0] < 3:\n csv_file = io.open(filepath, \"wb\")\n else:\n csv_file = io.open(filepath, \"w\", newline=\"\")\n\n with csv_file as f:\n writer = csv.DictWriter(f, header_labels)\n logger.info(\"Creating csv file {filename}\".format(filename=filepath))\n writer.writeheader()\n for item in queryset.select_related(\"user\", \"user__facility\").values(\n *log_info[\"db_columns\"]\n ):\n writer.writerow(map_object(item))\n yield\n\n\ndef exported_logs_info(request):\n \"\"\"\n Get the last modification timestamp of the summary logs exported\n\n :returns: An object with the files informatin\n \"\"\"\n\n logs_dir = os.path.join(conf.KOLIBRI_HOME, \"log_export\")\n csv_statuses = {}\n csv_export_filenames = {\n \"session\": \"content_session_logs.csv\",\n \"summary\": \"content_summary_logs.csv\",\n }\n for log_type in csv_export_filenames.keys():\n log_path = os.path.join(logs_dir, 
csv_export_filenames[log_type])\n if os.path.exists(log_path):\n csv_statuses[log_type] = os.path.getmtime(log_path)\n else:\n csv_statuses[log_type] = None\n\n return HttpResponse(json.dumps(csv_statuses), content_type=\"application/json\")\n\n\ndef download_csv_file(request, log_type):\n csv_export_filenames = {\n \"session\": \"content_session_logs.csv\",\n \"summary\": \"content_summary_logs.csv\",\n }\n if log_type in csv_export_filenames.keys():\n filepath = os.path.join(\n conf.KOLIBRI_HOME, \"log_export\", csv_export_filenames[log_type]\n )\n else:\n filepath = None\n\n # if the file does not exist on disk, return a 404\n if filepath is None or not os.path.exists(filepath):\n raise Http404(\"There is no csv export file for {} available\".format(log_type))\n\n # generate a file response\n response = FileResponse(io.open(filepath, \"rb\"))\n # set the content-type by guessing from the filename\n response[\"Content-Type\"] = \"text/csv\"\n\n # set the content-disposition as attachment to force download\n response[\"Content-Disposition\"] = \"attachment; filename={}\".format(\n csv_export_filenames[log_type]\n )\n\n # set the content-length to the file size\n response[\"Content-Length\"] = os.path.getsize(filepath)\n\n return response\n", "path": "kolibri/core/logger/csv_export.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport io\nimport os\nfrom datetime import datetime\n\nfrom django.http import Http404\nfrom django.http.response import FileResponse\nfrom django.template.defaultfilters import slugify\nfrom django.utils import translation\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import get_language_from_request\nfrom django.utils.translation import pgettext\nfrom django.views.generic.base import TemplateView\n\nfrom kolibri.core.decorators import cache_no_user_data\nfrom kolibri.utils import conf\n\n\n@method_decorator(cache_no_user_data, name=\"dispatch\")\nclass FacilityManagementView(TemplateView):\n template_name = \"facility_management.html\"\n\n\ndef download_csv_file(request, filename):\n locale = get_language_from_request(request)\n translation.activate(locale)\n filepath = os.path.join(conf.KOLIBRI_HOME, \"temp\", filename)\n\n # if the file does not exist on disk, return a 404\n if filepath is None or not os.path.exists(filepath):\n raise Http404(\"Creation of users export file has failed\")\n\n # generate a file response\n response = FileResponse(io.open(filepath, \"rb\"))\n # set the content-type by guessing from the filename\n response[\"Content-Type\"] = \"text/csv\"\n\n # set the content-disposition as attachment to force download\n exported_filename = (\n slugify(\n pgettext(\n \"Default name for the exported CSV file of facility user data. 
Please keep the underscore between words in the translation\",\n \"users_{}\",\n ).format(datetime.now().strftime(\"%Y%m%d_%H%M%S\"))\n ).replace(\"-\", \"_\")\n + \".csv\"\n )\n response[\"Content-Disposition\"] = \"attachment; filename={}\".format(\n str(exported_filename)\n )\n\n # set the content-length to the file size\n response[\"Content-Length\"] = os.path.getsize(filepath)\n translation.deactivate()\n\n return response\n", "path": "kolibri/plugins/facility/views.py"}, {"content": "from __future__ import unicode_literals\n\nimport csv\nimport io\nimport json\nimport logging\nimport math\nimport os\nimport sys\nfrom collections import OrderedDict\n\nfrom django.core.cache import cache\nfrom django.http import Http404\nfrom django.http import HttpResponse\nfrom django.http.response import FileResponse\nfrom django.template.defaultfilters import slugify\nfrom django.utils import translation\nfrom django.utils.translation import get_language_from_request\nfrom django.utils.translation import pgettext\n\nfrom .models import ContentSessionLog\nfrom .models import ContentSummaryLog\nfrom kolibri.core.content.models import ChannelMetadata\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.utils import conf\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef cache_channel_name(obj):\n channel_id = obj[\"channel_id\"]\n key = \"{id}_ChannelMetadata_name\".format(id=channel_id)\n channel_name = cache.get(key)\n if channel_name is None:\n try:\n channel_name = ChannelMetadata.objects.get(id=channel_id)\n except ChannelMetadata.DoesNotExist:\n channel_name = \"\"\n cache.set(key, channel_name, 60 * 10)\n return channel_name\n\n\ndef cache_content_title(obj):\n content_id = obj[\"content_id\"]\n key = \"{id}_ContentNode_title\".format(id=content_id)\n title = cache.get(key)\n if title is None:\n node = ContentNode.objects.filter(content_id=content_id).first()\n if node:\n title = node.title\n else:\n title = \"\"\n cache.set(key, title, 60 * 10)\n return title\n\n\nmappings = {\n \"channel_name\": cache_channel_name,\n \"content_title\": cache_content_title,\n \"time_spent\": lambda x: \"{:.1f}\".format(round(x[\"time_spent\"], 1)),\n \"progress\": lambda x: \"{:.4f}\".format(math.floor(x[\"progress\"] * 10000.0) / 10000),\n}\n\nlabels = OrderedDict(\n (\n (\"user__facility__name\", \"Facility name\"),\n (\"user__username\", \"Username\"),\n (\"channel_id\", \"Channel id\"),\n (\"channel_name\", \"Channel name\"),\n (\"content_id\", \"Content id\"),\n (\"content_title\", \"Content title\"),\n (\"start_timestamp\", \"Time of first interaction\"),\n (\"end_timestamp\", \"Time of last interaction\"),\n (\"completion_timestamp\", \"Time of completion\"),\n (\"time_spent\", \"Time Spent (sec)\"),\n (\"progress\", \"Progress (0-1)\"),\n (\"kind\", \"Content kind\"),\n )\n)\n\n\ndef map_object(obj):\n mapped_obj = {}\n for header, label in labels.items():\n if header in mappings:\n mapped_obj[label] = mappings[header](obj)\n elif header in obj:\n mapped_obj[label] = obj[header]\n return mapped_obj\n\n\nclasses_info = {\n \"session\": {\n \"queryset\": ContentSessionLog.objects.all(),\n \"filename\": \"content_session_logs.csv\",\n \"db_columns\": (\n \"user__username\",\n \"user__facility__name\",\n \"channel_id\",\n \"content_id\",\n \"start_timestamp\",\n \"end_timestamp\",\n \"time_spent\",\n \"progress\",\n \"kind\",\n ),\n },\n \"summary\": {\n \"queryset\": ContentSummaryLog.objects.all(),\n \"filename\": \"content_summary_logs.csv\",\n \"db_columns\": (\n \"user__username\",\n 
\"user__facility__name\",\n \"content_id\",\n \"channel_id\",\n \"start_timestamp\",\n \"end_timestamp\",\n \"completion_timestamp\",\n \"time_spent\",\n \"progress\",\n \"kind\",\n ),\n },\n}\n\n\ndef csv_file_generator(log_type, filepath, overwrite=False):\n if log_type not in (\"summary\", \"session\"):\n raise ValueError(\n \"Impossible to create a csv export file for {}\".format(log_type)\n )\n\n log_info = classes_info[log_type]\n\n if not overwrite and os.path.exists(filepath):\n raise ValueError(\"{} already exists\".format(filepath))\n queryset = log_info[\"queryset\"]\n\n # Exclude completion timestamp for the sessionlog CSV\n header_labels = tuple(\n label\n for label in labels.values()\n if log_type == \"summary\" or label != \"completion_timestamp\"\n )\n\n if sys.version_info[0] < 3:\n csv_file = io.open(filepath, \"wb\")\n else:\n csv_file = io.open(filepath, \"w\", newline=\"\")\n\n with csv_file as f:\n writer = csv.DictWriter(f, header_labels)\n logger.info(\"Creating csv file {filename}\".format(filename=filepath))\n writer.writeheader()\n for item in queryset.select_related(\"user\", \"user__facility\").values(\n *log_info[\"db_columns\"]\n ):\n writer.writerow(map_object(item))\n yield\n\n\ndef exported_logs_info(request):\n \"\"\"\n Get the last modification timestamp of the summary logs exported\n\n :returns: An object with the files informatin\n \"\"\"\n\n logs_dir = os.path.join(conf.KOLIBRI_HOME, \"log_export\")\n csv_statuses = {}\n csv_export_filenames = {\n \"session\": \"content_session_logs.csv\",\n \"summary\": \"content_summary_logs.csv\",\n }\n for log_type in csv_export_filenames.keys():\n log_path = os.path.join(logs_dir, csv_export_filenames[log_type])\n if os.path.exists(log_path):\n csv_statuses[log_type] = os.path.getmtime(log_path)\n else:\n csv_statuses[log_type] = None\n\n return HttpResponse(json.dumps(csv_statuses), content_type=\"application/json\")\n\n\ndef download_csv_file(request, log_type):\n locale = get_language_from_request(request)\n translation.activate(locale)\n\n csv_translated_filenames = {\n \"session\": (\n slugify(\n pgettext(\n \"Default name for the exported CSV file with content session logs. Please keep the underscores between words in the translation\",\n \"content_session_logs\",\n )\n )\n + \".csv\"\n ).replace(\"-\", \"_\"),\n \"summary\": (\n slugify(\n pgettext(\n \"Default name for the exported CSV file with content summary logs. 
Please keep the underscores between words in the translation\",\n \"content_summary_logs\",\n )\n )\n + \".csv\"\n ).replace(\"-\", \"_\"),\n }\n csv_export_filenames = {\n \"session\": \"content_session_logs.csv\",\n \"summary\": \"content_summary_logs.csv\",\n }\n if log_type in csv_export_filenames.keys():\n filepath = os.path.join(\n conf.KOLIBRI_HOME, \"log_export\", csv_export_filenames[log_type]\n )\n else:\n filepath = None\n\n # if the file does not exist on disk, return a 404\n if filepath is None or not os.path.exists(filepath):\n raise Http404(\"There is no csv export file for {} available\".format(log_type))\n\n # generate a file response\n response = FileResponse(io.open(filepath, \"rb\"))\n # set the content-type by guessing from the filename\n response[\"Content-Type\"] = \"text/csv\"\n\n # set the content-disposition as attachment to force download\n response[\"Content-Disposition\"] = \"attachment; filename={}\".format(\n str(csv_translated_filenames[log_type])\n )\n translation.deactivate()\n\n # set the content-length to the file size\n response[\"Content-Length\"] = os.path.getsize(filepath)\n\n return response\n", "path": "kolibri/core/logger/csv_export.py"}]} | 2,874 | 875 |
gh_patches_debug_43447 | rasdani/github-patches | git_diff | beetbox__beets-4008 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Subsonicupdate plugin broken in multiple places
### Problem
Running the latest git version of beets causes the subsonicupdate plugin to fail with a generic "Error: 1".
The current code is broken in 2 places:
1. `def __get_version(self):`
The code tries to retrieve the server version in order to decide whether to authenticate with a token or a password: a much better choice than my original plan of assuming everyone would run an updated version and/or prefer token over password for the additional security.
In the current code, the REST API is called with only the 'c' and 'f' parameters: the call fails because the request is incomplete, so the plugin stops processing.
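As a rough illustration (an editorial sketch, not part of the original report), the failing ping boils down to a request like the one below; per the Subsonic API reference linked under Additional Info, every endpoint also expects the common `u`, `t`/`s` (or `p`) and `v` parameters, so a call carrying only `c` and `f` is rejected:

```python
import requests

# What __get_version currently sends: just the client name and response format.
# The common Subsonic parameters (user, token/salt or password, API version)
# are missing, so the server refuses the ping and the plugin gives up.
response = requests.get(
    "http://localhost:4040/rest/ping.view",
    params={"c": "beets", "f": "json"},
)
```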
2. `def start_scan(self):`
Assuming the authentication would go through, this call would fail anyway, since the current code splits the API version into three values and passes them separately, e.g. `&v=1&v=61&v=1`, rather than `&v=1.61.1`.
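For context (an illustrative sketch, not from the original report): `requests` expands a tuple or list parameter value into repeated query keys, which is exactly why passing the version tuple straight through produces the broken query string, while joining it into a dotted string gives the form Subsonic expects:

```python
import requests

version = (1, 61, 1)
base = "http://localhost:4040/rest/startScan.view"

# A tuple value becomes repeated keys: ...&v=1&v=61&v=1
broken = requests.Request("GET", base, params={"v": version}).prepare().url

# A dotted string keeps a single parameter: ...&v=1.61.1
fixed = requests.Request(
    "GET", base, params={"v": ".".join(map(str, version))}
).prepare().url
```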
I have always been running an ancient version of beets with my original code for the plugin, and I noticed this just today as I set up a new machine and installed beets from the git repo through AUR.
It also seems I am the only user of this plugin given no other feedback has been filed so far, so I can understand if this fix doesn't get high priority.
### Setup
* OS: Arch Linux
* Python version: 3.9.6
* beets version: 1.5.0
* Turning off plugins made problem go away (yes/no): disabling subsonicupdate plugin will prevent the error from happening
### Additional Info
Subsonic API reference: http://www.subsonic.org/pages/api.jsp
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beetsplug/subsonicupdate.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This file is part of beets.
3 # Copyright 2016, Adrian Sampson.
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining
6 # a copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish,
9 # distribute, sublicense, and/or sell copies of the Software, and to
10 # permit persons to whom the Software is furnished to do so, subject to
11 # the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be
14 # included in all copies or substantial portions of the Software.
15
16 """Updates Subsonic library on Beets import
17 Your Beets configuration file should contain
18 a "subsonic" section like the following:
19 subsonic:
20 url: https://mydomain.com:443/subsonic
21 user: username
22 pass: password
23 """
24 from __future__ import division, absolute_import, print_function
25
26 import hashlib
27 import random
28 import string
29
30 import requests
31
32 from binascii import hexlify
33 from beets import config
34 from beets.plugins import BeetsPlugin
35
36 __author__ = 'https://github.com/maffo999'
37 AUTH_TOKEN_VERSION = (1, 12)
38
39
40 class SubsonicUpdate(BeetsPlugin):
41 def __init__(self):
42 super(SubsonicUpdate, self).__init__()
43 # Set default configuration values
44 config['subsonic'].add({
45 'user': 'admin',
46 'pass': 'admin',
47 'url': 'http://localhost:4040',
48 })
49 config['subsonic']['pass'].redact = True
50 self._version = None
51 self._auth = None
52 self.register_listener('import', self.start_scan)
53
54 @property
55 def version(self):
56 if self._version is None:
57 self._version = self.__get_version()
58 return self._version
59
60 @property
61 def auth(self):
62 if self._auth is None:
63 if self.version is not None:
64 if self.version > AUTH_TOKEN_VERSION:
65 self._auth = "token"
66 else:
67 self._auth = "password"
68 self._log.info(
69 u"using '{}' authentication method".format(self._auth))
70 return self._auth
71
72 @staticmethod
73 def __create_token():
74 """Create salt and token from given password.
75
76 :return: The generated salt and hashed token
77 """
78 password = config['subsonic']['pass'].as_str()
79
80 # Pick the random sequence and salt the password
81 r = string.ascii_letters + string.digits
82 salt = "".join([random.choice(r) for _ in range(6)])
83 salted_password = password + salt
84 token = hashlib.md5(salted_password.encode('utf-8')).hexdigest()
85
86 # Put together the payload of the request to the server and the URL
87 return salt, token
88
89 @staticmethod
90 def __format_url(endpoint):
91 """Get the Subsonic URL to trigger the given endpoint.
92 Uses either the url config option or the deprecated host, port,
93 and context_path config options together.
94
95 :return: Endpoint for updating Subsonic
96 """
97
98 url = config['subsonic']['url'].as_str()
99 if url and url.endswith('/'):
100 url = url[:-1]
101
102 # @deprecated("Use url config option instead")
103 if not url:
104 host = config['subsonic']['host'].as_str()
105 port = config['subsonic']['port'].get(int)
106 context_path = config['subsonic']['contextpath'].as_str()
107 if context_path == '/':
108 context_path = ''
109 url = "http://{}:{}{}".format(host, port, context_path)
110
111 return url + '/rest/{}'.format(endpoint)
112
113 def __get_version(self):
114 url = self.__format_url("ping.view")
115 payload = {
116 'c': 'beets',
117 'f': 'json'
118 }
119 try:
120 response = requests.get(url, params=payload)
121 if response.status_code == 200:
122 json = response.json()
123 version = json['subsonic-response']['version']
124 self._log.info(
125 u'subsonic version:{0} '.format(version))
126 return tuple(int(s) for s in version.split('.'))
127 else:
128 self._log.error(u'Error: {0}', json)
129 return None
130 except Exception as error:
131 self._log.error(u'Error: {0}'.format(error))
132 return None
133
134 def start_scan(self):
135 user = config['subsonic']['user'].as_str()
136 url = self.__format_url("startScan.view")
137
138 if self.auth == 'token':
139 salt, token = self.__create_token()
140 payload = {
141 'u': user,
142 't': token,
143 's': salt,
144 'v': self.version, # Subsonic 6.1 and newer.
145 'c': 'beets',
146 'f': 'json'
147 }
148 elif self.auth == 'password':
149 password = config['subsonic']['pass'].as_str()
150 encpass = hexlify(password.encode()).decode()
151 payload = {
152 'u': user,
153 'p': 'enc:{}'.format(encpass),
154 'v': self.version,
155 'c': 'beets',
156 'f': 'json'
157 }
158 else:
159 return
160 try:
161 response = requests.get(url, params=payload)
162 json = response.json()
163
164 if response.status_code == 200 and \
165 json['subsonic-response']['status'] == "ok":
166 count = json['subsonic-response']['scanStatus']['count']
167 self._log.info(
168 u'Updating Subsonic; scanning {0} tracks'.format(count))
169 elif response.status_code == 200 and \
170 json['subsonic-response']['status'] == "failed":
171 error_message = json['subsonic-response']['error']['message']
172 self._log.error(u'Error: {0}'.format(error_message))
173 else:
174 self._log.error(u'Error: {0}', json)
175 except Exception as error:
176 self._log.error(u'Error: {0}'.format(error))
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/beetsplug/subsonicupdate.py b/beetsplug/subsonicupdate.py
--- a/beetsplug/subsonicupdate.py
+++ b/beetsplug/subsonicupdate.py
@@ -20,6 +20,14 @@
url: https://mydomain.com:443/subsonic
user: username
pass: password
+ auth: token
+For older Subsonic versions, token authentication
+is not supported, use password instead:
+ subsonic:
+ url: https://mydomain.com:443/subsonic
+ user: username
+ pass: password
+ auth: pass
"""
from __future__ import division, absolute_import, print_function
@@ -34,7 +42,6 @@
from beets.plugins import BeetsPlugin
__author__ = 'https://github.com/maffo999'
-AUTH_TOKEN_VERSION = (1, 12)
class SubsonicUpdate(BeetsPlugin):
@@ -45,30 +52,11 @@
'user': 'admin',
'pass': 'admin',
'url': 'http://localhost:4040',
+ 'auth': 'token',
})
config['subsonic']['pass'].redact = True
- self._version = None
- self._auth = None
self.register_listener('import', self.start_scan)
- @property
- def version(self):
- if self._version is None:
- self._version = self.__get_version()
- return self._version
-
- @property
- def auth(self):
- if self._auth is None:
- if self.version is not None:
- if self.version > AUTH_TOKEN_VERSION:
- self._auth = "token"
- else:
- self._auth = "password"
- self._log.info(
- u"using '{}' authentication method".format(self._auth))
- return self._auth
-
@staticmethod
def __create_token():
"""Create salt and token from given password.
@@ -110,48 +98,30 @@
return url + '/rest/{}'.format(endpoint)
- def __get_version(self):
- url = self.__format_url("ping.view")
- payload = {
- 'c': 'beets',
- 'f': 'json'
- }
- try:
- response = requests.get(url, params=payload)
- if response.status_code == 200:
- json = response.json()
- version = json['subsonic-response']['version']
- self._log.info(
- u'subsonic version:{0} '.format(version))
- return tuple(int(s) for s in version.split('.'))
- else:
- self._log.error(u'Error: {0}', json)
- return None
- except Exception as error:
- self._log.error(u'Error: {0}'.format(error))
- return None
-
def start_scan(self):
user = config['subsonic']['user'].as_str()
- url = self.__format_url("startScan.view")
+ auth = config['subsonic']['auth'].as_str()
+ url = self.__format_url("startScan")
+ self._log.debug(u'URL is {0}', url)
+ self._log.debug(u'auth type is {0}', config['subsonic']['auth'])
- if self.auth == 'token':
+ if auth == "token":
salt, token = self.__create_token()
payload = {
'u': user,
't': token,
's': salt,
- 'v': self.version, # Subsonic 6.1 and newer.
+ 'v': '1.13.0', # Subsonic 5.3 and newer
'c': 'beets',
'f': 'json'
}
- elif self.auth == 'password':
+ elif auth == "password":
password = config['subsonic']['pass'].as_str()
encpass = hexlify(password.encode()).decode()
payload = {
'u': user,
'p': 'enc:{}'.format(encpass),
- 'v': self.version,
+ 'v': '1.12.0',
'c': 'beets',
'f': 'json'
}
| {"golden_diff": "diff --git a/beetsplug/subsonicupdate.py b/beetsplug/subsonicupdate.py\n--- a/beetsplug/subsonicupdate.py\n+++ b/beetsplug/subsonicupdate.py\n@@ -20,6 +20,14 @@\n url: https://mydomain.com:443/subsonic\n user: username\n pass: password\n+ auth: token\n+For older Subsonic versions, token authentication\n+is not supported, use password instead:\n+ subsonic:\n+ url: https://mydomain.com:443/subsonic\n+ user: username\n+ pass: password\n+ auth: pass\n \"\"\"\n from __future__ import division, absolute_import, print_function\n \n@@ -34,7 +42,6 @@\n from beets.plugins import BeetsPlugin\n \n __author__ = 'https://github.com/maffo999'\n-AUTH_TOKEN_VERSION = (1, 12)\n \n \n class SubsonicUpdate(BeetsPlugin):\n@@ -45,30 +52,11 @@\n 'user': 'admin',\n 'pass': 'admin',\n 'url': 'http://localhost:4040',\n+ 'auth': 'token',\n })\n config['subsonic']['pass'].redact = True\n- self._version = None\n- self._auth = None\n self.register_listener('import', self.start_scan)\n \n- @property\n- def version(self):\n- if self._version is None:\n- self._version = self.__get_version()\n- return self._version\n-\n- @property\n- def auth(self):\n- if self._auth is None:\n- if self.version is not None:\n- if self.version > AUTH_TOKEN_VERSION:\n- self._auth = \"token\"\n- else:\n- self._auth = \"password\"\n- self._log.info(\n- u\"using '{}' authentication method\".format(self._auth))\n- return self._auth\n-\n @staticmethod\n def __create_token():\n \"\"\"Create salt and token from given password.\n@@ -110,48 +98,30 @@\n \n return url + '/rest/{}'.format(endpoint)\n \n- def __get_version(self):\n- url = self.__format_url(\"ping.view\")\n- payload = {\n- 'c': 'beets',\n- 'f': 'json'\n- }\n- try:\n- response = requests.get(url, params=payload)\n- if response.status_code == 200:\n- json = response.json()\n- version = json['subsonic-response']['version']\n- self._log.info(\n- u'subsonic version:{0} '.format(version))\n- return tuple(int(s) for s in version.split('.'))\n- else:\n- self._log.error(u'Error: {0}', json)\n- return None\n- except Exception as error:\n- self._log.error(u'Error: {0}'.format(error))\n- return None\n-\n def start_scan(self):\n user = config['subsonic']['user'].as_str()\n- url = self.__format_url(\"startScan.view\")\n+ auth = config['subsonic']['auth'].as_str()\n+ url = self.__format_url(\"startScan\")\n+ self._log.debug(u'URL is {0}', url)\n+ self._log.debug(u'auth type is {0}', config['subsonic']['auth'])\n \n- if self.auth == 'token':\n+ if auth == \"token\":\n salt, token = self.__create_token()\n payload = {\n 'u': user,\n 't': token,\n 's': salt,\n- 'v': self.version, # Subsonic 6.1 and newer.\n+ 'v': '1.13.0', # Subsonic 5.3 and newer\n 'c': 'beets',\n 'f': 'json'\n }\n- elif self.auth == 'password':\n+ elif auth == \"password\":\n password = config['subsonic']['pass'].as_str()\n encpass = hexlify(password.encode()).decode()\n payload = {\n 'u': user,\n 'p': 'enc:{}'.format(encpass),\n- 'v': self.version,\n+ 'v': '1.12.0',\n 'c': 'beets',\n 'f': 'json'\n }\n", "issue": "Subsonicupdate plugin broken in multiple paces\n### Problem\r\n\r\nRunning the latest git version of beets causes the subsonicupdate plugin to fail with a generic \"Error: 1\".\r\nThe current code is broken in 2 places:\r\n\r\n1. 
`def __get_version(self):`\r\nCode is trying to retrieve the version in order to decide whether to authenticate with token or password: a much better choice than my original plan of assuming everyone would use an updated version and/or prefer token over password due to additional security.\r\nIn the current code, the REST API is called by passing only parameters 'c' and 'f': this API fails because it's not complete and the plugin stops processing.\r\n\r\n2. `def start_scan(self):`\r\nAssuming the authentication would go through, this call would fail anyway since the current code is splitting the API version in 3 values and passing them separately, e.g. `&v=1&v=61&v=1`, rather than `&v=1.61.1`\r\n\r\nI have always been running an ancient version of beets with my original code for the plugin and I noticed this just today as I set up a new machine and installed beets form git repo through AUR.\r\nIt also seems I am the only user of this plugin given no other feedback has been filed so far, so I can understand if this fix doesn't get high priority.\r\n\r\n### Setup\r\n\r\n* OS: Arch Linux\r\n* Python version: 3.9.6\r\n* beets version: 1.5.0\r\n* Turning off plugins made problem go away (yes/no): disabling subsonicupdate plugin will prevent the error from happening\r\n\r\n### Additional Info\r\nSubsonic API reference: http://www.subsonic.org/pages/api.jsp\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Updates Subsonic library on Beets import\nYour Beets configuration file should contain\na \"subsonic\" section like the following:\n subsonic:\n url: https://mydomain.com:443/subsonic\n user: username\n pass: password\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport hashlib\nimport random\nimport string\n\nimport requests\n\nfrom binascii import hexlify\nfrom beets import config\nfrom beets.plugins import BeetsPlugin\n\n__author__ = 'https://github.com/maffo999'\nAUTH_TOKEN_VERSION = (1, 12)\n\n\nclass SubsonicUpdate(BeetsPlugin):\n def __init__(self):\n super(SubsonicUpdate, self).__init__()\n # Set default configuration values\n config['subsonic'].add({\n 'user': 'admin',\n 'pass': 'admin',\n 'url': 'http://localhost:4040',\n })\n config['subsonic']['pass'].redact = True\n self._version = None\n self._auth = None\n self.register_listener('import', self.start_scan)\n\n @property\n def version(self):\n if self._version is None:\n self._version = self.__get_version()\n return self._version\n\n @property\n def auth(self):\n if self._auth is None:\n if self.version is not None:\n if self.version > AUTH_TOKEN_VERSION:\n self._auth = \"token\"\n else:\n self._auth = \"password\"\n self._log.info(\n u\"using '{}' authentication method\".format(self._auth))\n return self._auth\n\n @staticmethod\n def __create_token():\n \"\"\"Create salt and token from given password.\n\n :return: The generated 
salt and hashed token\n \"\"\"\n password = config['subsonic']['pass'].as_str()\n\n # Pick the random sequence and salt the password\n r = string.ascii_letters + string.digits\n salt = \"\".join([random.choice(r) for _ in range(6)])\n salted_password = password + salt\n token = hashlib.md5(salted_password.encode('utf-8')).hexdigest()\n\n # Put together the payload of the request to the server and the URL\n return salt, token\n\n @staticmethod\n def __format_url(endpoint):\n \"\"\"Get the Subsonic URL to trigger the given endpoint.\n Uses either the url config option or the deprecated host, port,\n and context_path config options together.\n\n :return: Endpoint for updating Subsonic\n \"\"\"\n\n url = config['subsonic']['url'].as_str()\n if url and url.endswith('/'):\n url = url[:-1]\n\n # @deprecated(\"Use url config option instead\")\n if not url:\n host = config['subsonic']['host'].as_str()\n port = config['subsonic']['port'].get(int)\n context_path = config['subsonic']['contextpath'].as_str()\n if context_path == '/':\n context_path = ''\n url = \"http://{}:{}{}\".format(host, port, context_path)\n\n return url + '/rest/{}'.format(endpoint)\n\n def __get_version(self):\n url = self.__format_url(\"ping.view\")\n payload = {\n 'c': 'beets',\n 'f': 'json'\n }\n try:\n response = requests.get(url, params=payload)\n if response.status_code == 200:\n json = response.json()\n version = json['subsonic-response']['version']\n self._log.info(\n u'subsonic version:{0} '.format(version))\n return tuple(int(s) for s in version.split('.'))\n else:\n self._log.error(u'Error: {0}', json)\n return None\n except Exception as error:\n self._log.error(u'Error: {0}'.format(error))\n return None\n\n def start_scan(self):\n user = config['subsonic']['user'].as_str()\n url = self.__format_url(\"startScan.view\")\n\n if self.auth == 'token':\n salt, token = self.__create_token()\n payload = {\n 'u': user,\n 't': token,\n 's': salt,\n 'v': self.version, # Subsonic 6.1 and newer.\n 'c': 'beets',\n 'f': 'json'\n }\n elif self.auth == 'password':\n password = config['subsonic']['pass'].as_str()\n encpass = hexlify(password.encode()).decode()\n payload = {\n 'u': user,\n 'p': 'enc:{}'.format(encpass),\n 'v': self.version,\n 'c': 'beets',\n 'f': 'json'\n }\n else:\n return\n try:\n response = requests.get(url, params=payload)\n json = response.json()\n\n if response.status_code == 200 and \\\n json['subsonic-response']['status'] == \"ok\":\n count = json['subsonic-response']['scanStatus']['count']\n self._log.info(\n u'Updating Subsonic; scanning {0} tracks'.format(count))\n elif response.status_code == 200 and \\\n json['subsonic-response']['status'] == \"failed\":\n error_message = json['subsonic-response']['error']['message']\n self._log.error(u'Error: {0}'.format(error_message))\n else:\n self._log.error(u'Error: {0}', json)\n except Exception as error:\n self._log.error(u'Error: {0}'.format(error))\n", "path": "beetsplug/subsonicupdate.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following 
conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Updates Subsonic library on Beets import\nYour Beets configuration file should contain\na \"subsonic\" section like the following:\n subsonic:\n url: https://mydomain.com:443/subsonic\n user: username\n pass: password\n auth: token\nFor older Subsonic versions, token authentication\nis not supported, use password instead:\n subsonic:\n url: https://mydomain.com:443/subsonic\n user: username\n pass: password\n auth: pass\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport hashlib\nimport random\nimport string\n\nimport requests\n\nfrom binascii import hexlify\nfrom beets import config\nfrom beets.plugins import BeetsPlugin\n\n__author__ = 'https://github.com/maffo999'\n\n\nclass SubsonicUpdate(BeetsPlugin):\n def __init__(self):\n super(SubsonicUpdate, self).__init__()\n # Set default configuration values\n config['subsonic'].add({\n 'user': 'admin',\n 'pass': 'admin',\n 'url': 'http://localhost:4040',\n 'auth': 'token',\n })\n config['subsonic']['pass'].redact = True\n self.register_listener('import', self.start_scan)\n\n @staticmethod\n def __create_token():\n \"\"\"Create salt and token from given password.\n\n :return: The generated salt and hashed token\n \"\"\"\n password = config['subsonic']['pass'].as_str()\n\n # Pick the random sequence and salt the password\n r = string.ascii_letters + string.digits\n salt = \"\".join([random.choice(r) for _ in range(6)])\n salted_password = password + salt\n token = hashlib.md5(salted_password.encode('utf-8')).hexdigest()\n\n # Put together the payload of the request to the server and the URL\n return salt, token\n\n @staticmethod\n def __format_url(endpoint):\n \"\"\"Get the Subsonic URL to trigger the given endpoint.\n Uses either the url config option or the deprecated host, port,\n and context_path config options together.\n\n :return: Endpoint for updating Subsonic\n \"\"\"\n\n url = config['subsonic']['url'].as_str()\n if url and url.endswith('/'):\n url = url[:-1]\n\n # @deprecated(\"Use url config option instead\")\n if not url:\n host = config['subsonic']['host'].as_str()\n port = config['subsonic']['port'].get(int)\n context_path = config['subsonic']['contextpath'].as_str()\n if context_path == '/':\n context_path = ''\n url = \"http://{}:{}{}\".format(host, port, context_path)\n\n return url + '/rest/{}'.format(endpoint)\n\n def start_scan(self):\n user = config['subsonic']['user'].as_str()\n auth = config['subsonic']['auth'].as_str()\n url = self.__format_url(\"startScan\")\n self._log.debug(u'URL is {0}', url)\n self._log.debug(u'auth type is {0}', config['subsonic']['auth'])\n\n if auth == \"token\":\n salt, token = self.__create_token()\n payload = {\n 'u': user,\n 't': token,\n 's': salt,\n 'v': '1.13.0', # Subsonic 5.3 and newer\n 'c': 'beets',\n 'f': 'json'\n }\n elif auth == \"password\":\n password = config['subsonic']['pass'].as_str()\n encpass = hexlify(password.encode()).decode()\n payload = {\n 'u': user,\n 'p': 'enc:{}'.format(encpass),\n 'v': '1.12.0',\n 'c': 'beets',\n 'f': 'json'\n }\n else:\n return\n try:\n response = requests.get(url, params=payload)\n json = response.json()\n\n if response.status_code == 200 and \\\n json['subsonic-response']['status'] == \"ok\":\n count = json['subsonic-response']['scanStatus']['count']\n self._log.info(\n u'Updating Subsonic; scanning {0} tracks'.format(count))\n elif response.status_code == 
200 and \\\n json['subsonic-response']['status'] == \"failed\":\n error_message = json['subsonic-response']['error']['message']\n self._log.error(u'Error: {0}'.format(error_message))\n else:\n self._log.error(u'Error: {0}', json)\n except Exception as error:\n self._log.error(u'Error: {0}'.format(error))\n", "path": "beetsplug/subsonicupdate.py"}]} | 2,443 | 988 |
gh_patches_debug_40000 | rasdani/github-patches | git_diff | liberapay__liberapay.com-610 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Take throttling is too complicated
https://witches.town/@Alda/2122717
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `liberapay/models/_mixin_team.py`
Content:
```
1 """Teams are groups of participants.
2 """
3 from __future__ import division, print_function, unicode_literals
4
5 from collections import OrderedDict
6 from decimal import Decimal, ROUND_UP
7 from statistics import median
8
9 from liberapay.constants import D_CENT, D_INF, D_UNIT, D_ZERO
10
11
12 class MemberLimitReached(Exception): pass
13
14
15 class InactiveParticipantAdded(Exception): pass
16
17
18 class MixinTeam(object):
19
20 def invite(self, invitee, inviter):
21 assert self.kind == 'group'
22 with self.db.get_cursor() as c:
23 n_id = invitee.notify(
24 'team_invite',
25 team=self.username,
26 team_url=self.url(),
27 inviter=inviter.username,
28 )
29 payload = dict(invitee=invitee.id, notification_id=n_id)
30 self.add_event(c, 'invite', payload, inviter.id)
31
32 def add_member(self, member, cursor=None):
33 """Add a member to this team.
34 """
35 if len(self.get_current_takes()) == 149:
36 raise MemberLimitReached
37 if member.status != 'active':
38 raise InactiveParticipantAdded
39 self.set_take_for(member, D_ZERO, self, cursor=cursor)
40
41 def remove_all_members(self, cursor=None):
42 (cursor or self.db).run("""
43 INSERT INTO takes (ctime, member, team, amount, recorder) (
44 SELECT ctime, member, %(id)s, NULL, %(id)s
45 FROM current_takes
46 WHERE team=%(id)s
47 );
48 """, dict(id=self.id))
49
50 def member_of(self, team):
51 """Given a Participant object, return a boolean.
52 """
53 assert team.kind == 'group'
54 return self.db.one("""
55 SELECT true
56 FROM current_takes
57 WHERE team=%s AND member=%s
58 """, (team.id, self.id), default=False)
59
60 def get_takes_last_week(self):
61 """Get the users' nominal takes last week. Used in throttling.
62 """
63 assert self.kind == 'group'
64 takes = {t.member: t.amount for t in self.db.all("""
65
66 SELECT DISTINCT ON (member) member, amount, mtime
67 FROM takes
68 WHERE team=%s
69 AND mtime < (
70 SELECT ts_start
71 FROM paydays
72 WHERE ts_end > ts_start
73 ORDER BY ts_start DESC LIMIT 1
74 )
75 ORDER BY member, mtime DESC
76
77 """, (self.id,)) if t.amount}
78 return takes
79
80 def get_take_for(self, member):
81 """Return the nominal take for this member, or None.
82 """
83 return self.db.one(
84 "SELECT amount FROM current_takes WHERE member = %s AND team = %s",
85 (member.id, self.id)
86 )
87
88 def compute_max_this_week(self, member_id, last_week):
89 """2x the member's take last week, or the member's take last week + a
90 proportional share of the leftover, or a minimum based on last week's
91 median take, or 1.
92 """
93 sum_last_week = sum(last_week.values())
94 initial_leftover = self.receiving - sum_last_week
95 nonzero_last_week = [a for a in last_week.values() if a]
96 member_last_week = last_week.get(member_id, 0)
97 leftover_share = member_last_week / (sum_last_week or D_INF)
98 leftover_share = max(leftover_share, D_UNIT / self.nmembers)
99 return max(
100 member_last_week * 2,
101 member_last_week + initial_leftover * leftover_share,
102 median(nonzero_last_week or (0,)),
103 D_UNIT
104 )
105
106 def set_take_for(self, member, take, recorder, check_max=True, cursor=None):
107 """Sets member's take from the team pool.
108 """
109 assert self.kind == 'group'
110
111 if recorder.id != self.id:
112 cur_take = self.get_take_for(member)
113 if cur_take is None:
114 return None
115
116 if not isinstance(take, (None.__class__, Decimal)):
117 take = Decimal(take)
118
119 if take and check_max and take > 1:
120 last_week = self.get_takes_last_week()
121 max_this_week = self.compute_max_this_week(member.id, last_week)
122 if take > max_this_week:
123 take = max_this_week
124
125 with self.db.get_cursor(cursor) as cursor:
126 # Lock to avoid race conditions
127 cursor.run("LOCK TABLE takes IN EXCLUSIVE MODE")
128 # Compute the current takes
129 old_takes = self.compute_actual_takes(cursor)
130 # Insert the new take
131 cursor.run("""
132
133 INSERT INTO takes (ctime, member, team, amount, recorder)
134 VALUES ( COALESCE (( SELECT ctime
135 FROM takes
136 WHERE member=%(member)s
137 AND team=%(team)s
138 LIMIT 1
139 ), CURRENT_TIMESTAMP)
140 , %(member)s
141 , %(team)s
142 , %(amount)s
143 , %(recorder)s
144 )
145
146 """, dict(member=member.id, team=self.id, amount=take,
147 recorder=recorder.id))
148 # Compute the new takes
149 new_takes = self.compute_actual_takes(cursor)
150 # Update receiving amounts in the participants table
151 self.update_taking(old_takes, new_takes, cursor, member)
152 # Update is_funded on member's tips
153 member.update_giving(cursor)
154
155 return take
156
157 def update_taking(self, old_takes, new_takes, cursor=None, member=None):
158 """Update `taking` amounts based on the difference between `old_takes`
159 and `new_takes`.
160 """
161 for p_id in set(old_takes.keys()).union(new_takes.keys()):
162 old = old_takes.get(p_id, {}).get('actual_amount', D_ZERO)
163 new = new_takes.get(p_id, {}).get('actual_amount', D_ZERO)
164 diff = new - old
165 if diff != 0:
166 (cursor or self.db).run("""
167 UPDATE participants
168 SET taking = (taking + %(diff)s)
169 , receiving = (receiving + %(diff)s)
170 WHERE id=%(p_id)s
171 """, dict(p_id=p_id, diff=diff))
172 if member and p_id == member.id:
173 r = (cursor or self.db).one(
174 "SELECT taking, receiving FROM participants WHERE id = %s",
175 (p_id,)
176 )
177 member.set_attributes(**r._asdict())
178
179 def get_current_takes(self, cursor=None):
180 """Return a list of member takes for a team.
181 """
182 assert self.kind == 'group'
183 TAKES = """
184 SELECT p.id AS member_id, p.username AS member_name, p.avatar_url
185 , (p.mangopay_user_id IS NOT NULL) AS is_identified
186 , t.amount, t.ctime, t.mtime
187 FROM current_takes t
188 JOIN participants p ON p.id = member
189 WHERE t.team=%(team)s
190 ORDER BY p.username
191 """
192 records = (cursor or self.db).all(TAKES, dict(team=self.id))
193 return [r._asdict() for r in records]
194
195 def compute_actual_takes(self, cursor=None):
196 """Get the takes, compute the actual amounts, and return an OrderedDict.
197 """
198 actual_takes = OrderedDict()
199 nominal_takes = self.get_current_takes(cursor=cursor)
200 balance = self.receiving
201 total_takes = sum(t['amount'] for t in nominal_takes if t['is_identified'])
202 ratio = min(balance / total_takes, 1) if total_takes else 0
203 for take in nominal_takes:
204 nominal = take['nominal_take'] = take.pop('amount')
205 actual = take['actual_amount'] = min(
206 (nominal * ratio).quantize(D_CENT, rounding=ROUND_UP),
207 balance
208 ) if take['is_identified'] else D_ZERO
209 balance -= actual
210 actual_takes[take['member_id']] = take
211 actual_takes.leftover = balance
212 return actual_takes
213
214 @property
215 def nmembers(self):
216 assert self.kind == 'group'
217 return self.db.one("""
218 SELECT COUNT(*)
219 FROM current_takes
220 WHERE team=%s
221 """, (self.id,))
222
223 def get_members(self):
224 """Return an OrderedDict of member dicts.
225 """
226 takes = self.compute_actual_takes()
227 last_week = self.get_takes_last_week()
228 members = OrderedDict()
229 members.leftover = takes.leftover
230 for take in takes.values():
231 member = {}
232 m_id = member['id'] = take['member_id']
233 member['username'] = take['member_name']
234 member['nominal_take'] = take['nominal_take']
235 member['actual_amount'] = take['actual_amount']
236 member['last_week'] = last_week.get(m_id, D_ZERO)
237 member['max_this_week'] = self.compute_max_this_week(m_id, last_week)
238 members[member['id']] = member
239 return members
240
241 @property
242 def closed_by(self):
243 assert self.status == 'closed'
244 return self.db.one("""
245 SELECT member
246 FROM takes
247 WHERE team = %s
248 ORDER BY mtime DESC
249 LIMIT 1
250 """, (self.id,))
251
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/liberapay/models/_mixin_team.py b/liberapay/models/_mixin_team.py
--- a/liberapay/models/_mixin_team.py
+++ b/liberapay/models/_mixin_team.py
@@ -86,19 +86,21 @@
)
def compute_max_this_week(self, member_id, last_week):
- """2x the member's take last week, or the member's take last week + a
- proportional share of the leftover, or a minimum based on last week's
- median take, or 1.
+ """2x the member's take last week, or the member's take last week + the
+ leftover, or last week's median take, or 1.00, or infinity if the takes
+ were all zero last week or if throttling is disabled.
"""
+ if not self.throttle_takes:
+ return D_INF
sum_last_week = sum(last_week.values())
+ if sum_last_week == 0:
+ return D_INF
initial_leftover = self.receiving - sum_last_week
nonzero_last_week = [a for a in last_week.values() if a]
member_last_week = last_week.get(member_id, 0)
- leftover_share = member_last_week / (sum_last_week or D_INF)
- leftover_share = max(leftover_share, D_UNIT / self.nmembers)
return max(
member_last_week * 2,
- member_last_week + initial_leftover * leftover_share,
+ member_last_week + initial_leftover,
median(nonzero_last_week or (0,)),
D_UNIT
)
@@ -116,17 +118,17 @@
if not isinstance(take, (None.__class__, Decimal)):
take = Decimal(take)
- if take and check_max and take > 1:
- last_week = self.get_takes_last_week()
- max_this_week = self.compute_max_this_week(member.id, last_week)
- if take > max_this_week:
- take = max_this_week
-
with self.db.get_cursor(cursor) as cursor:
# Lock to avoid race conditions
cursor.run("LOCK TABLE takes IN EXCLUSIVE MODE")
# Compute the current takes
old_takes = self.compute_actual_takes(cursor)
+ # Throttle the new take, if there is more than one member
+ if take and check_max and len(old_takes) > 1 and take > 1:
+ last_week = self.get_takes_last_week()
+ max_this_week = self.compute_max_this_week(member.id, last_week)
+ if take > max_this_week:
+ take = max_this_week
# Insert the new take
cursor.run("""
@@ -234,7 +236,8 @@
member['nominal_take'] = take['nominal_take']
member['actual_amount'] = take['actual_amount']
member['last_week'] = last_week.get(m_id, D_ZERO)
- member['max_this_week'] = self.compute_max_this_week(m_id, last_week)
+ x = self.compute_max_this_week(m_id, last_week)
+ member['max_this_week'] = x if x.is_finite() else None
members[member['id']] = member
return members
| {"golden_diff": "diff --git a/liberapay/models/_mixin_team.py b/liberapay/models/_mixin_team.py\n--- a/liberapay/models/_mixin_team.py\n+++ b/liberapay/models/_mixin_team.py\n@@ -86,19 +86,21 @@\n )\n \n def compute_max_this_week(self, member_id, last_week):\n- \"\"\"2x the member's take last week, or the member's take last week + a\n- proportional share of the leftover, or a minimum based on last week's\n- median take, or 1.\n+ \"\"\"2x the member's take last week, or the member's take last week + the\n+ leftover, or last week's median take, or 1.00, or infinity if the takes\n+ were all zero last week or if throttling is disabled.\n \"\"\"\n+ if not self.throttle_takes:\n+ return D_INF\n sum_last_week = sum(last_week.values())\n+ if sum_last_week == 0:\n+ return D_INF\n initial_leftover = self.receiving - sum_last_week\n nonzero_last_week = [a for a in last_week.values() if a]\n member_last_week = last_week.get(member_id, 0)\n- leftover_share = member_last_week / (sum_last_week or D_INF)\n- leftover_share = max(leftover_share, D_UNIT / self.nmembers)\n return max(\n member_last_week * 2,\n- member_last_week + initial_leftover * leftover_share,\n+ member_last_week + initial_leftover,\n median(nonzero_last_week or (0,)),\n D_UNIT\n )\n@@ -116,17 +118,17 @@\n if not isinstance(take, (None.__class__, Decimal)):\n take = Decimal(take)\n \n- if take and check_max and take > 1:\n- last_week = self.get_takes_last_week()\n- max_this_week = self.compute_max_this_week(member.id, last_week)\n- if take > max_this_week:\n- take = max_this_week\n-\n with self.db.get_cursor(cursor) as cursor:\n # Lock to avoid race conditions\n cursor.run(\"LOCK TABLE takes IN EXCLUSIVE MODE\")\n # Compute the current takes\n old_takes = self.compute_actual_takes(cursor)\n+ # Throttle the new take, if there is more than one member\n+ if take and check_max and len(old_takes) > 1 and take > 1:\n+ last_week = self.get_takes_last_week()\n+ max_this_week = self.compute_max_this_week(member.id, last_week)\n+ if take > max_this_week:\n+ take = max_this_week\n # Insert the new take\n cursor.run(\"\"\"\n \n@@ -234,7 +236,8 @@\n member['nominal_take'] = take['nominal_take']\n member['actual_amount'] = take['actual_amount']\n member['last_week'] = last_week.get(m_id, D_ZERO)\n- member['max_this_week'] = self.compute_max_this_week(m_id, last_week)\n+ x = self.compute_max_this_week(m_id, last_week)\n+ member['max_this_week'] = x if x.is_finite() else None\n members[member['id']] = member\n return members\n", "issue": "Take throttling is too complicated\nhttps://witches.town/@Alda/2122717\n", "before_files": [{"content": "\"\"\"Teams are groups of participants.\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom decimal import Decimal, ROUND_UP\nfrom statistics import median\n\nfrom liberapay.constants import D_CENT, D_INF, D_UNIT, D_ZERO\n\n\nclass MemberLimitReached(Exception): pass\n\n\nclass InactiveParticipantAdded(Exception): pass\n\n\nclass MixinTeam(object):\n\n def invite(self, invitee, inviter):\n assert self.kind == 'group'\n with self.db.get_cursor() as c:\n n_id = invitee.notify(\n 'team_invite',\n team=self.username,\n team_url=self.url(),\n inviter=inviter.username,\n )\n payload = dict(invitee=invitee.id, notification_id=n_id)\n self.add_event(c, 'invite', payload, inviter.id)\n\n def add_member(self, member, cursor=None):\n \"\"\"Add a member to this team.\n \"\"\"\n if len(self.get_current_takes()) == 149:\n raise MemberLimitReached\n if 
member.status != 'active':\n raise InactiveParticipantAdded\n self.set_take_for(member, D_ZERO, self, cursor=cursor)\n\n def remove_all_members(self, cursor=None):\n (cursor or self.db).run(\"\"\"\n INSERT INTO takes (ctime, member, team, amount, recorder) (\n SELECT ctime, member, %(id)s, NULL, %(id)s\n FROM current_takes\n WHERE team=%(id)s\n );\n \"\"\", dict(id=self.id))\n\n def member_of(self, team):\n \"\"\"Given a Participant object, return a boolean.\n \"\"\"\n assert team.kind == 'group'\n return self.db.one(\"\"\"\n SELECT true\n FROM current_takes\n WHERE team=%s AND member=%s\n \"\"\", (team.id, self.id), default=False)\n\n def get_takes_last_week(self):\n \"\"\"Get the users' nominal takes last week. Used in throttling.\n \"\"\"\n assert self.kind == 'group'\n takes = {t.member: t.amount for t in self.db.all(\"\"\"\n\n SELECT DISTINCT ON (member) member, amount, mtime\n FROM takes\n WHERE team=%s\n AND mtime < (\n SELECT ts_start\n FROM paydays\n WHERE ts_end > ts_start\n ORDER BY ts_start DESC LIMIT 1\n )\n ORDER BY member, mtime DESC\n\n \"\"\", (self.id,)) if t.amount}\n return takes\n\n def get_take_for(self, member):\n \"\"\"Return the nominal take for this member, or None.\n \"\"\"\n return self.db.one(\n \"SELECT amount FROM current_takes WHERE member = %s AND team = %s\",\n (member.id, self.id)\n )\n\n def compute_max_this_week(self, member_id, last_week):\n \"\"\"2x the member's take last week, or the member's take last week + a\n proportional share of the leftover, or a minimum based on last week's\n median take, or 1.\n \"\"\"\n sum_last_week = sum(last_week.values())\n initial_leftover = self.receiving - sum_last_week\n nonzero_last_week = [a for a in last_week.values() if a]\n member_last_week = last_week.get(member_id, 0)\n leftover_share = member_last_week / (sum_last_week or D_INF)\n leftover_share = max(leftover_share, D_UNIT / self.nmembers)\n return max(\n member_last_week * 2,\n member_last_week + initial_leftover * leftover_share,\n median(nonzero_last_week or (0,)),\n D_UNIT\n )\n\n def set_take_for(self, member, take, recorder, check_max=True, cursor=None):\n \"\"\"Sets member's take from the team pool.\n \"\"\"\n assert self.kind == 'group'\n\n if recorder.id != self.id:\n cur_take = self.get_take_for(member)\n if cur_take is None:\n return None\n\n if not isinstance(take, (None.__class__, Decimal)):\n take = Decimal(take)\n\n if take and check_max and take > 1:\n last_week = self.get_takes_last_week()\n max_this_week = self.compute_max_this_week(member.id, last_week)\n if take > max_this_week:\n take = max_this_week\n\n with self.db.get_cursor(cursor) as cursor:\n # Lock to avoid race conditions\n cursor.run(\"LOCK TABLE takes IN EXCLUSIVE MODE\")\n # Compute the current takes\n old_takes = self.compute_actual_takes(cursor)\n # Insert the new take\n cursor.run(\"\"\"\n\n INSERT INTO takes (ctime, member, team, amount, recorder)\n VALUES ( COALESCE (( SELECT ctime\n FROM takes\n WHERE member=%(member)s\n AND team=%(team)s\n LIMIT 1\n ), CURRENT_TIMESTAMP)\n , %(member)s\n , %(team)s\n , %(amount)s\n , %(recorder)s\n )\n\n \"\"\", dict(member=member.id, team=self.id, amount=take,\n recorder=recorder.id))\n # Compute the new takes\n new_takes = self.compute_actual_takes(cursor)\n # Update receiving amounts in the participants table\n self.update_taking(old_takes, new_takes, cursor, member)\n # Update is_funded on member's tips\n member.update_giving(cursor)\n\n return take\n\n def update_taking(self, old_takes, new_takes, cursor=None, member=None):\n 
\"\"\"Update `taking` amounts based on the difference between `old_takes`\n and `new_takes`.\n \"\"\"\n for p_id in set(old_takes.keys()).union(new_takes.keys()):\n old = old_takes.get(p_id, {}).get('actual_amount', D_ZERO)\n new = new_takes.get(p_id, {}).get('actual_amount', D_ZERO)\n diff = new - old\n if diff != 0:\n (cursor or self.db).run(\"\"\"\n UPDATE participants\n SET taking = (taking + %(diff)s)\n , receiving = (receiving + %(diff)s)\n WHERE id=%(p_id)s\n \"\"\", dict(p_id=p_id, diff=diff))\n if member and p_id == member.id:\n r = (cursor or self.db).one(\n \"SELECT taking, receiving FROM participants WHERE id = %s\",\n (p_id,)\n )\n member.set_attributes(**r._asdict())\n\n def get_current_takes(self, cursor=None):\n \"\"\"Return a list of member takes for a team.\n \"\"\"\n assert self.kind == 'group'\n TAKES = \"\"\"\n SELECT p.id AS member_id, p.username AS member_name, p.avatar_url\n , (p.mangopay_user_id IS NOT NULL) AS is_identified\n , t.amount, t.ctime, t.mtime\n FROM current_takes t\n JOIN participants p ON p.id = member\n WHERE t.team=%(team)s\n ORDER BY p.username\n \"\"\"\n records = (cursor or self.db).all(TAKES, dict(team=self.id))\n return [r._asdict() for r in records]\n\n def compute_actual_takes(self, cursor=None):\n \"\"\"Get the takes, compute the actual amounts, and return an OrderedDict.\n \"\"\"\n actual_takes = OrderedDict()\n nominal_takes = self.get_current_takes(cursor=cursor)\n balance = self.receiving\n total_takes = sum(t['amount'] for t in nominal_takes if t['is_identified'])\n ratio = min(balance / total_takes, 1) if total_takes else 0\n for take in nominal_takes:\n nominal = take['nominal_take'] = take.pop('amount')\n actual = take['actual_amount'] = min(\n (nominal * ratio).quantize(D_CENT, rounding=ROUND_UP),\n balance\n ) if take['is_identified'] else D_ZERO\n balance -= actual\n actual_takes[take['member_id']] = take\n actual_takes.leftover = balance\n return actual_takes\n\n @property\n def nmembers(self):\n assert self.kind == 'group'\n return self.db.one(\"\"\"\n SELECT COUNT(*)\n FROM current_takes\n WHERE team=%s\n \"\"\", (self.id,))\n\n def get_members(self):\n \"\"\"Return an OrderedDict of member dicts.\n \"\"\"\n takes = self.compute_actual_takes()\n last_week = self.get_takes_last_week()\n members = OrderedDict()\n members.leftover = takes.leftover\n for take in takes.values():\n member = {}\n m_id = member['id'] = take['member_id']\n member['username'] = take['member_name']\n member['nominal_take'] = take['nominal_take']\n member['actual_amount'] = take['actual_amount']\n member['last_week'] = last_week.get(m_id, D_ZERO)\n member['max_this_week'] = self.compute_max_this_week(m_id, last_week)\n members[member['id']] = member\n return members\n\n @property\n def closed_by(self):\n assert self.status == 'closed'\n return self.db.one(\"\"\"\n SELECT member\n FROM takes\n WHERE team = %s\n ORDER BY mtime DESC\n LIMIT 1\n \"\"\", (self.id,))\n", "path": "liberapay/models/_mixin_team.py"}], "after_files": [{"content": "\"\"\"Teams are groups of participants.\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom decimal import Decimal, ROUND_UP\nfrom statistics import median\n\nfrom liberapay.constants import D_CENT, D_INF, D_UNIT, D_ZERO\n\n\nclass MemberLimitReached(Exception): pass\n\n\nclass InactiveParticipantAdded(Exception): pass\n\n\nclass MixinTeam(object):\n\n def invite(self, invitee, inviter):\n assert self.kind == 'group'\n with self.db.get_cursor() as c:\n 
n_id = invitee.notify(\n 'team_invite',\n team=self.username,\n team_url=self.url(),\n inviter=inviter.username,\n )\n payload = dict(invitee=invitee.id, notification_id=n_id)\n self.add_event(c, 'invite', payload, inviter.id)\n\n def add_member(self, member, cursor=None):\n \"\"\"Add a member to this team.\n \"\"\"\n if len(self.get_current_takes()) == 149:\n raise MemberLimitReached\n if member.status != 'active':\n raise InactiveParticipantAdded\n self.set_take_for(member, D_ZERO, self, cursor=cursor)\n\n def remove_all_members(self, cursor=None):\n (cursor or self.db).run(\"\"\"\n INSERT INTO takes (ctime, member, team, amount, recorder) (\n SELECT ctime, member, %(id)s, NULL, %(id)s\n FROM current_takes\n WHERE team=%(id)s\n );\n \"\"\", dict(id=self.id))\n\n def member_of(self, team):\n \"\"\"Given a Participant object, return a boolean.\n \"\"\"\n assert team.kind == 'group'\n return self.db.one(\"\"\"\n SELECT true\n FROM current_takes\n WHERE team=%s AND member=%s\n \"\"\", (team.id, self.id), default=False)\n\n def get_takes_last_week(self):\n \"\"\"Get the users' nominal takes last week. Used in throttling.\n \"\"\"\n assert self.kind == 'group'\n takes = {t.member: t.amount for t in self.db.all(\"\"\"\n\n SELECT DISTINCT ON (member) member, amount, mtime\n FROM takes\n WHERE team=%s\n AND mtime < (\n SELECT ts_start\n FROM paydays\n WHERE ts_end > ts_start\n ORDER BY ts_start DESC LIMIT 1\n )\n ORDER BY member, mtime DESC\n\n \"\"\", (self.id,)) if t.amount}\n return takes\n\n def get_take_for(self, member):\n \"\"\"Return the nominal take for this member, or None.\n \"\"\"\n return self.db.one(\n \"SELECT amount FROM current_takes WHERE member = %s AND team = %s\",\n (member.id, self.id)\n )\n\n def compute_max_this_week(self, member_id, last_week):\n \"\"\"2x the member's take last week, or the member's take last week + the\n leftover, or last week's median take, or 1.00, or infinity if the takes\n were all zero last week or if throttling is disabled.\n \"\"\"\n if not self.throttle_takes:\n return D_INF\n sum_last_week = sum(last_week.values())\n if sum_last_week == 0:\n return D_INF\n initial_leftover = self.receiving - sum_last_week\n nonzero_last_week = [a for a in last_week.values() if a]\n member_last_week = last_week.get(member_id, 0)\n return max(\n member_last_week * 2,\n member_last_week + initial_leftover,\n median(nonzero_last_week or (0,)),\n D_UNIT\n )\n\n def set_take_for(self, member, take, recorder, check_max=True, cursor=None):\n \"\"\"Sets member's take from the team pool.\n \"\"\"\n assert self.kind == 'group'\n\n if recorder.id != self.id:\n cur_take = self.get_take_for(member)\n if cur_take is None:\n return None\n\n if not isinstance(take, (None.__class__, Decimal)):\n take = Decimal(take)\n\n with self.db.get_cursor(cursor) as cursor:\n # Lock to avoid race conditions\n cursor.run(\"LOCK TABLE takes IN EXCLUSIVE MODE\")\n # Compute the current takes\n old_takes = self.compute_actual_takes(cursor)\n # Throttle the new take, if there is more than one member\n if take and check_max and len(old_takes) > 1 and take > 1:\n last_week = self.get_takes_last_week()\n max_this_week = self.compute_max_this_week(member.id, last_week)\n if take > max_this_week:\n take = max_this_week\n # Insert the new take\n cursor.run(\"\"\"\n\n INSERT INTO takes (ctime, member, team, amount, recorder)\n VALUES ( COALESCE (( SELECT ctime\n FROM takes\n WHERE member=%(member)s\n AND team=%(team)s\n LIMIT 1\n ), CURRENT_TIMESTAMP)\n , %(member)s\n , %(team)s\n , %(amount)s\n , 
%(recorder)s\n )\n\n \"\"\", dict(member=member.id, team=self.id, amount=take,\n recorder=recorder.id))\n # Compute the new takes\n new_takes = self.compute_actual_takes(cursor)\n # Update receiving amounts in the participants table\n self.update_taking(old_takes, new_takes, cursor, member)\n # Update is_funded on member's tips\n member.update_giving(cursor)\n\n return take\n\n def update_taking(self, old_takes, new_takes, cursor=None, member=None):\n \"\"\"Update `taking` amounts based on the difference between `old_takes`\n and `new_takes`.\n \"\"\"\n for p_id in set(old_takes.keys()).union(new_takes.keys()):\n old = old_takes.get(p_id, {}).get('actual_amount', D_ZERO)\n new = new_takes.get(p_id, {}).get('actual_amount', D_ZERO)\n diff = new - old\n if diff != 0:\n (cursor or self.db).run(\"\"\"\n UPDATE participants\n SET taking = (taking + %(diff)s)\n , receiving = (receiving + %(diff)s)\n WHERE id=%(p_id)s\n \"\"\", dict(p_id=p_id, diff=diff))\n if member and p_id == member.id:\n r = (cursor or self.db).one(\n \"SELECT taking, receiving FROM participants WHERE id = %s\",\n (p_id,)\n )\n member.set_attributes(**r._asdict())\n\n def get_current_takes(self, cursor=None):\n \"\"\"Return a list of member takes for a team.\n \"\"\"\n assert self.kind == 'group'\n TAKES = \"\"\"\n SELECT p.id AS member_id, p.username AS member_name, p.avatar_url\n , (p.mangopay_user_id IS NOT NULL) AS is_identified\n , t.amount, t.ctime, t.mtime\n FROM current_takes t\n JOIN participants p ON p.id = member\n WHERE t.team=%(team)s\n ORDER BY p.username\n \"\"\"\n records = (cursor or self.db).all(TAKES, dict(team=self.id))\n return [r._asdict() for r in records]\n\n def compute_actual_takes(self, cursor=None):\n \"\"\"Get the takes, compute the actual amounts, and return an OrderedDict.\n \"\"\"\n actual_takes = OrderedDict()\n nominal_takes = self.get_current_takes(cursor=cursor)\n balance = self.receiving\n total_takes = sum(t['amount'] for t in nominal_takes if t['is_identified'])\n ratio = min(balance / total_takes, 1) if total_takes else 0\n for take in nominal_takes:\n nominal = take['nominal_take'] = take.pop('amount')\n actual = take['actual_amount'] = min(\n (nominal * ratio).quantize(D_CENT, rounding=ROUND_UP),\n balance\n ) if take['is_identified'] else D_ZERO\n balance -= actual\n actual_takes[take['member_id']] = take\n actual_takes.leftover = balance\n return actual_takes\n\n @property\n def nmembers(self):\n assert self.kind == 'group'\n return self.db.one(\"\"\"\n SELECT COUNT(*)\n FROM current_takes\n WHERE team=%s\n \"\"\", (self.id,))\n\n def get_members(self):\n \"\"\"Return an OrderedDict of member dicts.\n \"\"\"\n takes = self.compute_actual_takes()\n last_week = self.get_takes_last_week()\n members = OrderedDict()\n members.leftover = takes.leftover\n for take in takes.values():\n member = {}\n m_id = member['id'] = take['member_id']\n member['username'] = take['member_name']\n member['nominal_take'] = take['nominal_take']\n member['actual_amount'] = take['actual_amount']\n member['last_week'] = last_week.get(m_id, D_ZERO)\n x = self.compute_max_this_week(m_id, last_week)\n member['max_this_week'] = x if x.is_finite() else None\n members[member['id']] = member\n return members\n\n @property\n def closed_by(self):\n assert self.status == 'closed'\n return self.db.one(\"\"\"\n SELECT member\n FROM takes\n WHERE team = %s\n ORDER BY mtime DESC\n LIMIT 1\n \"\"\", (self.id,))\n", "path": "liberapay/models/_mixin_team.py"}]} | 2,974 | 734 |
gh_patches_debug_685 | rasdani/github-patches | git_diff | pytorch__TensorRT-1849 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Test Suite for `torch.compile` backend Partitioning/Lowering Phases
- Add robust test suite for `torch.compile` backend, ensuring each phase functions correctly
- Add general-purpose utilities for test expansion as the backend evolves
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py/torch_tensorrt/dynamo/torch_compile/utils.py`
Content:
```
1 import torch
2
3 from typing import Any, Union, Sequence, Dict
4 from torch_tensorrt import _Input, Device
5
6
7 def prepare_inputs(
8 inputs: Union[_Input.Input, torch.Tensor, Sequence, Dict],
9 device: torch.device = torch.device("cuda"),
10 ) -> Any:
11 if isinstance(inputs, _Input.Input):
12 if isinstance(inputs.shape, dict):
13 return inputs.example_tensor(optimization_profile_field="opt_shape").to(
14 device
15 )
16 else:
17 return inputs.example_tensor().to(device)
18
19 elif isinstance(inputs, torch.Tensor):
20 return inputs
21
22 elif isinstance(inputs, list):
23 prepared_input = list()
24
25 for input_obj in inputs:
26 prepared_input.append(prepare_inputs(input_obj))
27
28 return prepared_input
29
30 elif isinstance(inputs, tuple):
31 prepared_input = list()
32
33 for input_obj in inputs:
34 prepared_input.append(prepare_inputs(input_obj))
35
36 return tuple(prepared_input)
37
38 elif isinstance(inputs, dict):
39 prepared_input = dict()
40
41 for key, input_obj in inputs.items():
42 prepared_input[key] = prepare_inputs(input_obj)
43
44 return prepared_input
45
46 else:
47 raise ValueError(
48 f"Invalid input type {type(inputs)} encountered in the torch_compile input parsing. "
49 + "Allowed input types: {torch_tensorrt.Input, torch.Tensor, list, tuple, dict}"
50 )
51
52
53 def prepare_device(device: Union[Device, torch.device]) -> torch.device:
54 if isinstance(device, Device):
55 if device.gpu_id != -1:
56 device = torch.device(device.gpu_id)
57 else:
58 raise ValueError("Invalid GPU ID provided for the CUDA device provided")
59
60 elif isinstance(device, torch.device):
61 device = device
62
63 else:
64 raise ValueError(
65 "Invalid device provided. Supported options: torch.device | torch_tensorrt.Device"
66 )
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/py/torch_tensorrt/dynamo/torch_compile/utils.py b/py/torch_tensorrt/dynamo/torch_compile/utils.py
--- a/py/torch_tensorrt/dynamo/torch_compile/utils.py
+++ b/py/torch_tensorrt/dynamo/torch_compile/utils.py
@@ -64,3 +64,5 @@
raise ValueError(
"Invalid device provided. Supported options: torch.device | torch_tensorrt.Device"
)
+
+ return device
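As an aside (not part of the dataset row): the one-line fix above is easy to pin down with a regression test. A minimal sketch, assuming only the module path shown in the file listing and nothing else about the project's test harness:

```python
# Hedged sketch: before the patch, prepare_device fell off the end and returned None.
import torch
from torch_tensorrt.dynamo.torch_compile.utils import prepare_device

def test_prepare_device_returns_torch_device():
    result = prepare_device(torch.device("cuda"))
    assert isinstance(result, torch.device)
```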
| {"golden_diff": "diff --git a/py/torch_tensorrt/dynamo/torch_compile/utils.py b/py/torch_tensorrt/dynamo/torch_compile/utils.py\n--- a/py/torch_tensorrt/dynamo/torch_compile/utils.py\n+++ b/py/torch_tensorrt/dynamo/torch_compile/utils.py\n@@ -64,3 +64,5 @@\n raise ValueError(\n \"Invalid device provided. Supported options: torch.device | torch_tensorrt.Device\"\n )\n+\n+ return device\n", "issue": "Add Test Suite for `torch.compile` backend Partitioning/Lowering Phases\n- Add robust test suite for `torch.compile` backend, ensuring each phase functions correctly\r\n- Add general-purpose utilities for test expansion as the backend evolves\n", "before_files": [{"content": "import torch\n\nfrom typing import Any, Union, Sequence, Dict\nfrom torch_tensorrt import _Input, Device\n\n\ndef prepare_inputs(\n inputs: Union[_Input.Input, torch.Tensor, Sequence, Dict],\n device: torch.device = torch.device(\"cuda\"),\n) -> Any:\n if isinstance(inputs, _Input.Input):\n if isinstance(inputs.shape, dict):\n return inputs.example_tensor(optimization_profile_field=\"opt_shape\").to(\n device\n )\n else:\n return inputs.example_tensor().to(device)\n\n elif isinstance(inputs, torch.Tensor):\n return inputs\n\n elif isinstance(inputs, list):\n prepared_input = list()\n\n for input_obj in inputs:\n prepared_input.append(prepare_inputs(input_obj))\n\n return prepared_input\n\n elif isinstance(inputs, tuple):\n prepared_input = list()\n\n for input_obj in inputs:\n prepared_input.append(prepare_inputs(input_obj))\n\n return tuple(prepared_input)\n\n elif isinstance(inputs, dict):\n prepared_input = dict()\n\n for key, input_obj in inputs.items():\n prepared_input[key] = prepare_inputs(input_obj)\n\n return prepared_input\n\n else:\n raise ValueError(\n f\"Invalid input type {type(inputs)} encountered in the torch_compile input parsing. \"\n + \"Allowed input types: {torch_tensorrt.Input, torch.Tensor, list, tuple, dict}\"\n )\n\n\ndef prepare_device(device: Union[Device, torch.device]) -> torch.device:\n if isinstance(device, Device):\n if device.gpu_id != -1:\n device = torch.device(device.gpu_id)\n else:\n raise ValueError(\"Invalid GPU ID provided for the CUDA device provided\")\n\n elif isinstance(device, torch.device):\n device = device\n\n else:\n raise ValueError(\n \"Invalid device provided. 
Supported options: torch.device | torch_tensorrt.Device\"\n )\n", "path": "py/torch_tensorrt/dynamo/torch_compile/utils.py"}], "after_files": [{"content": "import torch\n\nfrom typing import Any, Union, Sequence, Dict\nfrom torch_tensorrt import _Input, Device\n\n\ndef prepare_inputs(\n inputs: Union[_Input.Input, torch.Tensor, Sequence, Dict],\n device: torch.device = torch.device(\"cuda\"),\n) -> Any:\n if isinstance(inputs, _Input.Input):\n if isinstance(inputs.shape, dict):\n return inputs.example_tensor(optimization_profile_field=\"opt_shape\").to(\n device\n )\n else:\n return inputs.example_tensor().to(device)\n\n elif isinstance(inputs, torch.Tensor):\n return inputs\n\n elif isinstance(inputs, list):\n prepared_input = list()\n\n for input_obj in inputs:\n prepared_input.append(prepare_inputs(input_obj))\n\n return prepared_input\n\n elif isinstance(inputs, tuple):\n prepared_input = list()\n\n for input_obj in inputs:\n prepared_input.append(prepare_inputs(input_obj))\n\n return tuple(prepared_input)\n\n elif isinstance(inputs, dict):\n prepared_input = dict()\n\n for key, input_obj in inputs.items():\n prepared_input[key] = prepare_inputs(input_obj)\n\n return prepared_input\n\n else:\n raise ValueError(\n f\"Invalid input type {type(inputs)} encountered in the torch_compile input parsing. \"\n + \"Allowed input types: {torch_tensorrt.Input, torch.Tensor, list, tuple, dict}\"\n )\n\n\ndef prepare_device(device: Union[Device, torch.device]) -> torch.device:\n if isinstance(device, Device):\n if device.gpu_id != -1:\n device = torch.device(device.gpu_id)\n else:\n raise ValueError(\"Invalid GPU ID provided for the CUDA device provided\")\n\n elif isinstance(device, torch.device):\n device = device\n\n else:\n raise ValueError(\n \"Invalid device provided. Supported options: torch.device | torch_tensorrt.Device\"\n )\n\n return device\n", "path": "py/torch_tensorrt/dynamo/torch_compile/utils.py"}]} | 834 | 101 |
gh_patches_debug_38916 | rasdani/github-patches | git_diff | scrapy__scrapy-1944 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scrapy 1.1.0 RC3 - exception thrown with invalid ssl certificate
Hello,
I am crawling sometimes websites with an invalid ssl certificate. For example, Scrapy 1.1.0 RC3 fails to open when I do:
> scrapy shell https://www.directoriosanitario.com/directorio
> or
> scrapy shell https://saobinv.5go.cc/top/
and throws the following exception:
> twisted.web._newclient.ResponseNeverReceived: [<twisted.python.failure.Failure service_identity.exceptions.VerificationError: VerificationError(errors=[DNSMismatch(mismatched_id=DNS_ID(hostname=b'www.directoriosanitario.com'))])>]
I tried it with Scrapy 1.0.5 on python 2.7 and the spider opens but warns with:
> AttributeError: 'NoneType' object has no attribute 'failVerification'
Is there a way to force the spider to open with Scrapy 1.1.0 RC3?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/core/downloader/tls.py`
Content:
```
1 from OpenSSL import SSL
2
3
4 METHOD_SSLv3 = 'SSLv3'
5 METHOD_TLS = 'TLS'
6 METHOD_TLSv10 = 'TLSv1.0'
7 METHOD_TLSv11 = 'TLSv1.1'
8 METHOD_TLSv12 = 'TLSv1.2'
9
10 openssl_methods = {
11 METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)
12 METHOD_SSLv3: SSL.SSLv3_METHOD, # SSL 3 (NOT recommended)
13 METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only
14 METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only
15 METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only
16 }
17
```
Path: `scrapy/core/downloader/contextfactory.py`
Content:
```
1 from OpenSSL import SSL
2 from twisted.internet.ssl import ClientContextFactory
3
4 try:
5
6 from zope.interface.declarations import implementer
7
8 # the following should be available from Twisted 14.0.0
9 from twisted.internet.ssl import optionsForClientTLS, CertificateOptions, platformTrust
10 from twisted.internet._sslverify import ClientTLSOptions
11 from twisted.web.client import BrowserLikePolicyForHTTPS
12 from twisted.web.iweb import IPolicyForHTTPS
13
14 @implementer(IPolicyForHTTPS)
15 class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):
16 """
17 Non-peer-certificate verifying HTTPS context factory
18
19 Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)
20 which allows TLS protocol negotiation
21
22 'A TLS/SSL connection established with [this method] may
23 understand the SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols.'
24 """
25
26 def __init__(self, method=SSL.SSLv23_METHOD, *args, **kwargs):
27 super(ScrapyClientContextFactory, self).__init__(*args, **kwargs)
28 self._ssl_method = method
29
30 def getCertificateOptions(self):
31 # setting verify=True will require you to provide CAs
32 # to verify against; in other words: it's not that simple
33
34 # backward-compatible SSL/TLS method:
35 #
36 # * this will respect `method` attribute in often recommended
37 # `ScrapyClientContextFactory` subclass
38 # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)
39 #
40 # * getattr() for `_ssl_method` attribute for context factories
41 # not calling super(..., self).__init__
42 return CertificateOptions(verify=False,
43 method=getattr(self, 'method',
44 getattr(self, '_ssl_method', None)))
45
46 # kept for old-style HTTP/1.0 downloader context twisted calls,
47 # e.g. connectSSL()
48 def getContext(self, hostname=None, port=None):
49 return self.getCertificateOptions().getContext()
50
51 def creatorForNetloc(self, hostname, port):
52 return ClientTLSOptions(hostname.decode("ascii"), self.getContext())
53
54
55 @implementer(IPolicyForHTTPS)
56 class BrowserLikeContextFactory(ScrapyClientContextFactory):
57 """
58 Twisted-recommended context factory for web clients.
59
60 Quoting http://twistedmatrix.com/documents/current/api/twisted.web.client.Agent.html:
61 "The default is to use a BrowserLikePolicyForHTTPS,
62 so unless you have special requirements you can leave this as-is."
63
64 creatorForNetloc() is the same as BrowserLikePolicyForHTTPS
65 except this context factory allows setting the TLS/SSL method to use.
66
67 Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)
68 which allows TLS protocol negotiation.
69 """
70 def creatorForNetloc(self, hostname, port):
71
72 # trustRoot set to platformTrust() will use the platform's root CAs.
73 #
74 # This means that a website like https://www.cacert.org will be rejected
75 # by default, since CAcert.org CA certificate is seldom shipped.
76 return optionsForClientTLS(hostname.decode("ascii"),
77 trustRoot=platformTrust(),
78 extraCertificateOptions={
79 'method': self._ssl_method,
80 })
81
82 except ImportError:
83
84 class ScrapyClientContextFactory(ClientContextFactory):
85 "A SSL context factory which is more permissive against SSL bugs."
86 # see https://github.com/scrapy/scrapy/issues/82
87 # and https://github.com/scrapy/scrapy/issues/26
88 # and https://github.com/scrapy/scrapy/issues/981
89
90 def __init__(self, method=SSL.SSLv23_METHOD):
91 self.method = method
92
93 def getContext(self, hostname=None, port=None):
94 ctx = ClientContextFactory.getContext(self)
95 # Enable all workarounds to SSL bugs as documented by
96 # http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
97 ctx.set_options(SSL.OP_ALL)
98 return ctx
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/core/downloader/contextfactory.py b/scrapy/core/downloader/contextfactory.py
--- a/scrapy/core/downloader/contextfactory.py
+++ b/scrapy/core/downloader/contextfactory.py
@@ -6,11 +6,16 @@
from zope.interface.declarations import implementer
# the following should be available from Twisted 14.0.0
- from twisted.internet.ssl import optionsForClientTLS, CertificateOptions, platformTrust
- from twisted.internet._sslverify import ClientTLSOptions
+ from twisted.internet.ssl import (optionsForClientTLS,
+ CertificateOptions,
+ platformTrust)
+
from twisted.web.client import BrowserLikePolicyForHTTPS
from twisted.web.iweb import IPolicyForHTTPS
+ from scrapy.core.downloader.tls import ScrapyClientTLSOptions
+
+
@implementer(IPolicyForHTTPS)
class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):
"""
@@ -49,7 +54,7 @@
return self.getCertificateOptions().getContext()
def creatorForNetloc(self, hostname, port):
- return ClientTLSOptions(hostname.decode("ascii"), self.getContext())
+ return ScrapyClientTLSOptions(hostname.decode("ascii"), self.getContext())
@implementer(IPolicyForHTTPS)
diff --git a/scrapy/core/downloader/tls.py b/scrapy/core/downloader/tls.py
--- a/scrapy/core/downloader/tls.py
+++ b/scrapy/core/downloader/tls.py
@@ -1,6 +1,9 @@
+import logging
from OpenSSL import SSL
+logger = logging.getLogger(__name__)
+
METHOD_SSLv3 = 'SSLv3'
METHOD_TLS = 'TLS'
METHOD_TLSv10 = 'TLSv1.0'
@@ -14,3 +17,36 @@
METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only
METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only
}
+
+# ClientTLSOptions requires a recent-enough version of Twisted
+try:
+
+ # taken from twisted/twisted/internet/_sslverify.py
+ try:
+ from OpenSSL.SSL import SSL_CB_HANDSHAKE_DONE, SSL_CB_HANDSHAKE_START
+ except ImportError:
+ SSL_CB_HANDSHAKE_START = 0x10
+ SSL_CB_HANDSHAKE_DONE = 0x20
+
+ from twisted.internet._sslverify import (ClientTLSOptions,
+ _maybeSetHostNameIndication,
+ verifyHostname,
+ VerificationError)
+
+ class ScrapyClientTLSOptions(ClientTLSOptions):
+ # same as Twisted's ClientTLSOptions,
+ # except that VerificationError is caught
+ # and doesn't close the connection
+ def _identityVerifyingInfoCallback(self, connection, where, ret):
+ if where & SSL_CB_HANDSHAKE_START:
+ _maybeSetHostNameIndication(connection, self._hostnameBytes)
+ elif where & SSL_CB_HANDSHAKE_DONE:
+ try:
+ verifyHostname(connection, self._hostnameASCII)
+ except VerificationError as e:
+ logger.warning(e)
+
+except ImportError:
+ # ImportError should not matter for older Twisted versions
+ # as the above is not used in the fallback ScrapyClientContextFactory
+ pass
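A brief usage note, stated as an assumption about how this factory is wired up rather than as part of the patch: Scrapy selects the client context factory through the `DOWNLOADER_CLIENTCONTEXTFACTORY` setting, so a project that wants strict certificate checking can point that setting at `BrowserLikeContextFactory` while the permissive `ScrapyClientContextFactory` above remains the default. A minimal settings sketch:

```python
# settings.py sketch (hedged): class path taken from the module shown above.
DOWNLOADER_CLIENTCONTEXTFACTORY = (
    "scrapy.core.downloader.contextfactory.BrowserLikeContextFactory"
)
```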
| {"golden_diff": "diff --git a/scrapy/core/downloader/contextfactory.py b/scrapy/core/downloader/contextfactory.py\n--- a/scrapy/core/downloader/contextfactory.py\n+++ b/scrapy/core/downloader/contextfactory.py\n@@ -6,11 +6,16 @@\n from zope.interface.declarations import implementer\n \n # the following should be available from Twisted 14.0.0\n- from twisted.internet.ssl import optionsForClientTLS, CertificateOptions, platformTrust\n- from twisted.internet._sslverify import ClientTLSOptions\n+ from twisted.internet.ssl import (optionsForClientTLS,\n+ CertificateOptions,\n+ platformTrust)\n+\n from twisted.web.client import BrowserLikePolicyForHTTPS\n from twisted.web.iweb import IPolicyForHTTPS\n \n+ from scrapy.core.downloader.tls import ScrapyClientTLSOptions\n+\n+\n @implementer(IPolicyForHTTPS)\n class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):\n \"\"\"\n@@ -49,7 +54,7 @@\n return self.getCertificateOptions().getContext()\n \n def creatorForNetloc(self, hostname, port):\n- return ClientTLSOptions(hostname.decode(\"ascii\"), self.getContext())\n+ return ScrapyClientTLSOptions(hostname.decode(\"ascii\"), self.getContext())\n \n \n @implementer(IPolicyForHTTPS)\ndiff --git a/scrapy/core/downloader/tls.py b/scrapy/core/downloader/tls.py\n--- a/scrapy/core/downloader/tls.py\n+++ b/scrapy/core/downloader/tls.py\n@@ -1,6 +1,9 @@\n+import logging\n from OpenSSL import SSL\n \n \n+logger = logging.getLogger(__name__)\n+\n METHOD_SSLv3 = 'SSLv3'\n METHOD_TLS = 'TLS'\n METHOD_TLSv10 = 'TLSv1.0'\n@@ -14,3 +17,36 @@\n METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only\n METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only\n }\n+\n+# ClientTLSOptions requires a recent-enough version of Twisted\n+try:\n+\n+ # taken from twisted/twisted/internet/_sslverify.py\n+ try:\n+ from OpenSSL.SSL import SSL_CB_HANDSHAKE_DONE, SSL_CB_HANDSHAKE_START\n+ except ImportError:\n+ SSL_CB_HANDSHAKE_START = 0x10\n+ SSL_CB_HANDSHAKE_DONE = 0x20\n+\n+ from twisted.internet._sslverify import (ClientTLSOptions,\n+ _maybeSetHostNameIndication,\n+ verifyHostname,\n+ VerificationError)\n+\n+ class ScrapyClientTLSOptions(ClientTLSOptions):\n+ # same as Twisted's ClientTLSOptions,\n+ # except that VerificationError is caught\n+ # and doesn't close the connection\n+ def _identityVerifyingInfoCallback(self, connection, where, ret):\n+ if where & SSL_CB_HANDSHAKE_START:\n+ _maybeSetHostNameIndication(connection, self._hostnameBytes)\n+ elif where & SSL_CB_HANDSHAKE_DONE:\n+ try:\n+ verifyHostname(connection, self._hostnameASCII)\n+ except VerificationError as e:\n+ logger.warning(e)\n+\n+except ImportError:\n+ # ImportError should not matter for older Twisted versions\n+ # as the above is not used in the fallback ScrapyClientContextFactory\n+ pass\n", "issue": "Scrapy 1.1.0 RC3 - exception thrown with invalid ssl certificate\nHello,\n\nI am crawling sometimes websites with an invalid ssl certificate. 
For example, Scrapy 1.1.0 RC3 fails to open when I do:\n\n> scrapy shell https://www.directoriosanitario.com/directorio\n> or\n> scrapy shell https://saobinv.5go.cc/top/\n\nand throws the following exception:\n\n> twisted.web._newclient.ResponseNeverReceived: [<twisted.python.failure.Failure service_identity.exceptions.VerificationError: VerificationError(errors=[DNSMismatch(mismatched_id=DNS_ID(hostname=b'www.directoriosanitario.com'))])>]\n\nI tried it with Scrapy 1.0.5 on python 2.7 and the spider opens but warns with: \n\n> AttributeError: 'NoneType' object has no attribute 'failVerification'\n\nIs there a way to force the spider to open with Scrapy 1.1.0 RC3?\n\n", "before_files": [{"content": "from OpenSSL import SSL\n\n\nMETHOD_SSLv3 = 'SSLv3'\nMETHOD_TLS = 'TLS'\nMETHOD_TLSv10 = 'TLSv1.0'\nMETHOD_TLSv11 = 'TLSv1.1'\nMETHOD_TLSv12 = 'TLSv1.2'\n\nopenssl_methods = {\n METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)\n METHOD_SSLv3: SSL.SSLv3_METHOD, # SSL 3 (NOT recommended)\n METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only\n METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only\n METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only\n}\n", "path": "scrapy/core/downloader/tls.py"}, {"content": "from OpenSSL import SSL\nfrom twisted.internet.ssl import ClientContextFactory\n\ntry:\n\n from zope.interface.declarations import implementer\n\n # the following should be available from Twisted 14.0.0\n from twisted.internet.ssl import optionsForClientTLS, CertificateOptions, platformTrust\n from twisted.internet._sslverify import ClientTLSOptions\n from twisted.web.client import BrowserLikePolicyForHTTPS\n from twisted.web.iweb import IPolicyForHTTPS\n\n @implementer(IPolicyForHTTPS)\n class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):\n \"\"\"\n Non-peer-certificate verifying HTTPS context factory\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation\n\n 'A TLS/SSL connection established with [this method] may\n understand the SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols.'\n \"\"\"\n\n def __init__(self, method=SSL.SSLv23_METHOD, *args, **kwargs):\n super(ScrapyClientContextFactory, self).__init__(*args, **kwargs)\n self._ssl_method = method\n\n def getCertificateOptions(self):\n # setting verify=True will require you to provide CAs\n # to verify against; in other words: it's not that simple\n\n # backward-compatible SSL/TLS method:\n #\n # * this will respect `method` attribute in often recommended\n # `ScrapyClientContextFactory` subclass\n # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)\n #\n # * getattr() for `_ssl_method` attribute for context factories\n # not calling super(..., self).__init__\n return CertificateOptions(verify=False,\n method=getattr(self, 'method',\n getattr(self, '_ssl_method', None)))\n\n # kept for old-style HTTP/1.0 downloader context twisted calls,\n # e.g. 
connectSSL()\n def getContext(self, hostname=None, port=None):\n return self.getCertificateOptions().getContext()\n\n def creatorForNetloc(self, hostname, port):\n return ClientTLSOptions(hostname.decode(\"ascii\"), self.getContext())\n\n\n @implementer(IPolicyForHTTPS)\n class BrowserLikeContextFactory(ScrapyClientContextFactory):\n \"\"\"\n Twisted-recommended context factory for web clients.\n\n Quoting http://twistedmatrix.com/documents/current/api/twisted.web.client.Agent.html:\n \"The default is to use a BrowserLikePolicyForHTTPS,\n so unless you have special requirements you can leave this as-is.\"\n\n creatorForNetloc() is the same as BrowserLikePolicyForHTTPS\n except this context factory allows setting the TLS/SSL method to use.\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation.\n \"\"\"\n def creatorForNetloc(self, hostname, port):\n\n # trustRoot set to platformTrust() will use the platform's root CAs.\n #\n # This means that a website like https://www.cacert.org will be rejected\n # by default, since CAcert.org CA certificate is seldom shipped.\n return optionsForClientTLS(hostname.decode(\"ascii\"),\n trustRoot=platformTrust(),\n extraCertificateOptions={\n 'method': self._ssl_method,\n })\n\nexcept ImportError:\n\n class ScrapyClientContextFactory(ClientContextFactory):\n \"A SSL context factory which is more permissive against SSL bugs.\"\n # see https://github.com/scrapy/scrapy/issues/82\n # and https://github.com/scrapy/scrapy/issues/26\n # and https://github.com/scrapy/scrapy/issues/981\n\n def __init__(self, method=SSL.SSLv23_METHOD):\n self.method = method\n\n def getContext(self, hostname=None, port=None):\n ctx = ClientContextFactory.getContext(self)\n # Enable all workarounds to SSL bugs as documented by\n # http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html\n ctx.set_options(SSL.OP_ALL)\n return ctx\n", "path": "scrapy/core/downloader/contextfactory.py"}], "after_files": [{"content": "import logging\nfrom OpenSSL import SSL\n\n\nlogger = logging.getLogger(__name__)\n\nMETHOD_SSLv3 = 'SSLv3'\nMETHOD_TLS = 'TLS'\nMETHOD_TLSv10 = 'TLSv1.0'\nMETHOD_TLSv11 = 'TLSv1.1'\nMETHOD_TLSv12 = 'TLSv1.2'\n\nopenssl_methods = {\n METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)\n METHOD_SSLv3: SSL.SSLv3_METHOD, # SSL 3 (NOT recommended)\n METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only\n METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only\n METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only\n}\n\n# ClientTLSOptions requires a recent-enough version of Twisted\ntry:\n\n # taken from twisted/twisted/internet/_sslverify.py\n try:\n from OpenSSL.SSL import SSL_CB_HANDSHAKE_DONE, SSL_CB_HANDSHAKE_START\n except ImportError:\n SSL_CB_HANDSHAKE_START = 0x10\n SSL_CB_HANDSHAKE_DONE = 0x20\n\n from twisted.internet._sslverify import (ClientTLSOptions,\n _maybeSetHostNameIndication,\n verifyHostname,\n VerificationError)\n\n class ScrapyClientTLSOptions(ClientTLSOptions):\n # same as Twisted's ClientTLSOptions,\n # except that VerificationError is caught\n # and doesn't close the connection\n def _identityVerifyingInfoCallback(self, connection, where, ret):\n if where & SSL_CB_HANDSHAKE_START:\n _maybeSetHostNameIndication(connection, self._hostnameBytes)\n elif where & SSL_CB_HANDSHAKE_DONE:\n try:\n verifyHostname(connection, self._hostnameASCII)\n except VerificationError as e:\n logger.warning(e)\n\nexcept ImportError:\n # ImportError should not matter for older Twisted 
versions\n # as the above is not used in the fallback ScrapyClientContextFactory\n pass\n", "path": "scrapy/core/downloader/tls.py"}, {"content": "from OpenSSL import SSL\nfrom twisted.internet.ssl import ClientContextFactory\n\ntry:\n\n from zope.interface.declarations import implementer\n\n # the following should be available from Twisted 14.0.0\n from twisted.internet.ssl import (optionsForClientTLS,\n CertificateOptions,\n platformTrust)\n\n from twisted.web.client import BrowserLikePolicyForHTTPS\n from twisted.web.iweb import IPolicyForHTTPS\n\n from scrapy.core.downloader.tls import ScrapyClientTLSOptions\n\n\n @implementer(IPolicyForHTTPS)\n class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):\n \"\"\"\n Non-peer-certificate verifying HTTPS context factory\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation\n\n 'A TLS/SSL connection established with [this method] may\n understand the SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols.'\n \"\"\"\n\n def __init__(self, method=SSL.SSLv23_METHOD, *args, **kwargs):\n super(ScrapyClientContextFactory, self).__init__(*args, **kwargs)\n self._ssl_method = method\n\n def getCertificateOptions(self):\n # setting verify=True will require you to provide CAs\n # to verify against; in other words: it's not that simple\n\n # backward-compatible SSL/TLS method:\n #\n # * this will respect `method` attribute in often recommended\n # `ScrapyClientContextFactory` subclass\n # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)\n #\n # * getattr() for `_ssl_method` attribute for context factories\n # not calling super(..., self).__init__\n return CertificateOptions(verify=False,\n method=getattr(self, 'method',\n getattr(self, '_ssl_method', None)))\n\n # kept for old-style HTTP/1.0 downloader context twisted calls,\n # e.g. 
connectSSL()\n def getContext(self, hostname=None, port=None):\n return self.getCertificateOptions().getContext()\n\n def creatorForNetloc(self, hostname, port):\n return ScrapyClientTLSOptions(hostname.decode(\"ascii\"), self.getContext())\n\n\n @implementer(IPolicyForHTTPS)\n class BrowserLikeContextFactory(ScrapyClientContextFactory):\n \"\"\"\n Twisted-recommended context factory for web clients.\n\n Quoting http://twistedmatrix.com/documents/current/api/twisted.web.client.Agent.html:\n \"The default is to use a BrowserLikePolicyForHTTPS,\n so unless you have special requirements you can leave this as-is.\"\n\n creatorForNetloc() is the same as BrowserLikePolicyForHTTPS\n except this context factory allows setting the TLS/SSL method to use.\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation.\n \"\"\"\n def creatorForNetloc(self, hostname, port):\n\n # trustRoot set to platformTrust() will use the platform's root CAs.\n #\n # This means that a website like https://www.cacert.org will be rejected\n # by default, since CAcert.org CA certificate is seldom shipped.\n return optionsForClientTLS(hostname.decode(\"ascii\"),\n trustRoot=platformTrust(),\n extraCertificateOptions={\n 'method': self._ssl_method,\n })\n\nexcept ImportError:\n\n class ScrapyClientContextFactory(ClientContextFactory):\n \"A SSL context factory which is more permissive against SSL bugs.\"\n # see https://github.com/scrapy/scrapy/issues/82\n # and https://github.com/scrapy/scrapy/issues/26\n # and https://github.com/scrapy/scrapy/issues/981\n\n def __init__(self, method=SSL.SSLv23_METHOD):\n self.method = method\n\n def getContext(self, hostname=None, port=None):\n ctx = ClientContextFactory.getContext(self)\n # Enable all workarounds to SSL bugs as documented by\n # http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html\n ctx.set_options(SSL.OP_ALL)\n return ctx\n", "path": "scrapy/core/downloader/contextfactory.py"}]} | 1,799 | 758 |
gh_patches_debug_34682 | rasdani/github-patches | git_diff | meltano__meltano-7983 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
feat: consider clearing the catalog cache when `--full-refresh` present
This has come up in slack a bunch of times but most recently in https://meltano.slack.com/archives/C01TCRBBJD7/p1689207554179589
Lots of users run into this where the catalog is being cached, so the output they're seeing is not as expected; we end up recommending that they clear their `.meltano/run/tap-x` directory so it regenerates the catalog. If someone runs with the `--full-refresh` flag, it's probably because something in the source changed, so they need to re-run the replication, but the cached catalog is blocking those changes from propagating.
Related to:
- https://github.com/meltano/meltano/issues/6292
- https://github.com/meltano/meltano/issues/6763
- https://github.com/meltano/meltano/issues/2856
- https://github.com/meltano/meltano/issues/2848
--- END ISSUE ---
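(Editorial aside, not part of the original issue: to make the requested behavior concrete, here is a rough sketch of the cache-clearing step described above. The function name and the `*.json` glob are assumptions based on the `.meltano/run/tap-x` directory mentioned in the issue, not Meltano's actual API.)

```python
# Illustrative sketch only -- not Meltano's real implementation.
from pathlib import Path

def clear_catalog_cache(project_root: Path, tap_name: str) -> None:
    run_dir = project_root / ".meltano" / "run" / tap_name
    for cached in run_dir.glob("*.json"):
        cached.unlink()  # force the catalog to be regenerated on the next run
```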
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/meltano/cli/__init__.py`
Content:
```
1 """Main entry point for the meltano CLI."""
2
3 from __future__ import annotations
4
5 import logging
6 import os
7 import sys
8 import typing as t
9
10 from meltano.cli import ( # noqa: WPS235
11 add,
12 config,
13 discovery,
14 docs,
15 dragon,
16 elt,
17 environment,
18 hub,
19 initialize,
20 install,
21 invoke,
22 job,
23 lock,
24 remove,
25 run,
26 schedule,
27 schema,
28 select,
29 state,
30 upgrade,
31 validate,
32 )
33 from meltano.cli import compile as compile_module
34 from meltano.cli.cli import cli
35 from meltano.cli.utils import CliError
36 from meltano.cloud.cli import cloud
37 from meltano.core.error import MeltanoError, ProjectReadonly
38 from meltano.core.logging import setup_logging
39
40 if t.TYPE_CHECKING:
41 from meltano.core.tracking.tracker import Tracker
42
43 cli.add_command(add.add)
44 cli.add_command(cloud)
45 cli.add_command(compile_module.compile_command)
46 cli.add_command(config.config)
47 cli.add_command(discovery.discover)
48 cli.add_command(docs.docs)
49 cli.add_command(dragon.dragon)
50 cli.add_command(elt.elt)
51 cli.add_command(environment.meltano_environment)
52 cli.add_command(hub.hub)
53 cli.add_command(initialize.init)
54 cli.add_command(install.install)
55 cli.add_command(invoke.invoke)
56 cli.add_command(lock.lock)
57 cli.add_command(remove.remove)
58 cli.add_command(schedule.schedule)
59 cli.add_command(schema.schema)
60 cli.add_command(select.select)
61 cli.add_command(state.meltano_state)
62 cli.add_command(upgrade.upgrade)
63 cli.add_command(run.run)
64 cli.add_command(validate.test)
65 cli.add_command(job.job)
66
67 # Holds the exit code for error reporting during process exiting. In
68 # particular, a function registered by the `atexit` module uses this value.
69 exit_code: None | int = None
70
71 atexit_handler_registered = False
72 exit_code_reported = False
73 exit_event_tracker: Tracker | None = None
74
75 setup_logging()
76
77 logger = logging.getLogger(__name__)
78
79 troubleshooting_message = """\
80 Need help fixing this problem? Visit http://melta.no/ for troubleshooting steps, or to
81 join our friendly Slack community.
82 """
83
84
85 def handle_meltano_error(error: MeltanoError) -> t.NoReturn:
86 """Handle a MeltanoError.
87
88 Args:
89 error: The error to handle.
90
91 Raises:
92 CliError: always.
93 """
94 raise CliError(str(error)) from error
95
96
97 def _run_cli():
98 """Run the Meltano CLI.
99
100 Raises:
101 KeyboardInterrupt: if caught.
102 """
103 try:
104 try: # noqa: WPS225, WPS505
105 cli(obj={"project": None})
106 except ProjectReadonly as err:
107 raise CliError(
108 f"The requested action could not be completed: {err}",
109 ) from err
110 except KeyboardInterrupt: # noqa: WPS329
111 raise
112 except MeltanoError as err:
113 handle_meltano_error(err)
114 except Exception as err:
115 raise CliError(f"{troubleshooting_message}\n{err}") from err
116 except CliError as cli_error:
117 cli_error.print()
118 sys.exit(1)
119
120
121 def main():
122 """Entry point for the meltano CLI."""
123 # Mark the current process as executed via the CLI
124 os.environ["MELTANO_JOB_TRIGGER"] = os.getenv("MELTANO_JOB_TRIGGER", "cli")
125 try:
126 _run_cli()
127 finally:
128 global exit_code
129 ex = sys.exc_info()[1]
130 if ex is None:
131 exit_code = 0 # noqa: WPS442
132 elif isinstance(ex, SystemExit):
133 exit_code = 0 if ex.code is None else ex.code # noqa: WPS442
134 else:
135 exit_code = 1 # noqa: WPS442
136 # Track the exit event now to provide more details via the exception context.
137 # We assume the process will exit practically immediately after `main` returns.
138 if exit_event_tracker is not None:
139 exit_event_tracker.track_exit_event()
140
```
Path: `src/meltano/cli/discovery.py`
Content:
```
1 """Discoverable Plugins CLI."""
2
3 from __future__ import annotations
4
5 import typing as t
6
7 import click
8
9 from meltano.cli.params import pass_project
10 from meltano.cli.utils import InstrumentedCmd
11 from meltano.core.plugin import PluginType
12
13 if t.TYPE_CHECKING:
14 from meltano.core.project import Project
15
16
17 @click.command(
18 cls=InstrumentedCmd,
19 short_help="List the available plugins in Meltano Hub and their variants.",
20 )
21 @click.argument(
22 "plugin_type",
23 type=click.Choice([*list(PluginType), "all"]),
24 default="all",
25 )
26 @pass_project()
27 def discover( # noqa: C901
28 project: Project,
29 plugin_type: str,
30 ):
31 """
32 List the available discoverable plugins and their variants.
33
34 \b\nRead more at https://docs.meltano.com/reference/command-line-interface#discover
35 """
36 if plugin_type == "all":
37 plugin_types = [
38 plugin_type for plugin_type in list(PluginType) if plugin_type.discoverable
39 ]
40 else:
41 plugin_types = [PluginType.from_cli_argument(plugin_type)]
42
43 for idx, discovered_plugin_type in enumerate(plugin_types):
44 if idx > 0:
45 click.echo()
46
47 click.secho(f"{str(discovered_plugin_type).capitalize()}", fg="green")
48
49 try:
50 plugin_type_index = project.hub_service.get_plugins_of_type(
51 discovered_plugin_type,
52 )
53 except Exception:
54 click.secho(
55 f"Can not retrieve {discovered_plugin_type} from the Hub",
56 fg="yellow",
57 err=True,
58 )
59 continue
60
61 for plugin_name, plugin in plugin_type_index.items():
62 click.echo(plugin_name, nl=False)
63
64 if len(plugin.variants) > 1:
65 click.echo(f", variants: {', '.join(plugin.variant_labels)}")
66 else:
67 click.echo()
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/meltano/cli/__init__.py b/src/meltano/cli/__init__.py
--- a/src/meltano/cli/__init__.py
+++ b/src/meltano/cli/__init__.py
@@ -10,7 +10,6 @@
from meltano.cli import ( # noqa: WPS235
add,
config,
- discovery,
docs,
dragon,
elt,
@@ -44,7 +43,6 @@
cli.add_command(cloud)
cli.add_command(compile_module.compile_command)
cli.add_command(config.config)
-cli.add_command(discovery.discover)
cli.add_command(docs.docs)
cli.add_command(dragon.dragon)
cli.add_command(elt.elt)
diff --git a/src/meltano/cli/discovery.py b/src/meltano/cli/discovery.py
deleted file mode 100644
--- a/src/meltano/cli/discovery.py
+++ /dev/null
@@ -1,67 +0,0 @@
-"""Discoverable Plugins CLI."""
-
-from __future__ import annotations
-
-import typing as t
-
-import click
-
-from meltano.cli.params import pass_project
-from meltano.cli.utils import InstrumentedCmd
-from meltano.core.plugin import PluginType
-
-if t.TYPE_CHECKING:
- from meltano.core.project import Project
-
-
[email protected](
- cls=InstrumentedCmd,
- short_help="List the available plugins in Meltano Hub and their variants.",
-)
[email protected](
- "plugin_type",
- type=click.Choice([*list(PluginType), "all"]),
- default="all",
-)
-@pass_project()
-def discover( # noqa: C901
- project: Project,
- plugin_type: str,
-):
- """
- List the available discoverable plugins and their variants.
-
- \b\nRead more at https://docs.meltano.com/reference/command-line-interface#discover
- """
- if plugin_type == "all":
- plugin_types = [
- plugin_type for plugin_type in list(PluginType) if plugin_type.discoverable
- ]
- else:
- plugin_types = [PluginType.from_cli_argument(plugin_type)]
-
- for idx, discovered_plugin_type in enumerate(plugin_types):
- if idx > 0:
- click.echo()
-
- click.secho(f"{str(discovered_plugin_type).capitalize()}", fg="green")
-
- try:
- plugin_type_index = project.hub_service.get_plugins_of_type(
- discovered_plugin_type,
- )
- except Exception:
- click.secho(
- f"Can not retrieve {discovered_plugin_type} from the Hub",
- fg="yellow",
- err=True,
- )
- continue
-
- for plugin_name, plugin in plugin_type_index.items():
- click.echo(plugin_name, nl=False)
-
- if len(plugin.variants) > 1:
- click.echo(f", variants: {', '.join(plugin.variant_labels)}")
- else:
- click.echo()
| {"golden_diff": "diff --git a/src/meltano/cli/__init__.py b/src/meltano/cli/__init__.py\n--- a/src/meltano/cli/__init__.py\n+++ b/src/meltano/cli/__init__.py\n@@ -10,7 +10,6 @@\n from meltano.cli import ( # noqa: WPS235\n add,\n config,\n- discovery,\n docs,\n dragon,\n elt,\n@@ -44,7 +43,6 @@\n cli.add_command(cloud)\n cli.add_command(compile_module.compile_command)\n cli.add_command(config.config)\n-cli.add_command(discovery.discover)\n cli.add_command(docs.docs)\n cli.add_command(dragon.dragon)\n cli.add_command(elt.elt)\ndiff --git a/src/meltano/cli/discovery.py b/src/meltano/cli/discovery.py\ndeleted file mode 100644\n--- a/src/meltano/cli/discovery.py\n+++ /dev/null\n@@ -1,67 +0,0 @@\n-\"\"\"Discoverable Plugins CLI.\"\"\"\n-\n-from __future__ import annotations\n-\n-import typing as t\n-\n-import click\n-\n-from meltano.cli.params import pass_project\n-from meltano.cli.utils import InstrumentedCmd\n-from meltano.core.plugin import PluginType\n-\n-if t.TYPE_CHECKING:\n- from meltano.core.project import Project\n-\n-\[email protected](\n- cls=InstrumentedCmd,\n- short_help=\"List the available plugins in Meltano Hub and their variants.\",\n-)\[email protected](\n- \"plugin_type\",\n- type=click.Choice([*list(PluginType), \"all\"]),\n- default=\"all\",\n-)\n-@pass_project()\n-def discover( # noqa: C901\n- project: Project,\n- plugin_type: str,\n-):\n- \"\"\"\n- List the available discoverable plugins and their variants.\n-\n- \\b\\nRead more at https://docs.meltano.com/reference/command-line-interface#discover\n- \"\"\"\n- if plugin_type == \"all\":\n- plugin_types = [\n- plugin_type for plugin_type in list(PluginType) if plugin_type.discoverable\n- ]\n- else:\n- plugin_types = [PluginType.from_cli_argument(plugin_type)]\n-\n- for idx, discovered_plugin_type in enumerate(plugin_types):\n- if idx > 0:\n- click.echo()\n-\n- click.secho(f\"{str(discovered_plugin_type).capitalize()}\", fg=\"green\")\n-\n- try:\n- plugin_type_index = project.hub_service.get_plugins_of_type(\n- discovered_plugin_type,\n- )\n- except Exception:\n- click.secho(\n- f\"Can not retrieve {discovered_plugin_type} from the Hub\",\n- fg=\"yellow\",\n- err=True,\n- )\n- continue\n-\n- for plugin_name, plugin in plugin_type_index.items():\n- click.echo(plugin_name, nl=False)\n-\n- if len(plugin.variants) > 1:\n- click.echo(f\", variants: {', '.join(plugin.variant_labels)}\")\n- else:\n- click.echo()\n", "issue": "feat: consider clearing the catalog cache when `--full-refresh` present\nThis has come up in slack a bunch of times but most recently in https://meltano.slack.com/archives/C01TCRBBJD7/p1689207554179589\r\n\r\nLots of users run into this where the catalog is being cached so the output theyre seeing is not as expected, we end up recommending that they clear their `.meltano/run/tap-x` directory so it regenerates the catalog. 
If someone runs with the `--full-refresh` flag its probably because something in the source changed so they need to re-run the replication but the cached catalog is blocking those changes from propogating.\r\n\r\n\r\nRelated to:\r\n- https://github.com/meltano/meltano/issues/6292\r\n- https://github.com/meltano/meltano/issues/6763\r\n- https://github.com/meltano/meltano/issues/2856\r\n- https://github.com/meltano/meltano/issues/2848\r\n\r\n\n", "before_files": [{"content": "\"\"\"Main entry point for the meltano CLI.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport os\nimport sys\nimport typing as t\n\nfrom meltano.cli import ( # noqa: WPS235\n add,\n config,\n discovery,\n docs,\n dragon,\n elt,\n environment,\n hub,\n initialize,\n install,\n invoke,\n job,\n lock,\n remove,\n run,\n schedule,\n schema,\n select,\n state,\n upgrade,\n validate,\n)\nfrom meltano.cli import compile as compile_module\nfrom meltano.cli.cli import cli\nfrom meltano.cli.utils import CliError\nfrom meltano.cloud.cli import cloud\nfrom meltano.core.error import MeltanoError, ProjectReadonly\nfrom meltano.core.logging import setup_logging\n\nif t.TYPE_CHECKING:\n from meltano.core.tracking.tracker import Tracker\n\ncli.add_command(add.add)\ncli.add_command(cloud)\ncli.add_command(compile_module.compile_command)\ncli.add_command(config.config)\ncli.add_command(discovery.discover)\ncli.add_command(docs.docs)\ncli.add_command(dragon.dragon)\ncli.add_command(elt.elt)\ncli.add_command(environment.meltano_environment)\ncli.add_command(hub.hub)\ncli.add_command(initialize.init)\ncli.add_command(install.install)\ncli.add_command(invoke.invoke)\ncli.add_command(lock.lock)\ncli.add_command(remove.remove)\ncli.add_command(schedule.schedule)\ncli.add_command(schema.schema)\ncli.add_command(select.select)\ncli.add_command(state.meltano_state)\ncli.add_command(upgrade.upgrade)\ncli.add_command(run.run)\ncli.add_command(validate.test)\ncli.add_command(job.job)\n\n# Holds the exit code for error reporting during process exiting. In\n# particular, a function registered by the `atexit` module uses this value.\nexit_code: None | int = None\n\natexit_handler_registered = False\nexit_code_reported = False\nexit_event_tracker: Tracker | None = None\n\nsetup_logging()\n\nlogger = logging.getLogger(__name__)\n\ntroubleshooting_message = \"\"\"\\\nNeed help fixing this problem? 
Visit http://melta.no/ for troubleshooting steps, or to\njoin our friendly Slack community.\n\"\"\"\n\n\ndef handle_meltano_error(error: MeltanoError) -> t.NoReturn:\n \"\"\"Handle a MeltanoError.\n\n Args:\n error: The error to handle.\n\n Raises:\n CliError: always.\n \"\"\"\n raise CliError(str(error)) from error\n\n\ndef _run_cli():\n \"\"\"Run the Meltano CLI.\n\n Raises:\n KeyboardInterrupt: if caught.\n \"\"\"\n try:\n try: # noqa: WPS225, WPS505\n cli(obj={\"project\": None})\n except ProjectReadonly as err:\n raise CliError(\n f\"The requested action could not be completed: {err}\",\n ) from err\n except KeyboardInterrupt: # noqa: WPS329\n raise\n except MeltanoError as err:\n handle_meltano_error(err)\n except Exception as err:\n raise CliError(f\"{troubleshooting_message}\\n{err}\") from err\n except CliError as cli_error:\n cli_error.print()\n sys.exit(1)\n\n\ndef main():\n \"\"\"Entry point for the meltano CLI.\"\"\"\n # Mark the current process as executed via the CLI\n os.environ[\"MELTANO_JOB_TRIGGER\"] = os.getenv(\"MELTANO_JOB_TRIGGER\", \"cli\")\n try:\n _run_cli()\n finally:\n global exit_code\n ex = sys.exc_info()[1]\n if ex is None:\n exit_code = 0 # noqa: WPS442\n elif isinstance(ex, SystemExit):\n exit_code = 0 if ex.code is None else ex.code # noqa: WPS442\n else:\n exit_code = 1 # noqa: WPS442\n # Track the exit event now to provide more details via the exception context.\n # We assume the process will exit practically immediately after `main` returns.\n if exit_event_tracker is not None:\n exit_event_tracker.track_exit_event()\n", "path": "src/meltano/cli/__init__.py"}, {"content": "\"\"\"Discoverable Plugins CLI.\"\"\"\n\nfrom __future__ import annotations\n\nimport typing as t\n\nimport click\n\nfrom meltano.cli.params import pass_project\nfrom meltano.cli.utils import InstrumentedCmd\nfrom meltano.core.plugin import PluginType\n\nif t.TYPE_CHECKING:\n from meltano.core.project import Project\n\n\[email protected](\n cls=InstrumentedCmd,\n short_help=\"List the available plugins in Meltano Hub and their variants.\",\n)\[email protected](\n \"plugin_type\",\n type=click.Choice([*list(PluginType), \"all\"]),\n default=\"all\",\n)\n@pass_project()\ndef discover( # noqa: C901\n project: Project,\n plugin_type: str,\n):\n \"\"\"\n List the available discoverable plugins and their variants.\n\n \\b\\nRead more at https://docs.meltano.com/reference/command-line-interface#discover\n \"\"\"\n if plugin_type == \"all\":\n plugin_types = [\n plugin_type for plugin_type in list(PluginType) if plugin_type.discoverable\n ]\n else:\n plugin_types = [PluginType.from_cli_argument(plugin_type)]\n\n for idx, discovered_plugin_type in enumerate(plugin_types):\n if idx > 0:\n click.echo()\n\n click.secho(f\"{str(discovered_plugin_type).capitalize()}\", fg=\"green\")\n\n try:\n plugin_type_index = project.hub_service.get_plugins_of_type(\n discovered_plugin_type,\n )\n except Exception:\n click.secho(\n f\"Can not retrieve {discovered_plugin_type} from the Hub\",\n fg=\"yellow\",\n err=True,\n )\n continue\n\n for plugin_name, plugin in plugin_type_index.items():\n click.echo(plugin_name, nl=False)\n\n if len(plugin.variants) > 1:\n click.echo(f\", variants: {', '.join(plugin.variant_labels)}\")\n else:\n click.echo()\n", "path": "src/meltano/cli/discovery.py"}], "after_files": [{"content": "\"\"\"Main entry point for the meltano CLI.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport os\nimport sys\nimport typing as t\n\nfrom meltano.cli import ( # noqa: WPS235\n 
add,\n config,\n docs,\n dragon,\n elt,\n environment,\n hub,\n initialize,\n install,\n invoke,\n job,\n lock,\n remove,\n run,\n schedule,\n schema,\n select,\n state,\n upgrade,\n validate,\n)\nfrom meltano.cli import compile as compile_module\nfrom meltano.cli.cli import cli\nfrom meltano.cli.utils import CliError\nfrom meltano.cloud.cli import cloud\nfrom meltano.core.error import MeltanoError, ProjectReadonly\nfrom meltano.core.logging import setup_logging\n\nif t.TYPE_CHECKING:\n from meltano.core.tracking.tracker import Tracker\n\ncli.add_command(add.add)\ncli.add_command(cloud)\ncli.add_command(compile_module.compile_command)\ncli.add_command(config.config)\ncli.add_command(docs.docs)\ncli.add_command(dragon.dragon)\ncli.add_command(elt.elt)\ncli.add_command(environment.meltano_environment)\ncli.add_command(hub.hub)\ncli.add_command(initialize.init)\ncli.add_command(install.install)\ncli.add_command(invoke.invoke)\ncli.add_command(lock.lock)\ncli.add_command(remove.remove)\ncli.add_command(schedule.schedule)\ncli.add_command(schema.schema)\ncli.add_command(select.select)\ncli.add_command(state.meltano_state)\ncli.add_command(upgrade.upgrade)\ncli.add_command(run.run)\ncli.add_command(validate.test)\ncli.add_command(job.job)\n\n# Holds the exit code for error reporting during process exiting. In\n# particular, a function registered by the `atexit` module uses this value.\nexit_code: None | int = None\n\natexit_handler_registered = False\nexit_code_reported = False\nexit_event_tracker: Tracker | None = None\n\nsetup_logging()\n\nlogger = logging.getLogger(__name__)\n\ntroubleshooting_message = \"\"\"\\\nNeed help fixing this problem? Visit http://melta.no/ for troubleshooting steps, or to\njoin our friendly Slack community.\n\"\"\"\n\n\ndef handle_meltano_error(error: MeltanoError) -> t.NoReturn:\n \"\"\"Handle a MeltanoError.\n\n Args:\n error: The error to handle.\n\n Raises:\n CliError: always.\n \"\"\"\n raise CliError(str(error)) from error\n\n\ndef _run_cli():\n \"\"\"Run the Meltano CLI.\n\n Raises:\n KeyboardInterrupt: if caught.\n \"\"\"\n try:\n try: # noqa: WPS225, WPS505\n cli(obj={\"project\": None})\n except ProjectReadonly as err:\n raise CliError(\n f\"The requested action could not be completed: {err}\",\n ) from err\n except KeyboardInterrupt: # noqa: WPS329\n raise\n except MeltanoError as err:\n handle_meltano_error(err)\n except Exception as err:\n raise CliError(f\"{troubleshooting_message}\\n{err}\") from err\n except CliError as cli_error:\n cli_error.print()\n sys.exit(1)\n\n\ndef main():\n \"\"\"Entry point for the meltano CLI.\"\"\"\n # Mark the current process as executed via the CLI\n os.environ[\"MELTANO_JOB_TRIGGER\"] = os.getenv(\"MELTANO_JOB_TRIGGER\", \"cli\")\n try:\n _run_cli()\n finally:\n global exit_code\n ex = sys.exc_info()[1]\n if ex is None:\n exit_code = 0 # noqa: WPS442\n elif isinstance(ex, SystemExit):\n exit_code = 0 if ex.code is None else ex.code # noqa: WPS442\n else:\n exit_code = 1 # noqa: WPS442\n # Track the exit event now to provide more details via the exception context.\n # We assume the process will exit practically immediately after `main` returns.\n if exit_event_tracker is not None:\n exit_event_tracker.track_exit_event()\n", "path": "src/meltano/cli/__init__.py"}, {"content": null, "path": "src/meltano/cli/discovery.py"}]} | 2,256 | 675 |
gh_patches_debug_5633 | rasdani/github-patches | git_diff | conda-forge__conda-smithy-16 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Calling conda smithy github-create with --user raises exception
When I invoke `conda smithy` to create a GitHub repo (after setting the token correctly) I get this:
``` python
$ conda smithy github-create --user mwcraig mrjob-feedstock/
Traceback (most recent call last):
File "/Users/mcraig/miniconda/bin/conda-smithy", line 9, in <module>
load_entry_point('conda-smithy==0.1.0.dev0', 'console_scripts', 'conda-smithy')()
File "/Users/mcraig/miniconda/lib/python2.7/site-packages/conda_smithy-0.1.0.dev0-py2.7.egg/conda_smithy/conda_smithy.py", line 164, in main
args.subcommand_func(args)
File "/Users/mcraig/miniconda/lib/python2.7/site-packages/conda_smithy-0.1.0.dev0-py2.7.egg/conda_smithy/conda_smithy.py", line 94, in __call__
user_or_org.get_user(args.user)
UnboundLocalError: local variable 'user_or_org' referenced before assignment
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_smithy/conda_smithy.py`
Content:
```
1 #!/usr/bin/env python
2 from __future__ import print_function, absolute_import
3
4 import os
5 import requests
6 import subprocess
7 import sys
8 import argparse
9
10 from conda_build.metadata import MetaData
11
12 import conda_smithy.configure_circle_ci as configure_circle_ci
13 import conda_smithy.configure_feedstock as configure_feedstock
14
15
16 def generate_feedstock_content(target_directory, recipe_dir):
17 target_recipe_dir = os.path.join(target_directory, 'recipe')
18 if not os.path.exists(target_recipe_dir):
19 os.makedirs(target_recipe_dir)
20 configure_feedstock.copytree(recipe_dir, target_recipe_dir)
21
22 forge_yml = os.path.join(target_directory, 'conda-forge.yml')
23 with open(forge_yml, 'w') as fh:
24 fh.write('[]')
25
26 configure_feedstock.main(target_directory)
27
28
29 def init_git_repo(target):
30 subprocess.check_call(['git', 'init'], cwd=target)
31
32
33 def create_git_repo(target, meta):
34 init_git_repo(target)
35 subprocess.check_call(['git', 'add', '*'], cwd=target)
36 msg = 'Initial commit of the {} feedstock.'.format(meta.name())
37 subprocess.check_call(['git', 'commit', '-m', msg], cwd=target)
38
39
40 class Subcommand(object):
41 #: The name of the subcommand
42 subcommand = None
43 def __init__(self, parser):
44 subcommand_parser = parser.add_parser(self.subcommand)
45 subcommand_parser.set_defaults(subcommand_func=self)
46 return subcommand_parser
47
48 def __call__(self, args):
49 pass
50
51
52 class Init(Subcommand):
53 subcommand = 'init'
54 def __init__(self, parser):
55 # conda-smithy init /path/to/udunits-recipe ./
56 subcommand_parser = Subcommand.__init__(self, parser)
57 subcommand_parser.add_argument("recipe_directory")
58 subcommand_parser.add_argument("--feedstock-directory",
59 default='./{package.name}-feedstock')
60 subcommand_parser.add_argument("--no-git-repo", action='store_true',
61 default=False)
62
63 def __call__(self, args):
64 meta = MetaData(args.recipe_directory)
65 feedstock_directory = args.feedstock_directory.format(package=argparse.Namespace(name=meta.name()))
66 generate_feedstock_content(feedstock_directory, args.recipe_directory)
67 if not args.no_git_repo:
68 create_git_repo(feedstock_directory, meta)
69
70
71 class GithubCreate(Subcommand):
72 subcommand = 'github-create'
73 def __init__(self, parser):
74 # conda-smithy github-create ./ --organization=conda-forge
75 subcommand_parser = Subcommand.__init__(self, parser)
76 subcommand_parser.add_argument("feedstock_directory")
77 group = subcommand_parser.add_mutually_exclusive_group()
78 group.add_argument("--user")
79 group.add_argument("--organization", default="conda-forge")
80
81 def __call__(self, args):
82 try:
83 with open(os.path.expanduser('~/.conda-smithy/github.token'), 'r') as fh:
84 token = fh.read().strip()
85 except IOError:
86 print('No github token. Put one in ~/.conda-smithy/github.token')
87 meta = configure_feedstock.meta_of_feedstock(args.feedstock_directory)
88
89 from github import Github
90 gh = Github(token)
91 if args.user is not None:
92 pass
93 # User has been defined, and organization has not.
94 user_or_org.get_user(args.user)
95 else:
96 # Use the organization provided.
97 user_or_org = gh.get_organization(args.organization)
98 repo = user_or_org.create_repo(os.path.basename(os.path.abspath(args.feedstock_directory)),
99 has_wiki=False,
100 description='A conda-smithy repository for {}.'.format(meta.name()))
101 print('Created {} on github'.format(repo.full_name))
102
103
104 class RegisterFeedstockCI(Subcommand):
105 subcommand = 'register-feedstock-ci'
106 def __init__(self, parser):
107 # conda-smithy register-feedstock-ci ./
108 subcommand_parser = Subcommand.__init__(self, parser)
109 subcommand_parser.add_argument("feedstock_directory")
110 group = subcommand_parser.add_mutually_exclusive_group()
111 group.add_argument("--user")
112 group.add_argument("--organization", default="conda-forge")
113
114 def add_project_to_appveyor(self, user, project):
115 headers = {'Authorization': 'Bearer {}'.format(appveyor_token),
116 'Content-Type': 'application/json'}
117 url = 'https://ci.appveyor.com/api/projects'
118
119 data = {'repositoryProvider': 'gitHub', 'repositoryName': '{}/{}'.format(user, project)}
120
121 response = requests.post(url, headers=headers, data=data)
122 response = requests.get(url, headers=headers)
123 if response.status_code != 201:
124 response.raise_for_status()
125
126 def __call__(self, args):
127 owner = args.user or args.organization
128 repo = os.path.basename(os.path.abspath(args.feedstock_directory))
129
130 print('CI Summary for {}/{} (may take some time):'.format(owner, repo))
131 configure_circle_ci.add_project_to_circle(owner, repo)
132 configure_circle_ci.add_project_to_appveyor(owner, repo)
133 configure_circle_ci.add_project_to_travis(owner, repo)
134
135
136 def main():
137 # UX:
138 # conda-smithy init /path/to/udunits-recipe ./
139 # conda-smithy github-create ./ --organization=conda-forge --remote-name=upstream
140 # conda-smithy register-feedstock-ci ./
141
142 # How about:
143 # conda smithy config
144 # conda smithy create-forge ./recipe
145
146 # conda smithy clone-all
147
148 parser = argparse.ArgumentParser("conda-smithy - conda recipe building, made powerful.")
149 subparser = parser.add_subparsers()
150 # TODO: Consider allowing plugins/extensions using entry_points.
151 # http://reinout.vanrees.org/weblog/2010/01/06/zest-releaser-entry-points.html
152 for subcommand in Subcommand.__subclasses__():
153 subcommand(subparser)
154
155 if not sys.argv[1:]:
156 # args = parser.parse_args(['--help'])
157 args = parser.parse_args(['init', '../udunits-feedstock/recipe',
158 '--feedstock-directory=../{package.name}-delme-feedstock'])
159 # args = parser.parse_args(['github-create', '../udunits-delme-feedstock'])
160 # args = parser.parse_args(['register-feedstock-ci', '../udunits-delme-feedstock'])
161 else:
162 args = parser.parse_args()
163
164 args.subcommand_func(args)
165
166
167 if __name__ == '__main__':
168 main()
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda_smithy/conda_smithy.py b/conda_smithy/conda_smithy.py
--- a/conda_smithy/conda_smithy.py
+++ b/conda_smithy/conda_smithy.py
@@ -91,7 +91,7 @@
if args.user is not None:
pass
# User has been defined, and organization has not.
- user_or_org.get_user(args.user)
+ user_or_org = gh.get_user()
else:
# Use the organization provided.
user_or_org = gh.get_organization(args.organization)
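The patch above binds `user_or_org` in both branches; without it, passing `--user` trips the `UnboundLocalError: local variable 'user_or_org' referenced before assignment` that this row's issue reports. A minimal sketch of the fixed logic, assuming PyGithub's `Github.get_user()` with no arguments returns the authenticated user (the helper name below is illustrative, not part of the original module):

```python
def resolve_owner(gh, user=None, organization="conda-forge"):
    """Return the PyGithub object that the new repository should be created under."""
    if user is not None:
        # The token already identifies the user, so no argument is needed here.
        return gh.get_user()
    # Otherwise fall back to the named organization.
    return gh.get_organization(organization)
```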
| {"golden_diff": "diff --git a/conda_smithy/conda_smithy.py b/conda_smithy/conda_smithy.py\n--- a/conda_smithy/conda_smithy.py\n+++ b/conda_smithy/conda_smithy.py\n@@ -91,7 +91,7 @@\n if args.user is not None:\n pass\n # User has been defined, and organization has not.\n- user_or_org.get_user(args.user)\n+ user_or_org = gh.get_user()\n else:\n # Use the organization provided.\n user_or_org = gh.get_organization(args.organization)\n", "issue": "Calling conda smithy github-create with --user raises exception\nWhen I invoke `oncda smithy` to create a github repo (after setting the token correctly) I get this:\n\n``` python\n$ conda smithy github-create --user mwcraig mrjob-feedstock/\nTraceback (most recent call last):\n File \"/Users/mcraig/miniconda/bin/conda-smithy\", line 9, in <module>\n load_entry_point('conda-smithy==0.1.0.dev0', 'console_scripts', 'conda-smithy')()\n File \"/Users/mcraig/miniconda/lib/python2.7/site-packages/conda_smithy-0.1.0.dev0-py2.7.egg/conda_smithy/conda_smithy.py\", line 164, in main\n args.subcommand_func(args)\n File \"/Users/mcraig/miniconda/lib/python2.7/site-packages/conda_smithy-0.1.0.dev0-py2.7.egg/conda_smithy/conda_smithy.py\", line 94, in __call__\n user_or_org.get_user(args.user)\nUnboundLocalError: local variable 'user_or_org' referenced before assignment\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function, absolute_import\n\nimport os\nimport requests\nimport subprocess\nimport sys\nimport argparse\n\nfrom conda_build.metadata import MetaData\n\nimport conda_smithy.configure_circle_ci as configure_circle_ci\nimport conda_smithy.configure_feedstock as configure_feedstock\n\n\ndef generate_feedstock_content(target_directory, recipe_dir):\n target_recipe_dir = os.path.join(target_directory, 'recipe')\n if not os.path.exists(target_recipe_dir):\n os.makedirs(target_recipe_dir)\n configure_feedstock.copytree(recipe_dir, target_recipe_dir)\n\n forge_yml = os.path.join(target_directory, 'conda-forge.yml')\n with open(forge_yml, 'w') as fh:\n fh.write('[]')\n\n configure_feedstock.main(target_directory)\n\n\ndef init_git_repo(target):\n subprocess.check_call(['git', 'init'], cwd=target)\n\n\ndef create_git_repo(target, meta):\n init_git_repo(target)\n subprocess.check_call(['git', 'add', '*'], cwd=target)\n msg = 'Initial commit of the {} feedstock.'.format(meta.name())\n subprocess.check_call(['git', 'commit', '-m', msg], cwd=target)\n\n\nclass Subcommand(object):\n #: The name of the subcommand\n subcommand = None\n def __init__(self, parser):\n subcommand_parser = parser.add_parser(self.subcommand)\n subcommand_parser.set_defaults(subcommand_func=self)\n return subcommand_parser\n\n def __call__(self, args):\n pass\n\n\nclass Init(Subcommand):\n subcommand = 'init'\n def __init__(self, parser):\n # conda-smithy init /path/to/udunits-recipe ./\n subcommand_parser = Subcommand.__init__(self, parser)\n subcommand_parser.add_argument(\"recipe_directory\")\n subcommand_parser.add_argument(\"--feedstock-directory\",\n default='./{package.name}-feedstock')\n subcommand_parser.add_argument(\"--no-git-repo\", action='store_true',\n default=False)\n\n def __call__(self, args):\n meta = MetaData(args.recipe_directory)\n feedstock_directory = args.feedstock_directory.format(package=argparse.Namespace(name=meta.name()))\n generate_feedstock_content(feedstock_directory, args.recipe_directory)\n if not args.no_git_repo:\n create_git_repo(feedstock_directory, meta)\n\n\nclass GithubCreate(Subcommand):\n subcommand = 
'github-create'\n def __init__(self, parser):\n # conda-smithy github-create ./ --organization=conda-forge\n subcommand_parser = Subcommand.__init__(self, parser)\n subcommand_parser.add_argument(\"feedstock_directory\")\n group = subcommand_parser.add_mutually_exclusive_group()\n group.add_argument(\"--user\")\n group.add_argument(\"--organization\", default=\"conda-forge\")\n\n def __call__(self, args):\n try:\n with open(os.path.expanduser('~/.conda-smithy/github.token'), 'r') as fh:\n token = fh.read().strip()\n except IOError:\n print('No github token. Put one in ~/.conda-smithy/github.token')\n meta = configure_feedstock.meta_of_feedstock(args.feedstock_directory)\n\n from github import Github\n gh = Github(token)\n if args.user is not None:\n pass\n # User has been defined, and organization has not.\n user_or_org.get_user(args.user)\n else:\n # Use the organization provided.\n user_or_org = gh.get_organization(args.organization)\n repo = user_or_org.create_repo(os.path.basename(os.path.abspath(args.feedstock_directory)),\n has_wiki=False,\n description='A conda-smithy repository for {}.'.format(meta.name()))\n print('Created {} on github'.format(repo.full_name))\n\n\nclass RegisterFeedstockCI(Subcommand):\n subcommand = 'register-feedstock-ci'\n def __init__(self, parser):\n # conda-smithy register-feedstock-ci ./\n subcommand_parser = Subcommand.__init__(self, parser)\n subcommand_parser.add_argument(\"feedstock_directory\")\n group = subcommand_parser.add_mutually_exclusive_group()\n group.add_argument(\"--user\")\n group.add_argument(\"--organization\", default=\"conda-forge\")\n\n def add_project_to_appveyor(self, user, project):\n headers = {'Authorization': 'Bearer {}'.format(appveyor_token),\n 'Content-Type': 'application/json'}\n url = 'https://ci.appveyor.com/api/projects'\n\n data = {'repositoryProvider': 'gitHub', 'repositoryName': '{}/{}'.format(user, project)}\n\n response = requests.post(url, headers=headers, data=data)\n response = requests.get(url, headers=headers)\n if response.status_code != 201:\n response.raise_for_status()\n\n def __call__(self, args):\n owner = args.user or args.organization\n repo = os.path.basename(os.path.abspath(args.feedstock_directory))\n\n print('CI Summary for {}/{} (may take some time):'.format(owner, repo))\n configure_circle_ci.add_project_to_circle(owner, repo)\n configure_circle_ci.add_project_to_appveyor(owner, repo)\n configure_circle_ci.add_project_to_travis(owner, repo)\n\n\ndef main():\n# UX:\n# conda-smithy init /path/to/udunits-recipe ./\n# conda-smithy github-create ./ --organization=conda-forge --remote-name=upstream\n# conda-smithy register-feedstock-ci ./\n\n# How about:\n# conda smithy config\n# conda smithy create-forge ./recipe\n\n# conda smithy clone-all\n\n parser = argparse.ArgumentParser(\"conda-smithy - conda recipe building, made powerful.\")\n subparser = parser.add_subparsers()\n # TODO: Consider allowing plugins/extensions using entry_points.\n # http://reinout.vanrees.org/weblog/2010/01/06/zest-releaser-entry-points.html\n for subcommand in Subcommand.__subclasses__():\n subcommand(subparser)\n\n if not sys.argv[1:]:\n# args = parser.parse_args(['--help'])\n args = parser.parse_args(['init', '../udunits-feedstock/recipe',\n '--feedstock-directory=../{package.name}-delme-feedstock'])\n# args = parser.parse_args(['github-create', '../udunits-delme-feedstock'])\n# args = parser.parse_args(['register-feedstock-ci', '../udunits-delme-feedstock'])\n else:\n args = parser.parse_args()\n\n 
args.subcommand_func(args)\n\n\nif __name__ == '__main__':\n main()\n", "path": "conda_smithy/conda_smithy.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function, absolute_import\n\nimport os\nimport requests\nimport subprocess\nimport sys\nimport argparse\n\nfrom conda_build.metadata import MetaData\n\nimport conda_smithy.configure_circle_ci as configure_circle_ci\nimport conda_smithy.configure_feedstock as configure_feedstock\n\n\ndef generate_feedstock_content(target_directory, recipe_dir):\n target_recipe_dir = os.path.join(target_directory, 'recipe')\n if not os.path.exists(target_recipe_dir):\n os.makedirs(target_recipe_dir)\n configure_feedstock.copytree(recipe_dir, target_recipe_dir)\n\n forge_yml = os.path.join(target_directory, 'conda-forge.yml')\n with open(forge_yml, 'w') as fh:\n fh.write('[]')\n\n configure_feedstock.main(target_directory)\n\n\ndef init_git_repo(target):\n subprocess.check_call(['git', 'init'], cwd=target)\n\n\ndef create_git_repo(target, meta):\n init_git_repo(target)\n subprocess.check_call(['git', 'add', '*'], cwd=target)\n msg = 'Initial commit of the {} feedstock.'.format(meta.name())\n subprocess.check_call(['git', 'commit', '-m', msg], cwd=target)\n\n\nclass Subcommand(object):\n #: The name of the subcommand\n subcommand = None\n def __init__(self, parser):\n subcommand_parser = parser.add_parser(self.subcommand)\n subcommand_parser.set_defaults(subcommand_func=self)\n return subcommand_parser\n\n def __call__(self, args):\n pass\n\n\nclass Init(Subcommand):\n subcommand = 'init'\n def __init__(self, parser):\n # conda-smithy init /path/to/udunits-recipe ./\n subcommand_parser = Subcommand.__init__(self, parser)\n subcommand_parser.add_argument(\"recipe_directory\")\n subcommand_parser.add_argument(\"--feedstock-directory\",\n default='./{package.name}-feedstock')\n subcommand_parser.add_argument(\"--no-git-repo\", action='store_true',\n default=False)\n\n def __call__(self, args):\n meta = MetaData(args.recipe_directory)\n feedstock_directory = args.feedstock_directory.format(package=argparse.Namespace(name=meta.name()))\n generate_feedstock_content(feedstock_directory, args.recipe_directory)\n if not args.no_git_repo:\n create_git_repo(feedstock_directory, meta)\n\n\nclass GithubCreate(Subcommand):\n subcommand = 'github-create'\n def __init__(self, parser):\n # conda-smithy github-create ./ --organization=conda-forge\n subcommand_parser = Subcommand.__init__(self, parser)\n subcommand_parser.add_argument(\"feedstock_directory\")\n group = subcommand_parser.add_mutually_exclusive_group()\n group.add_argument(\"--user\")\n group.add_argument(\"--organization\", default=\"conda-forge\")\n\n def __call__(self, args):\n try:\n with open(os.path.expanduser('~/.conda-smithy/github.token'), 'r') as fh:\n token = fh.read().strip()\n except IOError:\n print('No github token. 
Put one in ~/.conda-smithy/github.token')\n meta = configure_feedstock.meta_of_feedstock(args.feedstock_directory)\n\n from github import Github\n gh = Github(token)\n if args.user is not None:\n pass\n # User has been defined, and organization has not.\n user_or_org = gh.get_user()\n else:\n # Use the organization provided.\n user_or_org = gh.get_organization(args.organization)\n repo = user_or_org.create_repo(os.path.basename(os.path.abspath(args.feedstock_directory)),\n has_wiki=False,\n description='A conda-smithy repository for {}.'.format(meta.name()))\n print('Created {} on github'.format(repo.full_name))\n\n\nclass RegisterFeedstockCI(Subcommand):\n subcommand = 'register-feedstock-ci'\n def __init__(self, parser):\n # conda-smithy register-feedstock-ci ./\n subcommand_parser = Subcommand.__init__(self, parser)\n subcommand_parser.add_argument(\"feedstock_directory\")\n group = subcommand_parser.add_mutually_exclusive_group()\n group.add_argument(\"--user\")\n group.add_argument(\"--organization\", default=\"conda-forge\")\n\n def add_project_to_appveyor(self, user, project):\n headers = {'Authorization': 'Bearer {}'.format(appveyor_token),\n 'Content-Type': 'application/json'}\n url = 'https://ci.appveyor.com/api/projects'\n\n data = {'repositoryProvider': 'gitHub', 'repositoryName': '{}/{}'.format(user, project)}\n\n response = requests.post(url, headers=headers, data=data)\n response = requests.get(url, headers=headers)\n if response.status_code != 201:\n response.raise_for_status()\n\n def __call__(self, args):\n owner = args.user or args.organization\n repo = os.path.basename(os.path.abspath(args.feedstock_directory))\n\n print('CI Summary for {}/{} (may take some time):'.format(owner, repo))\n configure_circle_ci.add_project_to_circle(owner, repo)\n configure_circle_ci.add_project_to_appveyor(owner, repo)\n configure_circle_ci.add_project_to_travis(owner, repo)\n\n\ndef main():\n# UX:\n# conda-smithy init /path/to/udunits-recipe ./\n# conda-smithy github-create ./ --organization=conda-forge --remote-name=upstream\n# conda-smithy register-feedstock-ci ./\n\n# How about:\n# conda smithy config\n# conda smithy create-forge ./recipe\n\n# conda smithy clone-all\n\n parser = argparse.ArgumentParser(\"conda-smithy - conda recipe building, made powerful.\")\n subparser = parser.add_subparsers()\n # TODO: Consider allowing plugins/extensions using entry_points.\n # http://reinout.vanrees.org/weblog/2010/01/06/zest-releaser-entry-points.html\n for subcommand in Subcommand.__subclasses__():\n subcommand(subparser)\n\n if not sys.argv[1:]:\n# args = parser.parse_args(['--help'])\n args = parser.parse_args(['init', '../udunits-feedstock/recipe',\n '--feedstock-directory=../{package.name}-delme-feedstock'])\n# args = parser.parse_args(['github-create', '../udunits-delme-feedstock'])\n# args = parser.parse_args(['register-feedstock-ci', '../udunits-delme-feedstock'])\n else:\n args = parser.parse_args()\n\n args.subcommand_func(args)\n\n\nif __name__ == '__main__':\n main()\n", "path": "conda_smithy/conda_smithy.py"}]} | 2,372 | 132 |
gh_patches_debug_21907 | rasdani/github-patches | git_diff | webkom__lego-1985 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Timezone email
Format dates in emails in the same language as the email template (Norwegian), and convert them to the proper timezone.


--- END ISSUE ---
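A minimal sketch of the conversion the issue asks for, assuming Django's `timezone.localtime` and `pytz` are available in this codebase (the helper name and the exact format string are illustrative):

```python
from django.utils import timezone
import pytz

def format_due_date(payment_due_date):
    # Convert the stored datetime to Norwegian local time before rendering it in the email.
    local = timezone.localtime(value=payment_due_date, timezone=pytz.timezone("Europe/Oslo"))
    # Renders e.g. "21.04.21, kl. 18:00", matching the Norwegian wording of the templates.
    return local.strftime("%d.%m.%y, kl. %H:%M")
```

This is essentially what the patch further down does inside `EventPaymentOverdueNotification.generate_mail()`.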
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lego/apps/events/notifications.py`
Content:
```
1 from lego.apps.notifications.constants import (
2 EVENT_ADMIN_REGISTRATION,
3 EVENT_ADMIN_UNREGISTRATION,
4 EVENT_BUMP,
5 EVENT_PAYMENT_OVERDUE,
6 EVENT_PAYMENT_OVERDUE_CREATOR,
7 )
8 from lego.apps.notifications.notification import Notification
9
10
11 class EventBumpNotification(Notification):
12
13 name = EVENT_BUMP
14
15 def generate_mail(self):
16 event = self.kwargs["event"]
17
18 return self._delay_mail(
19 to_email=self.user.email,
20 context={"event": event.title, "name": self.user.full_name, "id": event.id},
21 subject=f"Du er flyttet opp fra ventelisten på arrangementet {event.title}",
22 plain_template="events/email/bump.txt",
23 html_template="events/email/bump.html",
24 )
25
26 def generate_push(self):
27 event = self.kwargs["event"]
28
29 return self._delay_push(
30 template="events/push/bump.txt",
31 context={"event": event.title},
32 instance=event,
33 )
34
35
36 class EventPaymentOverdueNotification(Notification):
37
38 name = EVENT_PAYMENT_OVERDUE
39
40 def generate_mail(self):
41 event = self.kwargs["event"]
42
43 return self._delay_mail(
44 to_email=self.user.email,
45 context={
46 "event": event.title,
47 "name": self.user.full_name,
48 "due_date": event.payment_due_date,
49 "id": event.id,
50 },
51 subject=f"Du har ikke betalt påmeldingen på arrangementet {event.title}",
52 plain_template="events/email/payment_overdue.txt",
53 html_template="events/email/payment_overdue.html",
54 )
55
56 def generate_push(self):
57 event = self.kwargs["event"]
58
59 return self._delay_push(
60 template="events/push/payment_overdue.txt",
61 context={"event": event.title},
62 instance=event,
63 )
64
65
66 class EventPaymentOverdueCreatorNotification(Notification):
67
68 name = EVENT_PAYMENT_OVERDUE_CREATOR
69
70 def generate_mail(self):
71 event = self.kwargs["event"]
72 users = self.kwargs["users"]
73
74 return self._delay_mail(
75 to_email=self.user.email,
76 context={
77 "event": event.title,
78 "users": users,
79 "name": self.user.full_name,
80 "id": event.id,
81 },
82 subject=f"Følgende registrerte har ikke betalt påmeldingen til arrangementet"
83 f" {event.title}",
84 plain_template="events/email/payment_overdue_author.txt",
85 html_template="events/email/payment_overdue_author.html",
86 )
87
88
89 class EventAdminRegistrationNotification(Notification):
90
91 name = EVENT_ADMIN_REGISTRATION
92
93 def generate_mail(self):
94 event = self.kwargs["event"]
95 reason = self.kwargs["reason"]
96
97 return self._delay_mail(
98 to_email=self.user.email,
99 context={
100 "event": event.title,
101 "name": self.user.full_name,
102 "reason": reason,
103 "id": event.id,
104 },
105 subject=f"Du har blitt adminpåmeldt på arrangementet {event.title}",
106 plain_template="events/email/admin_registration.txt",
107 html_template="events/email/admin_registration.html",
108 )
109
110 def generate_push(self):
111 event = self.kwargs["event"]
112
113 return self._delay_push(
114 template="events/push/admin_registration.txt",
115 context={"event": event.title},
116 instance=event,
117 )
118
119
120 class EventAdminUnregistrationNotification(Notification):
121
122 name = EVENT_ADMIN_UNREGISTRATION
123
124 def generate_mail(self):
125 event = self.kwargs["event"]
126 creator = self.kwargs["creator"]
127 reason = self.kwargs["reason"]
128
129 return self._delay_mail(
130 to_email=self.user.email,
131 context={
132 "event": event.title,
133 "creator_name": creator.full_name,
134 "creator_email": creator.email,
135 "name": self.user.full_name,
136 "reason": reason,
137 "id": event.id,
138 },
139 subject=f"Du har blitt fjernet fra arrangementet {event.title}",
140 plain_template="events/email/admin_unregistration.txt",
141 html_template="events/email/admin_unregistration.html",
142 )
143
144 def generate_push(self):
145 event = self.kwargs["event"]
146
147 return self._delay_push(
148 template="events/push/admin_unregistration.txt",
149 context={"event": event.title},
150 instance=event,
151 )
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lego/apps/events/notifications.py b/lego/apps/events/notifications.py
--- a/lego/apps/events/notifications.py
+++ b/lego/apps/events/notifications.py
@@ -1,3 +1,7 @@
+from django.utils import timezone
+
+import pytz
+
from lego.apps.notifications.constants import (
EVENT_ADMIN_REGISTRATION,
EVENT_ADMIN_UNREGISTRATION,
@@ -40,12 +44,18 @@
def generate_mail(self):
event = self.kwargs["event"]
+ date = timezone.localtime(
+ value=event.payment_due_date, timezone=pytz.timezone("Europe/Oslo")
+ )
+
+ due_date = date.strftime("%d.%m.%y, kl. %H:%M")
+
return self._delay_mail(
to_email=self.user.email,
context={
"event": event.title,
"name": self.user.full_name,
- "due_date": event.payment_due_date,
+ "due_date": due_date,
"id": event.id,
},
subject=f"Du har ikke betalt påmeldingen på arrangementet {event.title}",
| {"golden_diff": "diff --git a/lego/apps/events/notifications.py b/lego/apps/events/notifications.py\n--- a/lego/apps/events/notifications.py\n+++ b/lego/apps/events/notifications.py\n@@ -1,3 +1,7 @@\n+from django.utils import timezone\n+\n+import pytz\n+\n from lego.apps.notifications.constants import (\n EVENT_ADMIN_REGISTRATION,\n EVENT_ADMIN_UNREGISTRATION,\n@@ -40,12 +44,18 @@\n def generate_mail(self):\n event = self.kwargs[\"event\"]\n \n+ date = timezone.localtime(\n+ value=event.payment_due_date, timezone=pytz.timezone(\"Europe/Oslo\")\n+ )\n+\n+ due_date = date.strftime(\"%d.%m.%y, kl. %H:%M\")\n+\n return self._delay_mail(\n to_email=self.user.email,\n context={\n \"event\": event.title,\n \"name\": self.user.full_name,\n- \"due_date\": event.payment_due_date,\n+ \"due_date\": due_date,\n \"id\": event.id,\n },\n subject=f\"Du har ikke betalt p\u00e5meldingen p\u00e5 arrangementet {event.title}\",\n", "issue": "Timezone email\nFormat dates in emails in the same language as the email template (Norwegian), and converted to the proper timezone. \r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from lego.apps.notifications.constants import (\n EVENT_ADMIN_REGISTRATION,\n EVENT_ADMIN_UNREGISTRATION,\n EVENT_BUMP,\n EVENT_PAYMENT_OVERDUE,\n EVENT_PAYMENT_OVERDUE_CREATOR,\n)\nfrom lego.apps.notifications.notification import Notification\n\n\nclass EventBumpNotification(Notification):\n\n name = EVENT_BUMP\n\n def generate_mail(self):\n event = self.kwargs[\"event\"]\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\"event\": event.title, \"name\": self.user.full_name, \"id\": event.id},\n subject=f\"Du er flyttet opp fra ventelisten p\u00e5 arrangementet {event.title}\",\n plain_template=\"events/email/bump.txt\",\n html_template=\"events/email/bump.html\",\n )\n\n def generate_push(self):\n event = self.kwargs[\"event\"]\n\n return self._delay_push(\n template=\"events/push/bump.txt\",\n context={\"event\": event.title},\n instance=event,\n )\n\n\nclass EventPaymentOverdueNotification(Notification):\n\n name = EVENT_PAYMENT_OVERDUE\n\n def generate_mail(self):\n event = self.kwargs[\"event\"]\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\n \"event\": event.title,\n \"name\": self.user.full_name,\n \"due_date\": event.payment_due_date,\n \"id\": event.id,\n },\n subject=f\"Du har ikke betalt p\u00e5meldingen p\u00e5 arrangementet {event.title}\",\n plain_template=\"events/email/payment_overdue.txt\",\n html_template=\"events/email/payment_overdue.html\",\n )\n\n def generate_push(self):\n event = self.kwargs[\"event\"]\n\n return self._delay_push(\n template=\"events/push/payment_overdue.txt\",\n context={\"event\": event.title},\n instance=event,\n )\n\n\nclass EventPaymentOverdueCreatorNotification(Notification):\n\n name = EVENT_PAYMENT_OVERDUE_CREATOR\n\n def generate_mail(self):\n event = self.kwargs[\"event\"]\n users = self.kwargs[\"users\"]\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\n \"event\": event.title,\n \"users\": users,\n \"name\": self.user.full_name,\n \"id\": event.id,\n },\n subject=f\"F\u00f8lgende registrerte har ikke betalt p\u00e5meldingen til arrangementet\"\n f\" {event.title}\",\n plain_template=\"events/email/payment_overdue_author.txt\",\n html_template=\"events/email/payment_overdue_author.html\",\n )\n\n\nclass EventAdminRegistrationNotification(Notification):\n\n name = EVENT_ADMIN_REGISTRATION\n\n def generate_mail(self):\n event = self.kwargs[\"event\"]\n reason = 
self.kwargs[\"reason\"]\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\n \"event\": event.title,\n \"name\": self.user.full_name,\n \"reason\": reason,\n \"id\": event.id,\n },\n subject=f\"Du har blitt adminp\u00e5meldt p\u00e5 arrangementet {event.title}\",\n plain_template=\"events/email/admin_registration.txt\",\n html_template=\"events/email/admin_registration.html\",\n )\n\n def generate_push(self):\n event = self.kwargs[\"event\"]\n\n return self._delay_push(\n template=\"events/push/admin_registration.txt\",\n context={\"event\": event.title},\n instance=event,\n )\n\n\nclass EventAdminUnregistrationNotification(Notification):\n\n name = EVENT_ADMIN_UNREGISTRATION\n\n def generate_mail(self):\n event = self.kwargs[\"event\"]\n creator = self.kwargs[\"creator\"]\n reason = self.kwargs[\"reason\"]\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\n \"event\": event.title,\n \"creator_name\": creator.full_name,\n \"creator_email\": creator.email,\n \"name\": self.user.full_name,\n \"reason\": reason,\n \"id\": event.id,\n },\n subject=f\"Du har blitt fjernet fra arrangementet {event.title}\",\n plain_template=\"events/email/admin_unregistration.txt\",\n html_template=\"events/email/admin_unregistration.html\",\n )\n\n def generate_push(self):\n event = self.kwargs[\"event\"]\n\n return self._delay_push(\n template=\"events/push/admin_unregistration.txt\",\n context={\"event\": event.title},\n instance=event,\n )\n", "path": "lego/apps/events/notifications.py"}], "after_files": [{"content": "from django.utils import timezone\n\nimport pytz\n\nfrom lego.apps.notifications.constants import (\n EVENT_ADMIN_REGISTRATION,\n EVENT_ADMIN_UNREGISTRATION,\n EVENT_BUMP,\n EVENT_PAYMENT_OVERDUE,\n EVENT_PAYMENT_OVERDUE_CREATOR,\n)\nfrom lego.apps.notifications.notification import Notification\n\n\nclass EventBumpNotification(Notification):\n\n name = EVENT_BUMP\n\n def generate_mail(self):\n event = self.kwargs[\"event\"]\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\"event\": event.title, \"name\": self.user.full_name, \"id\": event.id},\n subject=f\"Du er flyttet opp fra ventelisten p\u00e5 arrangementet {event.title}\",\n plain_template=\"events/email/bump.txt\",\n html_template=\"events/email/bump.html\",\n )\n\n def generate_push(self):\n event = self.kwargs[\"event\"]\n\n return self._delay_push(\n template=\"events/push/bump.txt\",\n context={\"event\": event.title},\n instance=event,\n )\n\n\nclass EventPaymentOverdueNotification(Notification):\n\n name = EVENT_PAYMENT_OVERDUE\n\n def generate_mail(self):\n event = self.kwargs[\"event\"]\n\n date = timezone.localtime(\n value=event.payment_due_date, timezone=pytz.timezone(\"Europe/Oslo\")\n )\n\n due_date = date.strftime(\"%d.%m.%y, kl. 
%H:%M\")\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\n \"event\": event.title,\n \"name\": self.user.full_name,\n \"due_date\": due_date,\n \"id\": event.id,\n },\n subject=f\"Du har ikke betalt p\u00e5meldingen p\u00e5 arrangementet {event.title}\",\n plain_template=\"events/email/payment_overdue.txt\",\n html_template=\"events/email/payment_overdue.html\",\n )\n\n def generate_push(self):\n event = self.kwargs[\"event\"]\n\n return self._delay_push(\n template=\"events/push/payment_overdue.txt\",\n context={\"event\": event.title},\n instance=event,\n )\n\n\nclass EventPaymentOverdueCreatorNotification(Notification):\n\n name = EVENT_PAYMENT_OVERDUE_CREATOR\n\n def generate_mail(self):\n event = self.kwargs[\"event\"]\n users = self.kwargs[\"users\"]\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\n \"event\": event.title,\n \"users\": users,\n \"name\": self.user.full_name,\n \"id\": event.id,\n },\n subject=f\"F\u00f8lgende registrerte har ikke betalt p\u00e5meldingen til arrangementet\"\n f\" {event.title}\",\n plain_template=\"events/email/payment_overdue_author.txt\",\n html_template=\"events/email/payment_overdue_author.html\",\n )\n\n\nclass EventAdminRegistrationNotification(Notification):\n\n name = EVENT_ADMIN_REGISTRATION\n\n def generate_mail(self):\n event = self.kwargs[\"event\"]\n reason = self.kwargs[\"reason\"]\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\n \"event\": event.title,\n \"name\": self.user.full_name,\n \"reason\": reason,\n \"id\": event.id,\n },\n subject=f\"Du har blitt adminp\u00e5meldt p\u00e5 arrangementet {event.title}\",\n plain_template=\"events/email/admin_registration.txt\",\n html_template=\"events/email/admin_registration.html\",\n )\n\n def generate_push(self):\n event = self.kwargs[\"event\"]\n\n return self._delay_push(\n template=\"events/push/admin_registration.txt\",\n context={\"event\": event.title},\n instance=event,\n )\n\n\nclass EventAdminUnregistrationNotification(Notification):\n\n name = EVENT_ADMIN_UNREGISTRATION\n\n def generate_mail(self):\n event = self.kwargs[\"event\"]\n creator = self.kwargs[\"creator\"]\n reason = self.kwargs[\"reason\"]\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\n \"event\": event.title,\n \"creator_name\": creator.full_name,\n \"creator_email\": creator.email,\n \"name\": self.user.full_name,\n \"reason\": reason,\n \"id\": event.id,\n },\n subject=f\"Du har blitt fjernet fra arrangementet {event.title}\",\n plain_template=\"events/email/admin_unregistration.txt\",\n html_template=\"events/email/admin_unregistration.html\",\n )\n\n def generate_push(self):\n event = self.kwargs[\"event\"]\n\n return self._delay_push(\n template=\"events/push/admin_unregistration.txt\",\n context={\"event\": event.title},\n instance=event,\n )\n", "path": "lego/apps/events/notifications.py"}]} | 1,704 | 251 |
gh_patches_debug_38958 | rasdani/github-patches | git_diff | ansible-collections__community.general-2217 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enable no-bin-links for community.general.npm
### Summary
I'm working on getting a Vagrant development environment set up and am running into an issue where npm has trouble installing on a synced folder. A solution would be to expose a flag that passes the `--no-bin-links` setting to npm to disable symlinking executables, but there is currently no way to set this.
### Issue Type
Feature Idea
### Component Name
community.general.npm
### Additional Information
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
- name: NPM install for Login Service
community.general.npm:
path: "{{ project_root }}"
links: no
become_user: vagrant
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
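A minimal sketch of how the requested flag would affect the npm command line, following the pattern the module already uses for boolean options such as `no_optional` (the function name and signature here are illustrative, not part of the module):

```python
def build_npm_command(executable, args, no_bin_links=False, no_optional=False):
    """Assemble the npm argv the way Npm._exec() does, with the proposed flag added."""
    cmd = list(executable) + list(args)
    if no_optional:
        cmd.append('--no-optional')
    if no_bin_links:
        # New behaviour requested in the issue: skip symlinking package executables.
        cmd.append('--no-bin-links')
    return cmd

# build_npm_command(['/usr/local/bin/npm'], ['install'], no_bin_links=True)
# -> ['/usr/local/bin/npm', 'install', '--no-bin-links']
```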
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/packaging/language/npm.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3 # Copyright (c) 2017 Chris Hoffman <[email protected]>
4 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
5
6 from __future__ import absolute_import, division, print_function
7 __metaclass__ = type
8
9
10 DOCUMENTATION = r'''
11 ---
12 module: npm
13 short_description: Manage node.js packages with npm
14 description:
15 - Manage node.js packages with Node Package Manager (npm).
16 author: "Chris Hoffman (@chrishoffman)"
17 options:
18 name:
19 description:
20 - The name of a node.js library to install.
21 type: str
22 required: false
23 path:
24 description:
25 - The base path where to install the node.js libraries.
26 type: path
27 required: false
28 version:
29 description:
30 - The version to be installed.
31 type: str
32 required: false
33 global:
34 description:
35 - Install the node.js library globally.
36 required: false
37 default: no
38 type: bool
39 executable:
40 description:
41 - The executable location for npm.
42 - This is useful if you are using a version manager, such as nvm.
43 type: path
44 required: false
45 ignore_scripts:
46 description:
47 - Use the C(--ignore-scripts) flag when installing.
48 required: false
49 type: bool
50 default: no
51 unsafe_perm:
52 description:
53 - Use the C(--unsafe-perm) flag when installing.
54 type: bool
55 default: no
56 ci:
57 description:
58 - Install packages based on package-lock file, same as running C(npm ci).
59 type: bool
60 default: no
61 production:
62 description:
63 - Install dependencies in production mode, excluding devDependencies.
64 required: false
65 type: bool
66 default: no
67 registry:
68 description:
69 - The registry to install modules from.
70 required: false
71 type: str
72 state:
73 description:
74 - The state of the node.js library.
75 required: false
76 type: str
77 default: present
78 choices: [ "present", "absent", "latest" ]
79 no_optional:
80 description:
81 - Use the C(--no-optional) flag when installing.
82 type: bool
83 default: no
84 version_added: 2.0.0
85 requirements:
86 - npm installed in bin path (recommended /usr/local/bin)
87 '''
88
89 EXAMPLES = r'''
90 - name: Install "coffee-script" node.js package.
91 community.general.npm:
92 name: coffee-script
93 path: /app/location
94
95 - name: Install "coffee-script" node.js package on version 1.6.1.
96 community.general.npm:
97 name: coffee-script
98 version: '1.6.1'
99 path: /app/location
100
101 - name: Install "coffee-script" node.js package globally.
102 community.general.npm:
103 name: coffee-script
104 global: yes
105
106 - name: Remove the globally package "coffee-script".
107 community.general.npm:
108 name: coffee-script
109 global: yes
110 state: absent
111
112 - name: Install "coffee-script" node.js package from custom registry.
113 community.general.npm:
114 name: coffee-script
115 registry: 'http://registry.mysite.com'
116
117 - name: Install packages based on package.json.
118 community.general.npm:
119 path: /app/location
120
121 - name: Update packages based on package.json to their latest version.
122 community.general.npm:
123 path: /app/location
124 state: latest
125
126 - name: Install packages based on package.json using the npm installed with nvm v0.10.1.
127 community.general.npm:
128 path: /app/location
129 executable: /opt/nvm/v0.10.1/bin/npm
130 state: present
131 '''
132
133 import json
134 import os
135 import re
136
137 from ansible.module_utils.basic import AnsibleModule
138 from ansible.module_utils._text import to_native
139
140
141 class Npm(object):
142 def __init__(self, module, **kwargs):
143 self.module = module
144 self.glbl = kwargs['glbl']
145 self.name = kwargs['name']
146 self.version = kwargs['version']
147 self.path = kwargs['path']
148 self.registry = kwargs['registry']
149 self.production = kwargs['production']
150 self.ignore_scripts = kwargs['ignore_scripts']
151 self.unsafe_perm = kwargs['unsafe_perm']
152 self.state = kwargs['state']
153 self.no_optional = kwargs['no_optional']
154
155 if kwargs['executable']:
156 self.executable = kwargs['executable'].split(' ')
157 else:
158 self.executable = [module.get_bin_path('npm', True)]
159
160 if kwargs['version'] and self.state != 'absent':
161 self.name_version = self.name + '@' + str(self.version)
162 else:
163 self.name_version = self.name
164
165 def _exec(self, args, run_in_check_mode=False, check_rc=True, add_package_name=True):
166 if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
167 cmd = self.executable + args
168
169 if self.glbl:
170 cmd.append('--global')
171 if self.production and ('install' in cmd or 'update' in cmd):
172 cmd.append('--production')
173 if self.ignore_scripts:
174 cmd.append('--ignore-scripts')
175 if self.unsafe_perm:
176 cmd.append('--unsafe-perm')
177 if self.name and add_package_name:
178 cmd.append(self.name_version)
179 if self.registry:
180 cmd.append('--registry')
181 cmd.append(self.registry)
182 if self.no_optional:
183 cmd.append('--no-optional')
184
185 # If path is specified, cd into that path and run the command.
186 cwd = None
187 if self.path:
188 if not os.path.exists(self.path):
189 os.makedirs(self.path)
190 if not os.path.isdir(self.path):
191 self.module.fail_json(msg="path %s is not a directory" % self.path)
192 cwd = self.path
193
194 rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
195 return out
196 return ''
197
198 def list(self):
199 cmd = ['list', '--json', '--long']
200
201 installed = list()
202 missing = list()
203 data = {}
204 try:
205 data = json.loads(self._exec(cmd, True, False, False) or '{}')
206 except (getattr(json, 'JSONDecodeError', ValueError)) as e:
207 self.module.fail_json(msg="Failed to parse NPM output with error %s" % to_native(e))
208 if 'dependencies' in data:
209 for dep in data['dependencies']:
210 if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:
211 missing.append(dep)
212 elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']:
213 missing.append(dep)
214 else:
215 installed.append(dep)
216 if self.name and self.name not in installed:
217 missing.append(self.name)
218 # Named dependency not installed
219 else:
220 missing.append(self.name)
221
222 return installed, missing
223
224 def install(self):
225 return self._exec(['install'])
226
227 def ci_install(self):
228 return self._exec(['ci'])
229
230 def update(self):
231 return self._exec(['update'])
232
233 def uninstall(self):
234 return self._exec(['uninstall'])
235
236 def list_outdated(self):
237 outdated = list()
238 data = self._exec(['outdated'], True, False)
239 for dep in data.splitlines():
240 if dep:
241 # node.js v0.10.22 changed the `npm outdated` module separator
242 # from "@" to " ". Split on both for backwards compatibility.
243 pkg, other = re.split(r'\s|@', dep, 1)
244 outdated.append(pkg)
245
246 return outdated
247
248
249 def main():
250 arg_spec = dict(
251 name=dict(default=None, type='str'),
252 path=dict(default=None, type='path'),
253 version=dict(default=None, type='str'),
254 production=dict(default=False, type='bool'),
255 executable=dict(default=None, type='path'),
256 registry=dict(default=None, type='str'),
257 state=dict(default='present', choices=['present', 'absent', 'latest']),
258 ignore_scripts=dict(default=False, type='bool'),
259 unsafe_perm=dict(default=False, type='bool'),
260 ci=dict(default=False, type='bool'),
261 no_optional=dict(default=False, type='bool'),
262 )
263 arg_spec['global'] = dict(default=False, type='bool')
264 module = AnsibleModule(
265 argument_spec=arg_spec,
266 supports_check_mode=True
267 )
268
269 name = module.params['name']
270 path = module.params['path']
271 version = module.params['version']
272 glbl = module.params['global']
273 production = module.params['production']
274 executable = module.params['executable']
275 registry = module.params['registry']
276 state = module.params['state']
277 ignore_scripts = module.params['ignore_scripts']
278 unsafe_perm = module.params['unsafe_perm']
279 ci = module.params['ci']
280 no_optional = module.params['no_optional']
281
282 if not path and not glbl:
283 module.fail_json(msg='path must be specified when not using global')
284 if state == 'absent' and not name:
285 module.fail_json(msg='uninstalling a package is only available for named packages')
286
287 npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production,
288 executable=executable, registry=registry, ignore_scripts=ignore_scripts,
289 unsafe_perm=unsafe_perm, state=state, no_optional=no_optional)
290
291 changed = False
292 if ci:
293 npm.ci_install()
294 changed = True
295 elif state == 'present':
296 installed, missing = npm.list()
297 if missing:
298 changed = True
299 npm.install()
300 elif state == 'latest':
301 installed, missing = npm.list()
302 outdated = npm.list_outdated()
303 if missing:
304 changed = True
305 npm.install()
306 if outdated:
307 changed = True
308 npm.update()
309 else: # absent
310 installed, missing = npm.list()
311 if name in installed:
312 changed = True
313 npm.uninstall()
314
315 module.exit_json(changed=changed)
316
317
318 if __name__ == '__main__':
319 main()
320
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/modules/packaging/language/npm.py b/plugins/modules/packaging/language/npm.py
--- a/plugins/modules/packaging/language/npm.py
+++ b/plugins/modules/packaging/language/npm.py
@@ -82,6 +82,12 @@
type: bool
default: no
version_added: 2.0.0
+ no_bin_links:
+ description:
+ - Use the C(--no-bin-links) flag when installing.
+ type: bool
+ default: no
+ version_added: 2.5.0
requirements:
- npm installed in bin path (recommended /usr/local/bin)
'''
@@ -151,6 +157,7 @@
self.unsafe_perm = kwargs['unsafe_perm']
self.state = kwargs['state']
self.no_optional = kwargs['no_optional']
+ self.no_bin_links = kwargs['no_bin_links']
if kwargs['executable']:
self.executable = kwargs['executable'].split(' ')
@@ -181,6 +188,8 @@
cmd.append(self.registry)
if self.no_optional:
cmd.append('--no-optional')
+ if self.no_bin_links:
+ cmd.append('--no-bin-links')
# If path is specified, cd into that path and run the command.
cwd = None
@@ -259,6 +268,7 @@
unsafe_perm=dict(default=False, type='bool'),
ci=dict(default=False, type='bool'),
no_optional=dict(default=False, type='bool'),
+ no_bin_links=dict(default=False, type='bool'),
)
arg_spec['global'] = dict(default=False, type='bool')
module = AnsibleModule(
@@ -278,6 +288,7 @@
unsafe_perm = module.params['unsafe_perm']
ci = module.params['ci']
no_optional = module.params['no_optional']
+ no_bin_links = module.params['no_bin_links']
if not path and not glbl:
module.fail_json(msg='path must be specified when not using global')
@@ -286,7 +297,7 @@
npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production,
executable=executable, registry=registry, ignore_scripts=ignore_scripts,
- unsafe_perm=unsafe_perm, state=state, no_optional=no_optional)
+ unsafe_perm=unsafe_perm, state=state, no_optional=no_optional, no_bin_links=no_bin_links)
changed = False
if ci:
| {"golden_diff": "diff --git a/plugins/modules/packaging/language/npm.py b/plugins/modules/packaging/language/npm.py\n--- a/plugins/modules/packaging/language/npm.py\n+++ b/plugins/modules/packaging/language/npm.py\n@@ -82,6 +82,12 @@\n type: bool\n default: no\n version_added: 2.0.0\n+ no_bin_links:\n+ description:\n+ - Use the C(--no-bin-links) flag when installing.\n+ type: bool\n+ default: no\n+ version_added: 2.5.0\n requirements:\n - npm installed in bin path (recommended /usr/local/bin)\n '''\n@@ -151,6 +157,7 @@\n self.unsafe_perm = kwargs['unsafe_perm']\n self.state = kwargs['state']\n self.no_optional = kwargs['no_optional']\n+ self.no_bin_links = kwargs['no_bin_links']\n \n if kwargs['executable']:\n self.executable = kwargs['executable'].split(' ')\n@@ -181,6 +188,8 @@\n cmd.append(self.registry)\n if self.no_optional:\n cmd.append('--no-optional')\n+ if self.no_bin_links:\n+ cmd.append('--no-bin-links')\n \n # If path is specified, cd into that path and run the command.\n cwd = None\n@@ -259,6 +268,7 @@\n unsafe_perm=dict(default=False, type='bool'),\n ci=dict(default=False, type='bool'),\n no_optional=dict(default=False, type='bool'),\n+ no_bin_links=dict(default=False, type='bool'),\n )\n arg_spec['global'] = dict(default=False, type='bool')\n module = AnsibleModule(\n@@ -278,6 +288,7 @@\n unsafe_perm = module.params['unsafe_perm']\n ci = module.params['ci']\n no_optional = module.params['no_optional']\n+ no_bin_links = module.params['no_bin_links']\n \n if not path and not glbl:\n module.fail_json(msg='path must be specified when not using global')\n@@ -286,7 +297,7 @@\n \n npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production,\n executable=executable, registry=registry, ignore_scripts=ignore_scripts,\n- unsafe_perm=unsafe_perm, state=state, no_optional=no_optional)\n+ unsafe_perm=unsafe_perm, state=state, no_optional=no_optional, no_bin_links=no_bin_links)\n \n changed = False\n if ci:\n", "issue": "Enable no-bin-links for community.general.npm\n### Summary\n\nI'm working on getting a vagrant development environment setup and am running into an issue where npm is having trouble installing on a synced folder. 
A solution would be to enable a flag to add the --no-bin-links setting to disable symlinking executables but there's no way to set this currently.\n\n### Issue Type\n\nFeature Idea\n\n### Component Name\n\ncommunity.general.npm\n\n### Additional Information\n\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml (paste below)\r\n- name: NPM install for Login Service\r\n community.general.npm:\r\n path: \"{{ project_root }}\"\r\n links: no\r\n become_user: vagrant\r\n```\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2017 Chris Hoffman <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: npm\nshort_description: Manage node.js packages with npm\ndescription:\n - Manage node.js packages with Node Package Manager (npm).\nauthor: \"Chris Hoffman (@chrishoffman)\"\noptions:\n name:\n description:\n - The name of a node.js library to install.\n type: str\n required: false\n path:\n description:\n - The base path where to install the node.js libraries.\n type: path\n required: false\n version:\n description:\n - The version to be installed.\n type: str\n required: false\n global:\n description:\n - Install the node.js library globally.\n required: false\n default: no\n type: bool\n executable:\n description:\n - The executable location for npm.\n - This is useful if you are using a version manager, such as nvm.\n type: path\n required: false\n ignore_scripts:\n description:\n - Use the C(--ignore-scripts) flag when installing.\n required: false\n type: bool\n default: no\n unsafe_perm:\n description:\n - Use the C(--unsafe-perm) flag when installing.\n type: bool\n default: no\n ci:\n description:\n - Install packages based on package-lock file, same as running C(npm ci).\n type: bool\n default: no\n production:\n description:\n - Install dependencies in production mode, excluding devDependencies.\n required: false\n type: bool\n default: no\n registry:\n description:\n - The registry to install modules from.\n required: false\n type: str\n state:\n description:\n - The state of the node.js library.\n required: false\n type: str\n default: present\n choices: [ \"present\", \"absent\", \"latest\" ]\n no_optional:\n description:\n - Use the C(--no-optional) flag when installing.\n type: bool\n default: no\n version_added: 2.0.0\nrequirements:\n - npm installed in bin path (recommended /usr/local/bin)\n'''\n\nEXAMPLES = r'''\n- name: Install \"coffee-script\" node.js package.\n community.general.npm:\n name: coffee-script\n path: /app/location\n\n- name: Install \"coffee-script\" node.js package on version 1.6.1.\n community.general.npm:\n name: coffee-script\n version: '1.6.1'\n path: /app/location\n\n- name: Install \"coffee-script\" node.js package globally.\n community.general.npm:\n name: coffee-script\n global: yes\n\n- name: Remove the globally package \"coffee-script\".\n community.general.npm:\n name: coffee-script\n global: yes\n state: absent\n\n- name: Install \"coffee-script\" node.js package from custom registry.\n community.general.npm:\n name: coffee-script\n registry: 'http://registry.mysite.com'\n\n- name: Install packages based on package.json.\n community.general.npm:\n path: /app/location\n\n- name: Update packages based on package.json to 
their latest version.\n community.general.npm:\n path: /app/location\n state: latest\n\n- name: Install packages based on package.json using the npm installed with nvm v0.10.1.\n community.general.npm:\n path: /app/location\n executable: /opt/nvm/v0.10.1/bin/npm\n state: present\n'''\n\nimport json\nimport os\nimport re\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_native\n\n\nclass Npm(object):\n def __init__(self, module, **kwargs):\n self.module = module\n self.glbl = kwargs['glbl']\n self.name = kwargs['name']\n self.version = kwargs['version']\n self.path = kwargs['path']\n self.registry = kwargs['registry']\n self.production = kwargs['production']\n self.ignore_scripts = kwargs['ignore_scripts']\n self.unsafe_perm = kwargs['unsafe_perm']\n self.state = kwargs['state']\n self.no_optional = kwargs['no_optional']\n\n if kwargs['executable']:\n self.executable = kwargs['executable'].split(' ')\n else:\n self.executable = [module.get_bin_path('npm', True)]\n\n if kwargs['version'] and self.state != 'absent':\n self.name_version = self.name + '@' + str(self.version)\n else:\n self.name_version = self.name\n\n def _exec(self, args, run_in_check_mode=False, check_rc=True, add_package_name=True):\n if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):\n cmd = self.executable + args\n\n if self.glbl:\n cmd.append('--global')\n if self.production and ('install' in cmd or 'update' in cmd):\n cmd.append('--production')\n if self.ignore_scripts:\n cmd.append('--ignore-scripts')\n if self.unsafe_perm:\n cmd.append('--unsafe-perm')\n if self.name and add_package_name:\n cmd.append(self.name_version)\n if self.registry:\n cmd.append('--registry')\n cmd.append(self.registry)\n if self.no_optional:\n cmd.append('--no-optional')\n\n # If path is specified, cd into that path and run the command.\n cwd = None\n if self.path:\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n if not os.path.isdir(self.path):\n self.module.fail_json(msg=\"path %s is not a directory\" % self.path)\n cwd = self.path\n\n rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)\n return out\n return ''\n\n def list(self):\n cmd = ['list', '--json', '--long']\n\n installed = list()\n missing = list()\n data = {}\n try:\n data = json.loads(self._exec(cmd, True, False, False) or '{}')\n except (getattr(json, 'JSONDecodeError', ValueError)) as e:\n self.module.fail_json(msg=\"Failed to parse NPM output with error %s\" % to_native(e))\n if 'dependencies' in data:\n for dep in data['dependencies']:\n if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:\n missing.append(dep)\n elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']:\n missing.append(dep)\n else:\n installed.append(dep)\n if self.name and self.name not in installed:\n missing.append(self.name)\n # Named dependency not installed\n else:\n missing.append(self.name)\n\n return installed, missing\n\n def install(self):\n return self._exec(['install'])\n\n def ci_install(self):\n return self._exec(['ci'])\n\n def update(self):\n return self._exec(['update'])\n\n def uninstall(self):\n return self._exec(['uninstall'])\n\n def list_outdated(self):\n outdated = list()\n data = self._exec(['outdated'], True, False)\n for dep in data.splitlines():\n if dep:\n # node.js v0.10.22 changed the `npm outdated` module separator\n # from \"@\" to \" \". 
Split on both for backwards compatibility.\n pkg, other = re.split(r'\\s|@', dep, 1)\n outdated.append(pkg)\n\n return outdated\n\n\ndef main():\n arg_spec = dict(\n name=dict(default=None, type='str'),\n path=dict(default=None, type='path'),\n version=dict(default=None, type='str'),\n production=dict(default=False, type='bool'),\n executable=dict(default=None, type='path'),\n registry=dict(default=None, type='str'),\n state=dict(default='present', choices=['present', 'absent', 'latest']),\n ignore_scripts=dict(default=False, type='bool'),\n unsafe_perm=dict(default=False, type='bool'),\n ci=dict(default=False, type='bool'),\n no_optional=dict(default=False, type='bool'),\n )\n arg_spec['global'] = dict(default=False, type='bool')\n module = AnsibleModule(\n argument_spec=arg_spec,\n supports_check_mode=True\n )\n\n name = module.params['name']\n path = module.params['path']\n version = module.params['version']\n glbl = module.params['global']\n production = module.params['production']\n executable = module.params['executable']\n registry = module.params['registry']\n state = module.params['state']\n ignore_scripts = module.params['ignore_scripts']\n unsafe_perm = module.params['unsafe_perm']\n ci = module.params['ci']\n no_optional = module.params['no_optional']\n\n if not path and not glbl:\n module.fail_json(msg='path must be specified when not using global')\n if state == 'absent' and not name:\n module.fail_json(msg='uninstalling a package is only available for named packages')\n\n npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production,\n executable=executable, registry=registry, ignore_scripts=ignore_scripts,\n unsafe_perm=unsafe_perm, state=state, no_optional=no_optional)\n\n changed = False\n if ci:\n npm.ci_install()\n changed = True\n elif state == 'present':\n installed, missing = npm.list()\n if missing:\n changed = True\n npm.install()\n elif state == 'latest':\n installed, missing = npm.list()\n outdated = npm.list_outdated()\n if missing:\n changed = True\n npm.install()\n if outdated:\n changed = True\n npm.update()\n else: # absent\n installed, missing = npm.list()\n if name in installed:\n changed = True\n npm.uninstall()\n\n module.exit_json(changed=changed)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/packaging/language/npm.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2017 Chris Hoffman <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: npm\nshort_description: Manage node.js packages with npm\ndescription:\n - Manage node.js packages with Node Package Manager (npm).\nauthor: \"Chris Hoffman (@chrishoffman)\"\noptions:\n name:\n description:\n - The name of a node.js library to install.\n type: str\n required: false\n path:\n description:\n - The base path where to install the node.js libraries.\n type: path\n required: false\n version:\n description:\n - The version to be installed.\n type: str\n required: false\n global:\n description:\n - Install the node.js library globally.\n required: false\n default: no\n type: bool\n executable:\n description:\n - The executable location for npm.\n - This is useful if you are using a version manager, such as nvm.\n type: path\n required: false\n ignore_scripts:\n description:\n - Use the C(--ignore-scripts) flag when installing.\n 
required: false\n type: bool\n default: no\n unsafe_perm:\n description:\n - Use the C(--unsafe-perm) flag when installing.\n type: bool\n default: no\n ci:\n description:\n - Install packages based on package-lock file, same as running C(npm ci).\n type: bool\n default: no\n production:\n description:\n - Install dependencies in production mode, excluding devDependencies.\n required: false\n type: bool\n default: no\n registry:\n description:\n - The registry to install modules from.\n required: false\n type: str\n state:\n description:\n - The state of the node.js library.\n required: false\n type: str\n default: present\n choices: [ \"present\", \"absent\", \"latest\" ]\n no_optional:\n description:\n - Use the C(--no-optional) flag when installing.\n type: bool\n default: no\n version_added: 2.0.0\n no_bin_links:\n description:\n - Use the C(--no-bin-links) flag when installing.\n type: bool\n default: no\n version_added: 2.5.0\nrequirements:\n - npm installed in bin path (recommended /usr/local/bin)\n'''\n\nEXAMPLES = r'''\n- name: Install \"coffee-script\" node.js package.\n community.general.npm:\n name: coffee-script\n path: /app/location\n\n- name: Install \"coffee-script\" node.js package on version 1.6.1.\n community.general.npm:\n name: coffee-script\n version: '1.6.1'\n path: /app/location\n\n- name: Install \"coffee-script\" node.js package globally.\n community.general.npm:\n name: coffee-script\n global: yes\n\n- name: Remove the globally package \"coffee-script\".\n community.general.npm:\n name: coffee-script\n global: yes\n state: absent\n\n- name: Install \"coffee-script\" node.js package from custom registry.\n community.general.npm:\n name: coffee-script\n registry: 'http://registry.mysite.com'\n\n- name: Install packages based on package.json.\n community.general.npm:\n path: /app/location\n\n- name: Update packages based on package.json to their latest version.\n community.general.npm:\n path: /app/location\n state: latest\n\n- name: Install packages based on package.json using the npm installed with nvm v0.10.1.\n community.general.npm:\n path: /app/location\n executable: /opt/nvm/v0.10.1/bin/npm\n state: present\n'''\n\nimport json\nimport os\nimport re\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_native\n\n\nclass Npm(object):\n def __init__(self, module, **kwargs):\n self.module = module\n self.glbl = kwargs['glbl']\n self.name = kwargs['name']\n self.version = kwargs['version']\n self.path = kwargs['path']\n self.registry = kwargs['registry']\n self.production = kwargs['production']\n self.ignore_scripts = kwargs['ignore_scripts']\n self.unsafe_perm = kwargs['unsafe_perm']\n self.state = kwargs['state']\n self.no_optional = kwargs['no_optional']\n self.no_bin_links = kwargs['no_bin_links']\n\n if kwargs['executable']:\n self.executable = kwargs['executable'].split(' ')\n else:\n self.executable = [module.get_bin_path('npm', True)]\n\n if kwargs['version'] and self.state != 'absent':\n self.name_version = self.name + '@' + str(self.version)\n else:\n self.name_version = self.name\n\n def _exec(self, args, run_in_check_mode=False, check_rc=True, add_package_name=True):\n if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):\n cmd = self.executable + args\n\n if self.glbl:\n cmd.append('--global')\n if self.production and ('install' in cmd or 'update' in cmd):\n cmd.append('--production')\n if self.ignore_scripts:\n cmd.append('--ignore-scripts')\n if self.unsafe_perm:\n 
cmd.append('--unsafe-perm')\n if self.name and add_package_name:\n cmd.append(self.name_version)\n if self.registry:\n cmd.append('--registry')\n cmd.append(self.registry)\n if self.no_optional:\n cmd.append('--no-optional')\n if self.no_bin_links:\n cmd.append('--no-bin-links')\n\n # If path is specified, cd into that path and run the command.\n cwd = None\n if self.path:\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n if not os.path.isdir(self.path):\n self.module.fail_json(msg=\"path %s is not a directory\" % self.path)\n cwd = self.path\n\n rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)\n return out\n return ''\n\n def list(self):\n cmd = ['list', '--json', '--long']\n\n installed = list()\n missing = list()\n data = {}\n try:\n data = json.loads(self._exec(cmd, True, False, False) or '{}')\n except (getattr(json, 'JSONDecodeError', ValueError)) as e:\n self.module.fail_json(msg=\"Failed to parse NPM output with error %s\" % to_native(e))\n if 'dependencies' in data:\n for dep in data['dependencies']:\n if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:\n missing.append(dep)\n elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']:\n missing.append(dep)\n else:\n installed.append(dep)\n if self.name and self.name not in installed:\n missing.append(self.name)\n # Named dependency not installed\n else:\n missing.append(self.name)\n\n return installed, missing\n\n def install(self):\n return self._exec(['install'])\n\n def ci_install(self):\n return self._exec(['ci'])\n\n def update(self):\n return self._exec(['update'])\n\n def uninstall(self):\n return self._exec(['uninstall'])\n\n def list_outdated(self):\n outdated = list()\n data = self._exec(['outdated'], True, False)\n for dep in data.splitlines():\n if dep:\n # node.js v0.10.22 changed the `npm outdated` module separator\n # from \"@\" to \" \". 
Split on both for backwards compatibility.\n pkg, other = re.split(r'\\s|@', dep, 1)\n outdated.append(pkg)\n\n return outdated\n\n\ndef main():\n arg_spec = dict(\n name=dict(default=None, type='str'),\n path=dict(default=None, type='path'),\n version=dict(default=None, type='str'),\n production=dict(default=False, type='bool'),\n executable=dict(default=None, type='path'),\n registry=dict(default=None, type='str'),\n state=dict(default='present', choices=['present', 'absent', 'latest']),\n ignore_scripts=dict(default=False, type='bool'),\n unsafe_perm=dict(default=False, type='bool'),\n ci=dict(default=False, type='bool'),\n no_optional=dict(default=False, type='bool'),\n no_bin_links=dict(default=False, type='bool'),\n )\n arg_spec['global'] = dict(default=False, type='bool')\n module = AnsibleModule(\n argument_spec=arg_spec,\n supports_check_mode=True\n )\n\n name = module.params['name']\n path = module.params['path']\n version = module.params['version']\n glbl = module.params['global']\n production = module.params['production']\n executable = module.params['executable']\n registry = module.params['registry']\n state = module.params['state']\n ignore_scripts = module.params['ignore_scripts']\n unsafe_perm = module.params['unsafe_perm']\n ci = module.params['ci']\n no_optional = module.params['no_optional']\n no_bin_links = module.params['no_bin_links']\n\n if not path and not glbl:\n module.fail_json(msg='path must be specified when not using global')\n if state == 'absent' and not name:\n module.fail_json(msg='uninstalling a package is only available for named packages')\n\n npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production,\n executable=executable, registry=registry, ignore_scripts=ignore_scripts,\n unsafe_perm=unsafe_perm, state=state, no_optional=no_optional, no_bin_links=no_bin_links)\n\n changed = False\n if ci:\n npm.ci_install()\n changed = True\n elif state == 'present':\n installed, missing = npm.list()\n if missing:\n changed = True\n npm.install()\n elif state == 'latest':\n installed, missing = npm.list()\n outdated = npm.list_outdated()\n if missing:\n changed = True\n npm.install()\n if outdated:\n changed = True\n npm.update()\n else: # absent\n installed, missing = npm.list()\n if name in installed:\n changed = True\n npm.uninstall()\n\n module.exit_json(changed=changed)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/packaging/language/npm.py"}]} | 3,547 | 563 |
gh_patches_debug_30420 | rasdani/github-patches | git_diff | PrefectHQ__prefect-347 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add back `environment.yml` file
I realized why we might want to maintain an `environment.yml` file in parallel with our `requirements.txt` file: `requirements.txt` will be installed via `pip`, whereas if you create an environment via `conda`, the packages will be installed / maintained via `conda`. This can be useful for those who try to `conda install` everything (since it has different package version logic + handles non-python dependencies).
--- END ISSUE ---
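One way to keep such a conda file from drifting out of sync is to generate it from the pip requirements. The sketch below is purely illustrative: the file names, environment name, and channel are assumptions rather than anything taken from this repository, and the patch later in this record takes a different route (deriving setup.py's dependency lists from a shared requirements.ini).

```python
# Illustrative helper (not part of the repository): regenerate environment.yml
# from requirements.txt so the conda and pip dependency lists stay in sync.
from pathlib import Path


def requirements_to_environment(req_path="requirements.txt",
                                env_path="environment.yml",
                                env_name="prefect"):
    # Keep non-empty, non-comment requirement lines as-is. Note that conda's
    # version-specifier syntax differs slightly from pip's, so generated pins
    # may still need manual adjustment.
    reqs = [line.strip()
            for line in Path(req_path).read_text().splitlines()
            if line.strip() and not line.strip().startswith("#")]
    lines = ["name: " + env_name,
             "channels:",
             "  - defaults",
             "dependencies:"]
    lines += ["  - " + r for r in reqs]
    Path(env_path).write_text("\n".join(lines) + "\n")


if __name__ == "__main__":
    requirements_to_environment()
```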
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula
2
3 from setuptools import find_packages, setup
4
5 import sys
6 import versioneer
7
8 install_requires = [
9 "click >= 6.7, < 7.0",
10 "cloudpickle >= 0.6.0",
11 "croniter >= 0.3.23, < 0.4",
12 "cryptography >= 2.2.2, < 3.0",
13 "dask >= 0.18, < 0.19",
14 "distributed >= 1.21.8, < 2.0",
15 "docker >= 3.4.1, < 3.5",
16 "marshmallow == 3.0.0b19",
17 "marshmallow-oneofschema >= 2.0.0b2, < 3.0",
18 "mypy >= 0.600, < 0.700",
19 "mypy_extensions >= 0.4.0, < 0.5",
20 "pendulum >= 2.0.4, < 3.0",
21 "python-dateutil >= 2.7.3, < 3.0",
22 "requests >= 2.20, < 3.0",
23 "toml >= 0.9.4, < 1.0",
24 "typing >= 3.6.4, < 4.0",
25 "typing_extensions >= 3.6.4, < 4.0",
26 "xxhash >= 1.2.0, < 2.0",
27 ]
28
29 templates = ["jinja2 >= 2.0, < 3.0"]
30 viz = ["bokeh == 0.13.0", "graphviz >= 0.8.3"]
31 dev = [
32 "pre-commit",
33 "pytest >= 3.8, < 4.0",
34 "pytest-cov",
35 "pytest-env",
36 "pytest-xdist",
37 "Pygments == 2.2.0",
38 ]
39
40 if sys.version_info >= (3, 6):
41 dev += ["black"]
42
43 extras = {
44 "dev": dev + viz,
45 "viz": viz,
46 "templates": templates,
47 "all_extras": dev + templates + viz,
48 }
49
50 setup(
51 name="prefect",
52 version=versioneer.get_version(),
53 cmdclass=versioneer.get_cmdclass(),
54 description="",
55 long_description=open("README.md").read(),
56 url="https://www.github.com/prefecthq/prefect",
57 author="Prefect Technologies, Inc.",
58 author_email="[email protected]",
59 install_requires=install_requires,
60 extras_require=extras,
61 scripts=[],
62 packages=find_packages(where="src"),
63 package_dir={"": "src"},
64 include_package_data=True,
65 entry_points={"console_scripts": ["prefect=prefect.cli:cli"]},
66 )
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -2,51 +2,39 @@
from setuptools import find_packages, setup
+import configparser
import sys
import versioneer
-install_requires = [
- "click >= 6.7, < 7.0",
- "cloudpickle >= 0.6.0",
- "croniter >= 0.3.23, < 0.4",
- "cryptography >= 2.2.2, < 3.0",
- "dask >= 0.18, < 0.19",
- "distributed >= 1.21.8, < 2.0",
- "docker >= 3.4.1, < 3.5",
- "marshmallow == 3.0.0b19",
- "marshmallow-oneofschema >= 2.0.0b2, < 3.0",
- "mypy >= 0.600, < 0.700",
- "mypy_extensions >= 0.4.0, < 0.5",
- "pendulum >= 2.0.4, < 3.0",
- "python-dateutil >= 2.7.3, < 3.0",
- "requests >= 2.20, < 3.0",
- "toml >= 0.9.4, < 1.0",
- "typing >= 3.6.4, < 4.0",
- "typing_extensions >= 3.6.4, < 4.0",
- "xxhash >= 1.2.0, < 2.0",
-]
+config = configparser.ConfigParser()
+config.read("requirements.ini")
-templates = ["jinja2 >= 2.0, < 3.0"]
-viz = ["bokeh == 0.13.0", "graphviz >= 0.8.3"]
-dev = [
- "pre-commit",
- "pytest >= 3.8, < 4.0",
- "pytest-cov",
- "pytest-env",
- "pytest-xdist",
- "Pygments == 2.2.0",
-]
+## base requirements
+install_requires = ["".join(req) for req in config["base"].items()]
-if sys.version_info >= (3, 6):
- dev += ["black"]
+## section dependencies
+includes = {}
+for section in config.sections():
+ includes[section] = config[section].pop("include", "").split(",")
extras = {
- "dev": dev + viz,
- "viz": viz,
- "templates": templates,
- "all_extras": dev + templates + viz,
+ "dev": ["".join(req) for req in config["dev"].items()],
+ "viz": ["".join(req) for req in config["viz"].items()],
+ "templates": ["".join(req) for req in config["templates"].items()],
}
+## process include keyword for related sections
+for section in extras:
+ for other in includes[section]:
+ extras[section] += extras.get(other.strip(), [])
+
+
+if sys.version_info >= (3, 6):
+ extras["dev"] += ["black"]
+
+extras["all_extras"] = extras["dev"] + extras["viz"] + extras["templates"]
+
+
setup(
name="prefect",
version=versioneer.get_version(),
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -2,51 +2,39 @@\n \n from setuptools import find_packages, setup\n \n+import configparser\n import sys\n import versioneer\n \n-install_requires = [\n- \"click >= 6.7, < 7.0\",\n- \"cloudpickle >= 0.6.0\",\n- \"croniter >= 0.3.23, < 0.4\",\n- \"cryptography >= 2.2.2, < 3.0\",\n- \"dask >= 0.18, < 0.19\",\n- \"distributed >= 1.21.8, < 2.0\",\n- \"docker >= 3.4.1, < 3.5\",\n- \"marshmallow == 3.0.0b19\",\n- \"marshmallow-oneofschema >= 2.0.0b2, < 3.0\",\n- \"mypy >= 0.600, < 0.700\",\n- \"mypy_extensions >= 0.4.0, < 0.5\",\n- \"pendulum >= 2.0.4, < 3.0\",\n- \"python-dateutil >= 2.7.3, < 3.0\",\n- \"requests >= 2.20, < 3.0\",\n- \"toml >= 0.9.4, < 1.0\",\n- \"typing >= 3.6.4, < 4.0\",\n- \"typing_extensions >= 3.6.4, < 4.0\",\n- \"xxhash >= 1.2.0, < 2.0\",\n-]\n+config = configparser.ConfigParser()\n+config.read(\"requirements.ini\")\n \n-templates = [\"jinja2 >= 2.0, < 3.0\"]\n-viz = [\"bokeh == 0.13.0\", \"graphviz >= 0.8.3\"]\n-dev = [\n- \"pre-commit\",\n- \"pytest >= 3.8, < 4.0\",\n- \"pytest-cov\",\n- \"pytest-env\",\n- \"pytest-xdist\",\n- \"Pygments == 2.2.0\",\n-]\n+## base requirements\n+install_requires = [\"\".join(req) for req in config[\"base\"].items()]\n \n-if sys.version_info >= (3, 6):\n- dev += [\"black\"]\n+## section dependencies\n+includes = {}\n+for section in config.sections():\n+ includes[section] = config[section].pop(\"include\", \"\").split(\",\")\n \n extras = {\n- \"dev\": dev + viz,\n- \"viz\": viz,\n- \"templates\": templates,\n- \"all_extras\": dev + templates + viz,\n+ \"dev\": [\"\".join(req) for req in config[\"dev\"].items()],\n+ \"viz\": [\"\".join(req) for req in config[\"viz\"].items()],\n+ \"templates\": [\"\".join(req) for req in config[\"templates\"].items()],\n }\n \n+## process include keyword for related sections\n+for section in extras:\n+ for other in includes[section]:\n+ extras[section] += extras.get(other.strip(), [])\n+\n+\n+if sys.version_info >= (3, 6):\n+ extras[\"dev\"] += [\"black\"]\n+\n+extras[\"all_extras\"] = extras[\"dev\"] + extras[\"viz\"] + extras[\"templates\"]\n+\n+\n setup(\n name=\"prefect\",\n version=versioneer.get_version(),\n", "issue": "Add back `environment.yml` file\nI realized why we might want to maintain an `environment.yml` file in parallel with our `requirements.txt` file: `requirements.txt` will be installed via `pip`, whereas if you create an environment via `conda`, the packages will be installed / maintained via `conda`. 
This can be useful for those who try to `conda install` everything (since it has different package version logic + handles non-python dependencies).\n", "before_files": [{"content": "# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula\n\nfrom setuptools import find_packages, setup\n\nimport sys\nimport versioneer\n\ninstall_requires = [\n \"click >= 6.7, < 7.0\",\n \"cloudpickle >= 0.6.0\",\n \"croniter >= 0.3.23, < 0.4\",\n \"cryptography >= 2.2.2, < 3.0\",\n \"dask >= 0.18, < 0.19\",\n \"distributed >= 1.21.8, < 2.0\",\n \"docker >= 3.4.1, < 3.5\",\n \"marshmallow == 3.0.0b19\",\n \"marshmallow-oneofschema >= 2.0.0b2, < 3.0\",\n \"mypy >= 0.600, < 0.700\",\n \"mypy_extensions >= 0.4.0, < 0.5\",\n \"pendulum >= 2.0.4, < 3.0\",\n \"python-dateutil >= 2.7.3, < 3.0\",\n \"requests >= 2.20, < 3.0\",\n \"toml >= 0.9.4, < 1.0\",\n \"typing >= 3.6.4, < 4.0\",\n \"typing_extensions >= 3.6.4, < 4.0\",\n \"xxhash >= 1.2.0, < 2.0\",\n]\n\ntemplates = [\"jinja2 >= 2.0, < 3.0\"]\nviz = [\"bokeh == 0.13.0\", \"graphviz >= 0.8.3\"]\ndev = [\n \"pre-commit\",\n \"pytest >= 3.8, < 4.0\",\n \"pytest-cov\",\n \"pytest-env\",\n \"pytest-xdist\",\n \"Pygments == 2.2.0\",\n]\n\nif sys.version_info >= (3, 6):\n dev += [\"black\"]\n\nextras = {\n \"dev\": dev + viz,\n \"viz\": viz,\n \"templates\": templates,\n \"all_extras\": dev + templates + viz,\n}\n\nsetup(\n name=\"prefect\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"\",\n long_description=open(\"README.md\").read(),\n url=\"https://www.github.com/prefecthq/prefect\",\n author=\"Prefect Technologies, Inc.\",\n author_email=\"[email protected]\",\n install_requires=install_requires,\n extras_require=extras,\n scripts=[],\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n include_package_data=True,\n entry_points={\"console_scripts\": [\"prefect=prefect.cli:cli\"]},\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula\n\nfrom setuptools import find_packages, setup\n\nimport configparser\nimport sys\nimport versioneer\n\nconfig = configparser.ConfigParser()\nconfig.read(\"requirements.ini\")\n\n## base requirements\ninstall_requires = [\"\".join(req) for req in config[\"base\"].items()]\n\n## section dependencies\nincludes = {}\nfor section in config.sections():\n includes[section] = config[section].pop(\"include\", \"\").split(\",\")\n\nextras = {\n \"dev\": [\"\".join(req) for req in config[\"dev\"].items()],\n \"viz\": [\"\".join(req) for req in config[\"viz\"].items()],\n \"templates\": [\"\".join(req) for req in config[\"templates\"].items()],\n}\n\n## process include keyword for related sections\nfor section in extras:\n for other in includes[section]:\n extras[section] += extras.get(other.strip(), [])\n\n\nif sys.version_info >= (3, 6):\n extras[\"dev\"] += [\"black\"]\n\nextras[\"all_extras\"] = extras[\"dev\"] + extras[\"viz\"] + extras[\"templates\"]\n\n\nsetup(\n name=\"prefect\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"\",\n long_description=open(\"README.md\").read(),\n url=\"https://www.github.com/prefecthq/prefect\",\n author=\"Prefect Technologies, Inc.\",\n author_email=\"[email protected]\",\n install_requires=install_requires,\n extras_require=extras,\n scripts=[],\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n include_package_data=True,\n 
entry_points={\"console_scripts\": [\"prefect=prefect.cli:cli\"]},\n)\n", "path": "setup.py"}]} | 1,122 | 787 |
gh_patches_debug_7434 | rasdani/github-patches | git_diff | freedomofpress__securedrop-2640 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Brazil flag for nice display in the language picker
Since the `pt_BR` locale is ready to go (see #2630), we should add a Brazil flag to the language picker.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/version.py`
Content:
```
1 __version__ = '0.4.4'
2
```
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # SecureDrop documentation build configuration file, created by
4 # sphinx-quickstart on Tue Oct 13 12:08:52 2015.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 import sys
16 import os
17 import shlex
18
19 # Detect if we're being built by Read the Docs
20 # https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs
21 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
22
23 # If extensions (or modules to document with autodoc) are in another directory,
24 # add these directories to sys.path here. If the directory is relative to the
25 # documentation root, use os.path.abspath to make it absolute, like shown here.
26 #sys.path.insert(0, os.path.abspath('.'))
27
28 # -- General configuration ------------------------------------------------
29
30 # If your documentation needs a minimal Sphinx version, state it here.
31 #needs_sphinx = '1.0'
32
33 # Add any Sphinx extension module names here, as strings. They can be
34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 # ones.
36 extensions = ['sphinx.ext.todo', ]
37
38 # Add any paths that contain templates here, relative to this directory.
39 templates_path = ['_templates']
40
41 # The suffix(es) of source filenames.
42 # You can specify multiple suffix as a list of string:
43 # source_suffix = ['.rst', '.md']
44 source_suffix = '.rst'
45
46 # The encoding of source files.
47 #source_encoding = 'utf-8-sig'
48
49 # The master toctree document.
50 master_doc = 'index'
51
52 # General information about the project.
53 project = u'SecureDrop'
54 copyright = u'2017, Freedom of the Press Foundation'
55 author = u'SecureDrop Team and Contributors'
56
57 # The version info for the project you're documenting, acts as replacement for
58 # |version| and |release|, also used in various other places throughout the
59 # built documents.
60 #
61 # The short X.Y version.
62 version = '0.4.4'
63 # The full version, including alpha/beta/rc tags.
64 release = '0.4.4'
65
66 # The language for content autogenerated by Sphinx. Refer to documentation
67 # for a list of supported languages.
68 #
69 # This is also used if you do content translation via gettext catalogs.
70 # Usually you set "language" from the command line for these cases.
71 language = None
72
73 # There are two options for replacing |today|: either, you set today to some
74 # non-false value, then it is used:
75 #today = ''
76 # Else, today_fmt is used as the format for a strftime call.
77 #today_fmt = '%B %d, %Y'
78
79 # List of patterns, relative to source directory, that match files and
80 # directories to ignore when looking for source files.
81 exclude_patterns = ['_build']
82
83 # The reST default role (used for this markup: `text`) to use for all
84 # documents.
85 #default_role = None
86
87 # If true, '()' will be appended to :func: etc. cross-reference text.
88 #add_function_parentheses = True
89
90 # If true, the current module name will be prepended to all description
91 # unit titles (such as .. function::).
92 #add_module_names = True
93
94 # If true, sectionauthor and moduleauthor directives will be shown in the
95 # output. They are ignored by default.
96 #show_authors = False
97
98 # The name of the Pygments (syntax highlighting) style to use.
99 pygments_style = 'sphinx'
100
101 # A list of ignored prefixes for module index sorting.
102 #modindex_common_prefix = []
103
104 # If true, keep warnings as "system message" paragraphs in the built documents.
105 #keep_warnings = False
106
107 # If true, `todo` and `todoList` produce output, else they produce nothing.
108 todo_include_todos = False
109
110
111 # -- Options for HTML output ----------------------------------------------
112
113 # The theme to use for HTML and HTML Help pages. See the documentation for
114 # a list of builtin themes.
115 if on_rtd:
116 html_theme = 'default'
117 else:
118 try:
119 # If you want to build the docs locally using the RTD theme,
120 # you may need to install it: ``pip install sphinx_rtd_theme``.
121 # https://github.com/snide/sphinx_rtd_theme#via-package
122 import sphinx_rtd_theme
123 html_theme = "sphinx_rtd_theme"
124 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
125 except ImportError:
126 # This theme is included with Sphinx and is quite nice (based
127 # on the Pocoo themes), but since we're using the RTD theme
128 # for the production docs, it's best to use that to avoid
129 # issues due to discrepancies between the themes.
130 html_theme = 'alabaster'
131
132 # Theme options are theme-specific and customize the look and feel of a theme
133 # further. For a list of options available for each theme, see the
134 # documentation.
135 #html_theme_options = {}
136
137 # Add any paths that contain custom themes here, relative to this directory.
138 #html_theme_path = []
139
140 # The name for this set of Sphinx documents. If None, it defaults to
141 # "<project> v<release> documentation".
142 #html_title = None
143
144 # A shorter title for the navigation bar. Default is the same as html_title.
145 #html_short_title = None
146
147 # The name of an image file (relative to this directory) to place at the top
148 # of the sidebar.
149 html_logo = '../securedrop/static/i/favicon.png'
150
151 # The name of an image file (within the static path) to use as favicon of the
152 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
153 # pixels large.
154 #html_favicon = None
155
156 # Add any paths that contain custom static files (such as style sheets) here,
157 # relative to this directory. They are copied after the builtin static files,
158 # so a file named "default.css" will overwrite the builtin "default.css".
159 # html_static_path = ['_static']
160
161 # Add any extra paths that contain custom files (such as robots.txt or
162 # .htaccess) here, relative to this directory. These files are copied
163 # directly to the root of the documentation.
164 #html_extra_path = []
165
166 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
167 # using the given strftime format.
168 #html_last_updated_fmt = '%b %d, %Y'
169
170 # If true, SmartyPants will be used to convert quotes and dashes to
171 # typographically correct entities.
172 #html_use_smartypants = True
173
174 # Custom sidebar templates, maps document names to template names.
175 #html_sidebars = {}
176
177 # Additional templates that should be rendered to pages, maps page names to
178 # template names.
179 #html_additional_pages = {}
180
181 # If false, no module index is generated.
182 #html_domain_indices = True
183
184 # If false, no index is generated.
185 #html_use_index = True
186
187 # If true, the index is split into individual pages for each letter.
188 #html_split_index = False
189
190 # If true, links to the reST sources are added to the pages.
191 #html_show_sourcelink = True
192
193 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
194 #html_show_sphinx = True
195
196 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
197 #html_show_copyright = True
198
199 # If true, an OpenSearch description file will be output, and all pages will
200 # contain a <link> tag referring to it. The value of this option must be the
201 # base URL from which the finished HTML is served.
202 #html_use_opensearch = ''
203
204 # This is the file name suffix for HTML files (e.g. ".xhtml").
205 #html_file_suffix = None
206
207 # Language to be used for generating the HTML full-text search index.
208 # Sphinx supports the following languages:
209 # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
210 # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
211 #html_search_language = 'en'
212
213 # A dictionary with options for the search language support, empty by default.
214 # Now only 'ja' uses this config value
215 #html_search_options = {'type': 'default'}
216
217 # The name of a javascript file (relative to the configuration directory) that
218 # implements a search results scorer. If empty, the default will be used.
219 #html_search_scorer = 'scorer.js'
220
221 # Output file base name for HTML help builder.
222 htmlhelp_basename = 'SecureDropdoc'
223
224 # -- Options for LaTeX output ---------------------------------------------
225
226 latex_elements = {
227 # The paper size ('letterpaper' or 'a4paper').
228 #'papersize': 'letterpaper',
229
230 # The font size ('10pt', '11pt' or '12pt').
231 #'pointsize': '10pt',
232
233 # Additional stuff for the LaTeX preamble.
234 #'preamble': '',
235
236 # Latex figure (float) alignment
237 #'figure_align': 'htbp',
238 }
239
240 # Grouping the document tree into LaTeX files. List of tuples
241 # (source start file, target name, title,
242 # author, documentclass [howto, manual, or own class]).
243 latex_documents = [
244 (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',
245 author, 'manual'),
246 ]
247
248 # The name of an image file (relative to this directory) to place at the top of
249 # the title page.
250 #latex_logo = None
251
252 # For "manual" documents, if this is true, then toplevel headings are parts,
253 # not chapters.
254 #latex_use_parts = False
255
256 # If true, show page references after internal links.
257 #latex_show_pagerefs = False
258
259 # If true, show URL addresses after external links.
260 #latex_show_urls = False
261
262 # Documents to append as an appendix to all manuals.
263 #latex_appendices = []
264
265 # If false, no module index is generated.
266 #latex_domain_indices = True
267
268
269 # -- Options for manual page output ---------------------------------------
270
271 # One entry per manual page. List of tuples
272 # (source start file, name, description, authors, manual section).
273 man_pages = [
274 (master_doc, 'securedrop', u'SecureDrop Documentation',
275 [author], 1)
276 ]
277
278 # If true, show URL addresses after external links.
279 #man_show_urls = False
280
281
282 # -- Options for Texinfo output -------------------------------------------
283
284 # Grouping the document tree into Texinfo files. List of tuples
285 # (source start file, target name, title, author,
286 # dir menu entry, description, category)
287 texinfo_documents = [
288 (master_doc, 'SecureDrop', u'SecureDrop Documentation',
289 author, 'SecureDrop', 'One line description of project.',
290 'Miscellaneous'),
291 ]
292
293 # Documents to append as an appendix to all manuals.
294 #texinfo_appendices = []
295
296 # If false, no module index is generated.
297 #texinfo_domain_indices = True
298
299 # How to display URL addresses: 'footnote', 'no', or 'inline'.
300 #texinfo_show_urls = 'footnote'
301
302 # If true, do not generate a @detailmenu in the "Top" node's menu.
303 #texinfo_no_detailmenu = False
304
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -59,9 +59,9 @@
# built documents.
#
# The short X.Y version.
-version = '0.4.4'
+version = '0.5-rc2'
# The full version, including alpha/beta/rc tags.
-release = '0.4.4'
+release = '0.5-rc2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/securedrop/version.py b/securedrop/version.py
--- a/securedrop/version.py
+++ b/securedrop/version.py
@@ -1 +1 @@
-__version__ = '0.4.4'
+__version__ = '0.5-rc2'
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -59,9 +59,9 @@\n # built documents.\n #\n # The short X.Y version.\n-version = '0.4.4'\n+version = '0.5-rc2'\n # The full version, including alpha/beta/rc tags.\n-release = '0.4.4'\n+release = '0.5-rc2'\n \n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\ndiff --git a/securedrop/version.py b/securedrop/version.py\n--- a/securedrop/version.py\n+++ b/securedrop/version.py\n@@ -1 +1 @@\n-__version__ = '0.4.4'\n+__version__ = '0.5-rc2'\n", "issue": "Add Brazil flag for nice display in the language picker\nSince the `pt_BR` locale is ready to go (see #2630), we should add a Brazil flag to the language picker.\n", "before_files": [{"content": "__version__ = '0.4.4'\n", "path": "securedrop/version.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# SecureDrop documentation build configuration file, created by\n# sphinx-quickstart on Tue Oct 13 12:08:52 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\nimport shlex\n\n# Detect if we're being built by Read the Docs\n# https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.todo', ]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'SecureDrop'\ncopyright = u'2017, Freedom of the Press Foundation'\nauthor = u'SecureDrop Team and Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.4.4'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.4.4'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nif on_rtd:\n html_theme = 'default'\nelse:\n try:\n # If you want to build the docs locally using the RTD theme,\n # you may need to install it: ``pip install sphinx_rtd_theme``.\n # https://github.com/snide/sphinx_rtd_theme#via-package\n import sphinx_rtd_theme\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n except ImportError:\n # This theme is included with Sphinx and is quite nice (based\n # on the Pocoo themes), but since we're using the RTD theme\n # for the production docs, it's best to use that to avoid\n # issues due to discrepancies between the themes.\n html_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = '../securedrop/static/i/favicon.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'SecureDropdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',\n author, 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'securedrop', u'SecureDrop Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'SecureDrop', u'SecureDrop Documentation',\n author, 'SecureDrop', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n", "path": "docs/conf.py"}], "after_files": [{"content": "__version__ = '0.5-rc2'\n", "path": "securedrop/version.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# SecureDrop documentation build configuration file, created by\n# sphinx-quickstart on Tue Oct 13 12:08:52 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\nimport shlex\n\n# Detect if we're being built by Read the Docs\n# https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.todo', ]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'SecureDrop'\ncopyright = u'2017, Freedom of the Press Foundation'\nauthor = u'SecureDrop Team and Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.5-rc2'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.5-rc2'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nif on_rtd:\n html_theme = 'default'\nelse:\n try:\n # If you want to build the docs locally using the RTD theme,\n # you may need to install it: ``pip install sphinx_rtd_theme``.\n # https://github.com/snide/sphinx_rtd_theme#via-package\n import sphinx_rtd_theme\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n except ImportError:\n # This theme is included with Sphinx and is quite nice (based\n # on the Pocoo themes), but since we're using the RTD theme\n # for the production docs, it's best to use that to avoid\n # issues due to discrepancies between the themes.\n html_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = '../securedrop/static/i/favicon.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. 
If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'SecureDropdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',\n author, 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'securedrop', u'SecureDrop Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'SecureDrop', u'SecureDrop Documentation',\n author, 'SecureDrop', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n", "path": "docs/conf.py"}]} | 3,689 | 185 |
gh_patches_debug_13957 | rasdani/github-patches | git_diff | opendatacube__datacube-core-680 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Errors when running against the impending sqlalchemy 1.3 release (in beta)
Originally reported in #667
Datacube consistently fails when run against the current beta version of sqlalchemy. [According to](https://www.sqlalchemy.org/blog/2019/02/08/sqlalchemy-1.3.0b3-released/) the sqlalchemy devs, this release "1.3b3 should hopefully be the last beta release for 1.3, as no additional major changes are planned." 
This isn't currently a problem, but it will break all of our builds and guides if not resolved before 1.3 is declared stable.
Manually reproduce the error with:
```
pip install sqlalchemy==1.3b3
datacube system init
```
- Either the sqlalchemy 1.3 beta has a bug, which we should report to them.
- Or our own code is doing something incorrect and we should fix it before 1.3 is declared stable.
--- END ISSUE ---
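The patch at the end of this record resolves the failure by registering the schema-qualified functions with a bare `name` plus `packagenames`, instead of embedding the schema in the function name itself. A minimal, self-contained sketch of that pattern (written against the SQLAlchemy 1.x API, with an illustrative column and example query) follows:

```python
# Sketch of the GenericFunction registration pattern used in the patch below.
# The schema constant matches the file under discussion; the column name and
# the example query are illustrative only.
from sqlalchemy import TIMESTAMP, column, func, select
from sqlalchemy.sql.functions import GenericFunction

SCHEMA_NAME = 'agdc'


class CommonTimestamp(GenericFunction):
    type = TIMESTAMP(timezone=True)
    name = 'common_timestamp'  # bare name, no schema prefix

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Supplying the schema here renders as agdc.common_timestamp(...)
        # without the whole dotted string being quoted as one identifier.
        self.packagenames = [SCHEMA_NAME]


if __name__ == '__main__':
    print(select([func.common_timestamp(column('t'))]))
    # e.g. SELECT agdc.common_timestamp(t) AS common_timestamp_1
```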
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datacube/drivers/postgres/sql.py`
Content:
```
1 # coding=utf-8
2 """
3 Custom types for postgres & sqlalchemy
4 """
5
6 from sqlalchemy import TIMESTAMP
7 from sqlalchemy.dialects.postgresql.ranges import RangeOperators
8 from sqlalchemy.ext.compiler import compiles
9 from sqlalchemy.sql import sqltypes
10 from sqlalchemy.sql.expression import Executable, ClauseElement
11 from sqlalchemy.sql.functions import GenericFunction
12
13 SCHEMA_NAME = 'agdc'
14
15
16 class CreateView(Executable, ClauseElement):
17 def __init__(self, name, select):
18 self.name = name
19 self.select = select
20
21
22 @compiles(CreateView)
23 def visit_create_view(element, compiler, **kw):
24 return "CREATE VIEW %s AS %s" % (
25 element.name,
26 compiler.process(element.select, literal_binds=True)
27 )
28
29
30 TYPES_INIT_SQL = """
31 create or replace function {schema}.common_timestamp(text)
32 returns timestamp with time zone as $$
33 select ($1)::timestamp at time zone 'utc';
34 $$ language sql immutable returns null on null input;
35
36 create type {schema}.float8range as range (
37 subtype = float8,
38 subtype_diff = float8mi
39 );
40 """.format(schema=SCHEMA_NAME)
41
42
43 # pylint: disable=abstract-method
44 class FLOAT8RANGE(RangeOperators, sqltypes.TypeEngine):
45 __visit_name__ = 'FLOAT8RANGE'
46
47
48 @compiles(FLOAT8RANGE)
49 def visit_float8range(element, compiler, **kw):
50 return "FLOAT8RANGE"
51
52
53 # Register the function with SQLAlchemhy.
54 # pylint: disable=too-many-ancestors
55 class CommonTimestamp(GenericFunction):
56 type = TIMESTAMP(timezone=True)
57 package = 'agdc'
58 identifier = 'common_timestamp'
59
60 name = '%s.common_timestamp' % SCHEMA_NAME
61
62
63 # pylint: disable=too-many-ancestors
64 class Float8Range(GenericFunction):
65 type = FLOAT8RANGE
66 package = 'agdc'
67 identifier = 'float8range'
68
69 name = '%s.float8range' % SCHEMA_NAME
70
71
72 class PGNAME(sqltypes.Text):
73 """Postgres 'NAME' type."""
74 __visit_name__ = 'NAME'
75
76
77 @compiles(PGNAME)
78 def visit_name(element, compiler, **kw):
79 return "NAME"
80
81
82 def pg_exists(conn, name):
83 """
84 Does a postgres object exist?
85 :rtype bool
86 """
87 return conn.execute("SELECT to_regclass(%s)", name).scalar() is not None
88
89
90 def pg_column_exists(conn, table, column):
91 """
92 Does a postgres object exist?
93 :rtype bool
94 """
95 return conn.execute("""
96 SELECT 1 FROM pg_attribute
97 WHERE attrelid = to_regclass(%s)
98 AND attname = %s
99 AND NOT attisdropped
100 """, table, column).scalar() is not None
101
102
103 def escape_pg_identifier(engine, name):
104 """
105 Escape identifiers (tables, fields, roles, etc) for inclusion in SQL statements.
106
107 psycopg2 can safely merge query arguments, but cannot do the same for dynamically
108 generating queries.
109
110 See http://initd.org/psycopg/docs/sql.html for more information.
111 """
112 # New (2.7+) versions of psycopg2 have function: extensions.quote_ident()
113 # But it's too bleeding edge right now. We'll ask the server to escape instead, as
114 # these are not performance sensitive.
115 return engine.execute("select quote_ident(%s)", name).scalar()
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/datacube/drivers/postgres/sql.py b/datacube/drivers/postgres/sql.py
--- a/datacube/drivers/postgres/sql.py
+++ b/datacube/drivers/postgres/sql.py
@@ -57,7 +57,11 @@
package = 'agdc'
identifier = 'common_timestamp'
- name = '%s.common_timestamp' % SCHEMA_NAME
+ name = 'common_timestamp'
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.packagenames = ['%s' % SCHEMA_NAME]
# pylint: disable=too-many-ancestors
@@ -66,7 +70,11 @@
package = 'agdc'
identifier = 'float8range'
- name = '%s.float8range' % SCHEMA_NAME
+ name = 'float8range'
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.packagenames = ['%s' % SCHEMA_NAME]
class PGNAME(sqltypes.Text):
| {"golden_diff": "diff --git a/datacube/drivers/postgres/sql.py b/datacube/drivers/postgres/sql.py\n--- a/datacube/drivers/postgres/sql.py\n+++ b/datacube/drivers/postgres/sql.py\n@@ -57,7 +57,11 @@\n package = 'agdc'\n identifier = 'common_timestamp'\n \n- name = '%s.common_timestamp' % SCHEMA_NAME\n+ name = 'common_timestamp'\n+\n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+ self.packagenames = ['%s' % SCHEMA_NAME]\n \n \n # pylint: disable=too-many-ancestors\n@@ -66,7 +70,11 @@\n package = 'agdc'\n identifier = 'float8range'\n \n- name = '%s.float8range' % SCHEMA_NAME\n+ name = 'float8range'\n+\n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+ self.packagenames = ['%s' % SCHEMA_NAME]\n \n \n class PGNAME(sqltypes.Text):\n", "issue": "Errors when running against the impending sqlalchemy 1.3 release (in beta)\nOriginally reported in #667\r\n\r\nDatacube consistently fails when run against the current beta version of sqlalchemy. [According to](https://www.sqlalchemy.org/blog/2019/02/08/sqlalchemy-1.3.0b3-released/) the sqlalchemy devs this release \"1.3b3 should hopefully be the last beta release for 1.3, as no additional major changes are planned.\"\r\n\r\nThis isn't currently a problem, but it will break all of our builds and guides if not resolved before 1.3 is declared stable.\r\n\r\nManually reproduce the error with:\r\n\r\n```\r\n pip install sqlalchemy==1.3b3\r\n datacube system init\r\n```\r\n\r\n- Either the sqlalchemy 1.3 beta has a bug, which we should report to them.\r\n- Or our own code is doing something incorrect and we should fix it before 1.3 is declared stable.\r\n\nErrors when running against the impending sqlalchemy 1.3 release (in beta)\nOriginally reported in #667\r\n\r\nDatacube consistently fails when run against the current beta version of sqlalchemy. 
[According to](https://www.sqlalchemy.org/blog/2019/02/08/sqlalchemy-1.3.0b3-released/) the sqlalchemy devs this release \"1.3b3 should hopefully be the last beta release for 1.3, as no additional major changes are planned.\"\r\n\r\nThis isn't currently a problem, but it will break all of our builds and guides if not resolved before 1.3 is declared stable.\r\n\r\nManually reproduce the error with:\r\n\r\n```\r\n pip install sqlalchemy==1.3b3\r\n datacube system init\r\n```\r\n\r\n- Either the sqlalchemy 1.3 beta has a bug, which we should report to them.\r\n- Or our own code is doing something incorrect and we should fix it before 1.3 is declared stable.\r\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nCustom types for postgres & sqlalchemy\n\"\"\"\n\nfrom sqlalchemy import TIMESTAMP\nfrom sqlalchemy.dialects.postgresql.ranges import RangeOperators\nfrom sqlalchemy.ext.compiler import compiles\nfrom sqlalchemy.sql import sqltypes\nfrom sqlalchemy.sql.expression import Executable, ClauseElement\nfrom sqlalchemy.sql.functions import GenericFunction\n\nSCHEMA_NAME = 'agdc'\n\n\nclass CreateView(Executable, ClauseElement):\n def __init__(self, name, select):\n self.name = name\n self.select = select\n\n\n@compiles(CreateView)\ndef visit_create_view(element, compiler, **kw):\n return \"CREATE VIEW %s AS %s\" % (\n element.name,\n compiler.process(element.select, literal_binds=True)\n )\n\n\nTYPES_INIT_SQL = \"\"\"\ncreate or replace function {schema}.common_timestamp(text)\nreturns timestamp with time zone as $$\nselect ($1)::timestamp at time zone 'utc';\n$$ language sql immutable returns null on null input;\n\ncreate type {schema}.float8range as range (\n subtype = float8,\n subtype_diff = float8mi\n);\n\"\"\".format(schema=SCHEMA_NAME)\n\n\n# pylint: disable=abstract-method\nclass FLOAT8RANGE(RangeOperators, sqltypes.TypeEngine):\n __visit_name__ = 'FLOAT8RANGE'\n\n\n@compiles(FLOAT8RANGE)\ndef visit_float8range(element, compiler, **kw):\n return \"FLOAT8RANGE\"\n\n\n# Register the function with SQLAlchemhy.\n# pylint: disable=too-many-ancestors\nclass CommonTimestamp(GenericFunction):\n type = TIMESTAMP(timezone=True)\n package = 'agdc'\n identifier = 'common_timestamp'\n\n name = '%s.common_timestamp' % SCHEMA_NAME\n\n\n# pylint: disable=too-many-ancestors\nclass Float8Range(GenericFunction):\n type = FLOAT8RANGE\n package = 'agdc'\n identifier = 'float8range'\n\n name = '%s.float8range' % SCHEMA_NAME\n\n\nclass PGNAME(sqltypes.Text):\n \"\"\"Postgres 'NAME' type.\"\"\"\n __visit_name__ = 'NAME'\n\n\n@compiles(PGNAME)\ndef visit_name(element, compiler, **kw):\n return \"NAME\"\n\n\ndef pg_exists(conn, name):\n \"\"\"\n Does a postgres object exist?\n :rtype bool\n \"\"\"\n return conn.execute(\"SELECT to_regclass(%s)\", name).scalar() is not None\n\n\ndef pg_column_exists(conn, table, column):\n \"\"\"\n Does a postgres object exist?\n :rtype bool\n \"\"\"\n return conn.execute(\"\"\"\n SELECT 1 FROM pg_attribute\n WHERE attrelid = to_regclass(%s)\n AND attname = %s\n AND NOT attisdropped\n \"\"\", table, column).scalar() is not None\n\n\ndef escape_pg_identifier(engine, name):\n \"\"\"\n Escape identifiers (tables, fields, roles, etc) for inclusion in SQL statements.\n\n psycopg2 can safely merge query arguments, but cannot do the same for dynamically\n generating queries.\n\n See http://initd.org/psycopg/docs/sql.html for more information.\n \"\"\"\n # New (2.7+) versions of psycopg2 have function: extensions.quote_ident()\n # But it's too bleeding edge right now. 
We'll ask the server to escape instead, as\n # these are not performance sensitive.\n return engine.execute(\"select quote_ident(%s)\", name).scalar()\n", "path": "datacube/drivers/postgres/sql.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"\nCustom types for postgres & sqlalchemy\n\"\"\"\n\nfrom sqlalchemy import TIMESTAMP\nfrom sqlalchemy.dialects.postgresql.ranges import RangeOperators\nfrom sqlalchemy.ext.compiler import compiles\nfrom sqlalchemy.sql import sqltypes\nfrom sqlalchemy.sql.expression import Executable, ClauseElement\nfrom sqlalchemy.sql.functions import GenericFunction\n\nSCHEMA_NAME = 'agdc'\n\n\nclass CreateView(Executable, ClauseElement):\n def __init__(self, name, select):\n self.name = name\n self.select = select\n\n\n@compiles(CreateView)\ndef visit_create_view(element, compiler, **kw):\n return \"CREATE VIEW %s AS %s\" % (\n element.name,\n compiler.process(element.select, literal_binds=True)\n )\n\n\nTYPES_INIT_SQL = \"\"\"\ncreate or replace function {schema}.common_timestamp(text)\nreturns timestamp with time zone as $$\nselect ($1)::timestamp at time zone 'utc';\n$$ language sql immutable returns null on null input;\n\ncreate type {schema}.float8range as range (\n subtype = float8,\n subtype_diff = float8mi\n);\n\"\"\".format(schema=SCHEMA_NAME)\n\n\n# pylint: disable=abstract-method\nclass FLOAT8RANGE(RangeOperators, sqltypes.TypeEngine):\n __visit_name__ = 'FLOAT8RANGE'\n\n\n@compiles(FLOAT8RANGE)\ndef visit_float8range(element, compiler, **kw):\n return \"FLOAT8RANGE\"\n\n\n# Register the function with SQLAlchemhy.\n# pylint: disable=too-many-ancestors\nclass CommonTimestamp(GenericFunction):\n type = TIMESTAMP(timezone=True)\n package = 'agdc'\n identifier = 'common_timestamp'\n\n name = 'common_timestamp'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.packagenames = ['%s' % SCHEMA_NAME]\n\n\n# pylint: disable=too-many-ancestors\nclass Float8Range(GenericFunction):\n type = FLOAT8RANGE\n package = 'agdc'\n identifier = 'float8range'\n\n name = 'float8range'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.packagenames = ['%s' % SCHEMA_NAME]\n\n\nclass PGNAME(sqltypes.Text):\n \"\"\"Postgres 'NAME' type.\"\"\"\n __visit_name__ = 'NAME'\n\n\n@compiles(PGNAME)\ndef visit_name(element, compiler, **kw):\n return \"NAME\"\n\n\ndef pg_exists(conn, name):\n \"\"\"\n Does a postgres object exist?\n :rtype bool\n \"\"\"\n return conn.execute(\"SELECT to_regclass(%s)\", name).scalar() is not None\n\n\ndef pg_column_exists(conn, table, column):\n \"\"\"\n Does a postgres object exist?\n :rtype bool\n \"\"\"\n return conn.execute(\"\"\"\n SELECT 1 FROM pg_attribute\n WHERE attrelid = to_regclass(%s)\n AND attname = %s\n AND NOT attisdropped\n \"\"\", table, column).scalar() is not None\n\n\ndef escape_pg_identifier(engine, name):\n \"\"\"\n Escape identifiers (tables, fields, roles, etc) for inclusion in SQL statements.\n\n psycopg2 can safely merge query arguments, but cannot do the same for dynamically\n generating queries.\n\n See http://initd.org/psycopg/docs/sql.html for more information.\n \"\"\"\n # New (2.7+) versions of psycopg2 have function: extensions.quote_ident()\n # But it's too bleeding edge right now. We'll ask the server to escape instead, as\n # these are not performance sensitive.\n return engine.execute(\"select quote_ident(%s)\", name).scalar()\n", "path": "datacube/drivers/postgres/sql.py"}]} | 1,666 | 248 |
gh_patches_debug_38519 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3343 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider lees_famous_recipe is broken
During the global build at 2021-10-20-14-42-48, spider **lees_famous_recipe** failed with **0 features** and **130 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/lees_famous_recipe.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/lees_famous_recipe.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/lees_famous_recipe.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/lees_famous_recipe.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 from locations.items import GeojsonPointItem
4 import re
5
6 daysKey = {
7 'Monday': 'Mo', 'Tuesday': 'Tu', 'Wednesday': 'We', 'Thursday': 'Th',
8 'Friday': 'Fr', 'Saturday': 'Sa', 'Sunday': 'Su'
9 }
10
11
12 class LeesFamousRecipeSpider(scrapy.Spider):
13 name = "lees_famous_recipe"
14 item_attributes = { 'brand': "Lee's Famous Recipe Chicken" }
15 allowed_domains = ["www.leesfamousrecipe.com"]
16 start_urls = (
17 'https://www.leesfamousrecipe.com/locations/all',
18 )
19
20 def parse_phone(self, phone):
21 phone = phone.replace('.','')
22 phone = phone.replace(')','')
23 phone = phone.replace('(','')
24 phone = phone.replace('_','')
25 phone = phone.replace('-','')
26 phone = phone.replace('+','')
27 phone = phone.replace(' ','')
28 return phone
29
30 def store_hours(self, hours):
31 try:
32 days = hours.split(': ')[0].strip()
33 if('-' in days):
34 startDay = daysKey[days.split('-')[0]]
35 endDay = daysKey[days.split('-')[1]]
36 dayOutput = startDay + "-" + endDay
37 else:
38 dayOutput = daysKey[days]
39
40 bothHours = hours.split(': ')[1].replace(' ','')
41 openHours = bothHours.split("-")[0]
42 closeHours = bothHours.split("-")[1]
43
44 if("am" in openHours):
45 openHours = openHours.replace("am","")
46 if(":" in openHours):
47 openH = openHours.split(":")[0]
48 openM = openHours.split(":")[1]
49 else:
50 openH = openHours
51 openM = "00"
52 openHours = openH + ":" + openM
53
54 if("pm" in openHours):
55 openHours = openHours.replace("pm","")
56 if(":" in openHours):
57 openH = openHours.split(":")[0]
58 openM = openHours.split(":")[1]
59 else:
60 openH = openHours
61 openM = "00"
62 openH = str(int(openH) + 12)
63 openHours = openH + ":" + openM
64
65 if("am" in closeHours):
66 closeHours = closeHours.replace("am","")
67 if(":" in closeHours):
68 closeH = closeHours.split(":")[0]
69 closeM = closeHours.split(":")[1]
70 else:
71 closeH = closeHours
72 closeM = "00"
73 closeHours = closeH + ":" + closeM
74
75 if("pm" in closeHours):
76 closeHours = closeHours.replace("pm","")
77 if(":" in closeHours):
78 closeH = closeHours.split(":")[0]
79 closeM = closeHours.split(":")[1]
80 else:
81 closeH = closeHours
82 closeM = "00"
83 closeH = str(int(closeH) + 12)
84 closeHours = closeH + ":" + closeM
85 return dayOutput +' '+ openHours.replace(' ','') + "-" + closeHours + ';'
86 except KeyError:
87 return ""
88
89 def parse(self, response):
90 if("https://www.leesfamousrecipe.com/locations/all" == response.url):
91 for match in response.xpath("//div[contains(@class,'field-content')]/a/@href"):
92 request = scrapy.Request(match.extract())
93 yield request
94 else:
95 nameString = response.xpath("//h1[@class='node-title']/text()").extract_first().strip()
96 shortString = response.xpath("//h1[@class='node-title']/small/text()").extract_first()
97 if shortString is None:
98 shortString = ""
99 nameString = nameString + " " + shortString
100 nameString = nameString.strip()
101
102 scriptBody = response.xpath("//script[@type='text/javascript' and contains(.,'latitude')]/text()").extract_first()
103 latString = re.findall("latitude\":\"(.*?)\"", scriptBody)[0]
104 lonString = re.findall("longitude\":\"(.*?)\"", scriptBody)[0]
105
106 openingHoursString = ""
107 firstHourBlock = response.xpath("//div[contains(@class,'field-name-field-hours-summer')]/div/div/p/br/parent::p/text()")
108 for hourLine in firstHourBlock:
109 openingHoursString = openingHoursString +' '+self.store_hours(hourLine.extract())
110 openingHoursString = openingHoursString.strip(';').strip()
111
112
113 if("british-columbia" in response.url):
114 countryString = "CA"
115 stateString = "BC"
116 else:
117 countryString = "US"
118 mapUrl = response.xpath("//div[contains(@class,'map-link')]/div/a/@href").extract_first()
119 stateString = re.findall(r'(?<=\+)(.*?)(?=\+)', mapUrl)[len(re.findall(r'(?<=\+)(.*?)(?=\+)', mapUrl)) - 2].strip().replace('%2C','')
120
121 yield GeojsonPointItem(
122 ref=nameString,
123 addr_full=response.xpath("//div[@class='street-address']/text()").extract_first().strip(),
124 city=response.xpath("//div[@class='city-state-zip']/span[@class='locality']/text()").extract_first().strip(),
125 opening_hours=openingHoursString,
126 state=stateString,
127 postcode=response.xpath("//div[@class='city-state-zip']/span[@class='postal-code']/text()").extract_first().strip(),
128 phone=self.parse_phone(response.xpath("//div[contains(@class,'field-name-field-phone')]/div/div/text()").extract_first().strip()),
129 country = countryString,
130 lat=float(latString),
131 lon=float(lonString),
132 )
133
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/lees_famous_recipe.py b/locations/spiders/lees_famous_recipe.py
--- a/locations/spiders/lees_famous_recipe.py
+++ b/locations/spiders/lees_famous_recipe.py
@@ -83,7 +83,7 @@
closeH = str(int(closeH) + 12)
closeHours = closeH + ":" + closeM
return dayOutput +' '+ openHours.replace(' ','') + "-" + closeHours + ';'
- except KeyError:
+ except (KeyError, IndexError):
return ""
def parse(self, response):
@@ -99,9 +99,8 @@
nameString = nameString + " " + shortString
nameString = nameString.strip()
- scriptBody = response.xpath("//script[@type='text/javascript' and contains(.,'latitude')]/text()").extract_first()
- latString = re.findall("latitude\":\"(.*?)\"", scriptBody)[0]
- lonString = re.findall("longitude\":\"(.*?)\"", scriptBody)[0]
+ googleMapSrc = response.xpath("//*[@id='block-system-main']/div/div/iframe").extract_first()
+ [latString, lonString] = re.findall("center=(.*?)\"", googleMapSrc)[0].split(',')
openingHoursString = ""
firstHourBlock = response.xpath("//div[contains(@class,'field-name-field-hours-summer')]/div/div/p/br/parent::p/text()")
@@ -116,7 +115,7 @@
else:
countryString = "US"
mapUrl = response.xpath("//div[contains(@class,'map-link')]/div/a/@href").extract_first()
- stateString = re.findall(r'(?<=\+)(.*?)(?=\+)', mapUrl)[len(re.findall(r'(?<=\+)(.*?)(?=\+)', mapUrl)) - 2].strip().replace('%2C','')
+ stateString = response.xpath("//div[contains(@class,'adr')]/div[2]/span[2]/text()").extract_first()
yield GeojsonPointItem(
ref=nameString,
@@ -125,7 +124,7 @@
opening_hours=openingHoursString,
state=stateString,
postcode=response.xpath("//div[@class='city-state-zip']/span[@class='postal-code']/text()").extract_first().strip(),
- phone=self.parse_phone(response.xpath("//div[contains(@class,'field-name-field-phone')]/div/div/text()").extract_first().strip()),
+ phone=self.parse_phone(response.xpath("//div[contains(@class,'adr')]/div[3]/text()").extract_first().strip()),
country = countryString,
lat=float(latString),
lon=float(lonString),
| {"golden_diff": "diff --git a/locations/spiders/lees_famous_recipe.py b/locations/spiders/lees_famous_recipe.py\n--- a/locations/spiders/lees_famous_recipe.py\n+++ b/locations/spiders/lees_famous_recipe.py\n@@ -83,7 +83,7 @@\n closeH = str(int(closeH) + 12)\n closeHours = closeH + \":\" + closeM\n return dayOutput +' '+ openHours.replace(' ','') + \"-\" + closeHours + ';'\n- except KeyError:\n+ except (KeyError, IndexError):\n return \"\"\n \n def parse(self, response):\n@@ -99,9 +99,8 @@\n nameString = nameString + \" \" + shortString\n nameString = nameString.strip()\n \n- scriptBody = response.xpath(\"//script[@type='text/javascript' and contains(.,'latitude')]/text()\").extract_first()\n- latString = re.findall(\"latitude\\\":\\\"(.*?)\\\"\", scriptBody)[0]\n- lonString = re.findall(\"longitude\\\":\\\"(.*?)\\\"\", scriptBody)[0]\n+ googleMapSrc = response.xpath(\"//*[@id='block-system-main']/div/div/iframe\").extract_first()\n+ [latString, lonString] = re.findall(\"center=(.*?)\\\"\", googleMapSrc)[0].split(',')\n \n openingHoursString = \"\"\n firstHourBlock = response.xpath(\"//div[contains(@class,'field-name-field-hours-summer')]/div/div/p/br/parent::p/text()\")\n@@ -116,7 +115,7 @@\n else:\n countryString = \"US\"\n mapUrl = response.xpath(\"//div[contains(@class,'map-link')]/div/a/@href\").extract_first()\n- stateString = re.findall(r'(?<=\\+)(.*?)(?=\\+)', mapUrl)[len(re.findall(r'(?<=\\+)(.*?)(?=\\+)', mapUrl)) - 2].strip().replace('%2C','')\n+ stateString = response.xpath(\"//div[contains(@class,'adr')]/div[2]/span[2]/text()\").extract_first()\n \n yield GeojsonPointItem(\n ref=nameString,\n@@ -125,7 +124,7 @@\n opening_hours=openingHoursString,\n state=stateString,\n postcode=response.xpath(\"//div[@class='city-state-zip']/span[@class='postal-code']/text()\").extract_first().strip(),\n- phone=self.parse_phone(response.xpath(\"//div[contains(@class,'field-name-field-phone')]/div/div/text()\").extract_first().strip()),\n+ phone=self.parse_phone(response.xpath(\"//div[contains(@class,'adr')]/div[3]/text()\").extract_first().strip()),\n country = countryString,\n lat=float(latString),\n lon=float(lonString),\n", "issue": "Spider lees_famous_recipe is broken\nDuring the global build at 2021-10-20-14-42-48, spider **lees_famous_recipe** failed with **0 features** and **130 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/lees_famous_recipe.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/lees_famous_recipe.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/lees_famous_recipe.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nimport re\n\ndaysKey = {\n 'Monday': 'Mo', 'Tuesday': 'Tu', 'Wednesday': 'We', 'Thursday': 'Th',\n 'Friday': 'Fr', 'Saturday': 'Sa', 'Sunday': 'Su'\n}\n\n\nclass LeesFamousRecipeSpider(scrapy.Spider):\n name = \"lees_famous_recipe\"\n item_attributes = { 'brand': \"Lee's Famous Recipe Chicken\" }\n allowed_domains = [\"www.leesfamousrecipe.com\"]\n start_urls = (\n 'https://www.leesfamousrecipe.com/locations/all',\n )\n\n def parse_phone(self, phone):\n phone = phone.replace('.','')\n phone = phone.replace(')','')\n phone = phone.replace('(','')\n phone = phone.replace('_','')\n phone = phone.replace('-','')\n phone = phone.replace('+','')\n phone = phone.replace(' ','')\n return phone\n\n def store_hours(self, hours):\n try:\n days 
= hours.split(': ')[0].strip()\n if('-' in days):\n startDay = daysKey[days.split('-')[0]]\n endDay = daysKey[days.split('-')[1]]\n dayOutput = startDay + \"-\" + endDay\n else:\n dayOutput = daysKey[days]\n\n bothHours = hours.split(': ')[1].replace(' ','')\n openHours = bothHours.split(\"-\")[0]\n closeHours = bothHours.split(\"-\")[1]\n\n if(\"am\" in openHours):\n openHours = openHours.replace(\"am\",\"\")\n if(\":\" in openHours):\n openH = openHours.split(\":\")[0]\n openM = openHours.split(\":\")[1]\n else:\n openH = openHours\n openM = \"00\"\n openHours = openH + \":\" + openM\n\n if(\"pm\" in openHours):\n openHours = openHours.replace(\"pm\",\"\")\n if(\":\" in openHours):\n openH = openHours.split(\":\")[0]\n openM = openHours.split(\":\")[1]\n else:\n openH = openHours\n openM = \"00\"\n openH = str(int(openH) + 12)\n openHours = openH + \":\" + openM\n\n if(\"am\" in closeHours):\n closeHours = closeHours.replace(\"am\",\"\")\n if(\":\" in closeHours):\n closeH = closeHours.split(\":\")[0]\n closeM = closeHours.split(\":\")[1]\n else:\n closeH = closeHours\n closeM = \"00\"\n closeHours = closeH + \":\" + closeM\n\n if(\"pm\" in closeHours):\n closeHours = closeHours.replace(\"pm\",\"\")\n if(\":\" in closeHours):\n closeH = closeHours.split(\":\")[0]\n closeM = closeHours.split(\":\")[1]\n else:\n closeH = closeHours\n closeM = \"00\"\n closeH = str(int(closeH) + 12)\n closeHours = closeH + \":\" + closeM\n return dayOutput +' '+ openHours.replace(' ','') + \"-\" + closeHours + ';'\n except KeyError:\n return \"\"\n\n def parse(self, response):\n if(\"https://www.leesfamousrecipe.com/locations/all\" == response.url):\n for match in response.xpath(\"//div[contains(@class,'field-content')]/a/@href\"):\n request = scrapy.Request(match.extract())\n yield request\n else:\n nameString = response.xpath(\"//h1[@class='node-title']/text()\").extract_first().strip()\n shortString = response.xpath(\"//h1[@class='node-title']/small/text()\").extract_first()\n if shortString is None:\n shortString = \"\"\n nameString = nameString + \" \" + shortString\n nameString = nameString.strip()\n\n scriptBody = response.xpath(\"//script[@type='text/javascript' and contains(.,'latitude')]/text()\").extract_first()\n latString = re.findall(\"latitude\\\":\\\"(.*?)\\\"\", scriptBody)[0]\n lonString = re.findall(\"longitude\\\":\\\"(.*?)\\\"\", scriptBody)[0]\n\n openingHoursString = \"\"\n firstHourBlock = response.xpath(\"//div[contains(@class,'field-name-field-hours-summer')]/div/div/p/br/parent::p/text()\")\n for hourLine in firstHourBlock:\n openingHoursString = openingHoursString +' '+self.store_hours(hourLine.extract())\n openingHoursString = openingHoursString.strip(';').strip()\n\n\n if(\"british-columbia\" in response.url):\n countryString = \"CA\"\n stateString = \"BC\"\n else:\n countryString = \"US\"\n mapUrl = response.xpath(\"//div[contains(@class,'map-link')]/div/a/@href\").extract_first()\n stateString = re.findall(r'(?<=\\+)(.*?)(?=\\+)', mapUrl)[len(re.findall(r'(?<=\\+)(.*?)(?=\\+)', mapUrl)) - 2].strip().replace('%2C','')\n\n yield GeojsonPointItem(\n ref=nameString,\n addr_full=response.xpath(\"//div[@class='street-address']/text()\").extract_first().strip(),\n city=response.xpath(\"//div[@class='city-state-zip']/span[@class='locality']/text()\").extract_first().strip(),\n opening_hours=openingHoursString,\n state=stateString,\n postcode=response.xpath(\"//div[@class='city-state-zip']/span[@class='postal-code']/text()\").extract_first().strip(),\n 
phone=self.parse_phone(response.xpath(\"//div[contains(@class,'field-name-field-phone')]/div/div/text()\").extract_first().strip()),\n country = countryString,\n lat=float(latString),\n lon=float(lonString),\n )\n\n", "path": "locations/spiders/lees_famous_recipe.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nimport re\n\ndaysKey = {\n 'Monday': 'Mo', 'Tuesday': 'Tu', 'Wednesday': 'We', 'Thursday': 'Th',\n 'Friday': 'Fr', 'Saturday': 'Sa', 'Sunday': 'Su'\n}\n\n\nclass LeesFamousRecipeSpider(scrapy.Spider):\n name = \"lees_famous_recipe\"\n item_attributes = { 'brand': \"Lee's Famous Recipe Chicken\" }\n allowed_domains = [\"www.leesfamousrecipe.com\"]\n start_urls = (\n 'https://www.leesfamousrecipe.com/locations/all',\n )\n\n def parse_phone(self, phone):\n phone = phone.replace('.','')\n phone = phone.replace(')','')\n phone = phone.replace('(','')\n phone = phone.replace('_','')\n phone = phone.replace('-','')\n phone = phone.replace('+','')\n phone = phone.replace(' ','')\n return phone\n\n def store_hours(self, hours):\n try:\n days = hours.split(': ')[0].strip()\n if('-' in days):\n startDay = daysKey[days.split('-')[0]]\n endDay = daysKey[days.split('-')[1]]\n dayOutput = startDay + \"-\" + endDay\n else:\n dayOutput = daysKey[days]\n\n bothHours = hours.split(': ')[1].replace(' ','')\n openHours = bothHours.split(\"-\")[0]\n closeHours = bothHours.split(\"-\")[1]\n\n if(\"am\" in openHours):\n openHours = openHours.replace(\"am\",\"\")\n if(\":\" in openHours):\n openH = openHours.split(\":\")[0]\n openM = openHours.split(\":\")[1]\n else:\n openH = openHours\n openM = \"00\"\n openHours = openH + \":\" + openM\n\n if(\"pm\" in openHours):\n openHours = openHours.replace(\"pm\",\"\")\n if(\":\" in openHours):\n openH = openHours.split(\":\")[0]\n openM = openHours.split(\":\")[1]\n else:\n openH = openHours\n openM = \"00\"\n openH = str(int(openH) + 12)\n openHours = openH + \":\" + openM\n\n if(\"am\" in closeHours):\n closeHours = closeHours.replace(\"am\",\"\")\n if(\":\" in closeHours):\n closeH = closeHours.split(\":\")[0]\n closeM = closeHours.split(\":\")[1]\n else:\n closeH = closeHours\n closeM = \"00\"\n closeHours = closeH + \":\" + closeM\n\n if(\"pm\" in closeHours):\n closeHours = closeHours.replace(\"pm\",\"\")\n if(\":\" in closeHours):\n closeH = closeHours.split(\":\")[0]\n closeM = closeHours.split(\":\")[1]\n else:\n closeH = closeHours\n closeM = \"00\"\n closeH = str(int(closeH) + 12)\n closeHours = closeH + \":\" + closeM\n return dayOutput +' '+ openHours.replace(' ','') + \"-\" + closeHours + ';'\n except (KeyError, IndexError):\n return \"\"\n\n def parse(self, response):\n if(\"https://www.leesfamousrecipe.com/locations/all\" == response.url):\n for match in response.xpath(\"//div[contains(@class,'field-content')]/a/@href\"):\n request = scrapy.Request(match.extract())\n yield request\n else:\n nameString = response.xpath(\"//h1[@class='node-title']/text()\").extract_first().strip()\n shortString = response.xpath(\"//h1[@class='node-title']/small/text()\").extract_first()\n if shortString is None:\n shortString = \"\"\n nameString = nameString + \" \" + shortString\n nameString = nameString.strip()\n\n googleMapSrc = response.xpath(\"//*[@id='block-system-main']/div/div/iframe\").extract_first()\n [latString, lonString] = re.findall(\"center=(.*?)\\\"\", googleMapSrc)[0].split(',')\n\n openingHoursString = \"\"\n firstHourBlock = 
response.xpath(\"//div[contains(@class,'field-name-field-hours-summer')]/div/div/p/br/parent::p/text()\")\n for hourLine in firstHourBlock:\n openingHoursString = openingHoursString +' '+self.store_hours(hourLine.extract())\n openingHoursString = openingHoursString.strip(';').strip()\n\n\n if(\"british-columbia\" in response.url):\n countryString = \"CA\"\n stateString = \"BC\"\n else:\n countryString = \"US\"\n mapUrl = response.xpath(\"//div[contains(@class,'map-link')]/div/a/@href\").extract_first()\n stateString = response.xpath(\"//div[contains(@class,'adr')]/div[2]/span[2]/text()\").extract_first()\n\n yield GeojsonPointItem(\n ref=nameString,\n addr_full=response.xpath(\"//div[@class='street-address']/text()\").extract_first().strip(),\n city=response.xpath(\"//div[@class='city-state-zip']/span[@class='locality']/text()\").extract_first().strip(),\n opening_hours=openingHoursString,\n state=stateString,\n postcode=response.xpath(\"//div[@class='city-state-zip']/span[@class='postal-code']/text()\").extract_first().strip(),\n phone=self.parse_phone(response.xpath(\"//div[contains(@class,'adr')]/div[3]/text()\").extract_first().strip()),\n country = countryString,\n lat=float(latString),\n lon=float(lonString),\n )\n\n", "path": "locations/spiders/lees_famous_recipe.py"}]} | 2,026 | 606 |
gh_patches_debug_1473 | rasdani/github-patches | git_diff | ivy-llc__ivy-13177 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tril_indces_from
--- END ISSUE ---
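The one-line issue title refers to `tril_indices_from`. For reference, the NumPy/JAX behaviour being mirrored is simply `tril_indices` applied to the last two dimensions of an existing array; the snippet below uses plain NumPy to illustrate the expected semantics and is not Ivy frontend code.

```python
import numpy as np

arr = np.zeros((3, 4))
rows, cols = np.tril_indices_from(arr)        # lower-triangle indices of a 3x4 array
rows2, cols2 = np.tril_indices(3, k=0, m=4)   # the equivalent direct call
assert (rows == rows2).all() and (cols == cols2).all()
```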
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/jax/numpy/indexing.py`
Content:
```
1 # local
2 import ivy
3 from ivy.functional.frontends.jax.func_wrapper import (
4 to_ivy_arrays_and_back,
5 )
6
7
8 @to_ivy_arrays_and_back
9 def diagonal(a, offset=0, axis1=0, axis2=1):
10 return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)
11
12
13 @to_ivy_arrays_and_back
14 def diag(v, k=0):
15 return ivy.diag(v, k=k)
16
17
18 @to_ivy_arrays_and_back
19 def diag_indices(n, ndim=2):
20 idx = ivy.arange(n, dtype=int)
21 return (idx,) * ndim
22
23
24 # take_along_axis
25 @to_ivy_arrays_and_back
26 def take_along_axis(arr, indices, axis, mode="fill"):
27 return ivy.take_along_axis(arr, indices, axis, mode=mode)
28
29
30 @to_ivy_arrays_and_back
31 def tril_indices(n_rows, n_cols=None, k=0):
32 return ivy.tril_indices(n_rows, n_cols, k)
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/jax/numpy/indexing.py b/ivy/functional/frontends/jax/numpy/indexing.py
--- a/ivy/functional/frontends/jax/numpy/indexing.py
+++ b/ivy/functional/frontends/jax/numpy/indexing.py
@@ -30,3 +30,8 @@
@to_ivy_arrays_and_back
def tril_indices(n_rows, n_cols=None, k=0):
return ivy.tril_indices(n_rows, n_cols, k)
+
+
+@to_ivy_arrays_and_back
+def tril_indices_from(arr, k=0):
+ return ivy.tril_indices(arr.shape[-2], arr.shape[-1], k)
| {"golden_diff": "diff --git a/ivy/functional/frontends/jax/numpy/indexing.py b/ivy/functional/frontends/jax/numpy/indexing.py\n--- a/ivy/functional/frontends/jax/numpy/indexing.py\n+++ b/ivy/functional/frontends/jax/numpy/indexing.py\n@@ -30,3 +30,8 @@\n @to_ivy_arrays_and_back\n def tril_indices(n_rows, n_cols=None, k=0):\n return ivy.tril_indices(n_rows, n_cols, k)\n+\n+\n+@to_ivy_arrays_and_back\n+def tril_indices_from(arr, k=0):\n+ return ivy.tril_indices(arr.shape[-2], arr.shape[-1], k)\n", "issue": "tril_indces_from\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@to_ivy_arrays_and_back\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)\n\n\n@to_ivy_arrays_and_back\ndef diag(v, k=0):\n return ivy.diag(v, k=k)\n\n\n@to_ivy_arrays_and_back\ndef diag_indices(n, ndim=2):\n idx = ivy.arange(n, dtype=int)\n return (idx,) * ndim\n\n\n# take_along_axis\n@to_ivy_arrays_and_back\ndef take_along_axis(arr, indices, axis, mode=\"fill\"):\n return ivy.take_along_axis(arr, indices, axis, mode=mode)\n\n\n@to_ivy_arrays_and_back\ndef tril_indices(n_rows, n_cols=None, k=0):\n return ivy.tril_indices(n_rows, n_cols, k)\n", "path": "ivy/functional/frontends/jax/numpy/indexing.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@to_ivy_arrays_and_back\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)\n\n\n@to_ivy_arrays_and_back\ndef diag(v, k=0):\n return ivy.diag(v, k=k)\n\n\n@to_ivy_arrays_and_back\ndef diag_indices(n, ndim=2):\n idx = ivy.arange(n, dtype=int)\n return (idx,) * ndim\n\n\n# take_along_axis\n@to_ivy_arrays_and_back\ndef take_along_axis(arr, indices, axis, mode=\"fill\"):\n return ivy.take_along_axis(arr, indices, axis, mode=mode)\n\n\n@to_ivy_arrays_and_back\ndef tril_indices(n_rows, n_cols=None, k=0):\n return ivy.tril_indices(n_rows, n_cols, k)\n\n\n@to_ivy_arrays_and_back\ndef tril_indices_from(arr, k=0):\n return ivy.tril_indices(arr.shape[-2], arr.shape[-1], k)\n", "path": "ivy/functional/frontends/jax/numpy/indexing.py"}]} | 566 | 157 |
gh_patches_debug_54606 | rasdani/github-patches | git_diff | zulip__zulip-13843 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rename `subject_links` to `topic_links` in our API
This is an element of the broader `subject` -> `topic` migration (see #1192) that should be straightforward to change, because I believe the mobile apps don't access `subject_links` yet, so there's no compatibility work required. (What the data is used for in the webapp is the little in-topic-field links we show when there is a link or linkifier matching the topic line of the message).
@gnprice to confirm I'm reading the mobile codebase correctly that it's indeed not accessed.
Noticed in #13587; tagging as a priority since this sort of API migration gets more complex when delayed. We should be sure to look again at updating the docs as discussed in #13587 once this is complete.
--- END ISSUE ---
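As a concrete illustration of what the rename means for a consumer of the message payload, a client reading the field during the transition can prefer the new key and fall back to the old one. This is a hypothetical accessor, not code from the Zulip repository; only the two key names come from the issue.

```python
def get_topic_links(message: dict) -> list:
    # Prefer the renamed key, fall back to the legacy one while both may appear.
    return message.get("topic_links", message.get("subject_links", []))


msg = {"subject": "2020 roadmap", "subject_links": ["https://example.com/roadmap"]}
print(get_topic_links(msg))  # ['https://example.com/roadmap']
```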
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/topic.py`
Content:
```
1 import datetime
2
3 from django.db import connection
4 from django.db.models.query import QuerySet, Q
5 from django.utils.timezone import now as timezone_now
6
7 from sqlalchemy.sql import (
8 column,
9 literal,
10 func,
11 )
12
13 from zerver.lib.request import REQ
14 from zerver.models import (
15 Message,
16 Recipient,
17 UserMessage,
18 UserProfile,
19 )
20
21 from typing import Any, Dict, List, Optional, Tuple
22
23 # Only use these constants for events.
24 ORIG_TOPIC = "orig_subject"
25 TOPIC_NAME = "subject"
26 TOPIC_LINKS = "subject_links"
27 MATCH_TOPIC = "match_subject"
28
29 # This constant is actually embedded into
30 # the JSON data for message edit history,
31 # so we'll always need to handle legacy data
32 # unless we do a pretty tricky migration.
33 LEGACY_PREV_TOPIC = "prev_subject"
34
35 # This constant is pretty closely coupled to the
36 # database, but it's the JSON field.
37 EXPORT_TOPIC_NAME = "subject"
38
39 '''
40 The following functions are for user-facing APIs
41 where we'll want to support "subject" for a while.
42 '''
43
44 def get_topic_from_message_info(message_info: Dict[str, Any]) -> str:
45 '''
46 Use this where you are getting dicts that are based off of messages
47 that may come from the outside world, especially from third party
48 APIs and bots.
49
50 We prefer 'topic' to 'subject' here. We expect at least one field
51 to be present (or the caller must know how to handle KeyError).
52 '''
53 if 'topic' in message_info:
54 return message_info['topic']
55
56 return message_info['subject']
57
58 def REQ_topic() -> Optional[str]:
59 # REQ handlers really return a REQ, but we
60 # lie to make the rest of the type matching work.
61 return REQ(
62 whence='topic',
63 aliases=['subject'],
64 converter=lambda x: x.strip(),
65 default=None,
66 )
67
68 '''
69 TRY TO KEEP THIS DIVIDING LINE.
70
71 Below this line we want to make it so that functions are only
72 using "subject" in the DB sense, and nothing customer facing.
73
74 '''
75
76 # This is used in low-level message functions in
77 # zerver/lib/message.py, and it's not user facing.
78 DB_TOPIC_NAME = "subject"
79 MESSAGE__TOPIC = 'message__subject'
80
81 def topic_match_sa(topic_name: str) -> Any:
82 # _sa is short for Sql Alchemy, which we use mostly for
83 # queries that search messages
84 topic_cond = func.upper(column("subject")) == func.upper(literal(topic_name))
85 return topic_cond
86
87 def topic_column_sa() -> Any:
88 return column("subject")
89
90 def filter_by_exact_message_topic(query: QuerySet, message: Message) -> QuerySet:
91 topic_name = message.topic_name()
92 return query.filter(subject=topic_name)
93
94 def filter_by_topic_name_via_message(query: QuerySet, topic_name: str) -> QuerySet:
95 return query.filter(message__subject__iexact=topic_name)
96
97 def messages_for_topic(stream_id: int, topic_name: str) -> QuerySet:
98 return Message.objects.filter(
99 recipient__type_id=stream_id,
100 subject__iexact=topic_name,
101 )
102
103 def save_message_for_edit_use_case(message: Message) -> None:
104 message.save(update_fields=[TOPIC_NAME, "content", "rendered_content",
105 "rendered_content_version", "last_edit_time",
106 "edit_history", "has_attachment", "has_image",
107 "has_link"])
108
109
110 def user_message_exists_for_topic(user_profile: UserProfile,
111 recipient: Recipient,
112 topic_name: str) -> bool:
113 return UserMessage.objects.filter(
114 user_profile=user_profile,
115 message__recipient=recipient,
116 message__subject__iexact=topic_name,
117 ).exists()
118
119 def update_messages_for_topic_edit(message: Message,
120 propagate_mode: str,
121 orig_topic_name: str,
122 topic_name: str) -> List[Message]:
123 propagate_query = Q(recipient = message.recipient, subject = orig_topic_name)
124 # We only change messages up to 7 days in the past, to avoid hammering our
125 # DB by changing an unbounded amount of messages
126 if propagate_mode == 'change_all':
127 before_bound = timezone_now() - datetime.timedelta(days=7)
128
129 propagate_query = (propagate_query & ~Q(id = message.id) &
130 Q(date_sent__range=(before_bound, timezone_now())))
131 if propagate_mode == 'change_later':
132 propagate_query = propagate_query & Q(id__gt = message.id)
133
134 messages = Message.objects.filter(propagate_query).select_related()
135
136 # Evaluate the query before running the update
137 messages_list = list(messages)
138 messages.update(subject=topic_name)
139
140 for m in messages_list:
141 # The cached ORM object is not changed by messages.update()
142 # and the remote cache update requires the new value
143 m.set_topic_name(topic_name)
144
145 return messages_list
146
147 def generate_topic_history_from_db_rows(rows: List[Tuple[str, int]]) -> List[Dict[str, Any]]:
148 canonical_topic_names = {} # type: Dict[str, Tuple[int, str]]
149
150 # Sort rows by max_message_id so that if a topic
151 # has many different casings, we use the most
152 # recent row.
153 rows = sorted(rows, key=lambda tup: tup[1])
154
155 for (topic_name, max_message_id) in rows:
156 canonical_name = topic_name.lower()
157 canonical_topic_names[canonical_name] = (max_message_id, topic_name)
158
159 history = []
160 for canonical_topic, (max_message_id, topic_name) in canonical_topic_names.items():
161 history.append(dict(
162 name=topic_name,
163 max_id=max_message_id)
164 )
165 return sorted(history, key=lambda x: -x['max_id'])
166
167 def get_topic_history_for_stream(user_profile: UserProfile,
168 recipient: Recipient,
169 public_history: bool) -> List[Dict[str, Any]]:
170 cursor = connection.cursor()
171 if public_history:
172 query = '''
173 SELECT
174 "zerver_message"."subject" as topic,
175 max("zerver_message".id) as max_message_id
176 FROM "zerver_message"
177 WHERE (
178 "zerver_message"."recipient_id" = %s
179 )
180 GROUP BY (
181 "zerver_message"."subject"
182 )
183 ORDER BY max("zerver_message".id) DESC
184 '''
185 cursor.execute(query, [recipient.id])
186 else:
187 query = '''
188 SELECT
189 "zerver_message"."subject" as topic,
190 max("zerver_message".id) as max_message_id
191 FROM "zerver_message"
192 INNER JOIN "zerver_usermessage" ON (
193 "zerver_usermessage"."message_id" = "zerver_message"."id"
194 )
195 WHERE (
196 "zerver_usermessage"."user_profile_id" = %s AND
197 "zerver_message"."recipient_id" = %s
198 )
199 GROUP BY (
200 "zerver_message"."subject"
201 )
202 ORDER BY max("zerver_message".id) DESC
203 '''
204 cursor.execute(query, [user_profile.id, recipient.id])
205 rows = cursor.fetchall()
206 cursor.close()
207
208 return generate_topic_history_from_db_rows(rows)
209
210 def get_topic_history_for_web_public_stream(recipient: Recipient) -> List[Dict[str, Any]]:
211 cursor = connection.cursor()
212 query = '''
213 SELECT
214 "zerver_message"."subject" as topic,
215 max("zerver_message".id) as max_message_id
216 FROM "zerver_message"
217 WHERE (
218 "zerver_message"."recipient_id" = %s
219 )
220 GROUP BY (
221 "zerver_message"."subject"
222 )
223 ORDER BY max("zerver_message".id) DESC
224 '''
225 cursor.execute(query, [recipient.id])
226 rows = cursor.fetchall()
227 cursor.close()
228
229 return generate_topic_history_from_db_rows(rows)
230
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zerver/lib/topic.py b/zerver/lib/topic.py
--- a/zerver/lib/topic.py
+++ b/zerver/lib/topic.py
@@ -23,7 +23,7 @@
# Only use these constants for events.
ORIG_TOPIC = "orig_subject"
TOPIC_NAME = "subject"
-TOPIC_LINKS = "subject_links"
+TOPIC_LINKS = "topic_links"
MATCH_TOPIC = "match_subject"
# This constant is actually embedded into
| {"golden_diff": "diff --git a/zerver/lib/topic.py b/zerver/lib/topic.py\n--- a/zerver/lib/topic.py\n+++ b/zerver/lib/topic.py\n@@ -23,7 +23,7 @@\n # Only use these constants for events.\n ORIG_TOPIC = \"orig_subject\"\n TOPIC_NAME = \"subject\"\n-TOPIC_LINKS = \"subject_links\"\n+TOPIC_LINKS = \"topic_links\"\n MATCH_TOPIC = \"match_subject\"\n \n # This constant is actually embedded into\n", "issue": "Rename `subject_links` to `topic_links` in our API\nThis is an element of the broader `subject` -> `topic` migration (see #1192) that should be straightforward to change, because I believe the mobile apps don't access `subject_links` yet, so there's no compatibility work required. (What the data is used for in the webapp is the little in-topic-field links we show when there is a link or linkifier matching the topic line of the message).\r\n\r\n@gnprice to confirm I'm reading the mobile codebase correctly that it's indeed not accessed.\r\n\r\nNoticed in #13587; tagging as a priority since this sort of API migration gets more complex when delayed. We should be sure to look again at updating the docs as discussed in #13587 once this is complete.\n", "before_files": [{"content": "import datetime\n\nfrom django.db import connection\nfrom django.db.models.query import QuerySet, Q\nfrom django.utils.timezone import now as timezone_now\n\nfrom sqlalchemy.sql import (\n column,\n literal,\n func,\n)\n\nfrom zerver.lib.request import REQ\nfrom zerver.models import (\n Message,\n Recipient,\n UserMessage,\n UserProfile,\n)\n\nfrom typing import Any, Dict, List, Optional, Tuple\n\n# Only use these constants for events.\nORIG_TOPIC = \"orig_subject\"\nTOPIC_NAME = \"subject\"\nTOPIC_LINKS = \"subject_links\"\nMATCH_TOPIC = \"match_subject\"\n\n# This constant is actually embedded into\n# the JSON data for message edit history,\n# so we'll always need to handle legacy data\n# unless we do a pretty tricky migration.\nLEGACY_PREV_TOPIC = \"prev_subject\"\n\n# This constant is pretty closely coupled to the\n# database, but it's the JSON field.\nEXPORT_TOPIC_NAME = \"subject\"\n\n'''\nThe following functions are for user-facing APIs\nwhere we'll want to support \"subject\" for a while.\n'''\n\ndef get_topic_from_message_info(message_info: Dict[str, Any]) -> str:\n '''\n Use this where you are getting dicts that are based off of messages\n that may come from the outside world, especially from third party\n APIs and bots.\n\n We prefer 'topic' to 'subject' here. 
We expect at least one field\n to be present (or the caller must know how to handle KeyError).\n '''\n if 'topic' in message_info:\n return message_info['topic']\n\n return message_info['subject']\n\ndef REQ_topic() -> Optional[str]:\n # REQ handlers really return a REQ, but we\n # lie to make the rest of the type matching work.\n return REQ(\n whence='topic',\n aliases=['subject'],\n converter=lambda x: x.strip(),\n default=None,\n )\n\n'''\nTRY TO KEEP THIS DIVIDING LINE.\n\nBelow this line we want to make it so that functions are only\nusing \"subject\" in the DB sense, and nothing customer facing.\n\n'''\n\n# This is used in low-level message functions in\n# zerver/lib/message.py, and it's not user facing.\nDB_TOPIC_NAME = \"subject\"\nMESSAGE__TOPIC = 'message__subject'\n\ndef topic_match_sa(topic_name: str) -> Any:\n # _sa is short for Sql Alchemy, which we use mostly for\n # queries that search messages\n topic_cond = func.upper(column(\"subject\")) == func.upper(literal(topic_name))\n return topic_cond\n\ndef topic_column_sa() -> Any:\n return column(\"subject\")\n\ndef filter_by_exact_message_topic(query: QuerySet, message: Message) -> QuerySet:\n topic_name = message.topic_name()\n return query.filter(subject=topic_name)\n\ndef filter_by_topic_name_via_message(query: QuerySet, topic_name: str) -> QuerySet:\n return query.filter(message__subject__iexact=topic_name)\n\ndef messages_for_topic(stream_id: int, topic_name: str) -> QuerySet:\n return Message.objects.filter(\n recipient__type_id=stream_id,\n subject__iexact=topic_name,\n )\n\ndef save_message_for_edit_use_case(message: Message) -> None:\n message.save(update_fields=[TOPIC_NAME, \"content\", \"rendered_content\",\n \"rendered_content_version\", \"last_edit_time\",\n \"edit_history\", \"has_attachment\", \"has_image\",\n \"has_link\"])\n\n\ndef user_message_exists_for_topic(user_profile: UserProfile,\n recipient: Recipient,\n topic_name: str) -> bool:\n return UserMessage.objects.filter(\n user_profile=user_profile,\n message__recipient=recipient,\n message__subject__iexact=topic_name,\n ).exists()\n\ndef update_messages_for_topic_edit(message: Message,\n propagate_mode: str,\n orig_topic_name: str,\n topic_name: str) -> List[Message]:\n propagate_query = Q(recipient = message.recipient, subject = orig_topic_name)\n # We only change messages up to 7 days in the past, to avoid hammering our\n # DB by changing an unbounded amount of messages\n if propagate_mode == 'change_all':\n before_bound = timezone_now() - datetime.timedelta(days=7)\n\n propagate_query = (propagate_query & ~Q(id = message.id) &\n Q(date_sent__range=(before_bound, timezone_now())))\n if propagate_mode == 'change_later':\n propagate_query = propagate_query & Q(id__gt = message.id)\n\n messages = Message.objects.filter(propagate_query).select_related()\n\n # Evaluate the query before running the update\n messages_list = list(messages)\n messages.update(subject=topic_name)\n\n for m in messages_list:\n # The cached ORM object is not changed by messages.update()\n # and the remote cache update requires the new value\n m.set_topic_name(topic_name)\n\n return messages_list\n\ndef generate_topic_history_from_db_rows(rows: List[Tuple[str, int]]) -> List[Dict[str, Any]]:\n canonical_topic_names = {} # type: Dict[str, Tuple[int, str]]\n\n # Sort rows by max_message_id so that if a topic\n # has many different casings, we use the most\n # recent row.\n rows = sorted(rows, key=lambda tup: tup[1])\n\n for (topic_name, max_message_id) in rows:\n canonical_name = 
topic_name.lower()\n canonical_topic_names[canonical_name] = (max_message_id, topic_name)\n\n history = []\n for canonical_topic, (max_message_id, topic_name) in canonical_topic_names.items():\n history.append(dict(\n name=topic_name,\n max_id=max_message_id)\n )\n return sorted(history, key=lambda x: -x['max_id'])\n\ndef get_topic_history_for_stream(user_profile: UserProfile,\n recipient: Recipient,\n public_history: bool) -> List[Dict[str, Any]]:\n cursor = connection.cursor()\n if public_history:\n query = '''\n SELECT\n \"zerver_message\".\"subject\" as topic,\n max(\"zerver_message\".id) as max_message_id\n FROM \"zerver_message\"\n WHERE (\n \"zerver_message\".\"recipient_id\" = %s\n )\n GROUP BY (\n \"zerver_message\".\"subject\"\n )\n ORDER BY max(\"zerver_message\".id) DESC\n '''\n cursor.execute(query, [recipient.id])\n else:\n query = '''\n SELECT\n \"zerver_message\".\"subject\" as topic,\n max(\"zerver_message\".id) as max_message_id\n FROM \"zerver_message\"\n INNER JOIN \"zerver_usermessage\" ON (\n \"zerver_usermessage\".\"message_id\" = \"zerver_message\".\"id\"\n )\n WHERE (\n \"zerver_usermessage\".\"user_profile_id\" = %s AND\n \"zerver_message\".\"recipient_id\" = %s\n )\n GROUP BY (\n \"zerver_message\".\"subject\"\n )\n ORDER BY max(\"zerver_message\".id) DESC\n '''\n cursor.execute(query, [user_profile.id, recipient.id])\n rows = cursor.fetchall()\n cursor.close()\n\n return generate_topic_history_from_db_rows(rows)\n\ndef get_topic_history_for_web_public_stream(recipient: Recipient) -> List[Dict[str, Any]]:\n cursor = connection.cursor()\n query = '''\n SELECT\n \"zerver_message\".\"subject\" as topic,\n max(\"zerver_message\".id) as max_message_id\n FROM \"zerver_message\"\n WHERE (\n \"zerver_message\".\"recipient_id\" = %s\n )\n GROUP BY (\n \"zerver_message\".\"subject\"\n )\n ORDER BY max(\"zerver_message\".id) DESC\n '''\n cursor.execute(query, [recipient.id])\n rows = cursor.fetchall()\n cursor.close()\n\n return generate_topic_history_from_db_rows(rows)\n", "path": "zerver/lib/topic.py"}], "after_files": [{"content": "import datetime\n\nfrom django.db import connection\nfrom django.db.models.query import QuerySet, Q\nfrom django.utils.timezone import now as timezone_now\n\nfrom sqlalchemy.sql import (\n column,\n literal,\n func,\n)\n\nfrom zerver.lib.request import REQ\nfrom zerver.models import (\n Message,\n Recipient,\n UserMessage,\n UserProfile,\n)\n\nfrom typing import Any, Dict, List, Optional, Tuple\n\n# Only use these constants for events.\nORIG_TOPIC = \"orig_subject\"\nTOPIC_NAME = \"subject\"\nTOPIC_LINKS = \"topic_links\"\nMATCH_TOPIC = \"match_subject\"\n\n# This constant is actually embedded into\n# the JSON data for message edit history,\n# so we'll always need to handle legacy data\n# unless we do a pretty tricky migration.\nLEGACY_PREV_TOPIC = \"prev_subject\"\n\n# This constant is pretty closely coupled to the\n# database, but it's the JSON field.\nEXPORT_TOPIC_NAME = \"subject\"\n\n'''\nThe following functions are for user-facing APIs\nwhere we'll want to support \"subject\" for a while.\n'''\n\ndef get_topic_from_message_info(message_info: Dict[str, Any]) -> str:\n '''\n Use this where you are getting dicts that are based off of messages\n that may come from the outside world, especially from third party\n APIs and bots.\n\n We prefer 'topic' to 'subject' here. 
We expect at least one field\n to be present (or the caller must know how to handle KeyError).\n '''\n if 'topic' in message_info:\n return message_info['topic']\n\n return message_info['subject']\n\ndef REQ_topic() -> Optional[str]:\n # REQ handlers really return a REQ, but we\n # lie to make the rest of the type matching work.\n return REQ(\n whence='topic',\n aliases=['subject'],\n converter=lambda x: x.strip(),\n default=None,\n )\n\n'''\nTRY TO KEEP THIS DIVIDING LINE.\n\nBelow this line we want to make it so that functions are only\nusing \"subject\" in the DB sense, and nothing customer facing.\n\n'''\n\n# This is used in low-level message functions in\n# zerver/lib/message.py, and it's not user facing.\nDB_TOPIC_NAME = \"subject\"\nMESSAGE__TOPIC = 'message__subject'\n\ndef topic_match_sa(topic_name: str) -> Any:\n # _sa is short for Sql Alchemy, which we use mostly for\n # queries that search messages\n topic_cond = func.upper(column(\"subject\")) == func.upper(literal(topic_name))\n return topic_cond\n\ndef topic_column_sa() -> Any:\n return column(\"subject\")\n\ndef filter_by_exact_message_topic(query: QuerySet, message: Message) -> QuerySet:\n topic_name = message.topic_name()\n return query.filter(subject=topic_name)\n\ndef filter_by_topic_name_via_message(query: QuerySet, topic_name: str) -> QuerySet:\n return query.filter(message__subject__iexact=topic_name)\n\ndef messages_for_topic(stream_id: int, topic_name: str) -> QuerySet:\n return Message.objects.filter(\n recipient__type_id=stream_id,\n subject__iexact=topic_name,\n )\n\ndef save_message_for_edit_use_case(message: Message) -> None:\n message.save(update_fields=[TOPIC_NAME, \"content\", \"rendered_content\",\n \"rendered_content_version\", \"last_edit_time\",\n \"edit_history\", \"has_attachment\", \"has_image\",\n \"has_link\"])\n\n\ndef user_message_exists_for_topic(user_profile: UserProfile,\n recipient: Recipient,\n topic_name: str) -> bool:\n return UserMessage.objects.filter(\n user_profile=user_profile,\n message__recipient=recipient,\n message__subject__iexact=topic_name,\n ).exists()\n\ndef update_messages_for_topic_edit(message: Message,\n propagate_mode: str,\n orig_topic_name: str,\n topic_name: str) -> List[Message]:\n propagate_query = Q(recipient = message.recipient, subject = orig_topic_name)\n # We only change messages up to 7 days in the past, to avoid hammering our\n # DB by changing an unbounded amount of messages\n if propagate_mode == 'change_all':\n before_bound = timezone_now() - datetime.timedelta(days=7)\n\n propagate_query = (propagate_query & ~Q(id = message.id) &\n Q(date_sent__range=(before_bound, timezone_now())))\n if propagate_mode == 'change_later':\n propagate_query = propagate_query & Q(id__gt = message.id)\n\n messages = Message.objects.filter(propagate_query).select_related()\n\n # Evaluate the query before running the update\n messages_list = list(messages)\n messages.update(subject=topic_name)\n\n for m in messages_list:\n # The cached ORM object is not changed by messages.update()\n # and the remote cache update requires the new value\n m.set_topic_name(topic_name)\n\n return messages_list\n\ndef generate_topic_history_from_db_rows(rows: List[Tuple[str, int]]) -> List[Dict[str, Any]]:\n canonical_topic_names = {} # type: Dict[str, Tuple[int, str]]\n\n # Sort rows by max_message_id so that if a topic\n # has many different casings, we use the most\n # recent row.\n rows = sorted(rows, key=lambda tup: tup[1])\n\n for (topic_name, max_message_id) in rows:\n canonical_name = 
topic_name.lower()\n canonical_topic_names[canonical_name] = (max_message_id, topic_name)\n\n history = []\n for canonical_topic, (max_message_id, topic_name) in canonical_topic_names.items():\n history.append(dict(\n name=topic_name,\n max_id=max_message_id)\n )\n return sorted(history, key=lambda x: -x['max_id'])\n\ndef get_topic_history_for_stream(user_profile: UserProfile,\n recipient: Recipient,\n public_history: bool) -> List[Dict[str, Any]]:\n cursor = connection.cursor()\n if public_history:\n query = '''\n SELECT\n \"zerver_message\".\"subject\" as topic,\n max(\"zerver_message\".id) as max_message_id\n FROM \"zerver_message\"\n WHERE (\n \"zerver_message\".\"recipient_id\" = %s\n )\n GROUP BY (\n \"zerver_message\".\"subject\"\n )\n ORDER BY max(\"zerver_message\".id) DESC\n '''\n cursor.execute(query, [recipient.id])\n else:\n query = '''\n SELECT\n \"zerver_message\".\"subject\" as topic,\n max(\"zerver_message\".id) as max_message_id\n FROM \"zerver_message\"\n INNER JOIN \"zerver_usermessage\" ON (\n \"zerver_usermessage\".\"message_id\" = \"zerver_message\".\"id\"\n )\n WHERE (\n \"zerver_usermessage\".\"user_profile_id\" = %s AND\n \"zerver_message\".\"recipient_id\" = %s\n )\n GROUP BY (\n \"zerver_message\".\"subject\"\n )\n ORDER BY max(\"zerver_message\".id) DESC\n '''\n cursor.execute(query, [user_profile.id, recipient.id])\n rows = cursor.fetchall()\n cursor.close()\n\n return generate_topic_history_from_db_rows(rows)\n\ndef get_topic_history_for_web_public_stream(recipient: Recipient) -> List[Dict[str, Any]]:\n cursor = connection.cursor()\n query = '''\n SELECT\n \"zerver_message\".\"subject\" as topic,\n max(\"zerver_message\".id) as max_message_id\n FROM \"zerver_message\"\n WHERE (\n \"zerver_message\".\"recipient_id\" = %s\n )\n GROUP BY (\n \"zerver_message\".\"subject\"\n )\n ORDER BY max(\"zerver_message\".id) DESC\n '''\n cursor.execute(query, [recipient.id])\n rows = cursor.fetchall()\n cursor.close()\n\n return generate_topic_history_from_db_rows(rows)\n", "path": "zerver/lib/topic.py"}]} | 2,725 | 103 |
gh_patches_debug_13100 | rasdani/github-patches | git_diff | mlflow__mlflow-10361 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace `pytest` with `dev/pytest.sh` in `mlflow/ml-package-versions.yml`
### Summary
A follow-up task for https://github.com/mlflow/mlflow/pull/10006. Replace `pytest` with `dev/pytest.sh` in `mlflow/ml-package-versions.yml` to enable the `fail-fast` behavior.
Example:
```diff
diff --git a/mlflow/ml-package-versions.yml b/mlflow/ml-package-versions.yml
index 4fdc2635d..373aa9e17 100644
--- a/mlflow/ml-package-versions.yml
+++ b/mlflow/ml-package-versions.yml
@@ -229,7 +229,7 @@ fastai:
"< 2.8.0": ["torch<1.13.0", "torchvision<0.14.0"]
">= 2.8.0": ["torch", "torchvision"]
run: |
- pytest tests/fastai/test_fastai_autolog.py
+ dev/pytest.sh tests/fastai/test_fastai_autolog.py
onnx:
package_info:
```
### Notes
- Make sure to open a PR from a **non-master** branch.
- Sign off the commit using the `-s` flag when making a commit:
```sh
git commit -s -m "..."
# ^^ make sure to use this
```
- Include `Resolve #{issue_number}` (e.g. `Resolve #123`) in the PR description when opening a PR.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conftest.py`
Content:
```
1 import json
2 import os
3 import posixpath
4 import shutil
5 import subprocess
6 import sys
7
8 import click
9 import pytest
10
11 from mlflow.environment_variables import _MLFLOW_TESTING, MLFLOW_TRACKING_URI
12 from mlflow.version import VERSION
13
14 from tests.helper_functions import get_safe_port
15
16
17 def pytest_addoption(parser):
18 parser.addoption(
19 "--requires-ssh",
20 action="store_true",
21 dest="requires_ssh",
22 default=False,
23 help="Run tests decorated with 'requires_ssh' annotation. "
24 "These tests require keys to be configured locally "
25 "for SSH authentication.",
26 )
27 parser.addoption(
28 "--ignore-flavors",
29 action="store_true",
30 dest="ignore_flavors",
31 default=False,
32 help="Ignore tests for model flavors.",
33 )
34 parser.addoption(
35 "--splits",
36 default=None,
37 type=int,
38 help="The number of groups to split tests into.",
39 )
40 parser.addoption(
41 "--group",
42 default=None,
43 type=int,
44 help="The group of tests to run.",
45 )
46 parser.addoption(
47 "--serve-wheel",
48 action="store_true",
49 default=os.getenv("CI", "false").lower() == "true",
50 help="Serve a wheel for the dev version of MLflow. True by default in CI, False otherwise.",
51 )
52
53
54 def pytest_configure(config):
55 # Register markers to suppress `PytestUnknownMarkWarning`
56 config.addinivalue_line("markers", "requires_ssh")
57 config.addinivalue_line("markers", "notrackingurimock")
58 config.addinivalue_line("markers", "allow_infer_pip_requirements_fallback")
59
60
61 @pytest.hookimpl(tryfirst=True)
62 def pytest_cmdline_main(config):
63 group = config.getoption("group")
64 splits = config.getoption("splits")
65
66 if splits is None and group is None:
67 return None
68
69 if splits and group is None:
70 raise pytest.UsageError("`--group` is required")
71
72 if group and splits is None:
73 raise pytest.UsageError("`--splits` is required")
74
75 if splits < 0:
76 raise pytest.UsageError("`--splits` must be >= 1")
77
78 if group < 1 or group > splits:
79 raise pytest.UsageError("`--group` must be between 1 and {splits}")
80
81 return None
82
83
84 def pytest_sessionstart(session):
85 if uri := MLFLOW_TRACKING_URI.get():
86 click.echo(
87 click.style(
88 (
89 f"Environment variable {MLFLOW_TRACKING_URI} is set to {uri!r}, "
90 "which may interfere with tests."
91 ),
92 fg="red",
93 )
94 )
95
96
97 def pytest_runtest_setup(item):
98 markers = [mark.name for mark in item.iter_markers()]
99 if "requires_ssh" in markers and not item.config.getoption("--requires-ssh"):
100 pytest.skip("use `--requires-ssh` to run this test")
101
102
103 @pytest.hookimpl(hookwrapper=True)
104 def pytest_report_teststatus(report, config):
105 outcome = yield
106 if report.when == "call":
107 try:
108 import psutil
109 except ImportError:
110 return
111
112 (*rest, result) = outcome.get_result()
113 mem = psutil.virtual_memory()
114 mem_used = mem.used / 1024**3
115 mem_total = mem.total / 1024**3
116
117 disk = psutil.disk_usage("/")
118 disk_used = disk.used / 1024**3
119 disk_total = disk.total / 1024**3
120 outcome.force_result(
121 (
122 *rest,
123 (
124 f"{result} | "
125 f"MEM {mem_used:.1f}/{mem_total:.1f} GB | "
126 f"DISK {disk_used:.1f}/{disk_total:.1f} GB"
127 ),
128 )
129 )
130
131
132 @pytest.hookimpl(hookwrapper=True)
133 def pytest_ignore_collect(path, config):
134 outcome = yield
135 if not outcome.get_result() and config.getoption("ignore_flavors"):
136 # If not ignored by the default hook and `--ignore-flavors` specified
137
138 # Ignored files and directories must be included in dev/run-python-flavor-tests.sh
139 model_flavors = [
140 # Tests of flavor modules.
141 "tests/azureml",
142 "tests/catboost",
143 "tests/diviner",
144 "tests/fastai",
145 "tests/gluon",
146 "tests/h2o",
147 "tests/johnsnowlabs",
148 "tests/keras",
149 "tests/keras_core",
150 "tests/langchain",
151 "tests/lightgbm",
152 "tests/mleap",
153 "tests/models",
154 "tests/onnx",
155 "tests/openai",
156 "tests/paddle",
157 "tests/pmdarima",
158 "tests/prophet",
159 "tests/pyfunc",
160 "tests/pytorch",
161 "tests/sagemaker",
162 "tests/sentence_transformers",
163 "tests/shap",
164 "tests/sklearn",
165 "tests/spacy",
166 "tests/spark",
167 "tests/statsmodels",
168 "tests/tensorflow",
169 "tests/transformers",
170 "tests/xgboost",
171 # Lazy loading test.
172 "tests/test_mlflow_lazily_imports_ml_packages.py",
173 # Tests of utils.
174 "tests/utils/test_model_utils.py",
175 # This test is included here because it imports many big libraries like tf, keras, etc.
176 "tests/tracking/fluent/test_fluent_autolog.py",
177 # Cross flavor autologging related tests.
178 "tests/autologging/test_autologging_safety_unit.py",
179 "tests/autologging/test_autologging_behaviors_unit.py",
180 "tests/autologging/test_autologging_behaviors_integration.py",
181 "tests/autologging/test_autologging_utils.py",
182 "tests/autologging/test_training_session.py",
183 # Opt in authentication feature.
184 "tests/server/auth",
185 "tests/gateway",
186 ]
187
188 relpath = os.path.relpath(str(path))
189 relpath = relpath.replace(os.sep, posixpath.sep) # for Windows
190
191 if relpath in model_flavors:
192 outcome.force_result(True)
193
194
195 @pytest.hookimpl(trylast=True)
196 def pytest_collection_modifyitems(session, config, items): # pylint: disable=unused-argument
197 # Executing `tests.server.test_prometheus_exporter` after `tests.server.test_handlers`
198 # results in an error because Flask >= 2.2.0 doesn't allow calling setup method such as
199 # `before_request` on the application after the first request. To avoid this issue,
200 # execute `tests.server.test_prometheus_exporter` first by reordering the test items.
201 items.sort(key=lambda item: item.module.__name__ != "tests.server.test_prometheus_exporter")
202
203 # Select the tests to run based on the group and splits
204 if (splits := config.getoption("--splits")) and (group := config.getoption("--group")):
205 items[:] = items[(group - 1) :: splits]
206
207
208 @pytest.hookimpl(hookwrapper=True)
209 def pytest_terminal_summary(
210 terminalreporter, exitstatus, config
211 ): # pylint: disable=unused-argument
212 yield
213 failed_test_reports = terminalreporter.stats.get("failed", [])
214 if failed_test_reports:
215 if len(failed_test_reports) <= 30:
216 terminalreporter.section("command to run failed test cases")
217 ids = [repr(report.nodeid) for report in failed_test_reports]
218 else:
219 terminalreporter.section("command to run failed test suites")
220 # Use dict.fromkeys to preserve the order
221 ids = list(dict.fromkeys(report.fspath for report in failed_test_reports))
222 terminalreporter.write(" ".join(["pytest"] + ids))
223 terminalreporter.write("\n" * 2)
224
225 # If some tests failed at installing mlflow, we suggest using `--serve-wheel` flag.
226 # Some test cases try to install mlflow via pip e.g. model loading. They pins
227 # mlflow version to install based on local environment i.e. dev version ahead of
228 # the latest release, hence it's not found on PyPI. `--serve-wheel` flag was
229 # introduced to resolve this issue, which starts local PyPI server and serve
230 # an mlflow wheel based on local source code.
231 # Ref: https://github.com/mlflow/mlflow/pull/10247
232 msg = f"No matching distribution found for mlflow=={VERSION}"
233 for rep in failed_test_reports:
234 if any(msg in t for t in (rep.longreprtext, rep.capstdout, rep.capstderr)):
235 terminalreporter.section("HINTS", yellow=True)
236 terminalreporter.write(
237 f"Found test(s) that failed with {msg!r}. Adding"
238 " --serve-wheel` flag to your pytest command may help.\n\n",
239 yellow=True,
240 )
241 break
242
243
244 @pytest.fixture(scope="module", autouse=True)
245 def clean_up_envs():
246 yield
247
248 if "GITHUB_ACTIONS" in os.environ:
249 from mlflow.utils.virtualenv import _get_mlflow_virtualenv_root
250
251 shutil.rmtree(_get_mlflow_virtualenv_root(), ignore_errors=True)
252 if os.name != "nt":
253 conda_info = json.loads(subprocess.check_output(["conda", "info", "--json"], text=True))
254 root_prefix = conda_info["root_prefix"]
255 for env in conda_info["envs"]:
256 if env != root_prefix:
257 shutil.rmtree(env, ignore_errors=True)
258
259
260 @pytest.fixture(scope="session", autouse=True)
261 def enable_mlflow_testing():
262 with pytest.MonkeyPatch.context() as mp:
263 mp.setenv(_MLFLOW_TESTING.name, "TRUE")
264 yield
265
266
267 @pytest.fixture(scope="session", autouse=True)
268 def serve_wheel(request, tmp_path_factory):
269 """
270 Models logged during tests have a dependency on the dev version of MLflow built from
271 source (e.g., mlflow==1.20.0.dev0) and cannot be served because the dev version is not
272 available on PyPI. This fixture serves a wheel for the dev version from a temporary
273 PyPI repository running on localhost and appends the repository URL to the
274 `PIP_EXTRA_INDEX_URL` environment variable to make the wheel available to pip.
275 """
276 if not request.config.getoption("--serve-wheel"):
277 yield # pytest expects a generator fixture to yield
278 return
279
280 root = tmp_path_factory.mktemp("root")
281 mlflow_dir = root.joinpath("mlflow")
282 mlflow_dir.mkdir()
283 port = get_safe_port()
284 try:
285 repo_root = subprocess.check_output(
286 [
287 "git",
288 "rev-parse",
289 "--show-toplevel",
290 ],
291 text=True,
292 ).strip()
293 except subprocess.CalledProcessError:
294 # Some tests run in a Docker container where git is not installed.
295 # In this case, assume we're in the root of the repo.
296 repo_root = "."
297
298 subprocess.run(
299 [
300 sys.executable,
301 "-m",
302 "pip",
303 "wheel",
304 "--wheel-dir",
305 mlflow_dir,
306 "--no-deps",
307 repo_root,
308 ],
309 check=True,
310 )
311 with subprocess.Popen(
312 [
313 sys.executable,
314 "-m",
315 "http.server",
316 str(port),
317 ],
318 cwd=root,
319 ) as prc:
320 url = f"http://localhost:{port}"
321 if existing_url := os.environ.get("PIP_EXTRA_INDEX_URL"):
322 url = f"{existing_url} {url}"
323 os.environ["PIP_EXTRA_INDEX_URL"] = url
324
325 yield
326 prc.terminate()
327
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conftest.py b/conftest.py
--- a/conftest.py
+++ b/conftest.py
@@ -100,6 +100,27 @@
pytest.skip("use `--requires-ssh` to run this test")
+def fetch_pr_labels():
+ """
+ Returns the labels associated with the current pull request.
+ """
+ if "GITHUB_ACTIONS" not in os.environ:
+ return None
+
+ if os.environ.get("GITHUB_EVENT_NAME") != "pull_request":
+ return None
+
+ with open(os.environ["GITHUB_EVENT_PATH"]) as f:
+ pr_data = json.load(f)
+ return [label["name"] for label in pr_data["pull_request"]["labels"]]
+
+
+def pytest_configure(config):
+ labels = fetch_pr_labels() or []
+ if "fail-fast" in labels:
+ config.option.maxfail = 1
+
+
@pytest.hookimpl(hookwrapper=True)
def pytest_report_teststatus(report, config):
outcome = yield
| {"golden_diff": "diff --git a/conftest.py b/conftest.py\n--- a/conftest.py\n+++ b/conftest.py\n@@ -100,6 +100,27 @@\n pytest.skip(\"use `--requires-ssh` to run this test\")\n \n \n+def fetch_pr_labels():\n+ \"\"\"\n+ Returns the labels associated with the current pull request.\n+ \"\"\"\n+ if \"GITHUB_ACTIONS\" not in os.environ:\n+ return None\n+\n+ if os.environ.get(\"GITHUB_EVENT_NAME\") != \"pull_request\":\n+ return None\n+\n+ with open(os.environ[\"GITHUB_EVENT_PATH\"]) as f:\n+ pr_data = json.load(f)\n+ return [label[\"name\"] for label in pr_data[\"pull_request\"][\"labels\"]]\n+\n+\n+def pytest_configure(config):\n+ labels = fetch_pr_labels() or []\n+ if \"fail-fast\" in labels:\n+ config.option.maxfail = 1\n+\n+\n @pytest.hookimpl(hookwrapper=True)\n def pytest_report_teststatus(report, config):\n outcome = yield\n", "issue": "Replace `pytest` with `dev/pytest.sh` in `mlflow/ml-package-versions.yml`\n### Summary\r\n\r\nA follow-up task for https://github.com/mlflow/mlflow/pull/10006. Replace `pytest` with `dev/pytest.sh` in `mlflow/ml-package-versions.yml` to enable the `fail-fast` behavior.\r\n\r\n\r\nExample:\r\n\r\n```diff\r\ndiff --git a/mlflow/ml-package-versions.yml b/mlflow/ml-package-versions.yml\r\nindex 4fdc2635d..373aa9e17 100644\r\n--- a/mlflow/ml-package-versions.yml\r\n+++ b/mlflow/ml-package-versions.yml\r\n@@ -229,7 +229,7 @@ fastai:\r\n \"< 2.8.0\": [\"torch<1.13.0\", \"torchvision<0.14.0\"]\r\n \">= 2.8.0\": [\"torch\", \"torchvision\"]\r\n run: |\r\n- pytest tests/fastai/test_fastai_autolog.py\r\n+ dev/pytest.sh tests/fastai/test_fastai_autolog.py\r\n \r\n onnx:\r\n package_info:\r\n```` \r\n\r\n### Notes\r\n\r\n- Make sure to open a PR from a **non-master** branch.\r\n- Sign off the commit using the `-s` flag when making a commit:\r\n\r\n ```sh\r\n git commit -s -m \"...\"\r\n # ^^ make sure to use this\r\n ```\r\n\r\n- Include `Resolve #{issue_number}` (e.g. `Resolve #123`) in the PR description when opening a PR.\r\n\n", "before_files": [{"content": "import json\nimport os\nimport posixpath\nimport shutil\nimport subprocess\nimport sys\n\nimport click\nimport pytest\n\nfrom mlflow.environment_variables import _MLFLOW_TESTING, MLFLOW_TRACKING_URI\nfrom mlflow.version import VERSION\n\nfrom tests.helper_functions import get_safe_port\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--requires-ssh\",\n action=\"store_true\",\n dest=\"requires_ssh\",\n default=False,\n help=\"Run tests decorated with 'requires_ssh' annotation. \"\n \"These tests require keys to be configured locally \"\n \"for SSH authentication.\",\n )\n parser.addoption(\n \"--ignore-flavors\",\n action=\"store_true\",\n dest=\"ignore_flavors\",\n default=False,\n help=\"Ignore tests for model flavors.\",\n )\n parser.addoption(\n \"--splits\",\n default=None,\n type=int,\n help=\"The number of groups to split tests into.\",\n )\n parser.addoption(\n \"--group\",\n default=None,\n type=int,\n help=\"The group of tests to run.\",\n )\n parser.addoption(\n \"--serve-wheel\",\n action=\"store_true\",\n default=os.getenv(\"CI\", \"false\").lower() == \"true\",\n help=\"Serve a wheel for the dev version of MLflow. 
True by default in CI, False otherwise.\",\n )\n\n\ndef pytest_configure(config):\n # Register markers to suppress `PytestUnknownMarkWarning`\n config.addinivalue_line(\"markers\", \"requires_ssh\")\n config.addinivalue_line(\"markers\", \"notrackingurimock\")\n config.addinivalue_line(\"markers\", \"allow_infer_pip_requirements_fallback\")\n\n\[email protected](tryfirst=True)\ndef pytest_cmdline_main(config):\n group = config.getoption(\"group\")\n splits = config.getoption(\"splits\")\n\n if splits is None and group is None:\n return None\n\n if splits and group is None:\n raise pytest.UsageError(\"`--group` is required\")\n\n if group and splits is None:\n raise pytest.UsageError(\"`--splits` is required\")\n\n if splits < 0:\n raise pytest.UsageError(\"`--splits` must be >= 1\")\n\n if group < 1 or group > splits:\n raise pytest.UsageError(\"`--group` must be between 1 and {splits}\")\n\n return None\n\n\ndef pytest_sessionstart(session):\n if uri := MLFLOW_TRACKING_URI.get():\n click.echo(\n click.style(\n (\n f\"Environment variable {MLFLOW_TRACKING_URI} is set to {uri!r}, \"\n \"which may interfere with tests.\"\n ),\n fg=\"red\",\n )\n )\n\n\ndef pytest_runtest_setup(item):\n markers = [mark.name for mark in item.iter_markers()]\n if \"requires_ssh\" in markers and not item.config.getoption(\"--requires-ssh\"):\n pytest.skip(\"use `--requires-ssh` to run this test\")\n\n\[email protected](hookwrapper=True)\ndef pytest_report_teststatus(report, config):\n outcome = yield\n if report.when == \"call\":\n try:\n import psutil\n except ImportError:\n return\n\n (*rest, result) = outcome.get_result()\n mem = psutil.virtual_memory()\n mem_used = mem.used / 1024**3\n mem_total = mem.total / 1024**3\n\n disk = psutil.disk_usage(\"/\")\n disk_used = disk.used / 1024**3\n disk_total = disk.total / 1024**3\n outcome.force_result(\n (\n *rest,\n (\n f\"{result} | \"\n f\"MEM {mem_used:.1f}/{mem_total:.1f} GB | \"\n f\"DISK {disk_used:.1f}/{disk_total:.1f} GB\"\n ),\n )\n )\n\n\[email protected](hookwrapper=True)\ndef pytest_ignore_collect(path, config):\n outcome = yield\n if not outcome.get_result() and config.getoption(\"ignore_flavors\"):\n # If not ignored by the default hook and `--ignore-flavors` specified\n\n # Ignored files and directories must be included in dev/run-python-flavor-tests.sh\n model_flavors = [\n # Tests of flavor modules.\n \"tests/azureml\",\n \"tests/catboost\",\n \"tests/diviner\",\n \"tests/fastai\",\n \"tests/gluon\",\n \"tests/h2o\",\n \"tests/johnsnowlabs\",\n \"tests/keras\",\n \"tests/keras_core\",\n \"tests/langchain\",\n \"tests/lightgbm\",\n \"tests/mleap\",\n \"tests/models\",\n \"tests/onnx\",\n \"tests/openai\",\n \"tests/paddle\",\n \"tests/pmdarima\",\n \"tests/prophet\",\n \"tests/pyfunc\",\n \"tests/pytorch\",\n \"tests/sagemaker\",\n \"tests/sentence_transformers\",\n \"tests/shap\",\n \"tests/sklearn\",\n \"tests/spacy\",\n \"tests/spark\",\n \"tests/statsmodels\",\n \"tests/tensorflow\",\n \"tests/transformers\",\n \"tests/xgboost\",\n # Lazy loading test.\n \"tests/test_mlflow_lazily_imports_ml_packages.py\",\n # Tests of utils.\n \"tests/utils/test_model_utils.py\",\n # This test is included here because it imports many big libraries like tf, keras, etc.\n \"tests/tracking/fluent/test_fluent_autolog.py\",\n # Cross flavor autologging related tests.\n \"tests/autologging/test_autologging_safety_unit.py\",\n \"tests/autologging/test_autologging_behaviors_unit.py\",\n \"tests/autologging/test_autologging_behaviors_integration.py\",\n 
\"tests/autologging/test_autologging_utils.py\",\n \"tests/autologging/test_training_session.py\",\n # Opt in authentication feature.\n \"tests/server/auth\",\n \"tests/gateway\",\n ]\n\n relpath = os.path.relpath(str(path))\n relpath = relpath.replace(os.sep, posixpath.sep) # for Windows\n\n if relpath in model_flavors:\n outcome.force_result(True)\n\n\[email protected](trylast=True)\ndef pytest_collection_modifyitems(session, config, items): # pylint: disable=unused-argument\n # Executing `tests.server.test_prometheus_exporter` after `tests.server.test_handlers`\n # results in an error because Flask >= 2.2.0 doesn't allow calling setup method such as\n # `before_request` on the application after the first request. To avoid this issue,\n # execute `tests.server.test_prometheus_exporter` first by reordering the test items.\n items.sort(key=lambda item: item.module.__name__ != \"tests.server.test_prometheus_exporter\")\n\n # Select the tests to run based on the group and splits\n if (splits := config.getoption(\"--splits\")) and (group := config.getoption(\"--group\")):\n items[:] = items[(group - 1) :: splits]\n\n\[email protected](hookwrapper=True)\ndef pytest_terminal_summary(\n terminalreporter, exitstatus, config\n): # pylint: disable=unused-argument\n yield\n failed_test_reports = terminalreporter.stats.get(\"failed\", [])\n if failed_test_reports:\n if len(failed_test_reports) <= 30:\n terminalreporter.section(\"command to run failed test cases\")\n ids = [repr(report.nodeid) for report in failed_test_reports]\n else:\n terminalreporter.section(\"command to run failed test suites\")\n # Use dict.fromkeys to preserve the order\n ids = list(dict.fromkeys(report.fspath for report in failed_test_reports))\n terminalreporter.write(\" \".join([\"pytest\"] + ids))\n terminalreporter.write(\"\\n\" * 2)\n\n # If some tests failed at installing mlflow, we suggest using `--serve-wheel` flag.\n # Some test cases try to install mlflow via pip e.g. model loading. They pins\n # mlflow version to install based on local environment i.e. dev version ahead of\n # the latest release, hence it's not found on PyPI. `--serve-wheel` flag was\n # introduced to resolve this issue, which starts local PyPI server and serve\n # an mlflow wheel based on local source code.\n # Ref: https://github.com/mlflow/mlflow/pull/10247\n msg = f\"No matching distribution found for mlflow=={VERSION}\"\n for rep in failed_test_reports:\n if any(msg in t for t in (rep.longreprtext, rep.capstdout, rep.capstderr)):\n terminalreporter.section(\"HINTS\", yellow=True)\n terminalreporter.write(\n f\"Found test(s) that failed with {msg!r}. 
Adding\"\n \" --serve-wheel` flag to your pytest command may help.\\n\\n\",\n yellow=True,\n )\n break\n\n\[email protected](scope=\"module\", autouse=True)\ndef clean_up_envs():\n yield\n\n if \"GITHUB_ACTIONS\" in os.environ:\n from mlflow.utils.virtualenv import _get_mlflow_virtualenv_root\n\n shutil.rmtree(_get_mlflow_virtualenv_root(), ignore_errors=True)\n if os.name != \"nt\":\n conda_info = json.loads(subprocess.check_output([\"conda\", \"info\", \"--json\"], text=True))\n root_prefix = conda_info[\"root_prefix\"]\n for env in conda_info[\"envs\"]:\n if env != root_prefix:\n shutil.rmtree(env, ignore_errors=True)\n\n\[email protected](scope=\"session\", autouse=True)\ndef enable_mlflow_testing():\n with pytest.MonkeyPatch.context() as mp:\n mp.setenv(_MLFLOW_TESTING.name, \"TRUE\")\n yield\n\n\[email protected](scope=\"session\", autouse=True)\ndef serve_wheel(request, tmp_path_factory):\n \"\"\"\n Models logged during tests have a dependency on the dev version of MLflow built from\n source (e.g., mlflow==1.20.0.dev0) and cannot be served because the dev version is not\n available on PyPI. This fixture serves a wheel for the dev version from a temporary\n PyPI repository running on localhost and appends the repository URL to the\n `PIP_EXTRA_INDEX_URL` environment variable to make the wheel available to pip.\n \"\"\"\n if not request.config.getoption(\"--serve-wheel\"):\n yield # pytest expects a generator fixture to yield\n return\n\n root = tmp_path_factory.mktemp(\"root\")\n mlflow_dir = root.joinpath(\"mlflow\")\n mlflow_dir.mkdir()\n port = get_safe_port()\n try:\n repo_root = subprocess.check_output(\n [\n \"git\",\n \"rev-parse\",\n \"--show-toplevel\",\n ],\n text=True,\n ).strip()\n except subprocess.CalledProcessError:\n # Some tests run in a Docker container where git is not installed.\n # In this case, assume we're in the root of the repo.\n repo_root = \".\"\n\n subprocess.run(\n [\n sys.executable,\n \"-m\",\n \"pip\",\n \"wheel\",\n \"--wheel-dir\",\n mlflow_dir,\n \"--no-deps\",\n repo_root,\n ],\n check=True,\n )\n with subprocess.Popen(\n [\n sys.executable,\n \"-m\",\n \"http.server\",\n str(port),\n ],\n cwd=root,\n ) as prc:\n url = f\"http://localhost:{port}\"\n if existing_url := os.environ.get(\"PIP_EXTRA_INDEX_URL\"):\n url = f\"{existing_url} {url}\"\n os.environ[\"PIP_EXTRA_INDEX_URL\"] = url\n\n yield\n prc.terminate()\n", "path": "conftest.py"}], "after_files": [{"content": "import json\nimport os\nimport posixpath\nimport shutil\nimport subprocess\nimport sys\n\nimport click\nimport pytest\n\nfrom mlflow.environment_variables import _MLFLOW_TESTING, MLFLOW_TRACKING_URI\nfrom mlflow.version import VERSION\n\nfrom tests.helper_functions import get_safe_port\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--requires-ssh\",\n action=\"store_true\",\n dest=\"requires_ssh\",\n default=False,\n help=\"Run tests decorated with 'requires_ssh' annotation. 
\"\n \"These tests require keys to be configured locally \"\n \"for SSH authentication.\",\n )\n parser.addoption(\n \"--ignore-flavors\",\n action=\"store_true\",\n dest=\"ignore_flavors\",\n default=False,\n help=\"Ignore tests for model flavors.\",\n )\n parser.addoption(\n \"--splits\",\n default=None,\n type=int,\n help=\"The number of groups to split tests into.\",\n )\n parser.addoption(\n \"--group\",\n default=None,\n type=int,\n help=\"The group of tests to run.\",\n )\n parser.addoption(\n \"--serve-wheel\",\n action=\"store_true\",\n default=os.getenv(\"CI\", \"false\").lower() == \"true\",\n help=\"Serve a wheel for the dev version of MLflow. True by default in CI, False otherwise.\",\n )\n\n\ndef pytest_configure(config):\n # Register markers to suppress `PytestUnknownMarkWarning`\n config.addinivalue_line(\"markers\", \"requires_ssh\")\n config.addinivalue_line(\"markers\", \"notrackingurimock\")\n config.addinivalue_line(\"markers\", \"allow_infer_pip_requirements_fallback\")\n\n\[email protected](tryfirst=True)\ndef pytest_cmdline_main(config):\n group = config.getoption(\"group\")\n splits = config.getoption(\"splits\")\n\n if splits is None and group is None:\n return None\n\n if splits and group is None:\n raise pytest.UsageError(\"`--group` is required\")\n\n if group and splits is None:\n raise pytest.UsageError(\"`--splits` is required\")\n\n if splits < 0:\n raise pytest.UsageError(\"`--splits` must be >= 1\")\n\n if group < 1 or group > splits:\n raise pytest.UsageError(\"`--group` must be between 1 and {splits}\")\n\n return None\n\n\ndef pytest_sessionstart(session):\n if uri := MLFLOW_TRACKING_URI.get():\n click.echo(\n click.style(\n (\n f\"Environment variable {MLFLOW_TRACKING_URI} is set to {uri!r}, \"\n \"which may interfere with tests.\"\n ),\n fg=\"red\",\n )\n )\n\n\ndef pytest_runtest_setup(item):\n markers = [mark.name for mark in item.iter_markers()]\n if \"requires_ssh\" in markers and not item.config.getoption(\"--requires-ssh\"):\n pytest.skip(\"use `--requires-ssh` to run this test\")\n\n\ndef fetch_pr_labels():\n \"\"\"\n Returns the labels associated with the current pull request.\n \"\"\"\n if \"GITHUB_ACTIONS\" not in os.environ:\n return None\n\n if os.environ.get(\"GITHUB_EVENT_NAME\") != \"pull_request\":\n return None\n\n with open(os.environ[\"GITHUB_EVENT_PATH\"]) as f:\n pr_data = json.load(f)\n return [label[\"name\"] for label in pr_data[\"pull_request\"][\"labels\"]]\n\n\ndef pytest_configure(config):\n labels = fetch_pr_labels() or []\n if \"fail-fast\" in labels:\n config.option.maxfail = 1\n\n\[email protected](hookwrapper=True)\ndef pytest_report_teststatus(report, config):\n outcome = yield\n if report.when == \"call\":\n try:\n import psutil\n except ImportError:\n return\n\n (*rest, result) = outcome.get_result()\n mem = psutil.virtual_memory()\n mem_used = mem.used / 1024**3\n mem_total = mem.total / 1024**3\n\n disk = psutil.disk_usage(\"/\")\n disk_used = disk.used / 1024**3\n disk_total = disk.total / 1024**3\n outcome.force_result(\n (\n *rest,\n (\n f\"{result} | \"\n f\"MEM {mem_used:.1f}/{mem_total:.1f} GB | \"\n f\"DISK {disk_used:.1f}/{disk_total:.1f} GB\"\n ),\n )\n )\n\n\[email protected](hookwrapper=True)\ndef pytest_ignore_collect(path, config):\n outcome = yield\n if not outcome.get_result() and config.getoption(\"ignore_flavors\"):\n # If not ignored by the default hook and `--ignore-flavors` specified\n\n # Ignored files and directories must be included in dev/run-python-flavor-tests.sh\n model_flavors = [\n # 
Tests of flavor modules.\n \"tests/azureml\",\n \"tests/catboost\",\n \"tests/diviner\",\n \"tests/fastai\",\n \"tests/gluon\",\n \"tests/h2o\",\n \"tests/johnsnowlabs\",\n \"tests/keras\",\n \"tests/keras_core\",\n \"tests/langchain\",\n \"tests/lightgbm\",\n \"tests/mleap\",\n \"tests/models\",\n \"tests/onnx\",\n \"tests/openai\",\n \"tests/paddle\",\n \"tests/pmdarima\",\n \"tests/prophet\",\n \"tests/pyfunc\",\n \"tests/pytorch\",\n \"tests/sagemaker\",\n \"tests/sentence_transformers\",\n \"tests/shap\",\n \"tests/sklearn\",\n \"tests/spacy\",\n \"tests/spark\",\n \"tests/statsmodels\",\n \"tests/tensorflow\",\n \"tests/transformers\",\n \"tests/xgboost\",\n # Lazy loading test.\n \"tests/test_mlflow_lazily_imports_ml_packages.py\",\n # Tests of utils.\n \"tests/utils/test_model_utils.py\",\n # This test is included here because it imports many big libraries like tf, keras, etc.\n \"tests/tracking/fluent/test_fluent_autolog.py\",\n # Cross flavor autologging related tests.\n \"tests/autologging/test_autologging_safety_unit.py\",\n \"tests/autologging/test_autologging_behaviors_unit.py\",\n \"tests/autologging/test_autologging_behaviors_integration.py\",\n \"tests/autologging/test_autologging_utils.py\",\n \"tests/autologging/test_training_session.py\",\n # Opt in authentication feature.\n \"tests/server/auth\",\n \"tests/gateway\",\n ]\n\n relpath = os.path.relpath(str(path))\n relpath = relpath.replace(os.sep, posixpath.sep) # for Windows\n\n if relpath in model_flavors:\n outcome.force_result(True)\n\n\[email protected](trylast=True)\ndef pytest_collection_modifyitems(session, config, items): # pylint: disable=unused-argument\n # Executing `tests.server.test_prometheus_exporter` after `tests.server.test_handlers`\n # results in an error because Flask >= 2.2.0 doesn't allow calling setup method such as\n # `before_request` on the application after the first request. To avoid this issue,\n # execute `tests.server.test_prometheus_exporter` first by reordering the test items.\n items.sort(key=lambda item: item.module.__name__ != \"tests.server.test_prometheus_exporter\")\n\n # Select the tests to run based on the group and splits\n if (splits := config.getoption(\"--splits\")) and (group := config.getoption(\"--group\")):\n items[:] = items[(group - 1) :: splits]\n\n\[email protected](hookwrapper=True)\ndef pytest_terminal_summary(\n terminalreporter, exitstatus, config\n): # pylint: disable=unused-argument\n yield\n failed_test_reports = terminalreporter.stats.get(\"failed\", [])\n if failed_test_reports:\n if len(failed_test_reports) <= 30:\n terminalreporter.section(\"command to run failed test cases\")\n ids = [repr(report.nodeid) for report in failed_test_reports]\n else:\n terminalreporter.section(\"command to run failed test suites\")\n # Use dict.fromkeys to preserve the order\n ids = list(dict.fromkeys(report.fspath for report in failed_test_reports))\n terminalreporter.write(\" \".join([\"pytest\"] + ids))\n terminalreporter.write(\"\\n\" * 2)\n\n # If some tests failed at installing mlflow, we suggest using `--serve-wheel` flag.\n # Some test cases try to install mlflow via pip e.g. model loading. They pins\n # mlflow version to install based on local environment i.e. dev version ahead of\n # the latest release, hence it's not found on PyPI. 
`--serve-wheel` flag was\n # introduced to resolve this issue, which starts local PyPI server and serve\n # an mlflow wheel based on local source code.\n # Ref: https://github.com/mlflow/mlflow/pull/10247\n msg = f\"No matching distribution found for mlflow=={VERSION}\"\n for rep in failed_test_reports:\n if any(msg in t for t in (rep.longreprtext, rep.capstdout, rep.capstderr)):\n terminalreporter.section(\"HINTS\", yellow=True)\n terminalreporter.write(\n f\"Found test(s) that failed with {msg!r}. Adding\"\n \" --serve-wheel` flag to your pytest command may help.\\n\\n\",\n yellow=True,\n )\n break\n\n\[email protected](scope=\"module\", autouse=True)\ndef clean_up_envs():\n yield\n\n if \"GITHUB_ACTIONS\" in os.environ:\n from mlflow.utils.virtualenv import _get_mlflow_virtualenv_root\n\n shutil.rmtree(_get_mlflow_virtualenv_root(), ignore_errors=True)\n if os.name != \"nt\":\n conda_info = json.loads(subprocess.check_output([\"conda\", \"info\", \"--json\"], text=True))\n root_prefix = conda_info[\"root_prefix\"]\n for env in conda_info[\"envs\"]:\n if env != root_prefix:\n shutil.rmtree(env, ignore_errors=True)\n\n\[email protected](scope=\"session\", autouse=True)\ndef enable_mlflow_testing():\n with pytest.MonkeyPatch.context() as mp:\n mp.setenv(_MLFLOW_TESTING.name, \"TRUE\")\n yield\n\n\[email protected](scope=\"session\", autouse=True)\ndef serve_wheel(request, tmp_path_factory):\n \"\"\"\n Models logged during tests have a dependency on the dev version of MLflow built from\n source (e.g., mlflow==1.20.0.dev0) and cannot be served because the dev version is not\n available on PyPI. This fixture serves a wheel for the dev version from a temporary\n PyPI repository running on localhost and appends the repository URL to the\n `PIP_EXTRA_INDEX_URL` environment variable to make the wheel available to pip.\n \"\"\"\n if not request.config.getoption(\"--serve-wheel\"):\n yield # pytest expects a generator fixture to yield\n return\n\n root = tmp_path_factory.mktemp(\"root\")\n mlflow_dir = root.joinpath(\"mlflow\")\n mlflow_dir.mkdir()\n port = get_safe_port()\n try:\n repo_root = subprocess.check_output(\n [\n \"git\",\n \"rev-parse\",\n \"--show-toplevel\",\n ],\n text=True,\n ).strip()\n except subprocess.CalledProcessError:\n # Some tests run in a Docker container where git is not installed.\n # In this case, assume we're in the root of the repo.\n repo_root = \".\"\n\n subprocess.run(\n [\n sys.executable,\n \"-m\",\n \"pip\",\n \"wheel\",\n \"--wheel-dir\",\n mlflow_dir,\n \"--no-deps\",\n repo_root,\n ],\n check=True,\n )\n with subprocess.Popen(\n [\n sys.executable,\n \"-m\",\n \"http.server\",\n str(port),\n ],\n cwd=root,\n ) as prc:\n url = f\"http://localhost:{port}\"\n if existing_url := os.environ.get(\"PIP_EXTRA_INDEX_URL\"):\n url = f\"{existing_url} {url}\"\n os.environ[\"PIP_EXTRA_INDEX_URL\"] = url\n\n yield\n prc.terminate()\n", "path": "conftest.py"}]} | 4,038 | 233 |
gh_patches_debug_15871 | rasdani/github-patches | git_diff | Kinto__kinto-1405 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Memcached to dockerfile and docker compose setup.
Following the addition of Memcached as a cache backend.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/core/__init__.py`
Content:
```
1 """Main entry point
2 """
3 import logging
4 import pkg_resources
5 import tempfile
6
7 from cornice import Service as CorniceService
8 from dockerflow import logging as dockerflow_logging
9 from pyramid.settings import aslist
10
11 from kinto.core import errors
12 from kinto.core import events
13 from kinto.core.initialization import ( # NOQA
14 initialize, install_middlewares,
15 load_default_settings)
16 from kinto.core.utils import (
17 follow_subrequest, current_service, current_resource_name,
18 prefixed_userid, prefixed_principals, log_context)
19
20
21 logger = logging.getLogger(__name__)
22
23
24 # Module version, as defined in PEP-0396.
25 __version__ = pkg_resources.get_distribution('kinto').version # FIXME?
26
27 DEFAULT_SETTINGS = {
28 'backoff': None,
29 'batch_max_requests': 25,
30 'cache_backend': '',
31 'cache_url': '',
32 'cache_pool_size': 25,
33 'cache_prefix': '',
34 'cache_max_size_bytes': 524288,
35 'cors_origins': '*',
36 'cors_max_age_seconds': 3600,
37 'eos': None,
38 'eos_message': None,
39 'eos_url': None,
40 'error_info_link': 'https://github.com/Kinto/kinto/issues/',
41 'http_host': None,
42 'http_scheme': None,
43 'id_generator': 'kinto.core.storage.generators.UUID4',
44 'includes': '',
45 'initialization_sequence': (
46 'kinto.core.initialization.setup_request_bound_data',
47 'kinto.core.initialization.setup_json_serializer',
48 'kinto.core.initialization.setup_logging',
49 'kinto.core.initialization.setup_storage',
50 'kinto.core.initialization.setup_permission',
51 'kinto.core.initialization.setup_cache',
52 'kinto.core.initialization.setup_requests_scheme',
53 'kinto.core.initialization.setup_version_redirection',
54 'kinto.core.initialization.setup_deprecation',
55 'kinto.core.initialization.setup_authentication',
56 'kinto.core.initialization.setup_backoff',
57 'kinto.core.initialization.setup_statsd',
58 'kinto.core.initialization.setup_listeners',
59 'kinto.core.events.setup_transaction_hook',
60 ),
61 'event_listeners': '',
62 'heartbeat_timeout_seconds': 10,
63 'newrelic_config': None,
64 'newrelic_env': 'dev',
65 'paginate_by': None,
66 'pagination_token_validity_seconds': 10 * 60,
67 'permission_backend': '',
68 'permission_url': '',
69 'permission_pool_size': 25,
70 'profiler_dir': tempfile.gettempdir(),
71 'profiler_enabled': False,
72 'project_docs': '',
73 'project_name': '',
74 'project_version': '',
75 'readonly': False,
76 'retry_after_seconds': 30,
77 'statsd_backend': 'kinto.core.statsd',
78 'statsd_prefix': 'kinto.core',
79 'statsd_url': None,
80 'storage_backend': '',
81 'storage_url': '',
82 'storage_max_fetch_size': 10000,
83 'storage_pool_size': 25,
84 'tm.annotate_user': False, # Do annotate transactions with the user-id.
85 'transaction_per_request': True,
86 'userid_hmac_secret': '',
87 'version_json_path': 'version.json',
88 'version_prefix_redirect_enabled': True,
89 'trailing_slash_redirect_enabled': True,
90 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder',
91 'multiauth.policies': 'basicauth',
92 'multiauth.policy.basicauth.use': ('kinto.core.authentication.'
93 'BasicAuthAuthenticationPolicy'),
94 'multiauth.authorization_policy': ('kinto.core.authorization.'
95 'AuthorizationPolicy'),
96 }
97
98
99 class Service(CorniceService):
100 """Subclass of the default cornice service.
101
102 This is useful in order to attach specific behaviours without monkey
103 patching the default cornice service (which would impact other uses of it)
104 """
105 default_cors_headers = ('Backoff', 'Retry-After', 'Alert',
106 'Content-Length')
107
108 def error_handler(self, request):
109 return errors.json_error_handler(request)
110
111 @classmethod
112 def init_from_settings(cls, settings):
113 cls.cors_origins = tuple(aslist(settings['cors_origins']))
114 cors_max_age = settings['cors_max_age_seconds']
115 cls.cors_max_age = int(cors_max_age) if cors_max_age else None
116
117
118 class JsonLogFormatter(dockerflow_logging.JsonLogFormatter):
119 logger_name = 'kinto'
120
121 @classmethod
122 def init_from_settings(cls, settings):
123 cls.logger_name = settings['project_name']
124
125 def __init__(self, fmt=None, datefmt=None, style='%'):
126 # Do not let mozilla-cloud-services-logger constructor to improperly
127 # use style as the logger_name.
128 # See https://github.com/mozilla/mozilla-cloud-services-logger/issues/3
129 logger_name = self.logger_name
130 super().__init__(fmt, datefmt, style)
131 self.logger_name = logger_name
132
133
134 def includeme(config):
135 settings = config.get_settings()
136
137 # Heartbeat registry.
138 config.registry.heartbeats = {}
139
140 # Public settings registry.
141 config.registry.public_settings = {'batch_max_requests', 'readonly'}
142
143 # Directive to declare arbitrary API capabilities.
144 def add_api_capability(config, identifier, description='', url='', **kw):
145 existing = config.registry.api_capabilities.get(identifier)
146 if existing:
147 error_msg = "The '{}' API capability was already registered ({})."
148 raise ValueError(error_msg.format(identifier, existing))
149
150 capability = dict(description=description, url=url, **kw)
151 config.registry.api_capabilities[identifier] = capability
152
153 config.add_directive('add_api_capability', add_api_capability)
154 config.registry.api_capabilities = {}
155
156 # Resource events helpers.
157 config.add_request_method(events.get_resource_events,
158 name='get_resource_events')
159 config.add_request_method(events.notify_resource_event,
160 name='notify_resource_event')
161
162 # Setup cornice.
163 config.include('cornice')
164
165 # Setup cornice api documentation
166 config.include('cornice_swagger')
167
168 # Per-request transaction.
169 config.include('pyramid_tm')
170
171 # Add CORS settings to the base kinto.core Service class.
172 Service.init_from_settings(settings)
173
174 # Use the project name as the main logger name (Logger field in MozLog).
175 JsonLogFormatter.init_from_settings(settings)
176
177 # Setup components.
178 for step in aslist(settings['initialization_sequence']):
179 step_func = config.maybe_dotted(step)
180 step_func(config)
181
182 # Custom helpers.
183 config.add_request_method(log_context)
184 config.add_request_method(follow_subrequest)
185 config.add_request_method(prefixed_userid, property=True)
186 config.add_request_method(prefixed_principals, reify=True)
187 config.add_request_method(lambda r: {
188 'id': r.prefixed_userid,
189 'principals': r.prefixed_principals},
190 name='get_user_info')
191 config.add_request_method(current_resource_name, reify=True)
192 config.add_request_method(current_service, reify=True)
193 config.commit()
194
195 # Include plugins after init, unlike pyramid includes.
196 includes = aslist(settings['includes'])
197 for app in includes:
198 config.include(app)
199
200 # # Show settings to output.
201 # for key, value in settings.items():
202 # logger.info('Using {} = {}'.format(key, value))
203
204 # Scan views.
205 config.scan('kinto.core.views')
206
207 # Give sign of life.
208 msg = 'Running {project_name} {project_version}.'
209 logger.info(msg.format_map(settings))
210
```
Path: `kinto/core/storage/postgresql/client.py`
Content:
```
1 import contextlib
2 import logging
3 import warnings
4 from collections import defaultdict
5
6 from kinto.core.storage import exceptions
7 from kinto.core.utils import sqlalchemy
8 import transaction as zope_transaction
9
10
11 logger = logging.getLogger(__name__)
12
13
14 class PostgreSQLClient:
15 def __init__(self, session_factory, commit_manually, invalidate):
16 self.session_factory = session_factory
17 self.commit_manually = commit_manually
18 self.invalidate = invalidate
19
20 @contextlib.contextmanager
21 def connect(self, readonly=False, force_commit=False):
22 """
23 Pulls a connection from the pool when context is entered and
24 returns it when context is exited.
25
26 A COMMIT is performed on the current transaction if everything went
27 well. Otherwise transaction is ROLLBACK, and everything cleaned up.
28 """
29 commit_manually = self.commit_manually and not readonly
30 session = None
31 try:
32 # Pull connection from pool.
33 session = self.session_factory()
34 # Start context
35 yield session
36 if not readonly and not self.commit_manually:
37 # Mark session as dirty.
38 self.invalidate(session)
39 # Success
40 if commit_manually:
41 session.commit()
42 elif force_commit:
43 # Commit like would do a succesful request.
44 zope_transaction.commit()
45
46 except sqlalchemy.exc.IntegrityError as e:
47 logger.error(e, exc_info=True)
48 if commit_manually: # pragma: no branch
49 session.rollback()
50 raise exceptions.IntegrityError(original=e) from e
51 except sqlalchemy.exc.SQLAlchemyError as e:
52 logger.error(e, exc_info=True)
53 if session and commit_manually:
54 session.rollback()
55 raise exceptions.BackendError(original=e) from e
56 finally:
57 if session and self.commit_manually:
58 # Give back to pool if commit done manually.
59 session.close()
60
61
62 # Reuse existing client if same URL.
63 _CLIENTS = defaultdict(dict)
64
65
66 def create_from_config(config, prefix='', with_transaction=True):
67 """Create a PostgreSQLClient client using settings in the provided config.
68 """
69 if sqlalchemy is None:
70 message = ('PostgreSQL SQLAlchemy dependency missing. '
71 'Refer to installation section in documentation.')
72 raise ImportWarning(message)
73
74 from zope.sqlalchemy import ZopeTransactionExtension, invalidate
75 from sqlalchemy.orm import sessionmaker, scoped_session
76
77 settings = {**config.get_settings()}
78 # Custom Kinto settings, unsupported by SQLAlchemy.
79 settings.pop(prefix + 'backend', None)
80 settings.pop(prefix + 'max_fetch_size', None)
81 settings.pop(prefix + 'max_size_bytes', None)
82 settings.pop(prefix + 'prefix', None)
83 settings.pop(prefix + 'strict_json', None)
84 transaction_per_request = with_transaction and settings.pop('transaction_per_request', False)
85
86 url = settings[prefix + 'url']
87 existing_client = _CLIENTS[transaction_per_request].get(url)
88 if existing_client:
89 msg = ('Reuse existing PostgreSQL connection. '
90 'Parameters {}* will be ignored.'.format(prefix))
91 warnings.warn(msg)
92 return existing_client
93
94 # Initialize SQLAlchemy engine from settings.
95 poolclass_key = prefix + 'poolclass'
96 settings.setdefault(poolclass_key, ('kinto.core.storage.postgresql.'
97 'pool.QueuePoolWithMaxBacklog'))
98 settings[poolclass_key] = config.maybe_dotted(settings[poolclass_key])
99 engine = sqlalchemy.engine_from_config(settings, prefix=prefix, url=url)
100
101 # Initialize thread-safe session factory.
102 options = {}
103 if transaction_per_request:
104 # Plug with Pyramid transaction manager
105 options['extension'] = ZopeTransactionExtension()
106 session_factory = scoped_session(sessionmaker(bind=engine, **options))
107
108 # Store one client per URI.
109 commit_manually = (not transaction_per_request)
110 client = PostgreSQLClient(session_factory, commit_manually, invalidate)
111 _CLIENTS[transaction_per_request][url] = client
112 return client
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/core/__init__.py b/kinto/core/__init__.py
--- a/kinto/core/__init__.py
+++ b/kinto/core/__init__.py
@@ -28,6 +28,7 @@
'backoff': None,
'batch_max_requests': 25,
'cache_backend': '',
+ 'cache_hosts': '',
'cache_url': '',
'cache_pool_size': 25,
'cache_prefix': '',
diff --git a/kinto/core/storage/postgresql/client.py b/kinto/core/storage/postgresql/client.py
--- a/kinto/core/storage/postgresql/client.py
+++ b/kinto/core/storage/postgresql/client.py
@@ -81,6 +81,7 @@
settings.pop(prefix + 'max_size_bytes', None)
settings.pop(prefix + 'prefix', None)
settings.pop(prefix + 'strict_json', None)
+ settings.pop(prefix + 'hosts', None)
transaction_per_request = with_transaction and settings.pop('transaction_per_request', False)
url = settings[prefix + 'url']
| {"golden_diff": "diff --git a/kinto/core/__init__.py b/kinto/core/__init__.py\n--- a/kinto/core/__init__.py\n+++ b/kinto/core/__init__.py\n@@ -28,6 +28,7 @@\n 'backoff': None,\n 'batch_max_requests': 25,\n 'cache_backend': '',\n+ 'cache_hosts': '',\n 'cache_url': '',\n 'cache_pool_size': 25,\n 'cache_prefix': '',\ndiff --git a/kinto/core/storage/postgresql/client.py b/kinto/core/storage/postgresql/client.py\n--- a/kinto/core/storage/postgresql/client.py\n+++ b/kinto/core/storage/postgresql/client.py\n@@ -81,6 +81,7 @@\n settings.pop(prefix + 'max_size_bytes', None)\n settings.pop(prefix + 'prefix', None)\n settings.pop(prefix + 'strict_json', None)\n+ settings.pop(prefix + 'hosts', None)\n transaction_per_request = with_transaction and settings.pop('transaction_per_request', False)\n \n url = settings[prefix + 'url']\n", "issue": "Add Memcached to dockerfile and docker compose setup.\nFollowing the addition of Memcached as a cache backend.\n", "before_files": [{"content": "\"\"\"Main entry point\n\"\"\"\nimport logging\nimport pkg_resources\nimport tempfile\n\nfrom cornice import Service as CorniceService\nfrom dockerflow import logging as dockerflow_logging\nfrom pyramid.settings import aslist\n\nfrom kinto.core import errors\nfrom kinto.core import events\nfrom kinto.core.initialization import ( # NOQA\n initialize, install_middlewares,\n load_default_settings)\nfrom kinto.core.utils import (\n follow_subrequest, current_service, current_resource_name,\n prefixed_userid, prefixed_principals, log_context)\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution('kinto').version # FIXME?\n\nDEFAULT_SETTINGS = {\n 'backoff': None,\n 'batch_max_requests': 25,\n 'cache_backend': '',\n 'cache_url': '',\n 'cache_pool_size': 25,\n 'cache_prefix': '',\n 'cache_max_size_bytes': 524288,\n 'cors_origins': '*',\n 'cors_max_age_seconds': 3600,\n 'eos': None,\n 'eos_message': None,\n 'eos_url': None,\n 'error_info_link': 'https://github.com/Kinto/kinto/issues/',\n 'http_host': None,\n 'http_scheme': None,\n 'id_generator': 'kinto.core.storage.generators.UUID4',\n 'includes': '',\n 'initialization_sequence': (\n 'kinto.core.initialization.setup_request_bound_data',\n 'kinto.core.initialization.setup_json_serializer',\n 'kinto.core.initialization.setup_logging',\n 'kinto.core.initialization.setup_storage',\n 'kinto.core.initialization.setup_permission',\n 'kinto.core.initialization.setup_cache',\n 'kinto.core.initialization.setup_requests_scheme',\n 'kinto.core.initialization.setup_version_redirection',\n 'kinto.core.initialization.setup_deprecation',\n 'kinto.core.initialization.setup_authentication',\n 'kinto.core.initialization.setup_backoff',\n 'kinto.core.initialization.setup_statsd',\n 'kinto.core.initialization.setup_listeners',\n 'kinto.core.events.setup_transaction_hook',\n ),\n 'event_listeners': '',\n 'heartbeat_timeout_seconds': 10,\n 'newrelic_config': None,\n 'newrelic_env': 'dev',\n 'paginate_by': None,\n 'pagination_token_validity_seconds': 10 * 60,\n 'permission_backend': '',\n 'permission_url': '',\n 'permission_pool_size': 25,\n 'profiler_dir': tempfile.gettempdir(),\n 'profiler_enabled': False,\n 'project_docs': '',\n 'project_name': '',\n 'project_version': '',\n 'readonly': False,\n 'retry_after_seconds': 30,\n 'statsd_backend': 'kinto.core.statsd',\n 'statsd_prefix': 'kinto.core',\n 'statsd_url': None,\n 'storage_backend': '',\n 'storage_url': '',\n 'storage_max_fetch_size': 10000,\n 
'storage_pool_size': 25,\n 'tm.annotate_user': False, # Do annotate transactions with the user-id.\n 'transaction_per_request': True,\n 'userid_hmac_secret': '',\n 'version_json_path': 'version.json',\n 'version_prefix_redirect_enabled': True,\n 'trailing_slash_redirect_enabled': True,\n 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder',\n 'multiauth.policies': 'basicauth',\n 'multiauth.policy.basicauth.use': ('kinto.core.authentication.'\n 'BasicAuthAuthenticationPolicy'),\n 'multiauth.authorization_policy': ('kinto.core.authorization.'\n 'AuthorizationPolicy'),\n}\n\n\nclass Service(CorniceService):\n \"\"\"Subclass of the default cornice service.\n\n This is useful in order to attach specific behaviours without monkey\n patching the default cornice service (which would impact other uses of it)\n \"\"\"\n default_cors_headers = ('Backoff', 'Retry-After', 'Alert',\n 'Content-Length')\n\n def error_handler(self, request):\n return errors.json_error_handler(request)\n\n @classmethod\n def init_from_settings(cls, settings):\n cls.cors_origins = tuple(aslist(settings['cors_origins']))\n cors_max_age = settings['cors_max_age_seconds']\n cls.cors_max_age = int(cors_max_age) if cors_max_age else None\n\n\nclass JsonLogFormatter(dockerflow_logging.JsonLogFormatter):\n logger_name = 'kinto'\n\n @classmethod\n def init_from_settings(cls, settings):\n cls.logger_name = settings['project_name']\n\n def __init__(self, fmt=None, datefmt=None, style='%'):\n # Do not let mozilla-cloud-services-logger constructor to improperly\n # use style as the logger_name.\n # See https://github.com/mozilla/mozilla-cloud-services-logger/issues/3\n logger_name = self.logger_name\n super().__init__(fmt, datefmt, style)\n self.logger_name = logger_name\n\n\ndef includeme(config):\n settings = config.get_settings()\n\n # Heartbeat registry.\n config.registry.heartbeats = {}\n\n # Public settings registry.\n config.registry.public_settings = {'batch_max_requests', 'readonly'}\n\n # Directive to declare arbitrary API capabilities.\n def add_api_capability(config, identifier, description='', url='', **kw):\n existing = config.registry.api_capabilities.get(identifier)\n if existing:\n error_msg = \"The '{}' API capability was already registered ({}).\"\n raise ValueError(error_msg.format(identifier, existing))\n\n capability = dict(description=description, url=url, **kw)\n config.registry.api_capabilities[identifier] = capability\n\n config.add_directive('add_api_capability', add_api_capability)\n config.registry.api_capabilities = {}\n\n # Resource events helpers.\n config.add_request_method(events.get_resource_events,\n name='get_resource_events')\n config.add_request_method(events.notify_resource_event,\n name='notify_resource_event')\n\n # Setup cornice.\n config.include('cornice')\n\n # Setup cornice api documentation\n config.include('cornice_swagger')\n\n # Per-request transaction.\n config.include('pyramid_tm')\n\n # Add CORS settings to the base kinto.core Service class.\n Service.init_from_settings(settings)\n\n # Use the project name as the main logger name (Logger field in MozLog).\n JsonLogFormatter.init_from_settings(settings)\n\n # Setup components.\n for step in aslist(settings['initialization_sequence']):\n step_func = config.maybe_dotted(step)\n step_func(config)\n\n # Custom helpers.\n config.add_request_method(log_context)\n config.add_request_method(follow_subrequest)\n config.add_request_method(prefixed_userid, property=True)\n config.add_request_method(prefixed_principals, reify=True)\n 
config.add_request_method(lambda r: {\n 'id': r.prefixed_userid,\n 'principals': r.prefixed_principals},\n name='get_user_info')\n config.add_request_method(current_resource_name, reify=True)\n config.add_request_method(current_service, reify=True)\n config.commit()\n\n # Include plugins after init, unlike pyramid includes.\n includes = aslist(settings['includes'])\n for app in includes:\n config.include(app)\n\n # # Show settings to output.\n # for key, value in settings.items():\n # logger.info('Using {} = {}'.format(key, value))\n\n # Scan views.\n config.scan('kinto.core.views')\n\n # Give sign of life.\n msg = 'Running {project_name} {project_version}.'\n logger.info(msg.format_map(settings))\n", "path": "kinto/core/__init__.py"}, {"content": "import contextlib\nimport logging\nimport warnings\nfrom collections import defaultdict\n\nfrom kinto.core.storage import exceptions\nfrom kinto.core.utils import sqlalchemy\nimport transaction as zope_transaction\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass PostgreSQLClient:\n def __init__(self, session_factory, commit_manually, invalidate):\n self.session_factory = session_factory\n self.commit_manually = commit_manually\n self.invalidate = invalidate\n\n @contextlib.contextmanager\n def connect(self, readonly=False, force_commit=False):\n \"\"\"\n Pulls a connection from the pool when context is entered and\n returns it when context is exited.\n\n A COMMIT is performed on the current transaction if everything went\n well. Otherwise transaction is ROLLBACK, and everything cleaned up.\n \"\"\"\n commit_manually = self.commit_manually and not readonly\n session = None\n try:\n # Pull connection from pool.\n session = self.session_factory()\n # Start context\n yield session\n if not readonly and not self.commit_manually:\n # Mark session as dirty.\n self.invalidate(session)\n # Success\n if commit_manually:\n session.commit()\n elif force_commit:\n # Commit like would do a succesful request.\n zope_transaction.commit()\n\n except sqlalchemy.exc.IntegrityError as e:\n logger.error(e, exc_info=True)\n if commit_manually: # pragma: no branch\n session.rollback()\n raise exceptions.IntegrityError(original=e) from e\n except sqlalchemy.exc.SQLAlchemyError as e:\n logger.error(e, exc_info=True)\n if session and commit_manually:\n session.rollback()\n raise exceptions.BackendError(original=e) from e\n finally:\n if session and self.commit_manually:\n # Give back to pool if commit done manually.\n session.close()\n\n\n# Reuse existing client if same URL.\n_CLIENTS = defaultdict(dict)\n\n\ndef create_from_config(config, prefix='', with_transaction=True):\n \"\"\"Create a PostgreSQLClient client using settings in the provided config.\n \"\"\"\n if sqlalchemy is None:\n message = ('PostgreSQL SQLAlchemy dependency missing. 
'\n 'Refer to installation section in documentation.')\n raise ImportWarning(message)\n\n from zope.sqlalchemy import ZopeTransactionExtension, invalidate\n from sqlalchemy.orm import sessionmaker, scoped_session\n\n settings = {**config.get_settings()}\n # Custom Kinto settings, unsupported by SQLAlchemy.\n settings.pop(prefix + 'backend', None)\n settings.pop(prefix + 'max_fetch_size', None)\n settings.pop(prefix + 'max_size_bytes', None)\n settings.pop(prefix + 'prefix', None)\n settings.pop(prefix + 'strict_json', None)\n transaction_per_request = with_transaction and settings.pop('transaction_per_request', False)\n\n url = settings[prefix + 'url']\n existing_client = _CLIENTS[transaction_per_request].get(url)\n if existing_client:\n msg = ('Reuse existing PostgreSQL connection. '\n 'Parameters {}* will be ignored.'.format(prefix))\n warnings.warn(msg)\n return existing_client\n\n # Initialize SQLAlchemy engine from settings.\n poolclass_key = prefix + 'poolclass'\n settings.setdefault(poolclass_key, ('kinto.core.storage.postgresql.'\n 'pool.QueuePoolWithMaxBacklog'))\n settings[poolclass_key] = config.maybe_dotted(settings[poolclass_key])\n engine = sqlalchemy.engine_from_config(settings, prefix=prefix, url=url)\n\n # Initialize thread-safe session factory.\n options = {}\n if transaction_per_request:\n # Plug with Pyramid transaction manager\n options['extension'] = ZopeTransactionExtension()\n session_factory = scoped_session(sessionmaker(bind=engine, **options))\n\n # Store one client per URI.\n commit_manually = (not transaction_per_request)\n client = PostgreSQLClient(session_factory, commit_manually, invalidate)\n _CLIENTS[transaction_per_request][url] = client\n return client\n", "path": "kinto/core/storage/postgresql/client.py"}], "after_files": [{"content": "\"\"\"Main entry point\n\"\"\"\nimport logging\nimport pkg_resources\nimport tempfile\n\nfrom cornice import Service as CorniceService\nfrom dockerflow import logging as dockerflow_logging\nfrom pyramid.settings import aslist\n\nfrom kinto.core import errors\nfrom kinto.core import events\nfrom kinto.core.initialization import ( # NOQA\n initialize, install_middlewares,\n load_default_settings)\nfrom kinto.core.utils import (\n follow_subrequest, current_service, current_resource_name,\n prefixed_userid, prefixed_principals, log_context)\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution('kinto').version # FIXME?\n\nDEFAULT_SETTINGS = {\n 'backoff': None,\n 'batch_max_requests': 25,\n 'cache_backend': '',\n 'cache_hosts': '',\n 'cache_url': '',\n 'cache_pool_size': 25,\n 'cache_prefix': '',\n 'cache_max_size_bytes': 524288,\n 'cors_origins': '*',\n 'cors_max_age_seconds': 3600,\n 'eos': None,\n 'eos_message': None,\n 'eos_url': None,\n 'error_info_link': 'https://github.com/Kinto/kinto/issues/',\n 'http_host': None,\n 'http_scheme': None,\n 'id_generator': 'kinto.core.storage.generators.UUID4',\n 'includes': '',\n 'initialization_sequence': (\n 'kinto.core.initialization.setup_request_bound_data',\n 'kinto.core.initialization.setup_json_serializer',\n 'kinto.core.initialization.setup_logging',\n 'kinto.core.initialization.setup_storage',\n 'kinto.core.initialization.setup_permission',\n 'kinto.core.initialization.setup_cache',\n 'kinto.core.initialization.setup_requests_scheme',\n 'kinto.core.initialization.setup_version_redirection',\n 'kinto.core.initialization.setup_deprecation',\n 'kinto.core.initialization.setup_authentication',\n 
'kinto.core.initialization.setup_backoff',\n 'kinto.core.initialization.setup_statsd',\n 'kinto.core.initialization.setup_listeners',\n 'kinto.core.events.setup_transaction_hook',\n ),\n 'event_listeners': '',\n 'heartbeat_timeout_seconds': 10,\n 'newrelic_config': None,\n 'newrelic_env': 'dev',\n 'paginate_by': None,\n 'pagination_token_validity_seconds': 10 * 60,\n 'permission_backend': '',\n 'permission_url': '',\n 'permission_pool_size': 25,\n 'profiler_dir': tempfile.gettempdir(),\n 'profiler_enabled': False,\n 'project_docs': '',\n 'project_name': '',\n 'project_version': '',\n 'readonly': False,\n 'retry_after_seconds': 30,\n 'statsd_backend': 'kinto.core.statsd',\n 'statsd_prefix': 'kinto.core',\n 'statsd_url': None,\n 'storage_backend': '',\n 'storage_url': '',\n 'storage_max_fetch_size': 10000,\n 'storage_pool_size': 25,\n 'tm.annotate_user': False, # Do annotate transactions with the user-id.\n 'transaction_per_request': True,\n 'userid_hmac_secret': '',\n 'version_json_path': 'version.json',\n 'version_prefix_redirect_enabled': True,\n 'trailing_slash_redirect_enabled': True,\n 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder',\n 'multiauth.policies': 'basicauth',\n 'multiauth.policy.basicauth.use': ('kinto.core.authentication.'\n 'BasicAuthAuthenticationPolicy'),\n 'multiauth.authorization_policy': ('kinto.core.authorization.'\n 'AuthorizationPolicy'),\n}\n\n\nclass Service(CorniceService):\n \"\"\"Subclass of the default cornice service.\n\n This is useful in order to attach specific behaviours without monkey\n patching the default cornice service (which would impact other uses of it)\n \"\"\"\n default_cors_headers = ('Backoff', 'Retry-After', 'Alert',\n 'Content-Length')\n\n def error_handler(self, request):\n return errors.json_error_handler(request)\n\n @classmethod\n def init_from_settings(cls, settings):\n cls.cors_origins = tuple(aslist(settings['cors_origins']))\n cors_max_age = settings['cors_max_age_seconds']\n cls.cors_max_age = int(cors_max_age) if cors_max_age else None\n\n\nclass JsonLogFormatter(dockerflow_logging.JsonLogFormatter):\n logger_name = 'kinto'\n\n @classmethod\n def init_from_settings(cls, settings):\n cls.logger_name = settings['project_name']\n\n def __init__(self, fmt=None, datefmt=None, style='%'):\n # Do not let mozilla-cloud-services-logger constructor to improperly\n # use style as the logger_name.\n # See https://github.com/mozilla/mozilla-cloud-services-logger/issues/3\n logger_name = self.logger_name\n super().__init__(fmt, datefmt, style)\n self.logger_name = logger_name\n\n\ndef includeme(config):\n settings = config.get_settings()\n\n # Heartbeat registry.\n config.registry.heartbeats = {}\n\n # Public settings registry.\n config.registry.public_settings = {'batch_max_requests', 'readonly'}\n\n # Directive to declare arbitrary API capabilities.\n def add_api_capability(config, identifier, description='', url='', **kw):\n existing = config.registry.api_capabilities.get(identifier)\n if existing:\n error_msg = \"The '{}' API capability was already registered ({}).\"\n raise ValueError(error_msg.format(identifier, existing))\n\n capability = dict(description=description, url=url, **kw)\n config.registry.api_capabilities[identifier] = capability\n\n config.add_directive('add_api_capability', add_api_capability)\n config.registry.api_capabilities = {}\n\n # Resource events helpers.\n config.add_request_method(events.get_resource_events,\n name='get_resource_events')\n config.add_request_method(events.notify_resource_event,\n 
name='notify_resource_event')\n\n # Setup cornice.\n config.include('cornice')\n\n # Setup cornice api documentation\n config.include('cornice_swagger')\n\n # Per-request transaction.\n config.include('pyramid_tm')\n\n # Add CORS settings to the base kinto.core Service class.\n Service.init_from_settings(settings)\n\n # Use the project name as the main logger name (Logger field in MozLog).\n JsonLogFormatter.init_from_settings(settings)\n\n # Setup components.\n for step in aslist(settings['initialization_sequence']):\n step_func = config.maybe_dotted(step)\n step_func(config)\n\n # Custom helpers.\n config.add_request_method(log_context)\n config.add_request_method(follow_subrequest)\n config.add_request_method(prefixed_userid, property=True)\n config.add_request_method(prefixed_principals, reify=True)\n config.add_request_method(lambda r: {\n 'id': r.prefixed_userid,\n 'principals': r.prefixed_principals},\n name='get_user_info')\n config.add_request_method(current_resource_name, reify=True)\n config.add_request_method(current_service, reify=True)\n config.commit()\n\n # Include plugins after init, unlike pyramid includes.\n includes = aslist(settings['includes'])\n for app in includes:\n config.include(app)\n\n # # Show settings to output.\n # for key, value in settings.items():\n # logger.info('Using {} = {}'.format(key, value))\n\n # Scan views.\n config.scan('kinto.core.views')\n\n # Give sign of life.\n msg = 'Running {project_name} {project_version}.'\n logger.info(msg.format_map(settings))\n", "path": "kinto/core/__init__.py"}, {"content": "import contextlib\nimport logging\nimport warnings\nfrom collections import defaultdict\n\nfrom kinto.core.storage import exceptions\nfrom kinto.core.utils import sqlalchemy\nimport transaction as zope_transaction\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass PostgreSQLClient:\n def __init__(self, session_factory, commit_manually, invalidate):\n self.session_factory = session_factory\n self.commit_manually = commit_manually\n self.invalidate = invalidate\n\n @contextlib.contextmanager\n def connect(self, readonly=False, force_commit=False):\n \"\"\"\n Pulls a connection from the pool when context is entered and\n returns it when context is exited.\n\n A COMMIT is performed on the current transaction if everything went\n well. 
Otherwise transaction is ROLLBACK, and everything cleaned up.\n \"\"\"\n commit_manually = self.commit_manually and not readonly\n session = None\n try:\n # Pull connection from pool.\n session = self.session_factory()\n # Start context\n yield session\n if not readonly and not self.commit_manually:\n # Mark session as dirty.\n self.invalidate(session)\n # Success\n if commit_manually:\n session.commit()\n elif force_commit:\n # Commit like would do a succesful request.\n zope_transaction.commit()\n\n except sqlalchemy.exc.IntegrityError as e:\n logger.error(e, exc_info=True)\n if commit_manually: # pragma: no branch\n session.rollback()\n raise exceptions.IntegrityError(original=e) from e\n except sqlalchemy.exc.SQLAlchemyError as e:\n logger.error(e, exc_info=True)\n if session and commit_manually:\n session.rollback()\n raise exceptions.BackendError(original=e) from e\n finally:\n if session and self.commit_manually:\n # Give back to pool if commit done manually.\n session.close()\n\n\n# Reuse existing client if same URL.\n_CLIENTS = defaultdict(dict)\n\n\ndef create_from_config(config, prefix='', with_transaction=True):\n \"\"\"Create a PostgreSQLClient client using settings in the provided config.\n \"\"\"\n if sqlalchemy is None:\n message = ('PostgreSQL SQLAlchemy dependency missing. '\n 'Refer to installation section in documentation.')\n raise ImportWarning(message)\n\n from zope.sqlalchemy import ZopeTransactionExtension, invalidate\n from sqlalchemy.orm import sessionmaker, scoped_session\n\n settings = {**config.get_settings()}\n # Custom Kinto settings, unsupported by SQLAlchemy.\n settings.pop(prefix + 'backend', None)\n settings.pop(prefix + 'max_fetch_size', None)\n settings.pop(prefix + 'max_size_bytes', None)\n settings.pop(prefix + 'prefix', None)\n settings.pop(prefix + 'strict_json', None)\n settings.pop(prefix + 'hosts', None)\n transaction_per_request = with_transaction and settings.pop('transaction_per_request', False)\n\n url = settings[prefix + 'url']\n existing_client = _CLIENTS[transaction_per_request].get(url)\n if existing_client:\n msg = ('Reuse existing PostgreSQL connection. '\n 'Parameters {}* will be ignored.'.format(prefix))\n warnings.warn(msg)\n return existing_client\n\n # Initialize SQLAlchemy engine from settings.\n poolclass_key = prefix + 'poolclass'\n settings.setdefault(poolclass_key, ('kinto.core.storage.postgresql.'\n 'pool.QueuePoolWithMaxBacklog'))\n settings[poolclass_key] = config.maybe_dotted(settings[poolclass_key])\n engine = sqlalchemy.engine_from_config(settings, prefix=prefix, url=url)\n\n # Initialize thread-safe session factory.\n options = {}\n if transaction_per_request:\n # Plug with Pyramid transaction manager\n options['extension'] = ZopeTransactionExtension()\n session_factory = scoped_session(sessionmaker(bind=engine, **options))\n\n # Store one client per URI.\n commit_manually = (not transaction_per_request)\n client = PostgreSQLClient(session_factory, commit_manually, invalidate)\n _CLIENTS[transaction_per_request][url] = client\n return client\n", "path": "kinto/core/storage/postgresql/client.py"}]} | 3,561 | 232 |
gh_patches_debug_3285 | rasdani/github-patches | git_diff | pyro-ppl__pyro-576 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TransformedDistribution's event_shape forwards to the wrong method of the base distribution
Probably a typo:
https://github.com/uber/pyro/blob/51a2ccfe9445c7072c3150c4abe1ab1d2ac17246/pyro/distributions/transformed_distribution.py#L62
--- END ISSUE ---
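For readers skimming the record, the sketch below makes the reported typo concrete. It is a minimal, self-contained illustration written for this write-up — the class names and shape values are invented and it is not pyro's actual code; only the forwarding pattern mirrors the method under discussion.

```python
# Hedged illustration: a stand-in base distribution plus a wrapper whose
# event_shape() must delegate to the base's event_shape(), not batch_shape().
# Every name and shape here is invented for the example.
class FakeBase:
    def batch_shape(self, x=None, *args, **kwargs):
        return (10,)   # e.g. ten independent samples in the batch

    def event_shape(self, *args, **kwargs):
        return (3,)    # e.g. each sample is a 3-dimensional event


class Wrapper:
    def __init__(self, base):
        self.base_dist = base

    def batch_shape(self, x=None, *args, **kwargs):
        return self.base_dist.batch_shape(x, *args, **kwargs)

    def event_shape(self, *args, **kwargs):
        # The reported bug: this line called self.base_dist.batch_shape(...),
        # so the wrapper silently reported (10,) instead of (3,).
        return self.base_dist.event_shape(*args, **kwargs)


assert Wrapper(FakeBase()).event_shape() == (3,)
```

With the typo in place the assertion fails, because the wrapper answers questions about the event shape with the batch shape of the base distribution.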
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyro/distributions/transformed_distribution.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import torch
4 import torch.nn as nn
5 from torch.autograd import Variable
6
7 from pyro.distributions.distribution import Distribution
8 from pyro.nn import AutoRegressiveNN
9
10
11 class TransformedDistribution(Distribution):
12 """
13 Transforms the base distribution by applying a sequence of `Bijector`s to it.
14 This results in a scorable distribution (i.e. it has a `log_pdf()` method).
15
16 :param base_distribution: a (continuous) base distribution; samples from this distribution
17 are passed through the sequence of `Bijector`s to yield a sample from the
18 `TransformedDistribution`
19 :type base_distribution: pyro.distribution.Distribution
20 :param bijectors: either a single Bijector or a sequence of Bijectors wrapped in a nn.ModuleList
21 :returns: the transformed distribution
22 """
23
24 def __init__(self, base_distribution, bijectors, *args, **kwargs):
25 super(TransformedDistribution, self).__init__(*args, **kwargs)
26 self.reparameterized = base_distribution.reparameterized
27 self.base_dist = base_distribution
28 if isinstance(bijectors, Bijector):
29 self.bijectors = nn.ModuleList([bijectors])
30 elif isinstance(bijectors, nn.ModuleList):
31 for bijector in bijectors:
32 assert isinstance(bijector, Bijector), \
33 "bijectors must be a Bijector or a nn.ModuleList of Bijectors"
34 self.bijectors = bijectors
35
36 def sample(self, *args, **kwargs):
37 """
38 :returns: a sample y
39 :rtype: torch.autograd.Variable
40
41 Sample from base distribution and pass through bijector(s)
42 """
43 x = self.base_dist.sample(*args, **kwargs)
44 next_input = x
45 for bijector in self.bijectors:
46 y = bijector(next_input)
47 if bijector.add_inverse_to_cache:
48 bijector._add_intermediate_to_cache(next_input, y, 'x')
49 next_input = y
50 return next_input
51
52 def batch_shape(self, x=None, *args, **kwargs):
53 """
54 Ref: :py:meth:`pyro.distributions.distribution.Distribution.batch_shape`
55 """
56 return self.base_dist.batch_shape(x, *args, **kwargs)
57
58 def event_shape(self, *args, **kwargs):
59 """
60 Ref: :py:meth:`pyro.distributions.distribution.Distribution.event_shape`
61 """
62 return self.base_dist.batch_shape(*args, **kwargs)
63
64 def log_pdf(self, y, *args, **kwargs):
65 """
66 :param y: a value sampled from the transformed distribution
67 :type y: torch.autograd.Variable
68
69 :returns: the score (the log pdf) of y
70 :rtype: torch.autograd.Variable
71
72 Scores the sample by inverting the bijector(s) and computing the score using the score
73 of the base distribution and the log det jacobian
74 """
75 inverses = []
76 next_to_invert = y
77 for bijector in reversed(self.bijectors):
78 inverse = bijector.inverse(next_to_invert)
79 inverses.append(inverse)
80 next_to_invert = inverse
81 log_pdf_base = self.base_dist.log_pdf(inverses[-1], *args, **kwargs)
82 log_det_jacobian = self.bijectors[-1].log_det_jacobian(y, *args, **kwargs)
83 for bijector, inverse in zip(list(reversed(self.bijectors))[1:], inverses[:-1]):
84 log_det_jacobian += bijector.log_det_jacobian(inverse, *args, **kwargs)
85 return log_pdf_base - log_det_jacobian
86
87 def batch_log_pdf(self, y, *args, **kwargs):
88 raise NotImplementedError("https://github.com/uber/pyro/issues/293")
89
90
91 class Bijector(nn.Module):
92 """
93 Abstract class `Bijector`. `Bijector` are bijective transformations with computable
94 log det jacobians. They are meant for use in `TransformedDistribution`.
95 """
96
97 def __init__(self, *args, **kwargs):
98 super(Bijector, self).__init__(*args, **kwargs)
99 self.add_inverse_to_cache = False
100
101 def __call__(self, *args, **kwargs):
102 """
103 Virtual forward method
104
105 Invokes the bijection x=>y
106 """
107 raise NotImplementedError()
108
109 def inverse(self, *args, **kwargs):
110 """
111 Virtual inverse method
112
113 Inverts the bijection y => x.
114 """
115 raise NotImplementedError()
116
117 def log_det_jacobian(self, *args, **kwargs):
118 """
119 Virtual logdet jacobian method.
120
121 Computes the log det jacobian `|dy/dx|`
122 """
123 raise NotImplementedError()
124
125
126 class InverseAutoregressiveFlow(Bijector):
127 """
128 An implementation of an Inverse Autoregressive Flow. Together with the `TransformedDistribution` this
129 provides a way to create richer variational approximations.
130
131 Example usage::
132
133 >>> base_dist = Normal(...)
134 >>> iaf = InverseAutoregressiveFlow(...)
135 >>> pyro.module("my_iaf", iaf)
136 >>> iaf_dist = TransformedDistribution(base_dist, iaf)
137
138 Note that this implementation is only meant to be used in settings where the inverse of the Bijector
139 is never explicitly computed (rather the result is cached from the forward call). In the context of
140 variational inference, this means that the InverseAutoregressiveFlow should only be used in the guide,
141 i.e. in the variational distribution. In other contexts the inverse could in principle be computed but
142 this would be a (potentially) costly computation that scales with the dimension of the input (and in
143 any case support for this is not included in this implementation).
144
145 :param input_dim: dimension of input
146 :type input_dim: int
147 :param hidden_dim: hidden dimension (number of hidden units)
148 :type hidden_dim: int
149 :param sigmoid_bias: bias on the hidden units fed into the sigmoid; default=`2.0`
150 :type sigmoid_bias: float
151 :param permutation: whether the order of the inputs should be permuted (by default the conditional
152 dependence structure of the autoregression follows the sequential order)
153 :type permutation: bool
154
155 References:
156
157 1. Improving Variational Inference with Inverse Autoregressive Flow [arXiv:1606.04934]
158 Diederik P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling
159
160 2. Variational Inference with Normalizing Flows [arXiv:1505.05770]
161 Danilo Jimenez Rezende, Shakir Mohamed
162
163 3. MADE: Masked Autoencoder for Distribution Estimation [arXiv:1502.03509]
164 Mathieu Germain, Karol Gregor, Iain Murray, Hugo Larochelle
165 """
166
167 def __init__(self, input_dim, hidden_dim, sigmoid_bias=2.0, permutation=None):
168 super(InverseAutoregressiveFlow, self).__init__()
169 self.input_dim = input_dim
170 self.hidden_dim = hidden_dim
171 self.arn = AutoRegressiveNN(input_dim, hidden_dim, output_dim_multiplier=2, permutation=permutation)
172 self.sigmoid = nn.Sigmoid()
173 self.sigmoid_bias = Variable(torch.Tensor([sigmoid_bias]))
174 self._intermediates_cache = {}
175 self.add_inverse_to_cache = True
176
177 def get_arn(self):
178 """
179 :rtype: pyro.nn.AutoRegressiveNN
180
181 Return the AutoRegressiveNN associated with the InverseAutoregressiveFlow
182 """
183 return self.arn
184
185 def __call__(self, x, *args, **kwargs):
186 """
187 :param x: the input into the bijection
188 :type x: torch.autograd.Variable
189
190 Invokes the bijection x=>y; in the prototypical context of a TransformedDistribution `x` is a
191 sample from the base distribution (or the output of a previous flow)
192 """
193 hidden = self.arn(x)
194 sigma = self.sigmoid(hidden[:, 0:self.input_dim] + self.sigmoid_bias.type_as(hidden))
195 mean = hidden[:, self.input_dim:]
196 y = sigma * x + (Variable(torch.ones(sigma.size())).type_as(sigma) - sigma) * mean
197 self._add_intermediate_to_cache(sigma, y, 'sigma')
198 return y
199
200 def inverse(self, y, *args, **kwargs):
201 """
202 :param y: the output of the bijection
203 :type y: torch.autograd.Variable
204
205 Inverts y => x. As noted above, this implementation is incapable of inverting arbitrary values
206 `y`; rather it assumes `y` is the result of a previously computed application of the bijector
207 to some `x` (which was cached on the forward call)
208 """
209 if (y, 'x') in self._intermediates_cache:
210 x = self._intermediates_cache.pop((y, 'x'))
211 return x
212 else:
213 raise KeyError("Bijector InverseAutoregressiveFlow expected to find" +
214 "key in intermediates cache but didn't")
215
216 def _add_intermediate_to_cache(self, intermediate, y, name):
217 """
218 Internal function used to cache intermediate results computed during the forward call
219 """
220 assert((y, name) not in self._intermediates_cache),\
221 "key collision in _add_intermediate_to_cache"
222 self._intermediates_cache[(y, name)] = intermediate
223
224 def log_det_jacobian(self, y, *args, **kwargs):
225 """
226 Calculates the determinant of the log jacobian
227 """
228 if (y, 'sigma') in self._intermediates_cache:
229 sigma = self._intermediates_cache.pop((y, 'sigma'))
230 else:
231 raise KeyError("Bijector InverseAutoregressiveFlow expected to find" +
232 "key in intermediates cache but didn't")
233 if 'log_pdf_mask' in kwargs:
234 return torch.sum(kwargs['log_pdf_mask'] * torch.log(sigma))
235 return torch.sum(torch.log(sigma))
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyro/distributions/transformed_distribution.py b/pyro/distributions/transformed_distribution.py
--- a/pyro/distributions/transformed_distribution.py
+++ b/pyro/distributions/transformed_distribution.py
@@ -59,7 +59,7 @@
"""
Ref: :py:meth:`pyro.distributions.distribution.Distribution.event_shape`
"""
- return self.base_dist.batch_shape(*args, **kwargs)
+ return self.base_dist.event_shape(*args, **kwargs)
def log_pdf(self, y, *args, **kwargs):
"""
| {"golden_diff": "diff --git a/pyro/distributions/transformed_distribution.py b/pyro/distributions/transformed_distribution.py\n--- a/pyro/distributions/transformed_distribution.py\n+++ b/pyro/distributions/transformed_distribution.py\n@@ -59,7 +59,7 @@\n \"\"\"\n Ref: :py:meth:`pyro.distributions.distribution.Distribution.event_shape`\n \"\"\"\n- return self.base_dist.batch_shape(*args, **kwargs)\n+ return self.base_dist.event_shape(*args, **kwargs)\n \n def log_pdf(self, y, *args, **kwargs):\n \"\"\"\n", "issue": "TransformedDistribution's event_shape forwards to incorrect base distribution's method\nProbably a typo:\r\n\r\nhttps://github.com/uber/pyro/blob/51a2ccfe9445c7072c3150c4abe1ab1d2ac17246/pyro/distributions/transformed_distribution.py#L62\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nfrom pyro.distributions.distribution import Distribution\nfrom pyro.nn import AutoRegressiveNN\n\n\nclass TransformedDistribution(Distribution):\n \"\"\"\n Transforms the base distribution by applying a sequence of `Bijector`s to it.\n This results in a scorable distribution (i.e. it has a `log_pdf()` method).\n\n :param base_distribution: a (continuous) base distribution; samples from this distribution\n are passed through the sequence of `Bijector`s to yield a sample from the\n `TransformedDistribution`\n :type base_distribution: pyro.distribution.Distribution\n :param bijectors: either a single Bijector or a sequence of Bijectors wrapped in a nn.ModuleList\n :returns: the transformed distribution\n \"\"\"\n\n def __init__(self, base_distribution, bijectors, *args, **kwargs):\n super(TransformedDistribution, self).__init__(*args, **kwargs)\n self.reparameterized = base_distribution.reparameterized\n self.base_dist = base_distribution\n if isinstance(bijectors, Bijector):\n self.bijectors = nn.ModuleList([bijectors])\n elif isinstance(bijectors, nn.ModuleList):\n for bijector in bijectors:\n assert isinstance(bijector, Bijector), \\\n \"bijectors must be a Bijector or a nn.ModuleList of Bijectors\"\n self.bijectors = bijectors\n\n def sample(self, *args, **kwargs):\n \"\"\"\n :returns: a sample y\n :rtype: torch.autograd.Variable\n\n Sample from base distribution and pass through bijector(s)\n \"\"\"\n x = self.base_dist.sample(*args, **kwargs)\n next_input = x\n for bijector in self.bijectors:\n y = bijector(next_input)\n if bijector.add_inverse_to_cache:\n bijector._add_intermediate_to_cache(next_input, y, 'x')\n next_input = y\n return next_input\n\n def batch_shape(self, x=None, *args, **kwargs):\n \"\"\"\n Ref: :py:meth:`pyro.distributions.distribution.Distribution.batch_shape`\n \"\"\"\n return self.base_dist.batch_shape(x, *args, **kwargs)\n\n def event_shape(self, *args, **kwargs):\n \"\"\"\n Ref: :py:meth:`pyro.distributions.distribution.Distribution.event_shape`\n \"\"\"\n return self.base_dist.batch_shape(*args, **kwargs)\n\n def log_pdf(self, y, *args, **kwargs):\n \"\"\"\n :param y: a value sampled from the transformed distribution\n :type y: torch.autograd.Variable\n\n :returns: the score (the log pdf) of y\n :rtype: torch.autograd.Variable\n\n Scores the sample by inverting the bijector(s) and computing the score using the score\n of the base distribution and the log det jacobian\n \"\"\"\n inverses = []\n next_to_invert = y\n for bijector in reversed(self.bijectors):\n inverse = bijector.inverse(next_to_invert)\n inverses.append(inverse)\n 
next_to_invert = inverse\n log_pdf_base = self.base_dist.log_pdf(inverses[-1], *args, **kwargs)\n log_det_jacobian = self.bijectors[-1].log_det_jacobian(y, *args, **kwargs)\n for bijector, inverse in zip(list(reversed(self.bijectors))[1:], inverses[:-1]):\n log_det_jacobian += bijector.log_det_jacobian(inverse, *args, **kwargs)\n return log_pdf_base - log_det_jacobian\n\n def batch_log_pdf(self, y, *args, **kwargs):\n raise NotImplementedError(\"https://github.com/uber/pyro/issues/293\")\n\n\nclass Bijector(nn.Module):\n \"\"\"\n Abstract class `Bijector`. `Bijector` are bijective transformations with computable\n log det jacobians. They are meant for use in `TransformedDistribution`.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Bijector, self).__init__(*args, **kwargs)\n self.add_inverse_to_cache = False\n\n def __call__(self, *args, **kwargs):\n \"\"\"\n Virtual forward method\n\n Invokes the bijection x=>y\n \"\"\"\n raise NotImplementedError()\n\n def inverse(self, *args, **kwargs):\n \"\"\"\n Virtual inverse method\n\n Inverts the bijection y => x.\n \"\"\"\n raise NotImplementedError()\n\n def log_det_jacobian(self, *args, **kwargs):\n \"\"\"\n Virtual logdet jacobian method.\n\n Computes the log det jacobian `|dy/dx|`\n \"\"\"\n raise NotImplementedError()\n\n\nclass InverseAutoregressiveFlow(Bijector):\n \"\"\"\n An implementation of an Inverse Autoregressive Flow. Together with the `TransformedDistribution` this\n provides a way to create richer variational approximations.\n\n Example usage::\n\n >>> base_dist = Normal(...)\n >>> iaf = InverseAutoregressiveFlow(...)\n >>> pyro.module(\"my_iaf\", iaf)\n >>> iaf_dist = TransformedDistribution(base_dist, iaf)\n\n Note that this implementation is only meant to be used in settings where the inverse of the Bijector\n is never explicitly computed (rather the result is cached from the forward call). In the context of\n variational inference, this means that the InverseAutoregressiveFlow should only be used in the guide,\n i.e. in the variational distribution. In other contexts the inverse could in principle be computed but\n this would be a (potentially) costly computation that scales with the dimension of the input (and in\n any case support for this is not included in this implementation).\n\n :param input_dim: dimension of input\n :type input_dim: int\n :param hidden_dim: hidden dimension (number of hidden units)\n :type hidden_dim: int\n :param sigmoid_bias: bias on the hidden units fed into the sigmoid; default=`2.0`\n :type sigmoid_bias: float\n :param permutation: whether the order of the inputs should be permuted (by default the conditional\n dependence structure of the autoregression follows the sequential order)\n :type permutation: bool\n\n References:\n\n 1. Improving Variational Inference with Inverse Autoregressive Flow [arXiv:1606.04934]\n Diederik P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling\n\n 2. Variational Inference with Normalizing Flows [arXiv:1505.05770]\n Danilo Jimenez Rezende, Shakir Mohamed\n\n 3. 
MADE: Masked Autoencoder for Distribution Estimation [arXiv:1502.03509]\n Mathieu Germain, Karol Gregor, Iain Murray, Hugo Larochelle\n \"\"\"\n\n def __init__(self, input_dim, hidden_dim, sigmoid_bias=2.0, permutation=None):\n super(InverseAutoregressiveFlow, self).__init__()\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.arn = AutoRegressiveNN(input_dim, hidden_dim, output_dim_multiplier=2, permutation=permutation)\n self.sigmoid = nn.Sigmoid()\n self.sigmoid_bias = Variable(torch.Tensor([sigmoid_bias]))\n self._intermediates_cache = {}\n self.add_inverse_to_cache = True\n\n def get_arn(self):\n \"\"\"\n :rtype: pyro.nn.AutoRegressiveNN\n\n Return the AutoRegressiveNN associated with the InverseAutoregressiveFlow\n \"\"\"\n return self.arn\n\n def __call__(self, x, *args, **kwargs):\n \"\"\"\n :param x: the input into the bijection\n :type x: torch.autograd.Variable\n\n Invokes the bijection x=>y; in the prototypical context of a TransformedDistribution `x` is a\n sample from the base distribution (or the output of a previous flow)\n \"\"\"\n hidden = self.arn(x)\n sigma = self.sigmoid(hidden[:, 0:self.input_dim] + self.sigmoid_bias.type_as(hidden))\n mean = hidden[:, self.input_dim:]\n y = sigma * x + (Variable(torch.ones(sigma.size())).type_as(sigma) - sigma) * mean\n self._add_intermediate_to_cache(sigma, y, 'sigma')\n return y\n\n def inverse(self, y, *args, **kwargs):\n \"\"\"\n :param y: the output of the bijection\n :type y: torch.autograd.Variable\n\n Inverts y => x. As noted above, this implementation is incapable of inverting arbitrary values\n `y`; rather it assumes `y` is the result of a previously computed application of the bijector\n to some `x` (which was cached on the forward call)\n \"\"\"\n if (y, 'x') in self._intermediates_cache:\n x = self._intermediates_cache.pop((y, 'x'))\n return x\n else:\n raise KeyError(\"Bijector InverseAutoregressiveFlow expected to find\" +\n \"key in intermediates cache but didn't\")\n\n def _add_intermediate_to_cache(self, intermediate, y, name):\n \"\"\"\n Internal function used to cache intermediate results computed during the forward call\n \"\"\"\n assert((y, name) not in self._intermediates_cache),\\\n \"key collision in _add_intermediate_to_cache\"\n self._intermediates_cache[(y, name)] = intermediate\n\n def log_det_jacobian(self, y, *args, **kwargs):\n \"\"\"\n Calculates the determinant of the log jacobian\n \"\"\"\n if (y, 'sigma') in self._intermediates_cache:\n sigma = self._intermediates_cache.pop((y, 'sigma'))\n else:\n raise KeyError(\"Bijector InverseAutoregressiveFlow expected to find\" +\n \"key in intermediates cache but didn't\")\n if 'log_pdf_mask' in kwargs:\n return torch.sum(kwargs['log_pdf_mask'] * torch.log(sigma))\n return torch.sum(torch.log(sigma))\n", "path": "pyro/distributions/transformed_distribution.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nfrom pyro.distributions.distribution import Distribution\nfrom pyro.nn import AutoRegressiveNN\n\n\nclass TransformedDistribution(Distribution):\n \"\"\"\n Transforms the base distribution by applying a sequence of `Bijector`s to it.\n This results in a scorable distribution (i.e. 
it has a `log_pdf()` method).\n\n :param base_distribution: a (continuous) base distribution; samples from this distribution\n are passed through the sequence of `Bijector`s to yield a sample from the\n `TransformedDistribution`\n :type base_distribution: pyro.distribution.Distribution\n :param bijectors: either a single Bijector or a sequence of Bijectors wrapped in a nn.ModuleList\n :returns: the transformed distribution\n \"\"\"\n\n def __init__(self, base_distribution, bijectors, *args, **kwargs):\n super(TransformedDistribution, self).__init__(*args, **kwargs)\n self.reparameterized = base_distribution.reparameterized\n self.base_dist = base_distribution\n if isinstance(bijectors, Bijector):\n self.bijectors = nn.ModuleList([bijectors])\n elif isinstance(bijectors, nn.ModuleList):\n for bijector in bijectors:\n assert isinstance(bijector, Bijector), \\\n \"bijectors must be a Bijector or a nn.ModuleList of Bijectors\"\n self.bijectors = bijectors\n\n def sample(self, *args, **kwargs):\n \"\"\"\n :returns: a sample y\n :rtype: torch.autograd.Variable\n\n Sample from base distribution and pass through bijector(s)\n \"\"\"\n x = self.base_dist.sample(*args, **kwargs)\n next_input = x\n for bijector in self.bijectors:\n y = bijector(next_input)\n if bijector.add_inverse_to_cache:\n bijector._add_intermediate_to_cache(next_input, y, 'x')\n next_input = y\n return next_input\n\n def batch_shape(self, x=None, *args, **kwargs):\n \"\"\"\n Ref: :py:meth:`pyro.distributions.distribution.Distribution.batch_shape`\n \"\"\"\n return self.base_dist.batch_shape(x, *args, **kwargs)\n\n def event_shape(self, *args, **kwargs):\n \"\"\"\n Ref: :py:meth:`pyro.distributions.distribution.Distribution.event_shape`\n \"\"\"\n return self.base_dist.event_shape(*args, **kwargs)\n\n def log_pdf(self, y, *args, **kwargs):\n \"\"\"\n :param y: a value sampled from the transformed distribution\n :type y: torch.autograd.Variable\n\n :returns: the score (the log pdf) of y\n :rtype: torch.autograd.Variable\n\n Scores the sample by inverting the bijector(s) and computing the score using the score\n of the base distribution and the log det jacobian\n \"\"\"\n inverses = []\n next_to_invert = y\n for bijector in reversed(self.bijectors):\n inverse = bijector.inverse(next_to_invert)\n inverses.append(inverse)\n next_to_invert = inverse\n log_pdf_base = self.base_dist.log_pdf(inverses[-1], *args, **kwargs)\n log_det_jacobian = self.bijectors[-1].log_det_jacobian(y, *args, **kwargs)\n for bijector, inverse in zip(list(reversed(self.bijectors))[1:], inverses[:-1]):\n log_det_jacobian += bijector.log_det_jacobian(inverse, *args, **kwargs)\n return log_pdf_base - log_det_jacobian\n\n def batch_log_pdf(self, y, *args, **kwargs):\n raise NotImplementedError(\"https://github.com/uber/pyro/issues/293\")\n\n\nclass Bijector(nn.Module):\n \"\"\"\n Abstract class `Bijector`. `Bijector` are bijective transformations with computable\n log det jacobians. 
They are meant for use in `TransformedDistribution`.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Bijector, self).__init__(*args, **kwargs)\n self.add_inverse_to_cache = False\n\n def __call__(self, *args, **kwargs):\n \"\"\"\n Virtual forward method\n\n Invokes the bijection x=>y\n \"\"\"\n raise NotImplementedError()\n\n def inverse(self, *args, **kwargs):\n \"\"\"\n Virtual inverse method\n\n Inverts the bijection y => x.\n \"\"\"\n raise NotImplementedError()\n\n def log_det_jacobian(self, *args, **kwargs):\n \"\"\"\n Virtual logdet jacobian method.\n\n Computes the log det jacobian `|dy/dx|`\n \"\"\"\n raise NotImplementedError()\n\n\nclass InverseAutoregressiveFlow(Bijector):\n \"\"\"\n An implementation of an Inverse Autoregressive Flow. Together with the `TransformedDistribution` this\n provides a way to create richer variational approximations.\n\n Example usage::\n\n >>> base_dist = Normal(...)\n >>> iaf = InverseAutoregressiveFlow(...)\n >>> pyro.module(\"my_iaf\", iaf)\n >>> iaf_dist = TransformedDistribution(base_dist, iaf)\n\n Note that this implementation is only meant to be used in settings where the inverse of the Bijector\n is never explicitly computed (rather the result is cached from the forward call). In the context of\n variational inference, this means that the InverseAutoregressiveFlow should only be used in the guide,\n i.e. in the variational distribution. In other contexts the inverse could in principle be computed but\n this would be a (potentially) costly computation that scales with the dimension of the input (and in\n any case support for this is not included in this implementation).\n\n :param input_dim: dimension of input\n :type input_dim: int\n :param hidden_dim: hidden dimension (number of hidden units)\n :type hidden_dim: int\n :param sigmoid_bias: bias on the hidden units fed into the sigmoid; default=`2.0`\n :type sigmoid_bias: float\n :param permutation: whether the order of the inputs should be permuted (by default the conditional\n dependence structure of the autoregression follows the sequential order)\n :type permutation: bool\n\n References:\n\n 1. Improving Variational Inference with Inverse Autoregressive Flow [arXiv:1606.04934]\n Diederik P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling\n\n 2. Variational Inference with Normalizing Flows [arXiv:1505.05770]\n Danilo Jimenez Rezende, Shakir Mohamed\n\n 3. 
MADE: Masked Autoencoder for Distribution Estimation [arXiv:1502.03509]\n Mathieu Germain, Karol Gregor, Iain Murray, Hugo Larochelle\n \"\"\"\n\n def __init__(self, input_dim, hidden_dim, sigmoid_bias=2.0, permutation=None):\n super(InverseAutoregressiveFlow, self).__init__()\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.arn = AutoRegressiveNN(input_dim, hidden_dim, output_dim_multiplier=2, permutation=permutation)\n self.sigmoid = nn.Sigmoid()\n self.sigmoid_bias = Variable(torch.Tensor([sigmoid_bias]))\n self._intermediates_cache = {}\n self.add_inverse_to_cache = True\n\n def get_arn(self):\n \"\"\"\n :rtype: pyro.nn.AutoRegressiveNN\n\n Return the AutoRegressiveNN associated with the InverseAutoregressiveFlow\n \"\"\"\n return self.arn\n\n def __call__(self, x, *args, **kwargs):\n \"\"\"\n :param x: the input into the bijection\n :type x: torch.autograd.Variable\n\n Invokes the bijection x=>y; in the prototypical context of a TransformedDistribution `x` is a\n sample from the base distribution (or the output of a previous flow)\n \"\"\"\n hidden = self.arn(x)\n sigma = self.sigmoid(hidden[:, 0:self.input_dim] + self.sigmoid_bias.type_as(hidden))\n mean = hidden[:, self.input_dim:]\n y = sigma * x + (Variable(torch.ones(sigma.size())).type_as(sigma) - sigma) * mean\n self._add_intermediate_to_cache(sigma, y, 'sigma')\n return y\n\n def inverse(self, y, *args, **kwargs):\n \"\"\"\n :param y: the output of the bijection\n :type y: torch.autograd.Variable\n\n Inverts y => x. As noted above, this implementation is incapable of inverting arbitrary values\n `y`; rather it assumes `y` is the result of a previously computed application of the bijector\n to some `x` (which was cached on the forward call)\n \"\"\"\n if (y, 'x') in self._intermediates_cache:\n x = self._intermediates_cache.pop((y, 'x'))\n return x\n else:\n raise KeyError(\"Bijector InverseAutoregressiveFlow expected to find\" +\n \"key in intermediates cache but didn't\")\n\n def _add_intermediate_to_cache(self, intermediate, y, name):\n \"\"\"\n Internal function used to cache intermediate results computed during the forward call\n \"\"\"\n assert((y, name) not in self._intermediates_cache),\\\n \"key collision in _add_intermediate_to_cache\"\n self._intermediates_cache[(y, name)] = intermediate\n\n def log_det_jacobian(self, y, *args, **kwargs):\n \"\"\"\n Calculates the determinant of the log jacobian\n \"\"\"\n if (y, 'sigma') in self._intermediates_cache:\n sigma = self._intermediates_cache.pop((y, 'sigma'))\n else:\n raise KeyError(\"Bijector InverseAutoregressiveFlow expected to find\" +\n \"key in intermediates cache but didn't\")\n if 'log_pdf_mask' in kwargs:\n return torch.sum(kwargs['log_pdf_mask'] * torch.log(sigma))\n return torch.sum(torch.log(sigma))\n", "path": "pyro/distributions/transformed_distribution.py"}]} | 3,228 | 128 |
gh_patches_debug_14265 | rasdani/github-patches | git_diff | CiviWiki__OpenCiviWiki-1493 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reimplement user categories feature
We recently removed some features from the user Profile view and now want to reimplement them using only Django.
This task will be to add a "categories" field to the User Profile ("settings") view).
## Task
All of these tasks should be done in the `accounts` app.
- [ ] determine how to add a multi-select field to a Django form
- [ ] users should be able to select zero or more categories when editing their profile
- [ ] when a user updates their categories, the categories should be listed on the user profile page
--- END ISSUE ---
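The first task item asks how a multi-select field can be added to a Django form. As a point of reference only, one common pattern is sketched below; this is not the project's actual implementation, and it assumes `Profile` gains a `ManyToManyField` named `categories` (no such field exists in the code shown later, so treat the name as hypothetical). The remaining field names appear on the `Profile` model in the code below.

```python
# Hedged sketch: exposing an assumed Profile.categories ManyToManyField as a
# multi-select input on a profile edit form. The categories field is an
# assumption made for illustration.
from django import forms

from accounts.models import Profile


class ProfileEditForm(forms.ModelForm):
    class Meta:
        model = Profile
        fields = [
            "first_name",
            "last_name",
            "about_me",
            "profile_image",
            "categories",  # assumed ManyToManyField on Profile
        ]
        widgets = {
            # Renders the relation as checkboxes so a user can pick zero or
            # more categories; forms.SelectMultiple is an alternative.
            "categories": forms.CheckboxSelectMultiple(),
        }
```

On the profile page itself, the chosen categories could then be listed by iterating over `profile.categories.all` in the template — again assuming the hypothetical field name.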
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `project/accounts/views.py`
Content:
```
1 """
2 Class based views.
3
4 This module will include views for the accounts app.
5 """
6
7 from accounts.authentication import account_activation_token, send_activation_email
8 from accounts.forms import ProfileEditForm, UserRegistrationForm
9 from accounts.models import Profile
10 from django.conf import settings
11 from django.contrib.auth import get_user_model, login
12 from django.contrib.auth import views as auth_views
13 from django.contrib.auth.decorators import login_required
14 from django.contrib.auth.mixins import LoginRequiredMixin
15 from django.contrib.sites.shortcuts import get_current_site
16 from django.http import HttpResponseRedirect
17 from django.shortcuts import get_object_or_404, redirect
18 from django.template.response import TemplateResponse
19 from django.urls import reverse, reverse_lazy
20 from django.utils.encoding import force_str
21 from django.utils.http import urlsafe_base64_decode
22 from django.views import View
23 from django.views.generic.edit import FormView, UpdateView
24
25
26 class ProfileFollow(LoginRequiredMixin, View):
27 def get(self, request, *args, **kwargs):
28 # Prevent users from following themselves.
29 if request.user.username == kwargs["username"]:
30 pass
31 else:
32 following_profile = Profile.objects.get(user__username=kwargs["username"])
33
34 self.request.user.profile.following.add(following_profile)
35
36 redirect_to = reverse("profile", kwargs={"username": kwargs["username"]})
37
38 return HttpResponseRedirect(redirect_to)
39
40
41 class ProfileUnfollow(LoginRequiredMixin, View):
42 def get(self, request, *args, **kwargs):
43 # Prevent users from following themselves.
44 if request.user.username == kwargs["username"]:
45 pass
46 else:
47 following_profile = Profile.objects.get(user__username=kwargs["username"])
48
49 self.request.user.profile.following.remove(following_profile)
50
51 redirect_to = reverse("profile", kwargs={"username": kwargs["username"]})
52
53 return HttpResponseRedirect(redirect_to)
54
55
56 class RegisterView(FormView):
57 """
58 A form view that handles user registration.
59 """
60
61 template_name = "accounts/register/register.html"
62 form_class = UserRegistrationForm
63 success_url = "/"
64
65 def _create_user(self, form):
66 username = form.cleaned_data["username"]
67 password = form.cleaned_data["password"]
68 email = form.cleaned_data["email"]
69 user = get_user_model().objects.create_user(username, email, password)
70 return user
71
72 def _send_email(self, user):
73 domain = get_current_site(self.request).domain
74 send_activation_email(user, domain)
75
76 def _login(self, user):
77 login(self.request, user)
78
79 def form_valid(self, form):
80 user = self._create_user(form)
81
82 self._send_email(user)
83 self._login(user)
84
85 return super(RegisterView, self).form_valid(form)
86
87
88 class ProfileActivationView(View):
89 """
90 This shows different views to the user when they are verifying
91 their account based on whether they are already verified or not.
92 """
93
94 def get(self, request, uidb64, token):
95
96 try:
97 uid = force_str(urlsafe_base64_decode(uidb64))
98 user = get_user_model().objects.get(pk=uid)
99
100 except (TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):
101 user = None
102
103 redirect_link = {"href": "/", "label": "Back to Main"}
104
105 template_var = {
106 "link": redirect_link,
107 }
108
109 if user is not None and account_activation_token.check_token(user, token):
110 profile = user.profile
111
112 if profile.is_verified:
113 template_var["title"] = "Email Already Verified"
114 template_var["content"] = "You have already verified your email."
115 else:
116 profile.is_verified = True
117 profile.save()
118
119 template_var["title"] = "Email Verification Successful"
120 template_var["content"] = "Thank you for verifying your email."
121 else:
122 # invalid link
123 template_var["title"] = "Email Verification Error"
124 template_var["content"] = "Email could not be verified"
125
126 return TemplateResponse(request, "general_message.html", template_var)
127
128
129 class PasswordResetView(auth_views.PasswordResetView):
130 template_name = "accounts/users/password_reset.html"
131 email_template_name = "accounts/users/password_reset_email.html"
132 subject_template_name = "accounts/users/password_reset_subject.txt"
133 from_email = settings.EMAIL_HOST_USER
134 success_url = reverse_lazy("accounts_password_reset_done")
135
136
137 class PasswordResetDoneView(auth_views.PasswordResetDoneView):
138 template_name = "accounts/users/password_reset_done.html"
139
140
141 class PasswordResetConfirmView(auth_views.PasswordResetConfirmView):
142 template_name = "accounts/users/password_reset_confirm.html"
143 success_url = reverse_lazy("accounts_password_reset_complete")
144
145
146 class PasswordResetCompleteView(auth_views.PasswordResetCompleteView):
147 template_name = "accounts/users/password_reset_complete.html"
148
149
150 class SettingsView(LoginRequiredMixin, UpdateView):
151 """A form view to edit Profile"""
152
153 login_url = "accounts_login"
154 form_class = ProfileEditForm
155 success_url = reverse_lazy("accounts_settings")
156 template_name = "accounts/settings.html"
157
158 def get_object(self, queryset=None):
159 return Profile.objects.get(user=self.request.user)
160
161 def get_initial(self):
162 profile = Profile.objects.get(user=self.request.user)
163 self.initial.update(
164 {
165 "username": profile.user.username,
166 "email": profile.user.email,
167 "first_name": profile.first_name or None,
168 "last_name": profile.last_name or None,
169 "about_me": profile.about_me or None,
170 "profile_image": profile.profile_image or None,
171 }
172 )
173 return super(SettingsView, self).get_initial()
174
175
176 class UserProfileView(LoginRequiredMixin, View):
177 """A view that shows profile for authorized users"""
178
179 def get(self, request, username=None):
180 profile = get_object_or_404(Profile, user__username=username)
181
182 return TemplateResponse(
183 request,
184 "account.html",
185 {
186 "profile": profile,
187 },
188 )
189
190
191 class UserFollowers(LoginRequiredMixin, View):
192 """A view that shows the followers for authorized users"""
193
194 def get(self, request, username=None):
195 profile = get_object_or_404(Profile, user__username=username)
196
197 return TemplateResponse(
198 request,
199 "user_followers.html",
200 {
201 "profile": profile,
202 },
203 )
204
205
206 class ProfileFollowing(LoginRequiredMixin, View):
207 """
208 A view that shows list of profiles
209 that profile with given username is following
210 """
211
212 def get(self, request, username=None):
213 profile = get_object_or_404(Profile, user__username=username)
214
215 return TemplateResponse(
216 request,
217 "profile_following.html",
218 {
219 "profile": profile,
220 },
221 )
222
223
224 class UserCivis(LoginRequiredMixin, View):
225 """
226 A view that shows list of civis
227 that profile with given username created
228 """
229
230 def get(self, request, username=None):
231 profile = get_object_or_404(Profile, user__username=username)
232 user = profile.user
233 civis = user.civis.all()
234
235 return TemplateResponse(
236 request,
237 "user_civis.html",
238 {"profile": profile, "civis": civis},
239 )
240
241
242 @login_required
243 def expunge_user(request):
244 """
245 Delete User Information
246 """
247
248 user_model = get_user_model()
249 user = get_object_or_404(user_model, username=request.user.username)
250
251 profile = get_object_or_404(Profile, user=user)
252
253 # Expunge personally identifiable data in user
254 expunged_user_data = {
255 "is_active": False,
256 "email": "",
257 "first_name": "",
258 "last_name": "",
259 "username": f"expunged-{ user.id }",
260 }
261 user.__dict__.update(expunged_user_data)
262 user.save()
263
264 # Expunge personally identifiable data in profile
265 expunged_profile_data = {
266 "first_name": "",
267 "last_name": "",
268 "about_me": "",
269 }
270 profile.__dict__.update(expunged_profile_data)
271 profile.save()
272
273 return redirect("/")
274
275
276 class UserIssues(LoginRequiredMixin, View):
277 def get(self, request, username=None):
278 profile = get_object_or_404(Profile, user__username=username)
279 user = profile.user
280 civis = user.civis.all()
281 followers = profile.followers.all()
282
283 return TemplateResponse(
284 request,
285 "user_civis.html",
286 {"profile": profile, "followers": followers, "civis": civis},
287 )
288
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/project/accounts/views.py b/project/accounts/views.py
--- a/project/accounts/views.py
+++ b/project/accounts/views.py
@@ -178,7 +178,6 @@
def get(self, request, username=None):
profile = get_object_or_404(Profile, user__username=username)
-
return TemplateResponse(
request,
"account.html",
@@ -193,7 +192,6 @@
def get(self, request, username=None):
profile = get_object_or_404(Profile, user__username=username)
-
return TemplateResponse(
request,
"user_followers.html",
| {"golden_diff": "diff --git a/project/accounts/views.py b/project/accounts/views.py\n--- a/project/accounts/views.py\n+++ b/project/accounts/views.py\n@@ -178,7 +178,6 @@\n \n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n-\n return TemplateResponse(\n request,\n \"account.html\",\n@@ -193,7 +192,6 @@\n \n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n-\n return TemplateResponse(\n request,\n \"user_followers.html\",\n", "issue": "Reimplement user categories feature\nWe recently removed some features from the user Profile view and now want to reimplement them using only Django.\r\n\r\nThis task will be to add a \"categories\" field to the User Profile (\"settings\") view).\r\n\r\n## Task\r\n\r\nAll of these tasks should be done in the `accounts` app.\r\n\r\n- [ ] determine how to add a multi-select field to a Django form\r\n- [ ] users should be able to select zero or more categories when editing their profile\r\n- [ ] when a user updates their categories, the categories should be listed on the user profile page\r\n\n", "before_files": [{"content": "\"\"\"\nClass based views.\n\nThis module will include views for the accounts app.\n\"\"\"\n\nfrom accounts.authentication import account_activation_token, send_activation_email\nfrom accounts.forms import ProfileEditForm, UserRegistrationForm\nfrom accounts.models import Profile\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model, login\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse, reverse_lazy\nfrom django.utils.encoding import force_str\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.views import View\nfrom django.views.generic.edit import FormView, UpdateView\n\n\nclass ProfileFollow(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n # Prevent users from following themselves.\n if request.user.username == kwargs[\"username\"]:\n pass\n else:\n following_profile = Profile.objects.get(user__username=kwargs[\"username\"])\n\n self.request.user.profile.following.add(following_profile)\n\n redirect_to = reverse(\"profile\", kwargs={\"username\": kwargs[\"username\"]})\n\n return HttpResponseRedirect(redirect_to)\n\n\nclass ProfileUnfollow(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n # Prevent users from following themselves.\n if request.user.username == kwargs[\"username\"]:\n pass\n else:\n following_profile = Profile.objects.get(user__username=kwargs[\"username\"])\n\n self.request.user.profile.following.remove(following_profile)\n\n redirect_to = reverse(\"profile\", kwargs={\"username\": kwargs[\"username\"]})\n\n return HttpResponseRedirect(redirect_to)\n\n\nclass RegisterView(FormView):\n \"\"\"\n A form view that handles user registration.\n \"\"\"\n\n template_name = \"accounts/register/register.html\"\n form_class = UserRegistrationForm\n success_url = \"/\"\n\n def _create_user(self, form):\n username = form.cleaned_data[\"username\"]\n password = form.cleaned_data[\"password\"]\n email = form.cleaned_data[\"email\"]\n user = 
get_user_model().objects.create_user(username, email, password)\n return user\n\n def _send_email(self, user):\n domain = get_current_site(self.request).domain\n send_activation_email(user, domain)\n\n def _login(self, user):\n login(self.request, user)\n\n def form_valid(self, form):\n user = self._create_user(form)\n\n self._send_email(user)\n self._login(user)\n\n return super(RegisterView, self).form_valid(form)\n\n\nclass ProfileActivationView(View):\n \"\"\"\n This shows different views to the user when they are verifying\n their account based on whether they are already verified or not.\n \"\"\"\n\n def get(self, request, uidb64, token):\n\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = get_user_model().objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):\n user = None\n\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n\n template_var = {\n \"link\": redirect_link,\n }\n\n if user is not None and account_activation_token.check_token(user, token):\n profile = user.profile\n\n if profile.is_verified:\n template_var[\"title\"] = \"Email Already Verified\"\n template_var[\"content\"] = \"You have already verified your email.\"\n else:\n profile.is_verified = True\n profile.save()\n\n template_var[\"title\"] = \"Email Verification Successful\"\n template_var[\"content\"] = \"Thank you for verifying your email.\"\n else:\n # invalid link\n template_var[\"title\"] = \"Email Verification Error\"\n template_var[\"content\"] = \"Email could not be verified\"\n\n return TemplateResponse(request, \"general_message.html\", template_var)\n\n\nclass PasswordResetView(auth_views.PasswordResetView):\n template_name = \"accounts/users/password_reset.html\"\n email_template_name = \"accounts/users/password_reset_email.html\"\n subject_template_name = \"accounts/users/password_reset_subject.txt\"\n from_email = settings.EMAIL_HOST_USER\n success_url = reverse_lazy(\"accounts_password_reset_done\")\n\n\nclass PasswordResetDoneView(auth_views.PasswordResetDoneView):\n template_name = \"accounts/users/password_reset_done.html\"\n\n\nclass PasswordResetConfirmView(auth_views.PasswordResetConfirmView):\n template_name = \"accounts/users/password_reset_confirm.html\"\n success_url = reverse_lazy(\"accounts_password_reset_complete\")\n\n\nclass PasswordResetCompleteView(auth_views.PasswordResetCompleteView):\n template_name = \"accounts/users/password_reset_complete.html\"\n\n\nclass SettingsView(LoginRequiredMixin, UpdateView):\n \"\"\"A form view to edit Profile\"\"\"\n\n login_url = \"accounts_login\"\n form_class = ProfileEditForm\n success_url = reverse_lazy(\"accounts_settings\")\n template_name = \"accounts/settings.html\"\n\n def get_object(self, queryset=None):\n return Profile.objects.get(user=self.request.user)\n\n def get_initial(self):\n profile = Profile.objects.get(user=self.request.user)\n self.initial.update(\n {\n \"username\": profile.user.username,\n \"email\": profile.user.email,\n \"first_name\": profile.first_name or None,\n \"last_name\": profile.last_name or None,\n \"about_me\": profile.about_me or None,\n \"profile_image\": profile.profile_image or None,\n }\n )\n return super(SettingsView, self).get_initial()\n\n\nclass UserProfileView(LoginRequiredMixin, View):\n \"\"\"A view that shows profile for authorized users\"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n\n return TemplateResponse(\n request,\n \"account.html\",\n {\n 
\"profile\": profile,\n },\n )\n\n\nclass UserFollowers(LoginRequiredMixin, View):\n \"\"\"A view that shows the followers for authorized users\"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n\n return TemplateResponse(\n request,\n \"user_followers.html\",\n {\n \"profile\": profile,\n },\n )\n\n\nclass ProfileFollowing(LoginRequiredMixin, View):\n \"\"\"\n A view that shows list of profiles\n that profile with given username is following\n \"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n\n return TemplateResponse(\n request,\n \"profile_following.html\",\n {\n \"profile\": profile,\n },\n )\n\n\nclass UserCivis(LoginRequiredMixin, View):\n \"\"\"\n A view that shows list of civis\n that profile with given username created\n \"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n user = profile.user\n civis = user.civis.all()\n\n return TemplateResponse(\n request,\n \"user_civis.html\",\n {\"profile\": profile, \"civis\": civis},\n )\n\n\n@login_required\ndef expunge_user(request):\n \"\"\"\n Delete User Information\n \"\"\"\n\n user_model = get_user_model()\n user = get_object_or_404(user_model, username=request.user.username)\n\n profile = get_object_or_404(Profile, user=user)\n\n # Expunge personally identifiable data in user\n expunged_user_data = {\n \"is_active\": False,\n \"email\": \"\",\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"username\": f\"expunged-{ user.id }\",\n }\n user.__dict__.update(expunged_user_data)\n user.save()\n\n # Expunge personally identifiable data in profile\n expunged_profile_data = {\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"about_me\": \"\",\n }\n profile.__dict__.update(expunged_profile_data)\n profile.save()\n\n return redirect(\"/\")\n\n\nclass UserIssues(LoginRequiredMixin, View):\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n user = profile.user\n civis = user.civis.all()\n followers = profile.followers.all()\n\n return TemplateResponse(\n request,\n \"user_civis.html\",\n {\"profile\": profile, \"followers\": followers, \"civis\": civis},\n )\n", "path": "project/accounts/views.py"}], "after_files": [{"content": "\"\"\"\nClass based views.\n\nThis module will include views for the accounts app.\n\"\"\"\n\nfrom accounts.authentication import account_activation_token, send_activation_email\nfrom accounts.forms import ProfileEditForm, UserRegistrationForm\nfrom accounts.models import Profile\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model, login\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse, reverse_lazy\nfrom django.utils.encoding import force_str\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.views import View\nfrom django.views.generic.edit import FormView, UpdateView\n\n\nclass ProfileFollow(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n # Prevent users from following themselves.\n if request.user.username == kwargs[\"username\"]:\n 
pass\n else:\n following_profile = Profile.objects.get(user__username=kwargs[\"username\"])\n\n self.request.user.profile.following.add(following_profile)\n\n redirect_to = reverse(\"profile\", kwargs={\"username\": kwargs[\"username\"]})\n\n return HttpResponseRedirect(redirect_to)\n\n\nclass ProfileUnfollow(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n # Prevent users from following themselves.\n if request.user.username == kwargs[\"username\"]:\n pass\n else:\n following_profile = Profile.objects.get(user__username=kwargs[\"username\"])\n\n self.request.user.profile.following.remove(following_profile)\n\n redirect_to = reverse(\"profile\", kwargs={\"username\": kwargs[\"username\"]})\n\n return HttpResponseRedirect(redirect_to)\n\n\nclass RegisterView(FormView):\n \"\"\"\n A form view that handles user registration.\n \"\"\"\n\n template_name = \"accounts/register/register.html\"\n form_class = UserRegistrationForm\n success_url = \"/\"\n\n def _create_user(self, form):\n username = form.cleaned_data[\"username\"]\n password = form.cleaned_data[\"password\"]\n email = form.cleaned_data[\"email\"]\n user = get_user_model().objects.create_user(username, email, password)\n return user\n\n def _send_email(self, user):\n domain = get_current_site(self.request).domain\n send_activation_email(user, domain)\n\n def _login(self, user):\n login(self.request, user)\n\n def form_valid(self, form):\n user = self._create_user(form)\n\n self._send_email(user)\n self._login(user)\n\n return super(RegisterView, self).form_valid(form)\n\n\nclass ProfileActivationView(View):\n \"\"\"\n This shows different views to the user when they are verifying\n their account based on whether they are already verified or not.\n \"\"\"\n\n def get(self, request, uidb64, token):\n\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = get_user_model().objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):\n user = None\n\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n\n template_var = {\n \"link\": redirect_link,\n }\n\n if user is not None and account_activation_token.check_token(user, token):\n profile = user.profile\n\n if profile.is_verified:\n template_var[\"title\"] = \"Email Already Verified\"\n template_var[\"content\"] = \"You have already verified your email.\"\n else:\n profile.is_verified = True\n profile.save()\n\n template_var[\"title\"] = \"Email Verification Successful\"\n template_var[\"content\"] = \"Thank you for verifying your email.\"\n else:\n # invalid link\n template_var[\"title\"] = \"Email Verification Error\"\n template_var[\"content\"] = \"Email could not be verified\"\n\n return TemplateResponse(request, \"general_message.html\", template_var)\n\n\nclass PasswordResetView(auth_views.PasswordResetView):\n template_name = \"accounts/users/password_reset.html\"\n email_template_name = \"accounts/users/password_reset_email.html\"\n subject_template_name = \"accounts/users/password_reset_subject.txt\"\n from_email = settings.EMAIL_HOST_USER\n success_url = reverse_lazy(\"accounts_password_reset_done\")\n\n\nclass PasswordResetDoneView(auth_views.PasswordResetDoneView):\n template_name = \"accounts/users/password_reset_done.html\"\n\n\nclass PasswordResetConfirmView(auth_views.PasswordResetConfirmView):\n template_name = \"accounts/users/password_reset_confirm.html\"\n success_url = reverse_lazy(\"accounts_password_reset_complete\")\n\n\nclass 
PasswordResetCompleteView(auth_views.PasswordResetCompleteView):\n template_name = \"accounts/users/password_reset_complete.html\"\n\n\nclass SettingsView(LoginRequiredMixin, UpdateView):\n \"\"\"A form view to edit Profile\"\"\"\n\n login_url = \"accounts_login\"\n form_class = ProfileEditForm\n success_url = reverse_lazy(\"accounts_settings\")\n template_name = \"accounts/settings.html\"\n\n def get_object(self, queryset=None):\n return Profile.objects.get(user=self.request.user)\n\n def get_initial(self):\n profile = Profile.objects.get(user=self.request.user)\n self.initial.update(\n {\n \"username\": profile.user.username,\n \"email\": profile.user.email,\n \"first_name\": profile.first_name or None,\n \"last_name\": profile.last_name or None,\n \"about_me\": profile.about_me or None,\n \"profile_image\": profile.profile_image or None,\n }\n )\n return super(SettingsView, self).get_initial()\n\n\nclass UserProfileView(LoginRequiredMixin, View):\n \"\"\"A view that shows profile for authorized users\"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n return TemplateResponse(\n request,\n \"account.html\",\n {\n \"profile\": profile,\n },\n )\n\n\nclass UserFollowers(LoginRequiredMixin, View):\n \"\"\"A view that shows the followers for authorized users\"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n return TemplateResponse(\n request,\n \"user_followers.html\",\n {\n \"profile\": profile,\n },\n )\n\n\nclass ProfileFollowing(LoginRequiredMixin, View):\n \"\"\"\n A view that shows list of profiles\n that profile with given username is following\n \"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n\n return TemplateResponse(\n request,\n \"profile_following.html\",\n {\n \"profile\": profile,\n },\n )\n\n\nclass UserCivis(LoginRequiredMixin, View):\n \"\"\"\n A view that shows list of civis\n that profile with given username created\n \"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n user = profile.user\n civis = user.civis.all()\n\n return TemplateResponse(\n request,\n \"user_civis.html\",\n {\"profile\": profile, \"civis\": civis},\n )\n\n\n@login_required\ndef expunge_user(request):\n \"\"\"\n Delete User Information\n \"\"\"\n\n user_model = get_user_model()\n user = get_object_or_404(user_model, username=request.user.username)\n\n profile = get_object_or_404(Profile, user=user)\n\n # Expunge personally identifiable data in user\n expunged_user_data = {\n \"is_active\": False,\n \"email\": \"\",\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"username\": f\"expunged-{ user.id }\",\n }\n user.__dict__.update(expunged_user_data)\n user.save()\n\n # Expunge personally identifiable data in profile\n expunged_profile_data = {\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"about_me\": \"\",\n }\n profile.__dict__.update(expunged_profile_data)\n profile.save()\n\n return redirect(\"/\")\n\n\nclass UserIssues(LoginRequiredMixin, View):\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n user = profile.user\n civis = user.civis.all()\n followers = profile.followers.all()\n\n return TemplateResponse(\n request,\n \"user_civis.html\",\n {\"profile\": profile, \"followers\": followers, \"civis\": civis},\n )\n", "path": "project/accounts/views.py"}]} | 2,988 | 142 |
gh_patches_debug_336 | rasdani/github-patches | git_diff | piskvorky__gensim-919 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
import gensim fails since updating to Xcode 7.3
I just updated my version of Xcode to 7.3. When I run `pip install --upgrade gensim` the process completed without any issues. However, when I try `import gensim` within the python shell the terminal barfs a bunch of C++ output with a block of execution errors that begins with:
`Exception: Compilation failed (return status=1): clang: error: unsupported option '-b mi2'. clang: error: unsupported option '-b mi'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-sse4a'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-tbm'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-target-feature'....`
I think this has something to do with where gensim is looking for its header files, but I'm somewhat at a loss. Any help debugging would be greatly appreciated.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gensim/corpora/__init__.py`
Content:
```
1 """
2 This package contains implementations of various streaming corpus I/O format.
3 """
4
5 # bring corpus classes directly into package namespace, to save some typing
6 from .indexedcorpus import IndexedCorpus # must appear before the other classes
7
8 from .mmcorpus import MmCorpus
9 from .bleicorpus import BleiCorpus
10 from .svmlightcorpus import SvmLightCorpus
11 from .lowcorpus import LowCorpus
12 from .dictionary import Dictionary
13 from .hashdictionary import HashDictionary
14 from .wikicorpus import WikiCorpus
15 from .textcorpus import TextCorpus
16 from .ucicorpus import UciCorpus
17 from .malletcorpus import MalletCorpus
18 from .sharded_corpus import ShardedCorpus
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gensim/corpora/__init__.py b/gensim/corpora/__init__.py
--- a/gensim/corpora/__init__.py
+++ b/gensim/corpora/__init__.py
@@ -15,4 +15,3 @@
from .textcorpus import TextCorpus
from .ucicorpus import UciCorpus
from .malletcorpus import MalletCorpus
-from .sharded_corpus import ShardedCorpus
| {"golden_diff": "diff --git a/gensim/corpora/__init__.py b/gensim/corpora/__init__.py\n--- a/gensim/corpora/__init__.py\n+++ b/gensim/corpora/__init__.py\n@@ -15,4 +15,3 @@\n from .textcorpus import TextCorpus\n from .ucicorpus import UciCorpus\n from .malletcorpus import MalletCorpus\n-from .sharded_corpus import ShardedCorpus\n", "issue": "import gensim fails since updating to Xcode 7.3 \nI just updated my version of Xcode to 7.3. When I run `pip install --upgrade gensim` the process completed without any issues. However, when I try `import gensim` within the python shell the terminal barfs a bunch of C++ output with a block of execution errors that begins with: \n\n`Exception: Compilation failed (return status=1): clang: error: unsupported option '-b mi2'. clang: error: unsupported option '-b mi'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-sse4a'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-tbm'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-target-feature'....`\n\nI think this has something to do with where gensim is looking for its header files, but I'm somewhat at a loss. Any help debugging would be greatly appreciated. \n\n", "before_files": [{"content": "\"\"\"\nThis package contains implementations of various streaming corpus I/O format.\n\"\"\"\n\n# bring corpus classes directly into package namespace, to save some typing\nfrom .indexedcorpus import IndexedCorpus # must appear before the other classes\n\nfrom .mmcorpus import MmCorpus\nfrom .bleicorpus import BleiCorpus\nfrom .svmlightcorpus import SvmLightCorpus\nfrom .lowcorpus import LowCorpus\nfrom .dictionary import Dictionary\nfrom .hashdictionary import HashDictionary\nfrom .wikicorpus import WikiCorpus\nfrom .textcorpus import TextCorpus\nfrom .ucicorpus import UciCorpus\nfrom .malletcorpus import MalletCorpus\nfrom .sharded_corpus import ShardedCorpus\n", "path": "gensim/corpora/__init__.py"}], "after_files": [{"content": "\"\"\"\nThis package contains implementations of various streaming corpus I/O format.\n\"\"\"\n\n# bring corpus classes directly into package namespace, to save some typing\nfrom .indexedcorpus import IndexedCorpus # must appear before the other classes\n\nfrom .mmcorpus import MmCorpus\nfrom .bleicorpus import BleiCorpus\nfrom .svmlightcorpus import SvmLightCorpus\nfrom .lowcorpus import LowCorpus\nfrom .dictionary import Dictionary\nfrom .hashdictionary import HashDictionary\nfrom .wikicorpus import WikiCorpus\nfrom .textcorpus import TextCorpus\nfrom .ucicorpus import UciCorpus\nfrom .malletcorpus import MalletCorpus\n", "path": "gensim/corpora/__init__.py"}]} | 695 | 109 |
gh_patches_debug_23866 | rasdani/github-patches | git_diff | elastic__ecs-1191 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error when using usage-example with generator.py
<!--
GitHub is reserved for bug reports and feature requests; it is not the place
for general questions. If you have a general question, please visit the
discuss forums: https://discuss.elastic.co/tag/ecs-elastic-common-schema.
Please fill in the following details to help us reproduce the bug:
-->
**Description of the problem including expected versus actual behavior**:
Trying `script/generator.py` for the first time, I stumble upon a stack trace throwing `KeyError: 'error'`. I initially thought I might have passed the wrong parameters, but it looks like the example documented in `USAGE.md` also has the same issue.
Thanks for writing this tool; I'm eager to use it since it can greatly help me generate/maintain configuration for custom Beats indices. Let me know if I can somehow help you further.
**Steps to reproduce**:
1. `git clone https://github.com/elastic/ecs && cd ecs`
2. Running `generator.py` as documented in `USAGE.md`:
```sh
$ python3 scripts/generator.py --ref v1.6.0 \
--subset usage-example/fields/subset.yml \
--include usage-example/fields/custom/ \
--out usage-example/ \
--template-settings usage-example/fields/template-settings.json \
--mapping-settings usage-example/fields/mapping-settings.json
```
**Provide logs (if relevant)**:
```
Loading schemas from git ref v1.6.0
Running generator. ECS version 1.6.0
Loading user defined schemas: ['usage-example/fields/custom/']
/Users/florent/Sources/ecs/scripts/schema/cleaner.py:185: UserWarning: Example value for field `header_flags` contains an object or array which must be quoted to avoid YAML interpretation.
This will cause an exception when running in strict mode.
Warning check:
check_example_value(field, strict=strict_mode)
/Users/florent/Sources/ecs/scripts/schema/cleaner.py:185: UserWarning: Example value for field `resolved_ip` contains an object or array which must be quoted to avoid YAML interpretation.
[...]
Traceback (most recent call last):
File "/Users/florent/Sources/ecs/scripts/generator.py", line 106, in <module>
main()
File "/Users/florent/Sources/ecs/scripts/generator.py", line 60, in main
es_template.generate_legacy(flat, ecs_version, out_dir, args.template_settings, args.mapping_settings)
File "/Users/florent/Sources/ecs/scripts/generators/es_template.py", line 109, in generate_legacy
generate_legacy_template_version(6, ecs_version, mappings_section, out_dir, template_settings_file)
File "/Users/florent/Sources/ecs/scripts/generators/es_template.py", line 115, in generate_legacy_template_version
template = template_settings(es_version, ecs_version, mappings_section, template_settings_file)
File "/Users/florent/Sources/ecs/scripts/generators/es_template.py", line 202, in template_settings
error_stack_trace_mappings = mappings_section['properties']['error']['properties']['stack_trace']
KeyError: 'error'
```
**Any additional context:**
```
$ git rev-parse HEAD
83ced31c9da6b3e5599b01e319a827a7941e6439
$ python3 --version
Python 3.9.0
$ pip3 list | grep -i -E '(yaml|pep|mock|git|jinja)'
autopep8 1.4.4
gitdb 4.0.5
GitPython 3.1.2
Jinja2 2.11.2
mock 4.0.2
PyYAML 5.3b1
yamllint 1.19.0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/generators/es_template.py`
Content:
```
1 import copy
2 import json
3 import sys
4
5 from os.path import join
6
7 from generators import ecs_helpers
8 from schema.cleaner import field_or_multi_field_datatype_defaults
9 from schema.oss import TYPE_FALLBACKS
10
11
12 # Composable Template
13
14 def generate(ecs_nested, ecs_version, out_dir, mapping_settings_file):
15 """This generates all artifacts for the composable template approach"""
16 all_component_templates(ecs_nested, ecs_version, out_dir)
17 component_names = component_name_convention(ecs_version, ecs_nested)
18 save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file)
19
20
21 def save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file):
22 """Generate the master sample composable template"""
23 template = {
24 "index_patterns": ["try-ecs-*"],
25 "composed_of": component_names,
26 "priority": 1, # Very low, as this is a sample template
27 "_meta": {
28 "ecs_version": ecs_version,
29 "description": "Sample composable template that includes all ECS fields"
30 },
31 "template": {
32 "settings": {
33 "index": {
34 "mapping": {
35 "total_fields": {
36 "limit": 2000
37 }
38 }
39 }
40 },
41 "mappings": mapping_settings(mapping_settings_file)
42 }
43 }
44 filename = join(out_dir, "elasticsearch/template.json")
45 save_json(filename, template)
46
47
48 def all_component_templates(ecs_nested, ecs_version, out_dir):
49 """Generate one component template per field set"""
50 component_dir = join(out_dir, 'elasticsearch/component')
51 ecs_helpers.make_dirs(component_dir)
52
53 for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():
54 field_mappings = {}
55 for (flat_name, field) in fieldset['fields'].items():
56 name_parts = flat_name.split('.')
57 dict_add_nested(field_mappings, name_parts, entry_for(field))
58
59 save_component_template(fieldset_name, ecs_version, component_dir, field_mappings)
60
61
62 def save_component_template(template_name, ecs_version, out_dir, field_mappings):
63 filename = join(out_dir, template_name) + ".json"
64 reference_url = "https://www.elastic.co/guide/en/ecs/current/ecs-{}.html".format(template_name)
65
66 template = {
67 'template': {'mappings': {'properties': field_mappings}},
68 '_meta': {
69 'ecs_version': ecs_version,
70 'documentation': reference_url
71 }
72 }
73 save_json(filename, template)
74
75
76 def component_name_convention(ecs_version, ecs_nested):
77 version = ecs_version.replace('+', '-')
78 names = []
79 for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():
80 names.append("ecs_{}_{}".format(version, fieldset_name))
81 return names
82
83
84 def candidate_components(ecs_nested):
85 """Returns same structure as ecs_nested, but skips all field sets with reusable.top_level: False"""
86 components = {}
87 for (fieldset_name, fieldset) in ecs_nested.items():
88 if fieldset.get('reusable', None):
89 if not fieldset['reusable']['top_level']:
90 continue
91 components[fieldset_name] = fieldset
92 return components
93
94
95 # Legacy template
96
97
98 def generate_legacy(ecs_flat, ecs_version, out_dir, template_settings_file, mapping_settings_file):
99 """Generate the legacy index template"""
100 field_mappings = {}
101 for flat_name in sorted(ecs_flat):
102 field = ecs_flat[flat_name]
103 name_parts = flat_name.split('.')
104 dict_add_nested(field_mappings, name_parts, entry_for(field))
105
106 mappings_section = mapping_settings(mapping_settings_file)
107 mappings_section['properties'] = field_mappings
108
109 generate_legacy_template_version(6, ecs_version, mappings_section, out_dir, template_settings_file)
110 generate_legacy_template_version(7, ecs_version, mappings_section, out_dir, template_settings_file)
111
112
113 def generate_legacy_template_version(es_version, ecs_version, mappings_section, out_dir, template_settings_file):
114 ecs_helpers.make_dirs(join(out_dir, 'elasticsearch', str(es_version)))
115 template = template_settings(es_version, ecs_version, mappings_section, template_settings_file)
116
117 filename = join(out_dir, "elasticsearch/{}/template.json".format(es_version))
118 save_json(filename, template)
119
120
121 # Common helpers
122
123
124 def dict_add_nested(dct, name_parts, value):
125 current_nesting = name_parts[0]
126 rest_name_parts = name_parts[1:]
127 if len(rest_name_parts) > 0:
128 dct.setdefault(current_nesting, {})
129 dct[current_nesting].setdefault('properties', {})
130
131 dict_add_nested(
132 dct[current_nesting]['properties'],
133 rest_name_parts,
134 value)
135
136 else:
137 if current_nesting in dct and 'type' in value and 'object' == value['type']:
138 return
139 dct[current_nesting] = value
140
141
142 def entry_for(field):
143 field_entry = {'type': field['type']}
144 try:
145 if field['type'] == 'object' or field['type'] == 'nested':
146 if 'enabled' in field and not field['enabled']:
147 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['enabled'])
148 # the index field is only valid for field types that are not object and nested
149 elif 'index' in field and not field['index']:
150 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['index', 'doc_values'])
151
152 if field['type'] == 'keyword':
153 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['ignore_above'])
154 elif field['type'] == 'constant_keyword':
155 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['value'])
156 elif field['type'] == 'text':
157 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['norms'])
158 elif field['type'] == 'alias':
159 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['path'])
160 elif field['type'] == 'scaled_float':
161 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['scaling_factor'])
162
163 if 'multi_fields' in field:
164 field_entry['fields'] = {}
165 for mf in field['multi_fields']:
166 mf_type = mf['type']
167 mf_entry = {'type': mf_type}
168 if mf_type == 'keyword':
169 ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['normalizer', 'ignore_above'])
170 elif mf_type == 'text':
171 ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['norms'])
172 field_entry['fields'][mf['name']] = mf_entry
173
174 except KeyError as ex:
175 print("Exception {} occurred for field {}".format(ex, field))
176 raise ex
177 return field_entry
178
179
180 def mapping_settings(mapping_settings_file):
181 if mapping_settings_file:
182 with open(mapping_settings_file) as f:
183 mappings = json.load(f)
184 else:
185 mappings = default_mapping_settings()
186 return mappings
187
188
189 def template_settings(es_version, ecs_version, mappings_section, template_settings_file):
190 if template_settings_file:
191 with open(template_settings_file) as f:
192 template = json.load(f)
193 else:
194 template = default_template_settings(ecs_version)
195
196 if es_version == 6:
197 mappings_section = copy.deepcopy(mappings_section)
198 es6_type_fallback(mappings_section['properties'])
199
200 # error.stack_trace needs special handling to set
201 # index: false and doc_values: false
202 error_stack_trace_mappings = mappings_section['properties']['error']['properties']['stack_trace']
203 error_stack_trace_mappings.setdefault('index', False)
204 error_stack_trace_mappings.setdefault('doc_values', False)
205
206 template['mappings'] = {'_doc': mappings_section}
207 else:
208 template['mappings'] = mappings_section
209
210 # _meta can't be at template root in legacy templates, so moving back to mappings section
211 mappings_section['_meta'] = template.pop('_meta')
212
213 return template
214
215
216 def save_json(file, data):
217 open_mode = "wb"
218 if sys.version_info >= (3, 0):
219 open_mode = "w"
220 with open(file, open_mode) as jsonfile:
221 jsonfile.write(json.dumps(data, indent=2, sort_keys=True))
222
223
224 def default_template_settings(ecs_version):
225 return {
226 "index_patterns": ["try-ecs-*"],
227 "_meta": {"version": ecs_version},
228 "order": 1,
229 "settings": {
230 "index": {
231 "mapping": {
232 "total_fields": {
233 "limit": 10000
234 }
235 },
236 "refresh_interval": "5s"
237 }
238 }
239 }
240
241
242 def default_mapping_settings():
243 return {
244 "date_detection": False,
245 "dynamic_templates": [
246 {
247 "strings_as_keyword": {
248 "mapping": {
249 "ignore_above": 1024,
250 "type": "keyword"
251 },
252 "match_mapping_type": "string"
253 }
254 }
255 ]
256 }
257
258
259 def es6_type_fallback(mappings):
260 '''
261 Visits each leaf in mappings object and fallback to an
262 Elasticsearch 6.x supported type.
263
264 Since a field like `wildcard` won't have the same defaults as
265 a `keyword` field, we must add any missing defaults.
266 '''
267
268 for (name, details) in mappings.items():
269 if 'type' in details:
270 fallback_type = TYPE_FALLBACKS.get(details['type'])
271 if fallback_type:
272 mappings[name]['type'] = fallback_type
273 field_or_multi_field_datatype_defaults(mappings[name])
274 if 'properties' in details:
275 es6_type_fallback(details['properties'])
276
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/generators/es_template.py b/scripts/generators/es_template.py
--- a/scripts/generators/es_template.py
+++ b/scripts/generators/es_template.py
@@ -198,17 +198,23 @@
es6_type_fallback(mappings_section['properties'])
# error.stack_trace needs special handling to set
- # index: false and doc_values: false
- error_stack_trace_mappings = mappings_section['properties']['error']['properties']['stack_trace']
- error_stack_trace_mappings.setdefault('index', False)
- error_stack_trace_mappings.setdefault('doc_values', False)
+ # index: false and doc_values: false if the field
+ # is present in the mappings
+ try:
+ error_stack_trace_mappings = mappings_section['properties']['error']['properties']['stack_trace']
+ error_stack_trace_mappings.setdefault('index', False)
+ error_stack_trace_mappings.setdefault('doc_values', False)
+ except KeyError:
+ pass
template['mappings'] = {'_doc': mappings_section}
else:
template['mappings'] = mappings_section
# _meta can't be at template root in legacy templates, so moving back to mappings section
- mappings_section['_meta'] = template.pop('_meta')
+ # if present
+ if '_meta' in template:
+ mappings_section['_meta'] = template.pop('_meta')
return template
| {"golden_diff": "diff --git a/scripts/generators/es_template.py b/scripts/generators/es_template.py\n--- a/scripts/generators/es_template.py\n+++ b/scripts/generators/es_template.py\n@@ -198,17 +198,23 @@\n es6_type_fallback(mappings_section['properties'])\n \n # error.stack_trace needs special handling to set\n- # index: false and doc_values: false\n- error_stack_trace_mappings = mappings_section['properties']['error']['properties']['stack_trace']\n- error_stack_trace_mappings.setdefault('index', False)\n- error_stack_trace_mappings.setdefault('doc_values', False)\n+ # index: false and doc_values: false if the field\n+ # is present in the mappings\n+ try:\n+ error_stack_trace_mappings = mappings_section['properties']['error']['properties']['stack_trace']\n+ error_stack_trace_mappings.setdefault('index', False)\n+ error_stack_trace_mappings.setdefault('doc_values', False)\n+ except KeyError:\n+ pass\n \n template['mappings'] = {'_doc': mappings_section}\n else:\n template['mappings'] = mappings_section\n \n # _meta can't be at template root in legacy templates, so moving back to mappings section\n- mappings_section['_meta'] = template.pop('_meta')\n+ # if present\n+ if '_meta' in template:\n+ mappings_section['_meta'] = template.pop('_meta')\n \n return template\n", "issue": "Error when using usage-example with generator.py\n<!--\r\nGitHub is reserved for bug reports and feature requests; it is not the place\r\nfor general questions. If you have a general question, please visit the\r\ndiscuss forums: https://discuss.elastic.co/tag/ecs-elastic-common-schema.\r\n\r\nPlease fill in the following details to help us reproduce the bug:\r\n-->\r\n\r\n**Description of the problem including expected versus actual behavior**:\r\n\r\nTrying `script/generator.py` for the first time, I stumble upon a stack trace throwing `KeyError: 'error'`. I initially though I maybe passed wrong parameters but it looks like example documented in `USAGE.md` also have the same issue.\r\n\r\nThanks for writing this tool, I'm eager to use it since it can greatly help me to generate/maintain configuration for custom Beats indices. Let me know if I can somehow help you further.\r\n\r\n**Steps to reproduce**:\r\n\r\n1. `git clone https://github.com/elastic/ecs && cd ecs`\r\n2. Running `generator.py` as documented in `USAGE.md`:\r\n```sh\r\n$ python3 scripts/generator.py --ref v1.6.0 \\\r\n --subset usage-example/fields/subset.yml \\\r\n --include usage-example/fields/custom/ \\\r\n --out usage-example/ \\\r\n --template-settings usage-example/fields/template-settings.json \\\r\n --mapping-settings usage-example/fields/mapping-settings.json\r\n```\r\n\r\n**Provide logs (if relevant)**:\r\n\r\n```\r\nLoading schemas from git ref v1.6.0\r\nRunning generator. 
ECS version 1.6.0\r\nLoading user defined schemas: ['usage-example/fields/custom/']\r\n/Users/florent/Sources/ecs/scripts/schema/cleaner.py:185: UserWarning: Example value for field `header_flags` contains an object or array which must be quoted to avoid YAML interpretation.\r\n\r\nThis will cause an exception when running in strict mode.\r\nWarning check:\r\n check_example_value(field, strict=strict_mode)\r\n/Users/florent/Sources/ecs/scripts/schema/cleaner.py:185: UserWarning: Example value for field `resolved_ip` contains an object or array which must be quoted to avoid YAML interpretation.\r\n[...]\r\nTraceback (most recent call last):\r\n File \"/Users/florent/Sources/ecs/scripts/generator.py\", line 106, in <module>\r\n main()\r\n File \"/Users/florent/Sources/ecs/scripts/generator.py\", line 60, in main\r\n es_template.generate_legacy(flat, ecs_version, out_dir, args.template_settings, args.mapping_settings)\r\n File \"/Users/florent/Sources/ecs/scripts/generators/es_template.py\", line 109, in generate_legacy\r\n generate_legacy_template_version(6, ecs_version, mappings_section, out_dir, template_settings_file)\r\n File \"/Users/florent/Sources/ecs/scripts/generators/es_template.py\", line 115, in generate_legacy_template_version\r\n template = template_settings(es_version, ecs_version, mappings_section, template_settings_file)\r\n File \"/Users/florent/Sources/ecs/scripts/generators/es_template.py\", line 202, in template_settings\r\n error_stack_trace_mappings = mappings_section['properties']['error']['properties']['stack_trace']\r\nKeyError: 'error'\r\n```\r\n\r\n**Any additional context:**\r\n\r\n```\r\n$ git rev-parse HEAD\r\n83ced31c9da6b3e5599b01e319a827a7941e6439\r\n\r\n$ python3 --version\r\nPython 3.9.0\r\n\r\n$ pip3 list | grep -i -E '(yaml|pep|mock|git|jinja)'\r\nautopep8 1.4.4\r\ngitdb 4.0.5\r\nGitPython 3.1.2\r\nJinja2 2.11.2\r\nmock 4.0.2\r\nPyYAML 5.3b1\r\nyamllint 1.19.0\r\n```\n", "before_files": [{"content": "import copy\nimport json\nimport sys\n\nfrom os.path import join\n\nfrom generators import ecs_helpers\nfrom schema.cleaner import field_or_multi_field_datatype_defaults\nfrom schema.oss import TYPE_FALLBACKS\n\n\n# Composable Template\n\ndef generate(ecs_nested, ecs_version, out_dir, mapping_settings_file):\n \"\"\"This generates all artifacts for the composable template approach\"\"\"\n all_component_templates(ecs_nested, ecs_version, out_dir)\n component_names = component_name_convention(ecs_version, ecs_nested)\n save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file)\n\n\ndef save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file):\n \"\"\"Generate the master sample composable template\"\"\"\n template = {\n \"index_patterns\": [\"try-ecs-*\"],\n \"composed_of\": component_names,\n \"priority\": 1, # Very low, as this is a sample template\n \"_meta\": {\n \"ecs_version\": ecs_version,\n \"description\": \"Sample composable template that includes all ECS fields\"\n },\n \"template\": {\n \"settings\": {\n \"index\": {\n \"mapping\": {\n \"total_fields\": {\n \"limit\": 2000\n }\n }\n }\n },\n \"mappings\": mapping_settings(mapping_settings_file)\n }\n }\n filename = join(out_dir, \"elasticsearch/template.json\")\n save_json(filename, template)\n\n\ndef all_component_templates(ecs_nested, ecs_version, out_dir):\n \"\"\"Generate one component template per field set\"\"\"\n component_dir = join(out_dir, 'elasticsearch/component')\n ecs_helpers.make_dirs(component_dir)\n\n for (fieldset_name, 
fieldset) in candidate_components(ecs_nested).items():\n field_mappings = {}\n for (flat_name, field) in fieldset['fields'].items():\n name_parts = flat_name.split('.')\n dict_add_nested(field_mappings, name_parts, entry_for(field))\n\n save_component_template(fieldset_name, ecs_version, component_dir, field_mappings)\n\n\ndef save_component_template(template_name, ecs_version, out_dir, field_mappings):\n filename = join(out_dir, template_name) + \".json\"\n reference_url = \"https://www.elastic.co/guide/en/ecs/current/ecs-{}.html\".format(template_name)\n\n template = {\n 'template': {'mappings': {'properties': field_mappings}},\n '_meta': {\n 'ecs_version': ecs_version,\n 'documentation': reference_url\n }\n }\n save_json(filename, template)\n\n\ndef component_name_convention(ecs_version, ecs_nested):\n version = ecs_version.replace('+', '-')\n names = []\n for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():\n names.append(\"ecs_{}_{}\".format(version, fieldset_name))\n return names\n\n\ndef candidate_components(ecs_nested):\n \"\"\"Returns same structure as ecs_nested, but skips all field sets with reusable.top_level: False\"\"\"\n components = {}\n for (fieldset_name, fieldset) in ecs_nested.items():\n if fieldset.get('reusable', None):\n if not fieldset['reusable']['top_level']:\n continue\n components[fieldset_name] = fieldset\n return components\n\n\n# Legacy template\n\n\ndef generate_legacy(ecs_flat, ecs_version, out_dir, template_settings_file, mapping_settings_file):\n \"\"\"Generate the legacy index template\"\"\"\n field_mappings = {}\n for flat_name in sorted(ecs_flat):\n field = ecs_flat[flat_name]\n name_parts = flat_name.split('.')\n dict_add_nested(field_mappings, name_parts, entry_for(field))\n\n mappings_section = mapping_settings(mapping_settings_file)\n mappings_section['properties'] = field_mappings\n\n generate_legacy_template_version(6, ecs_version, mappings_section, out_dir, template_settings_file)\n generate_legacy_template_version(7, ecs_version, mappings_section, out_dir, template_settings_file)\n\n\ndef generate_legacy_template_version(es_version, ecs_version, mappings_section, out_dir, template_settings_file):\n ecs_helpers.make_dirs(join(out_dir, 'elasticsearch', str(es_version)))\n template = template_settings(es_version, ecs_version, mappings_section, template_settings_file)\n\n filename = join(out_dir, \"elasticsearch/{}/template.json\".format(es_version))\n save_json(filename, template)\n\n\n# Common helpers\n\n\ndef dict_add_nested(dct, name_parts, value):\n current_nesting = name_parts[0]\n rest_name_parts = name_parts[1:]\n if len(rest_name_parts) > 0:\n dct.setdefault(current_nesting, {})\n dct[current_nesting].setdefault('properties', {})\n\n dict_add_nested(\n dct[current_nesting]['properties'],\n rest_name_parts,\n value)\n\n else:\n if current_nesting in dct and 'type' in value and 'object' == value['type']:\n return\n dct[current_nesting] = value\n\n\ndef entry_for(field):\n field_entry = {'type': field['type']}\n try:\n if field['type'] == 'object' or field['type'] == 'nested':\n if 'enabled' in field and not field['enabled']:\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['enabled'])\n # the index field is only valid for field types that are not object and nested\n elif 'index' in field and not field['index']:\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['index', 'doc_values'])\n\n if field['type'] == 'keyword':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['ignore_above'])\n elif 
field['type'] == 'constant_keyword':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['value'])\n elif field['type'] == 'text':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['norms'])\n elif field['type'] == 'alias':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['path'])\n elif field['type'] == 'scaled_float':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['scaling_factor'])\n\n if 'multi_fields' in field:\n field_entry['fields'] = {}\n for mf in field['multi_fields']:\n mf_type = mf['type']\n mf_entry = {'type': mf_type}\n if mf_type == 'keyword':\n ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['normalizer', 'ignore_above'])\n elif mf_type == 'text':\n ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['norms'])\n field_entry['fields'][mf['name']] = mf_entry\n\n except KeyError as ex:\n print(\"Exception {} occurred for field {}\".format(ex, field))\n raise ex\n return field_entry\n\n\ndef mapping_settings(mapping_settings_file):\n if mapping_settings_file:\n with open(mapping_settings_file) as f:\n mappings = json.load(f)\n else:\n mappings = default_mapping_settings()\n return mappings\n\n\ndef template_settings(es_version, ecs_version, mappings_section, template_settings_file):\n if template_settings_file:\n with open(template_settings_file) as f:\n template = json.load(f)\n else:\n template = default_template_settings(ecs_version)\n\n if es_version == 6:\n mappings_section = copy.deepcopy(mappings_section)\n es6_type_fallback(mappings_section['properties'])\n\n # error.stack_trace needs special handling to set\n # index: false and doc_values: false\n error_stack_trace_mappings = mappings_section['properties']['error']['properties']['stack_trace']\n error_stack_trace_mappings.setdefault('index', False)\n error_stack_trace_mappings.setdefault('doc_values', False)\n\n template['mappings'] = {'_doc': mappings_section}\n else:\n template['mappings'] = mappings_section\n\n # _meta can't be at template root in legacy templates, so moving back to mappings section\n mappings_section['_meta'] = template.pop('_meta')\n\n return template\n\n\ndef save_json(file, data):\n open_mode = \"wb\"\n if sys.version_info >= (3, 0):\n open_mode = \"w\"\n with open(file, open_mode) as jsonfile:\n jsonfile.write(json.dumps(data, indent=2, sort_keys=True))\n\n\ndef default_template_settings(ecs_version):\n return {\n \"index_patterns\": [\"try-ecs-*\"],\n \"_meta\": {\"version\": ecs_version},\n \"order\": 1,\n \"settings\": {\n \"index\": {\n \"mapping\": {\n \"total_fields\": {\n \"limit\": 10000\n }\n },\n \"refresh_interval\": \"5s\"\n }\n }\n }\n\n\ndef default_mapping_settings():\n return {\n \"date_detection\": False,\n \"dynamic_templates\": [\n {\n \"strings_as_keyword\": {\n \"mapping\": {\n \"ignore_above\": 1024,\n \"type\": \"keyword\"\n },\n \"match_mapping_type\": \"string\"\n }\n }\n ]\n }\n\n\ndef es6_type_fallback(mappings):\n '''\n Visits each leaf in mappings object and fallback to an\n Elasticsearch 6.x supported type.\n\n Since a field like `wildcard` won't have the same defaults as\n a `keyword` field, we must add any missing defaults.\n '''\n\n for (name, details) in mappings.items():\n if 'type' in details:\n fallback_type = TYPE_FALLBACKS.get(details['type'])\n if fallback_type:\n mappings[name]['type'] = fallback_type\n field_or_multi_field_datatype_defaults(mappings[name])\n if 'properties' in details:\n es6_type_fallback(details['properties'])\n", "path": "scripts/generators/es_template.py"}], "after_files": [{"content": "import 
copy\nimport json\nimport sys\n\nfrom os.path import join\n\nfrom generators import ecs_helpers\nfrom schema.cleaner import field_or_multi_field_datatype_defaults\nfrom schema.oss import TYPE_FALLBACKS\n\n\n# Composable Template\n\ndef generate(ecs_nested, ecs_version, out_dir, mapping_settings_file):\n \"\"\"This generates all artifacts for the composable template approach\"\"\"\n all_component_templates(ecs_nested, ecs_version, out_dir)\n component_names = component_name_convention(ecs_version, ecs_nested)\n save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file)\n\n\ndef save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file):\n \"\"\"Generate the master sample composable template\"\"\"\n template = {\n \"index_patterns\": [\"try-ecs-*\"],\n \"composed_of\": component_names,\n \"priority\": 1, # Very low, as this is a sample template\n \"_meta\": {\n \"ecs_version\": ecs_version,\n \"description\": \"Sample composable template that includes all ECS fields\"\n },\n \"template\": {\n \"settings\": {\n \"index\": {\n \"mapping\": {\n \"total_fields\": {\n \"limit\": 2000\n }\n }\n }\n },\n \"mappings\": mapping_settings(mapping_settings_file)\n }\n }\n filename = join(out_dir, \"elasticsearch/template.json\")\n save_json(filename, template)\n\n\ndef all_component_templates(ecs_nested, ecs_version, out_dir):\n \"\"\"Generate one component template per field set\"\"\"\n component_dir = join(out_dir, 'elasticsearch/component')\n ecs_helpers.make_dirs(component_dir)\n\n for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():\n field_mappings = {}\n for (flat_name, field) in fieldset['fields'].items():\n name_parts = flat_name.split('.')\n dict_add_nested(field_mappings, name_parts, entry_for(field))\n\n save_component_template(fieldset_name, ecs_version, component_dir, field_mappings)\n\n\ndef save_component_template(template_name, ecs_version, out_dir, field_mappings):\n filename = join(out_dir, template_name) + \".json\"\n reference_url = \"https://www.elastic.co/guide/en/ecs/current/ecs-{}.html\".format(template_name)\n\n template = {\n 'template': {'mappings': {'properties': field_mappings}},\n '_meta': {\n 'ecs_version': ecs_version,\n 'documentation': reference_url\n }\n }\n save_json(filename, template)\n\n\ndef component_name_convention(ecs_version, ecs_nested):\n version = ecs_version.replace('+', '-')\n names = []\n for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():\n names.append(\"ecs_{}_{}\".format(version, fieldset_name))\n return names\n\n\ndef candidate_components(ecs_nested):\n \"\"\"Returns same structure as ecs_nested, but skips all field sets with reusable.top_level: False\"\"\"\n components = {}\n for (fieldset_name, fieldset) in ecs_nested.items():\n if fieldset.get('reusable', None):\n if not fieldset['reusable']['top_level']:\n continue\n components[fieldset_name] = fieldset\n return components\n\n\n# Legacy template\n\n\ndef generate_legacy(ecs_flat, ecs_version, out_dir, template_settings_file, mapping_settings_file):\n \"\"\"Generate the legacy index template\"\"\"\n field_mappings = {}\n for flat_name in sorted(ecs_flat):\n field = ecs_flat[flat_name]\n name_parts = flat_name.split('.')\n dict_add_nested(field_mappings, name_parts, entry_for(field))\n\n mappings_section = mapping_settings(mapping_settings_file)\n mappings_section['properties'] = field_mappings\n\n generate_legacy_template_version(6, ecs_version, mappings_section, out_dir, 
template_settings_file)\n generate_legacy_template_version(7, ecs_version, mappings_section, out_dir, template_settings_file)\n\n\ndef generate_legacy_template_version(es_version, ecs_version, mappings_section, out_dir, template_settings_file):\n ecs_helpers.make_dirs(join(out_dir, 'elasticsearch', str(es_version)))\n template = template_settings(es_version, ecs_version, mappings_section, template_settings_file)\n\n filename = join(out_dir, \"elasticsearch/{}/template.json\".format(es_version))\n save_json(filename, template)\n\n\n# Common helpers\n\n\ndef dict_add_nested(dct, name_parts, value):\n current_nesting = name_parts[0]\n rest_name_parts = name_parts[1:]\n if len(rest_name_parts) > 0:\n dct.setdefault(current_nesting, {})\n dct[current_nesting].setdefault('properties', {})\n\n dict_add_nested(\n dct[current_nesting]['properties'],\n rest_name_parts,\n value)\n\n else:\n if current_nesting in dct and 'type' in value and 'object' == value['type']:\n return\n dct[current_nesting] = value\n\n\ndef entry_for(field):\n field_entry = {'type': field['type']}\n try:\n if field['type'] == 'object' or field['type'] == 'nested':\n if 'enabled' in field and not field['enabled']:\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['enabled'])\n # the index field is only valid for field types that are not object and nested\n elif 'index' in field and not field['index']:\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['index', 'doc_values'])\n\n if field['type'] == 'keyword':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['ignore_above'])\n elif field['type'] == 'constant_keyword':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['value'])\n elif field['type'] == 'text':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['norms'])\n elif field['type'] == 'alias':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['path'])\n elif field['type'] == 'scaled_float':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['scaling_factor'])\n\n if 'multi_fields' in field:\n field_entry['fields'] = {}\n for mf in field['multi_fields']:\n mf_type = mf['type']\n mf_entry = {'type': mf_type}\n if mf_type == 'keyword':\n ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['normalizer', 'ignore_above'])\n elif mf_type == 'text':\n ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['norms'])\n field_entry['fields'][mf['name']] = mf_entry\n\n except KeyError as ex:\n print(\"Exception {} occurred for field {}\".format(ex, field))\n raise ex\n return field_entry\n\n\ndef mapping_settings(mapping_settings_file):\n if mapping_settings_file:\n with open(mapping_settings_file) as f:\n mappings = json.load(f)\n else:\n mappings = default_mapping_settings()\n return mappings\n\n\ndef template_settings(es_version, ecs_version, mappings_section, template_settings_file):\n if template_settings_file:\n with open(template_settings_file) as f:\n template = json.load(f)\n else:\n template = default_template_settings(ecs_version)\n\n if es_version == 6:\n mappings_section = copy.deepcopy(mappings_section)\n es6_type_fallback(mappings_section['properties'])\n\n # error.stack_trace needs special handling to set\n # index: false and doc_values: false if the field\n # is present in the mappings\n try:\n error_stack_trace_mappings = mappings_section['properties']['error']['properties']['stack_trace']\n error_stack_trace_mappings.setdefault('index', False)\n error_stack_trace_mappings.setdefault('doc_values', False)\n except KeyError:\n pass\n\n template['mappings'] = 
{'_doc': mappings_section}\n else:\n template['mappings'] = mappings_section\n\n # _meta can't be at template root in legacy templates, so moving back to mappings section\n # if present\n if '_meta' in template:\n mappings_section['_meta'] = template.pop('_meta')\n\n return template\n\n\ndef save_json(file, data):\n open_mode = \"wb\"\n if sys.version_info >= (3, 0):\n open_mode = \"w\"\n with open(file, open_mode) as jsonfile:\n jsonfile.write(json.dumps(data, indent=2, sort_keys=True))\n\n\ndef default_template_settings(ecs_version):\n return {\n \"index_patterns\": [\"try-ecs-*\"],\n \"_meta\": {\"version\": ecs_version},\n \"order\": 1,\n \"settings\": {\n \"index\": {\n \"mapping\": {\n \"total_fields\": {\n \"limit\": 10000\n }\n },\n \"refresh_interval\": \"5s\"\n }\n }\n }\n\n\ndef default_mapping_settings():\n return {\n \"date_detection\": False,\n \"dynamic_templates\": [\n {\n \"strings_as_keyword\": {\n \"mapping\": {\n \"ignore_above\": 1024,\n \"type\": \"keyword\"\n },\n \"match_mapping_type\": \"string\"\n }\n }\n ]\n }\n\n\ndef es6_type_fallback(mappings):\n '''\n Visits each leaf in mappings object and fallback to an\n Elasticsearch 6.x supported type.\n\n Since a field like `wildcard` won't have the same defaults as\n a `keyword` field, we must add any missing defaults.\n '''\n\n for (name, details) in mappings.items():\n if 'type' in details:\n fallback_type = TYPE_FALLBACKS.get(details['type'])\n if fallback_type:\n mappings[name]['type'] = fallback_type\n field_or_multi_field_datatype_defaults(mappings[name])\n if 'properties' in details:\n es6_type_fallback(details['properties'])\n", "path": "scripts/generators/es_template.py"}]} | 3,938 | 311 |
gh_patches_debug_1358 | rasdani/github-patches | git_diff | mirumee__ariadne-270 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upgrade to GraphQL-core v3
I'm getting the following deprecation warning. Is this something that is already on your radar / that you are planning to resolve for the next release?
>**DeprecationWarning**: GraphQL-core-next has been discontinued. It is now released as GraphQL-core v3 and newer.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python
2 import os
3 from setuptools import setup
4
5 CLASSIFIERS = [
6 "Development Status :: 4 - Beta",
7 "Intended Audience :: Developers",
8 "License :: OSI Approved :: BSD License",
9 "Operating System :: OS Independent",
10 "Programming Language :: Python",
11 "Programming Language :: Python :: 3.6",
12 "Programming Language :: Python :: 3.7",
13 "Programming Language :: Python :: 3.8",
14 "Topic :: Software Development :: Libraries :: Python Modules",
15 ]
16
17 README_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md")
18 with open(README_PATH, "r") as f:
19 README = f.read()
20
21 setup(
22 name="ariadne",
23 author="Mirumee Software",
24 author_email="[email protected]",
25 description="Ariadne is a Python library for implementing GraphQL servers.",
26 long_description=README,
27 long_description_content_type="text/markdown",
28 license="BSD",
29 version="0.8.0",
30 url="https://github.com/mirumee/ariadne",
31 packages=["ariadne"],
32 include_package_data=True,
33 install_requires=[
34 "graphql-core-next<3.0.0",
35 "starlette<0.14",
36 "typing_extensions>=3.6.0",
37 ],
38 extras_require={"asgi-file-uploads": ["python-multipart>=0.0.5"]},
39 classifiers=CLASSIFIERS,
40 platforms=["any"],
41 zip_safe=False,
42 )
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -31,7 +31,7 @@
packages=["ariadne"],
include_package_data=True,
install_requires=[
- "graphql-core-next<3.0.0",
+ "graphql-core>=3.0.0",
"starlette<0.14",
"typing_extensions>=3.6.0",
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -31,7 +31,7 @@\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n- \"graphql-core-next<3.0.0\",\n+ \"graphql-core>=3.0.0\",\n \"starlette<0.14\",\n \"typing_extensions>=3.6.0\",\n ],\n", "issue": "Upgrade to GraphQL-core v3\nI'm getting the following deprecation warning. Is this something that is already on your radar / that you are planning to resolve for the next release?\r\n\r\n>**DeprecationWarning**: GraphQL-core-next has been discontinued. It is now released as GraphQL-core v3 and newer.\n", "before_files": [{"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.8.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core-next<3.0.0\",\n \"starlette<0.14\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py"}], "after_files": [{"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.8.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.0.0\",\n \"starlette<0.14\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 732 | 97 |
gh_patches_debug_39045 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-580 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Event callback should not check the existence of master pod
Currently, there's an error log when the event callback checks pod existence, like this: `Unknown pod name: elasticdl-master-test-mnist`. This is because `elasticdl-master-test-mnist` is the master pod, which is created by users instead of being created by `WorkerManager`. We should fix this since it should only check pod existence for worker pods.
Complete log:
```
2019-06-06 17:38:37,952 elasticdl.python.elasticdl.master.k8s_worker_manager INFO [k8s_worker_manager.py:121] Got event ADDED, phase Running for pod: elasticdl-master-test-mnist
2019-06-06 17:38:37,953 elasticdl.python.elasticdl.master.k8s_worker_manager INFO [k8s_worker_manager.py:71] Starting worker: 1
2019-06-06 17:38:37,953 elasticdl.python.elasticdl.master.k8s_worker_manager ERROR [k8s_worker_manager.py:127] Unknown pod name: elasticdl-master-test-mnist
2019-06-06 17:38:37,954 elasticdl.python.elasticdl.master.k8s_client INFO [k8s_client.py:105] Creating worker: 1
2019-06-06 17:38:37,955 elasticdl.python.elasticdl.master.k8s_worker_manager INFO [k8s_worker_manager.py:121] Got event ADDED, phase Pending for pod: elasticdl-worker-test-mnist-0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/elasticdl/master/k8s_worker_manager.py`
Content:
```
1 import itertools
2 import logging
3 import threading
4
5 from collections import Counter
6 from elasticdl.python.elasticdl.master import k8s_client as k8s
7
8
9 class WorkerManager(object):
10 def __init__(
11 self,
12 task_q,
13 command,
14 args,
15 num_worker=1,
16 cpu_request="1000m",
17 cpu_limit="1000m",
18 memory_request="4096Mi",
19 memory_limit="4096Mi",
20 pod_priority=None,
21 mount_path=None,
22 volume_name=None,
23 image_pull_policy=None,
24 restart_policy="OnFailure",
25 **kwargs):
26 self._logger = logging.getLogger(__name__)
27 self._command = command
28 self._args = args
29 self._num_worker = num_worker
30 self._resource_requests = {
31 "cpu": cpu_request,
32 "memory": memory_request
33 }
34 self._resource_limits = {
35 "cpu": cpu_limit,
36 "memory": memory_limit
37 }
38 self._restart_policy = restart_policy
39 self._pod_priority = pod_priority
40 self._mount_path = mount_path
41 self._volume_name = volume_name
42 self._image_pull_policy = image_pull_policy
43 self._task_q = task_q
44 self._next_worker_id = itertools.count().__next__
45
46 # protects followed variables, which are accessed from event_cb.
47 self._lock = threading.Lock()
48 # worker id to (pod name, phase) mapping
49 # phase: None/Pending/Running/Succeeded/Failed/Unknown
50 # None: worker was just launched, haven't received event yet.
51 # Pending: worker pod not started yet
52 # Running: worker pod is running
53 # Succeeded: worker pod finishes all tasks and terminates with
54 # no issue.
55 # Failed: worker pod is killed for some reason
56 # Unknown: unknown
57 self._pods_phase = {}
58 # pod name to worker id mapping
59 self._pod_name_to_id = {}
60
61 self._relaunch_deleted_live_worker = True
62
63 self._k8s_client = k8s.Client(
64 event_callback=self._event_cb, **kwargs
65 )
66
67 def set_relaunch_deleted_live_worker(self, val):
68 self._relaunch_deleted_live_worker = bool(val)
69
70 def _start_worker(self, worker_id):
71 self._logger.info("Starting worker: %d" % worker_id)
72 with self._lock:
73 pod = self._k8s_client.create_worker(
74 worker_id,
75 self._resource_requests,
76 self._resource_limits,
77 self._pod_priority,
78 self._mount_path,
79 self._volume_name,
80 self._image_pull_policy,
81 command=self._command,
82 args=self._args + ["--worker_id", str(worker_id)],
83 restart_policy=self._restart_policy,
84 )
85 name = pod.metadata.name
86 self._pod_name_to_id[name] = worker_id
87 self._pods_phase[worker_id] = (name, None)
88
89 def start_workers(self):
90 for i in range(self._num_worker):
91 self._start_worker(self._next_worker_id())
92
93 def _remove_worker(self, worker_id):
94 with self._lock:
95 if worker_id not in self._pods_phase:
96 self._logger.error("Unknown worker id: %s" % worker_id)
97 return
98
99 # TODO: change _k8s_client to accept pod name instead of worker id.
100 self._k8s_client.delete_worker(worker_id)
101
102 def stop_relaunch_and_remove_workers(self):
103 with self._lock:
104 self._relaunch_deleted_live_worker = False
105 for worker_id in self._pods_phase:
106 self._k8s_client.delete_worker(worker_id)
107
108 def get_counters(self):
109 with self._lock:
110 return Counter([v for _, v in self._pods_phase.values()])
111
112 def _event_cb(self, event):
113 evt_obj = event.get("object")
114 evt_type = event.get("type")
115 if not evt_obj or not evt_type:
116 self._logger.error("Event doesn't have object or type: %s" % event)
117 return
118
119 pod_name = evt_obj.metadata.name
120 phase = evt_obj.status.phase
121 self._logger.info("Got event %s, phase %s for pod: %s" % (evt_type, phase, pod_name))
122
123 relaunch = False
124 with self._lock:
125 worker_id = self._pod_name_to_id.get(pod_name)
126 if worker_id is None:
127 self._logger.error("Unknown pod name: %s" % pod_name)
128 return
129
130 self._pods_phase[worker_id] = (pod_name, phase)
131 if evt_type == "DELETED":
132 del self._pods_phase[worker_id]
133 del self._pod_name_to_id[pod_name]
134 self._task_q.recover_tasks(worker_id)
135
136 # If the pod being deleted was not "Succeeded", relaunch a worker.
137 relaunch = self._relaunch_deleted_live_worker and phase != "Succeeded"
138 if relaunch:
139 self._logger.info("Relaunching worker.")
140 self._start_worker(self._next_worker_id())
141
```
Path: `elasticdl/python/elasticdl/master/k8s_client.py`
Content:
```
1 import logging
2 import os
3 import threading
4 import traceback
5
6 from kubernetes import client, config, watch
7
8
9 class Client(object):
10 def __init__(
11 self, *, worker_image, namespace, job_name, event_callback
12 ):
13 """
14 ElasticDL k8s client.
15
16 Args:
17 worker_image: Docker image path for ElasticDL workers.
18 namespace: k8s namespace for ElasticDL pods.
19 job_name: ElasticDL job name, should be unique in the namespace.
20 Used as worker pod name prefix and value for "elasticdl" label.
21 event_callback: If not None, an event watcher will be created and
22 events passed to the callback.
23 """
24 if os.getenv("KUBERNETES_SERVICE_HOST"):
25 # We are running inside k8s
26 config.load_incluster_config()
27 else:
28 # Use user's kube config
29 config.load_kube_config()
30
31 self._v1 = client.CoreV1Api()
32 self._logger = logging.getLogger(__name__)
33 self._image = worker_image
34 self._ns = namespace
35 self._job_name = job_name
36 self._event_cb = event_callback
37 if self._event_cb:
38 threading.Thread(
39 target=self._watch, name="event_watcher", daemon=True
40 ).start()
41
42 def _watch(self):
43 stream = watch.Watch().stream(
44 self._v1.list_namespaced_pod,
45 self._ns,
46 label_selector="elasticdl_job_name=" + self._job_name,
47 )
48 for event in stream:
49 try:
50 self._event_cb(event)
51 except Exception:
52 traceback.print_exc()
53
54 def get_pod_name(self, worker_id):
55 return "elasticdl-worker-" + self._job_name + "-" + str(worker_id)
56
57 def _create_worker_pod(self, worker_id, resource_requests, resource_limits, priority,
58 mount_path, volume_name, image_pull_policy, command, args, restart_policy):
59 # Worker container config
60 container = client.V1Container(
61 name=self.get_pod_name(worker_id),
62 image=self._image,
63 command=command,
64 resources=client.V1ResourceRequirements(
65 requests=resource_requests,
66 limits=resource_limits
67 ),
68 image_pull_policy=image_pull_policy,
69 args=args
70 )
71
72 # Pod
73 spec = client.V1PodSpec(
74 containers=[container],
75 restart_policy=restart_policy,
76 )
77
78 # Mount data path
79 if mount_path is not None and volume_name is not None:
80 volume = client.V1Volume(
81 name='data-volume',
82 persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
83 claim_name="fileserver-claim", read_only=False))
84 spec.volumes = [volume]
85 container.volume_mounts = [client.V1VolumeMount(name=volume_name, mount_path=mount_path)]
86
87 if priority is not None:
88 spec.priority_class_name = priority
89
90 pod = client.V1Pod(
91 spec=spec,
92 metadata=client.V1ObjectMeta(
93 name=self.get_pod_name(worker_id),
94 labels={
95 "app": "elasticdl",
96 "elasticdl_job_name": self._job_name
97 },
98 ),
99 )
100 return pod
101
102 def create_worker(self, worker_id, resource_requests, resource_limits, priority=None,
103 mount_path=None, volume_name=None, image_pull_policy=None,
104 command=None, args=None, restart_policy="OnFailure"):
105 self._logger.info("Creating worker: " + str(worker_id))
106 pod = self._create_worker_pod(
107 worker_id, resource_requests, resource_limits, priority,
108 mount_path, volume_name, image_pull_policy, command=command,
109 args=args, restart_policy=restart_policy)
110 return self._v1.create_namespaced_pod(self._ns, pod)
111
112 def delete_worker(self, worker_id):
113 self._logger.info("Deleting worker: " + str(worker_id))
114 self._v1.delete_namespaced_pod(
115 self.get_pod_name(worker_id),
116 self._ns,
117 body=client.V1DeleteOptions(grace_period_seconds=0),
118 )
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/elasticdl/master/k8s_client.py b/elasticdl/python/elasticdl/master/k8s_client.py
--- a/elasticdl/python/elasticdl/master/k8s_client.py
+++ b/elasticdl/python/elasticdl/master/k8s_client.py
@@ -5,6 +5,8 @@
from kubernetes import client, config, watch
+WORKER_POD_NAME_PREFIX = "elasticdl-worker-"
+
class Client(object):
def __init__(
@@ -51,14 +53,14 @@
except Exception:
traceback.print_exc()
- def get_pod_name(self, worker_id):
- return "elasticdl-worker-" + self._job_name + "-" + str(worker_id)
+ def get_worker_pod_name(self, worker_id):
+ return WORKER_POD_NAME_PREFIX + self._job_name + "-" + str(worker_id)
def _create_worker_pod(self, worker_id, resource_requests, resource_limits, priority,
mount_path, volume_name, image_pull_policy, command, args, restart_policy):
# Worker container config
container = client.V1Container(
- name=self.get_pod_name(worker_id),
+ name=self.get_worker_pod_name(worker_id),
image=self._image,
command=command,
resources=client.V1ResourceRequirements(
@@ -90,7 +92,7 @@
pod = client.V1Pod(
spec=spec,
metadata=client.V1ObjectMeta(
- name=self.get_pod_name(worker_id),
+ name=self.get_worker_pod_name(worker_id),
labels={
"app": "elasticdl",
"elasticdl_job_name": self._job_name
@@ -112,7 +114,7 @@
def delete_worker(self, worker_id):
self._logger.info("Deleting worker: " + str(worker_id))
self._v1.delete_namespaced_pod(
- self.get_pod_name(worker_id),
+ self.get_worker_pod_name(worker_id),
self._ns,
body=client.V1DeleteOptions(grace_period_seconds=0),
)
diff --git a/elasticdl/python/elasticdl/master/k8s_worker_manager.py b/elasticdl/python/elasticdl/master/k8s_worker_manager.py
--- a/elasticdl/python/elasticdl/master/k8s_worker_manager.py
+++ b/elasticdl/python/elasticdl/master/k8s_worker_manager.py
@@ -123,8 +123,8 @@
relaunch = False
with self._lock:
worker_id = self._pod_name_to_id.get(pod_name)
- if worker_id is None:
- self._logger.error("Unknown pod name: %s" % pod_name)
+ if worker_id is None and k8s.WORKER_POD_NAME_PREFIX in pod_name:
+ self._logger.error("Unknown worker pod name: %s" % pod_name)
return
self._pods_phase[worker_id] = (pod_name, phase)
| {"golden_diff": "diff --git a/elasticdl/python/elasticdl/master/k8s_client.py b/elasticdl/python/elasticdl/master/k8s_client.py\n--- a/elasticdl/python/elasticdl/master/k8s_client.py\n+++ b/elasticdl/python/elasticdl/master/k8s_client.py\n@@ -5,6 +5,8 @@\n \n from kubernetes import client, config, watch\n \n+WORKER_POD_NAME_PREFIX = \"elasticdl-worker-\"\n+\n \n class Client(object):\n def __init__(\n@@ -51,14 +53,14 @@\n except Exception:\n traceback.print_exc()\n \n- def get_pod_name(self, worker_id):\n- return \"elasticdl-worker-\" + self._job_name + \"-\" + str(worker_id)\n+ def get_worker_pod_name(self, worker_id):\n+ return WORKER_POD_NAME_PREFIX + self._job_name + \"-\" + str(worker_id)\n \n def _create_worker_pod(self, worker_id, resource_requests, resource_limits, priority,\n mount_path, volume_name, image_pull_policy, command, args, restart_policy):\n # Worker container config\n container = client.V1Container(\n- name=self.get_pod_name(worker_id),\n+ name=self.get_worker_pod_name(worker_id),\n image=self._image,\n command=command,\n resources=client.V1ResourceRequirements(\n@@ -90,7 +92,7 @@\n pod = client.V1Pod(\n spec=spec,\n metadata=client.V1ObjectMeta(\n- name=self.get_pod_name(worker_id),\n+ name=self.get_worker_pod_name(worker_id),\n labels={\n \"app\": \"elasticdl\",\n \"elasticdl_job_name\": self._job_name\n@@ -112,7 +114,7 @@\n def delete_worker(self, worker_id):\n self._logger.info(\"Deleting worker: \" + str(worker_id))\n self._v1.delete_namespaced_pod(\n- self.get_pod_name(worker_id),\n+ self.get_worker_pod_name(worker_id),\n self._ns,\n body=client.V1DeleteOptions(grace_period_seconds=0),\n )\ndiff --git a/elasticdl/python/elasticdl/master/k8s_worker_manager.py b/elasticdl/python/elasticdl/master/k8s_worker_manager.py\n--- a/elasticdl/python/elasticdl/master/k8s_worker_manager.py\n+++ b/elasticdl/python/elasticdl/master/k8s_worker_manager.py\n@@ -123,8 +123,8 @@\n relaunch = False\n with self._lock:\n worker_id = self._pod_name_to_id.get(pod_name)\n- if worker_id is None:\n- self._logger.error(\"Unknown pod name: %s\" % pod_name)\n+ if worker_id is None and k8s.WORKER_POD_NAME_PREFIX in pod_name:\n+ self._logger.error(\"Unknown worker pod name: %s\" % pod_name)\n return\n \n self._pods_phase[worker_id] = (pod_name, phase)\n", "issue": "Event callback should not check the existence of master pod\nCurrently, there's error log when event callback checks the pod existence like this `Unknown pod name: elasticdl-master-test-mnist`. This is because `elasticdl-master-test-mnist` is the master pod that's created by users instead of being created by `WorkerManager`. 
We should fix this since it should only check pod existence for worker pod.\r\n\r\nComplete log:\r\n```\r\n2019-06-06 17:38:37,952 elasticdl.python.elasticdl.master.k8s_worker_manager INFO [k8s_worker_manager.py:121] Got event ADDED, phase Running for pod: elasticdl-master-test-mnist\r\n2019-06-06 17:38:37,953 elasticdl.python.elasticdl.master.k8s_worker_manager INFO [k8s_worker_manager.py:71] Starting worker: 1\r\n2019-06-06 17:38:37,953 elasticdl.python.elasticdl.master.k8s_worker_manager ERROR [k8s_worker_manager.py:127] Unknown pod name: elasticdl-master-test-mnist\r\n2019-06-06 17:38:37,954 elasticdl.python.elasticdl.master.k8s_client INFO [k8s_client.py:105] Creating worker: 1\r\n2019-06-06 17:38:37,955 elasticdl.python.elasticdl.master.k8s_worker_manager INFO [k8s_worker_manager.py:121] Got event ADDED, phase Pending for pod: elasticdl-worker-test-mnist-0\r\n```\nEvent callback should not check the existence of master pod\nCurrently, there's error log when event callback checks the pod existence like this `Unknown pod name: elasticdl-master-test-mnist`. This is because `elasticdl-master-test-mnist` is the master pod that's created by users instead of being created by `WorkerManager`. We should fix this since it should only check pod existence for worker pod.\r\n\r\nComplete log:\r\n```\r\n2019-06-06 17:38:37,952 elasticdl.python.elasticdl.master.k8s_worker_manager INFO [k8s_worker_manager.py:121] Got event ADDED, phase Running for pod: elasticdl-master-test-mnist\r\n2019-06-06 17:38:37,953 elasticdl.python.elasticdl.master.k8s_worker_manager INFO [k8s_worker_manager.py:71] Starting worker: 1\r\n2019-06-06 17:38:37,953 elasticdl.python.elasticdl.master.k8s_worker_manager ERROR [k8s_worker_manager.py:127] Unknown pod name: elasticdl-master-test-mnist\r\n2019-06-06 17:38:37,954 elasticdl.python.elasticdl.master.k8s_client INFO [k8s_client.py:105] Creating worker: 1\r\n2019-06-06 17:38:37,955 elasticdl.python.elasticdl.master.k8s_worker_manager INFO [k8s_worker_manager.py:121] Got event ADDED, phase Pending for pod: elasticdl-worker-test-mnist-0\r\n```\n", "before_files": [{"content": "import itertools\nimport logging\nimport threading\n\nfrom collections import Counter\nfrom elasticdl.python.elasticdl.master import k8s_client as k8s\n\n\nclass WorkerManager(object):\n def __init__(\n self,\n task_q,\n command,\n args,\n num_worker=1,\n cpu_request=\"1000m\",\n cpu_limit=\"1000m\",\n memory_request=\"4096Mi\",\n memory_limit=\"4096Mi\",\n pod_priority=None,\n mount_path=None,\n volume_name=None,\n image_pull_policy=None,\n restart_policy=\"OnFailure\",\n **kwargs):\n self._logger = logging.getLogger(__name__)\n self._command = command\n self._args = args\n self._num_worker = num_worker\n self._resource_requests = {\n \"cpu\": cpu_request,\n \"memory\": memory_request\n }\n self._resource_limits = {\n \"cpu\": cpu_limit,\n \"memory\": memory_limit\n }\n self._restart_policy = restart_policy\n self._pod_priority = pod_priority\n self._mount_path = mount_path\n self._volume_name = volume_name\n self._image_pull_policy = image_pull_policy\n self._task_q = task_q\n self._next_worker_id = itertools.count().__next__\n\n # protects followed variables, which are accessed from event_cb.\n self._lock = threading.Lock()\n # worker id to (pod name, phase) mapping\n # phase: None/Pending/Running/Succeeded/Failed/Unknown\n # None: worker was just launched, haven't received event yet.\n # Pending: worker pod not started yet\n # Running: worker pod is running\n # Succeeded: worker pod finishes all tasks 
and terminates with\n # no issue.\n # Failed: worker pod is killed for some reason\n # Unknown: unknown\n self._pods_phase = {}\n # pod name to worker id mapping\n self._pod_name_to_id = {}\n\n self._relaunch_deleted_live_worker = True\n\n self._k8s_client = k8s.Client(\n event_callback=self._event_cb, **kwargs\n )\n\n def set_relaunch_deleted_live_worker(self, val):\n self._relaunch_deleted_live_worker = bool(val)\n\n def _start_worker(self, worker_id):\n self._logger.info(\"Starting worker: %d\" % worker_id)\n with self._lock:\n pod = self._k8s_client.create_worker(\n worker_id,\n self._resource_requests,\n self._resource_limits,\n self._pod_priority,\n self._mount_path,\n self._volume_name,\n self._image_pull_policy,\n command=self._command,\n args=self._args + [\"--worker_id\", str(worker_id)],\n restart_policy=self._restart_policy,\n )\n name = pod.metadata.name\n self._pod_name_to_id[name] = worker_id\n self._pods_phase[worker_id] = (name, None)\n\n def start_workers(self):\n for i in range(self._num_worker):\n self._start_worker(self._next_worker_id())\n\n def _remove_worker(self, worker_id):\n with self._lock:\n if worker_id not in self._pods_phase:\n self._logger.error(\"Unknown worker id: %s\" % worker_id)\n return\n\n # TODO: change _k8s_client to accept pod name instead of worker id.\n self._k8s_client.delete_worker(worker_id)\n\n def stop_relaunch_and_remove_workers(self):\n with self._lock:\n self._relaunch_deleted_live_worker = False\n for worker_id in self._pods_phase:\n self._k8s_client.delete_worker(worker_id)\n\n def get_counters(self):\n with self._lock:\n return Counter([v for _, v in self._pods_phase.values()])\n\n def _event_cb(self, event):\n evt_obj = event.get(\"object\")\n evt_type = event.get(\"type\")\n if not evt_obj or not evt_type:\n self._logger.error(\"Event doesn't have object or type: %s\" % event)\n return\n\n pod_name = evt_obj.metadata.name\n phase = evt_obj.status.phase\n self._logger.info(\"Got event %s, phase %s for pod: %s\" % (evt_type, phase, pod_name))\n\n relaunch = False\n with self._lock:\n worker_id = self._pod_name_to_id.get(pod_name)\n if worker_id is None:\n self._logger.error(\"Unknown pod name: %s\" % pod_name)\n return\n\n self._pods_phase[worker_id] = (pod_name, phase)\n if evt_type == \"DELETED\":\n del self._pods_phase[worker_id]\n del self._pod_name_to_id[pod_name]\n self._task_q.recover_tasks(worker_id)\n\n # If the pod being deleted was not \"Succeeded\", relaunch a worker.\n relaunch = self._relaunch_deleted_live_worker and phase != \"Succeeded\"\n if relaunch:\n self._logger.info(\"Relaunching worker.\")\n self._start_worker(self._next_worker_id())\n", "path": "elasticdl/python/elasticdl/master/k8s_worker_manager.py"}, {"content": "import logging\nimport os\nimport threading\nimport traceback\n\nfrom kubernetes import client, config, watch\n\n\nclass Client(object):\n def __init__(\n self, *, worker_image, namespace, job_name, event_callback\n ):\n \"\"\"\n ElasticDL k8s client.\n\n Args:\n worker_image: Docker image path for ElasticDL workers.\n namespace: k8s namespace for ElasticDL pods.\n job_name: ElasticDL job name, should be unique in the namespace.\n Used as worker pod name prefix and value for \"elasticdl\" label.\n event_callback: If not None, an event watcher will be created and\n events passed to the callback.\n \"\"\"\n if os.getenv(\"KUBERNETES_SERVICE_HOST\"):\n # We are running inside k8s\n config.load_incluster_config()\n else:\n # Use user's kube config\n config.load_kube_config()\n\n self._v1 = 
client.CoreV1Api()\n self._logger = logging.getLogger(__name__)\n self._image = worker_image\n self._ns = namespace\n self._job_name = job_name\n self._event_cb = event_callback\n if self._event_cb:\n threading.Thread(\n target=self._watch, name=\"event_watcher\", daemon=True\n ).start()\n\n def _watch(self):\n stream = watch.Watch().stream(\n self._v1.list_namespaced_pod,\n self._ns,\n label_selector=\"elasticdl_job_name=\" + self._job_name,\n )\n for event in stream:\n try:\n self._event_cb(event)\n except Exception:\n traceback.print_exc()\n\n def get_pod_name(self, worker_id):\n return \"elasticdl-worker-\" + self._job_name + \"-\" + str(worker_id)\n\n def _create_worker_pod(self, worker_id, resource_requests, resource_limits, priority,\n mount_path, volume_name, image_pull_policy, command, args, restart_policy):\n # Worker container config\n container = client.V1Container(\n name=self.get_pod_name(worker_id),\n image=self._image,\n command=command,\n resources=client.V1ResourceRequirements(\n requests=resource_requests,\n limits=resource_limits\n ),\n image_pull_policy=image_pull_policy,\n args=args\n )\n\n # Pod\n spec = client.V1PodSpec(\n containers=[container],\n restart_policy=restart_policy,\n )\n\n # Mount data path\n if mount_path is not None and volume_name is not None:\n volume = client.V1Volume(\n name='data-volume',\n persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(\n claim_name=\"fileserver-claim\", read_only=False))\n spec.volumes = [volume]\n container.volume_mounts = [client.V1VolumeMount(name=volume_name, mount_path=mount_path)]\n\n if priority is not None:\n spec.priority_class_name = priority\n\n pod = client.V1Pod(\n spec=spec,\n metadata=client.V1ObjectMeta(\n name=self.get_pod_name(worker_id),\n labels={\n \"app\": \"elasticdl\",\n \"elasticdl_job_name\": self._job_name\n },\n ),\n )\n return pod\n\n def create_worker(self, worker_id, resource_requests, resource_limits, priority=None,\n mount_path=None, volume_name=None, image_pull_policy=None,\n command=None, args=None, restart_policy=\"OnFailure\"):\n self._logger.info(\"Creating worker: \" + str(worker_id))\n pod = self._create_worker_pod(\n worker_id, resource_requests, resource_limits, priority,\n mount_path, volume_name, image_pull_policy, command=command,\n args=args, restart_policy=restart_policy)\n return self._v1.create_namespaced_pod(self._ns, pod)\n\n def delete_worker(self, worker_id):\n self._logger.info(\"Deleting worker: \" + str(worker_id))\n self._v1.delete_namespaced_pod(\n self.get_pod_name(worker_id),\n self._ns,\n body=client.V1DeleteOptions(grace_period_seconds=0),\n )\n", "path": "elasticdl/python/elasticdl/master/k8s_client.py"}], "after_files": [{"content": "import itertools\nimport logging\nimport threading\n\nfrom collections import Counter\nfrom elasticdl.python.elasticdl.master import k8s_client as k8s\n\n\nclass WorkerManager(object):\n def __init__(\n self,\n task_q,\n command,\n args,\n num_worker=1,\n cpu_request=\"1000m\",\n cpu_limit=\"1000m\",\n memory_request=\"4096Mi\",\n memory_limit=\"4096Mi\",\n pod_priority=None,\n mount_path=None,\n volume_name=None,\n image_pull_policy=None,\n restart_policy=\"OnFailure\",\n **kwargs):\n self._logger = logging.getLogger(__name__)\n self._command = command\n self._args = args\n self._num_worker = num_worker\n self._resource_requests = {\n \"cpu\": cpu_request,\n \"memory\": memory_request\n }\n self._resource_limits = {\n \"cpu\": cpu_limit,\n \"memory\": memory_limit\n }\n self._restart_policy = restart_policy\n 
self._pod_priority = pod_priority\n self._mount_path = mount_path\n self._volume_name = volume_name\n self._image_pull_policy = image_pull_policy\n self._task_q = task_q\n self._next_worker_id = itertools.count().__next__\n\n # protects followed variables, which are accessed from event_cb.\n self._lock = threading.Lock()\n # worker id to (pod name, phase) mapping\n # phase: None/Pending/Running/Succeeded/Failed/Unknown\n # None: worker was just launched, haven't received event yet.\n # Pending: worker pod not started yet\n # Running: worker pod is running\n # Succeeded: worker pod finishes all tasks and terminates with\n # no issue.\n # Failed: worker pod is killed for some reason\n # Unknown: unknown\n self._pods_phase = {}\n # pod name to worker id mapping\n self._pod_name_to_id = {}\n\n self._relaunch_deleted_live_worker = True\n\n self._k8s_client = k8s.Client(\n event_callback=self._event_cb, **kwargs\n )\n\n def set_relaunch_deleted_live_worker(self, val):\n self._relaunch_deleted_live_worker = bool(val)\n\n def _start_worker(self, worker_id):\n self._logger.info(\"Starting worker: %d\" % worker_id)\n with self._lock:\n pod = self._k8s_client.create_worker(\n worker_id,\n self._resource_requests,\n self._resource_limits,\n self._pod_priority,\n self._mount_path,\n self._volume_name,\n self._image_pull_policy,\n command=self._command,\n args=self._args + [\"--worker_id\", str(worker_id)],\n restart_policy=self._restart_policy,\n )\n name = pod.metadata.name\n self._pod_name_to_id[name] = worker_id\n self._pods_phase[worker_id] = (name, None)\n\n def start_workers(self):\n for i in range(self._num_worker):\n self._start_worker(self._next_worker_id())\n\n def _remove_worker(self, worker_id):\n with self._lock:\n if worker_id not in self._pods_phase:\n self._logger.error(\"Unknown worker id: %s\" % worker_id)\n return\n\n # TODO: change _k8s_client to accept pod name instead of worker id.\n self._k8s_client.delete_worker(worker_id)\n\n def stop_relaunch_and_remove_workers(self):\n with self._lock:\n self._relaunch_deleted_live_worker = False\n for worker_id in self._pods_phase:\n self._k8s_client.delete_worker(worker_id)\n\n def get_counters(self):\n with self._lock:\n return Counter([v for _, v in self._pods_phase.values()])\n\n def _event_cb(self, event):\n evt_obj = event.get(\"object\")\n evt_type = event.get(\"type\")\n if not evt_obj or not evt_type:\n self._logger.error(\"Event doesn't have object or type: %s\" % event)\n return\n\n pod_name = evt_obj.metadata.name\n phase = evt_obj.status.phase\n self._logger.info(\"Got event %s, phase %s for pod: %s\" % (evt_type, phase, pod_name))\n\n relaunch = False\n with self._lock:\n worker_id = self._pod_name_to_id.get(pod_name)\n if worker_id is None and k8s.WORKER_POD_NAME_PREFIX in pod_name:\n self._logger.error(\"Unknown worker pod name: %s\" % pod_name)\n return\n\n self._pods_phase[worker_id] = (pod_name, phase)\n if evt_type == \"DELETED\":\n del self._pods_phase[worker_id]\n del self._pod_name_to_id[pod_name]\n self._task_q.recover_tasks(worker_id)\n\n # If the pod being deleted was not \"Succeeded\", relaunch a worker.\n relaunch = self._relaunch_deleted_live_worker and phase != \"Succeeded\"\n if relaunch:\n self._logger.info(\"Relaunching worker.\")\n self._start_worker(self._next_worker_id())\n", "path": "elasticdl/python/elasticdl/master/k8s_worker_manager.py"}, {"content": "import logging\nimport os\nimport threading\nimport traceback\n\nfrom kubernetes import client, config, watch\n\nWORKER_POD_NAME_PREFIX = 
\"elasticdl-worker-\"\n\n\nclass Client(object):\n def __init__(\n self, *, worker_image, namespace, job_name, event_callback\n ):\n \"\"\"\n ElasticDL k8s client.\n\n Args:\n worker_image: Docker image path for ElasticDL workers.\n namespace: k8s namespace for ElasticDL pods.\n job_name: ElasticDL job name, should be unique in the namespace.\n Used as worker pod name prefix and value for \"elasticdl\" label.\n event_callback: If not None, an event watcher will be created and\n events passed to the callback.\n \"\"\"\n if os.getenv(\"KUBERNETES_SERVICE_HOST\"):\n # We are running inside k8s\n config.load_incluster_config()\n else:\n # Use user's kube config\n config.load_kube_config()\n\n self._v1 = client.CoreV1Api()\n self._logger = logging.getLogger(__name__)\n self._image = worker_image\n self._ns = namespace\n self._job_name = job_name\n self._event_cb = event_callback\n if self._event_cb:\n threading.Thread(\n target=self._watch, name=\"event_watcher\", daemon=True\n ).start()\n\n def _watch(self):\n stream = watch.Watch().stream(\n self._v1.list_namespaced_pod,\n self._ns,\n label_selector=\"elasticdl_job_name=\" + self._job_name,\n )\n for event in stream:\n try:\n self._event_cb(event)\n except Exception:\n traceback.print_exc()\n\n def get_worker_pod_name(self, worker_id):\n return WORKER_POD_NAME_PREFIX + self._job_name + \"-\" + str(worker_id)\n\n def _create_worker_pod(self, worker_id, resource_requests, resource_limits, priority,\n mount_path, volume_name, image_pull_policy, command, args, restart_policy):\n # Worker container config\n container = client.V1Container(\n name=self.get_worker_pod_name(worker_id),\n image=self._image,\n command=command,\n resources=client.V1ResourceRequirements(\n requests=resource_requests,\n limits=resource_limits\n ),\n image_pull_policy=image_pull_policy,\n args=args\n )\n\n # Pod\n spec = client.V1PodSpec(\n containers=[container],\n restart_policy=restart_policy,\n )\n\n # Mount data path\n if mount_path is not None and volume_name is not None:\n volume = client.V1Volume(\n name='data-volume',\n persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(\n claim_name=\"fileserver-claim\", read_only=False))\n spec.volumes = [volume]\n container.volume_mounts = [client.V1VolumeMount(name=volume_name, mount_path=mount_path)]\n\n if priority is not None:\n spec.priority_class_name = priority\n\n pod = client.V1Pod(\n spec=spec,\n metadata=client.V1ObjectMeta(\n name=self.get_worker_pod_name(worker_id),\n labels={\n \"app\": \"elasticdl\",\n \"elasticdl_job_name\": self._job_name\n },\n ),\n )\n return pod\n\n def create_worker(self, worker_id, resource_requests, resource_limits, priority=None,\n mount_path=None, volume_name=None, image_pull_policy=None,\n command=None, args=None, restart_policy=\"OnFailure\"):\n self._logger.info(\"Creating worker: \" + str(worker_id))\n pod = self._create_worker_pod(\n worker_id, resource_requests, resource_limits, priority,\n mount_path, volume_name, image_pull_policy, command=command,\n args=args, restart_policy=restart_policy)\n return self._v1.create_namespaced_pod(self._ns, pod)\n\n def delete_worker(self, worker_id):\n self._logger.info(\"Deleting worker: \" + str(worker_id))\n self._v1.delete_namespaced_pod(\n self.get_worker_pod_name(worker_id),\n self._ns,\n body=client.V1DeleteOptions(grace_period_seconds=0),\n )\n", "path": "elasticdl/python/elasticdl/master/k8s_client.py"}]} | 3,649 | 654 |
gh_patches_debug_40122 | rasdani/github-patches | git_diff | google__timesketch-1717 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow other OIDC providers for authentication
**Is your feature request related to a problem? Please describe.**
Currently, authentication using OIDC is only available via Google identity federation.
**Describe the solution you'd like**
To be able to use other OIDC providers for authentication
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `timesketch/lib/google_auth.py`
Content:
```
1 # Copyright 2018 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """
15 Based on the example code from the public Google IAP documentation:
16 https://cloud.google.com/iap/docs/signed-headers-howto
17 """
18
19 from __future__ import unicode_literals
20
21 import time
22 import json
23 import hashlib
24 import os
25 import six
26
27 # six.moves is a dynamically-created namespace that doesn't actually
28 # exist and therefore pylint can't statically analyze it.
29 # pylint: disable-msg=import-error
30 from six.moves.urllib import parse as urlparse
31
32 import jwt
33 import requests
34
35 from flask import url_for
36 from flask import current_app
37 from flask import session
38
39 from timesketch.lib.definitions import HTTP_STATUS_CODE_OK
40
41
42 CSRF_KEY = 'google_oauth2_csrf_token'
43 AUTH_URI = 'https://accounts.google.com/o/oauth2/v2/auth'
44 DISCOVERY_URL = 'https://accounts.google.com/.well-known/openid-configuration'
45
46
47 class JwtValidationError(Exception):
48 """Raised when a JSON Web Token cannot be validated."""
49
50
51 class JwtKeyError(Exception):
52 """Raised when there is a problem with the public key used for signing."""
53
54
55 class JwtFetchError(Exception):
56 """Raised when there is a problem with the public key used for signing."""
57
58
59 class DiscoveryDocumentError(Exception):
60 """Raised when there is a problem with the discovery document."""
61
62
63 def _fetch_public_keys(url):
64 """Fetch public keys used for verifying signatures.
65
66 Args:
67 url: URL where keys can be fetched.
68
69 Raises:
70 JwTKeyError if keys cannot be fetched.
71
72 Returns:
73 HTTP response.
74 """
75 try:
76 resp = requests.get(url)
77 except requests.exceptions.RequestException as e:
78 raise JwtKeyError('Cannot fetch public keys: {}'.format(e)) from e
79 if resp.status_code != HTTP_STATUS_CODE_OK:
80 raise JwtKeyError(
81 'Cannot fetch public keys: {}'.format(resp.status_code))
82 return resp.json()
83
84
85 def _fetch_oauth2_discovery_document():
86 """Fetch Google OAuth2 discovery document.
87
88 Raises:
89 DiscoveryDocumentError if document cannot be fetched.
90
91 Returns:
92 HTTP response.
93 """
94 try:
95 resp = requests.get(DISCOVERY_URL)
96 except requests.exceptions.RequestException as e:
97 raise DiscoveryDocumentError(
98 'Cannot fetch discovery document: {}'.format(e)) from e
99 if resp.status_code != HTTP_STATUS_CODE_OK:
100 raise DiscoveryDocumentError(
101 'Cannot fetch discovery_document: {}'.format(resp.status_code))
102 return resp.json()
103
104
105 def _generate_random_token():
106 """Generate random string to use as CSRF and nonce tokens.
107
108 Returns:
109 Random string.
110 """
111 return hashlib.sha256(os.urandom(1024)).hexdigest()
112
113
114 def get_oauth2_authorize_url(hosted_domain=None):
115 """Generate an authorization URL for Google's OAuth2 service.
116
117 Args:
118 hosted_domain: Optional GSuite domain to limit access to.
119
120 Returns:
121 Authorization URL.
122 """
123 csrf_token = _generate_random_token()
124 nonce = _generate_random_token()
125 redirect_uri = url_for(
126 'user_views.google_openid_connect',
127 _scheme='https',
128 _external=True
129 )
130 scopes = ('openid', 'email', 'profile')
131
132 # Add the generated CSRF token to the client session for later validation.
133 session[CSRF_KEY] = csrf_token
134
135 # Generate authorization URL
136 params = dict(
137 client_id=current_app.config.get('GOOGLE_OIDC_CLIENT_ID'),
138 scope=' '.join(scopes),
139 response_type='code',
140 access_type='online', # Because we don't need a refresh token.
141 state=csrf_token,
142 nonce=nonce, # Enable replay attack protection attack.
143 redirect_uri=redirect_uri
144 )
145 if hosted_domain:
146 params['hd'] = hosted_domain
147
148 urlencoded_params = urlparse.urlencode(params)
149 google_authorization_url = '{}?{}'.format(AUTH_URI, urlencoded_params)
150 return google_authorization_url
151
152
153 def get_encoded_jwt_over_https(code):
154 """Fetch a JSON Web Token (JWT) using a authentication code.
155
156 Args:
157 code: Authentication code obtained from an OAuth2 flow.
158
159 Raises:
160 JwtFetchError if JWT cannot be fetched.
161
162 Returns:
163 Encoded JWT.
164 """
165
166 discovery_document = get_oauth2_discovery_document()
167 redirect_uri = url_for(
168 'user_views.google_openid_connect',
169 _scheme='https',
170 _external=True
171 )
172 post_data = {
173 'code': code,
174 'client_id': current_app.config.get('GOOGLE_OIDC_CLIENT_ID'),
175 'client_secret': current_app.config.get('GOOGLE_OIDC_CLIENT_SECRET'),
176 'redirect_uri': redirect_uri,
177 'grant_type': 'authorization_code'
178 }
179 token_url = discovery_document.get('token_endpoint')
180 try:
181 response = requests.post(token_url, data=post_data)
182 encoded_jwt = response.json().get('id_token')
183 except requests.exceptions.RequestException as e:
184 raise JwtFetchError(
185 'Cannot fetch JWT: {}'.format(e)) from e
186 if response.status_code != HTTP_STATUS_CODE_OK:
187 raise JwtFetchError(
188 'Cannot fetch JWT: {}'.format(response.status_code))
189
190 if not encoded_jwt:
191 raise JwtFetchError('Cannot fetch JWT: Missing id_token in response')
192
193 return encoded_jwt
194
195
196 def decode_jwt(encoded_jwt, public_key, algorithm, expected_audience):
197 """Decode a JSON Web Token (JWT).
198
199 Args:
200 encoded_jwt: The contents of the X-Goog-IAP-JWT-Assertion header.
201 public_key: Key to verify signature of the JWT.
202 algorithm: Algorithm used for the key. E.g. ES256, RS256
203 expected_audience: Expected audience in the JWT.
204
205 Returns:
206 Decoded JWT as a dict object.
207
208 Raises:
209 JwtValidationError: if the JWT token cannot be decoded.
210 """
211 try:
212 decoded_jwt = jwt.decode(
213 jwt=encoded_jwt, key=public_key, algorithms=[algorithm],
214 audience=expected_audience)
215 return decoded_jwt
216 except (jwt.exceptions.InvalidTokenError,
217 jwt.exceptions.InvalidKeyError) as e:
218 raise JwtValidationError('JWT validation error: {}'.format(e)) from e
219
220 return None
221
222
223 def validate_jwt(decoded_jwt, expected_issuer, expected_domain=None):
224 """Decode and validate a JSON Web token (JWT).
225
226 Cloud IAP:
227 https://cloud.google.com/iap/docs/signed-headers-howto
228
229 Google OpenID Connect:
230 https://developers.google.com/identity/protocols/OpenIDConnect
231
232 Args:
233 decoded_jwt: A dict object containing the decoded JWT token.
234 expected_issuer: Expected issuer of the JWT.
235 expected_domain: Expected GSuite domain in the JWT (optional).
236
237 Raises:
238 JwtValidationError: If unable to validate the JWT.
239 """
240 # Make sure the token is not created in the future or has expired.
241 try:
242 now = int(time.time())
243 issued_at = decoded_jwt['iat']
244 if isinstance(issued_at, six.string_types):
245 issued_at = int(issued_at, 10)
246
247 expires_at = decoded_jwt['exp']
248 if isinstance(expires_at, six.string_types):
249 expires_at = int(expires_at, 10)
250
251 if issued_at > now:
252 raise JwtValidationError('Token was issued in the future')
253 if expires_at < now:
254 raise JwtValidationError('Token has expired')
255 except KeyError as e:
256 raise JwtValidationError('Missing timestamp: {}'.format(e)) from e
257
258 # Check that the issuer of the token is correct.
259 try:
260 issuer = decoded_jwt['iss']
261 if issuer != expected_issuer:
262 raise JwtValidationError('Wrong issuer: {}'.format(issuer))
263 except KeyError as e:
264 raise JwtValidationError('Missing issuer') from e
265
266 if 'email' not in decoded_jwt:
267 raise JwtValidationError('Missing email field in token')
268
269 if expected_domain:
270 try:
271 domain = decoded_jwt['hd']
272 if not domain == expected_domain:
273 raise JwtValidationError('Wrong domain: {}'.format(domain))
274 except KeyError as e:
275 raise JwtValidationError('Missing domain: {}'.format(e)) from e
276
277
278 def get_public_key_for_jwt(encoded_jwt, url):
279 """Get public key for JWT in order to verify the signature.
280
281 The keys get cached in order to limit the amount of network round trips.
282
283 Args:
284 encoded_jwt: Base64 encoded JWT.
285 url: URL where keys can be fetched.
286
287 Raises:
288 JwTKeyError if keys cannot be fetched.
289
290 Returns:
291 Key as string.
292 """
293 # Get the Key ID from the JWT header.
294 key_id = jwt.get_unverified_header(encoded_jwt).get('kid')
295 if not key_id:
296 raise JwtKeyError('Missing key ID field in token header')
297 key_cache = get_public_key_for_jwt.key_cache
298 key = key_cache.get(key_id)
299 if key:
300 return key
301
302 # Re-fetch the key file.
303 keys_json = _fetch_public_keys(url)
304 if 'keys' in keys_json:
305 _new_keys_dict = {}
306 for key_dict in keys_json['keys']:
307 public_key = jwt.algorithms.RSAAlgorithm.from_jwk(
308 json.dumps(key_dict))
309 _new_keys_dict[key_dict['kid']] = public_key
310 key_cache = _new_keys_dict
311 else:
312 key_cache = keys_json
313 get_public_key_for_jwt.key_cache = key_cache
314 key = key_cache.get(key_id)
315 if not key:
316 raise JwtKeyError('IAP public key {!r} not found'.format(key_id))
317
318 return key
319
320
321 def get_oauth2_discovery_document():
322 """Get Google OAuth2 discovery document.
323
324 The document is cached in order to limit the amount of network round trips
325 and is set to expire in 12 hours from when it was fetched.
326
327 Returns:
328 Discovery document as a dictionary.
329 """
330 now = int(time.time())
331 cache = get_oauth2_discovery_document.cache
332 discovery_document = cache.get('current')
333 if discovery_document:
334 # Check if the document has expired.
335 created_at = discovery_document['created_at']
336 expires_at = created_at + 12*60*60 # 12 hours in seconds
337 if now < expires_at:
338 return discovery_document['document']
339
340 # Re-fetch the discovery document.
341 new_discovery_document = _fetch_oauth2_discovery_document()
342 cache = {
343 'current': {
344 'created_at': now,
345 'document': new_discovery_document
346 }
347 }
348 get_oauth2_discovery_document.cache = cache
349 return new_discovery_document
350
351
352 # Used to cache public keys.
353 get_public_key_for_jwt.key_cache = {}
354
355 # Used to cache discovery documents.
356 get_oauth2_discovery_document.cache = {}
357
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/timesketch/lib/google_auth.py b/timesketch/lib/google_auth.py
--- a/timesketch/lib/google_auth.py
+++ b/timesketch/lib/google_auth.py
@@ -38,12 +38,10 @@
from timesketch.lib.definitions import HTTP_STATUS_CODE_OK
-
CSRF_KEY = 'google_oauth2_csrf_token'
AUTH_URI = 'https://accounts.google.com/o/oauth2/v2/auth'
DISCOVERY_URL = 'https://accounts.google.com/.well-known/openid-configuration'
-
class JwtValidationError(Exception):
"""Raised when a JSON Web Token cannot be validated."""
@@ -91,8 +89,10 @@
Returns:
HTTP response.
"""
+ discovery_url = current_app.config.get(
+ 'GOOGLE_OIDC_DISCOVERY_URL', DISCOVERY_URL)
try:
- resp = requests.get(DISCOVERY_URL)
+ resp = requests.get(discovery_url)
except requests.exceptions.RequestException as e:
raise DiscoveryDocumentError(
'Cannot fetch discovery document: {}'.format(e)) from e
@@ -120,6 +120,7 @@
Returns:
Authorization URL.
"""
+ auth_uri = current_app.config.get('GOOGLE_OIDC_AUTH_URL', AUTH_URI)
csrf_token = _generate_random_token()
nonce = _generate_random_token()
redirect_uri = url_for(
@@ -146,7 +147,7 @@
params['hd'] = hosted_domain
urlencoded_params = urlparse.urlencode(params)
- google_authorization_url = '{}?{}'.format(AUTH_URI, urlencoded_params)
+ google_authorization_url = '{}?{}'.format(auth_uri, urlencoded_params)
return google_authorization_url
@@ -199,7 +200,9 @@
Args:
encoded_jwt: The contents of the X-Goog-IAP-JWT-Assertion header.
public_key: Key to verify signature of the JWT.
- algorithm: Algorithm used for the key. E.g. ES256, RS256
+ algorithm: Algorithm used for the key. E.g. ES256, RS256. If the
+ GOOGLE_OIDC_ALGORITHM is set in the config, it will overwrite
+ the algorithm used here.
expected_audience: Expected audience in the JWT.
Returns:
@@ -208,9 +211,12 @@
Raises:
JwtValidationError: if the JWT token cannot be decoded.
"""
+ chosen_algorithm = current_app.config.get(
+ 'GOOGLE_OIDC_ALGORITHM', algorithm)
try:
decoded_jwt = jwt.decode(
- jwt=encoded_jwt, key=public_key, algorithms=[algorithm],
+ jwt=encoded_jwt, key=public_key,
+ algorithms=[chosen_algorithm],
audience=expected_audience)
return decoded_jwt
except (jwt.exceptions.InvalidTokenError,
| {"golden_diff": "diff --git a/timesketch/lib/google_auth.py b/timesketch/lib/google_auth.py\n--- a/timesketch/lib/google_auth.py\n+++ b/timesketch/lib/google_auth.py\n@@ -38,12 +38,10 @@\n \n from timesketch.lib.definitions import HTTP_STATUS_CODE_OK\n \n-\n CSRF_KEY = 'google_oauth2_csrf_token'\n AUTH_URI = 'https://accounts.google.com/o/oauth2/v2/auth'\n DISCOVERY_URL = 'https://accounts.google.com/.well-known/openid-configuration'\n \n-\n class JwtValidationError(Exception):\n \"\"\"Raised when a JSON Web Token cannot be validated.\"\"\"\n \n@@ -91,8 +89,10 @@\n Returns:\n HTTP response.\n \"\"\"\n+ discovery_url = current_app.config.get(\n+ 'GOOGLE_OIDC_DISCOVERY_URL', DISCOVERY_URL)\n try:\n- resp = requests.get(DISCOVERY_URL)\n+ resp = requests.get(discovery_url)\n except requests.exceptions.RequestException as e:\n raise DiscoveryDocumentError(\n 'Cannot fetch discovery document: {}'.format(e)) from e\n@@ -120,6 +120,7 @@\n Returns:\n Authorization URL.\n \"\"\"\n+ auth_uri = current_app.config.get('GOOGLE_OIDC_AUTH_URL', AUTH_URI)\n csrf_token = _generate_random_token()\n nonce = _generate_random_token()\n redirect_uri = url_for(\n@@ -146,7 +147,7 @@\n params['hd'] = hosted_domain\n \n urlencoded_params = urlparse.urlencode(params)\n- google_authorization_url = '{}?{}'.format(AUTH_URI, urlencoded_params)\n+ google_authorization_url = '{}?{}'.format(auth_uri, urlencoded_params)\n return google_authorization_url\n \n \n@@ -199,7 +200,9 @@\n Args:\n encoded_jwt: The contents of the X-Goog-IAP-JWT-Assertion header.\n public_key: Key to verify signature of the JWT.\n- algorithm: Algorithm used for the key. E.g. ES256, RS256\n+ algorithm: Algorithm used for the key. E.g. ES256, RS256. If the\n+ GOOGLE_OIDC_ALGORITHM is set in the config, it will overwrite\n+ the algorithm used here.\n expected_audience: Expected audience in the JWT.\n \n Returns:\n@@ -208,9 +211,12 @@\n Raises:\n JwtValidationError: if the JWT token cannot be decoded.\n \"\"\"\n+ chosen_algorithm = current_app.config.get(\n+ 'GOOGLE_OIDC_ALGORITHM', algorithm)\n try:\n decoded_jwt = jwt.decode(\n- jwt=encoded_jwt, key=public_key, algorithms=[algorithm],\n+ jwt=encoded_jwt, key=public_key,\n+ algorithms=[chosen_algorithm],\n audience=expected_audience)\n return decoded_jwt\n except (jwt.exceptions.InvalidTokenError,\n", "issue": "Allow other OIDC providers for authentication\n**Is your feature request related to a problem? Please describe.**\r\nCurrently authentication using OIDC is only available to Google identity federation\r\n\r\n**Describe the solution you'd like**\r\nTo be able to use other OIDC providers for authentication \r\n\r\n\n", "before_files": [{"content": "# Copyright 2018 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nBased on the example code from the public Google IAP documentation:\nhttps://cloud.google.com/iap/docs/signed-headers-howto\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport time\nimport json\nimport hashlib\nimport os\nimport six\n\n# six.moves is a dynamically-created namespace that doesn't actually\n# exist and therefore pylint can't statically analyze it.\n# pylint: disable-msg=import-error\nfrom six.moves.urllib import parse as urlparse\n\nimport jwt\nimport requests\n\nfrom flask import url_for\nfrom flask import current_app\nfrom flask import session\n\nfrom timesketch.lib.definitions import HTTP_STATUS_CODE_OK\n\n\nCSRF_KEY = 'google_oauth2_csrf_token'\nAUTH_URI = 'https://accounts.google.com/o/oauth2/v2/auth'\nDISCOVERY_URL = 'https://accounts.google.com/.well-known/openid-configuration'\n\n\nclass JwtValidationError(Exception):\n \"\"\"Raised when a JSON Web Token cannot be validated.\"\"\"\n\n\nclass JwtKeyError(Exception):\n \"\"\"Raised when there is a problem with the public key used for signing.\"\"\"\n\n\nclass JwtFetchError(Exception):\n \"\"\"Raised when there is a problem with the public key used for signing.\"\"\"\n\n\nclass DiscoveryDocumentError(Exception):\n \"\"\"Raised when there is a problem with the discovery document.\"\"\"\n\n\ndef _fetch_public_keys(url):\n \"\"\"Fetch public keys used for verifying signatures.\n\n Args:\n url: URL where keys can be fetched.\n\n Raises:\n JwTKeyError if keys cannot be fetched.\n\n Returns:\n HTTP response.\n \"\"\"\n try:\n resp = requests.get(url)\n except requests.exceptions.RequestException as e:\n raise JwtKeyError('Cannot fetch public keys: {}'.format(e)) from e\n if resp.status_code != HTTP_STATUS_CODE_OK:\n raise JwtKeyError(\n 'Cannot fetch public keys: {}'.format(resp.status_code))\n return resp.json()\n\n\ndef _fetch_oauth2_discovery_document():\n \"\"\"Fetch Google OAuth2 discovery document.\n\n Raises:\n DiscoveryDocumentError if document cannot be fetched.\n\n Returns:\n HTTP response.\n \"\"\"\n try:\n resp = requests.get(DISCOVERY_URL)\n except requests.exceptions.RequestException as e:\n raise DiscoveryDocumentError(\n 'Cannot fetch discovery document: {}'.format(e)) from e\n if resp.status_code != HTTP_STATUS_CODE_OK:\n raise DiscoveryDocumentError(\n 'Cannot fetch discovery_document: {}'.format(resp.status_code))\n return resp.json()\n\n\ndef _generate_random_token():\n \"\"\"Generate random string to use as CSRF and nonce tokens.\n\n Returns:\n Random string.\n \"\"\"\n return hashlib.sha256(os.urandom(1024)).hexdigest()\n\n\ndef get_oauth2_authorize_url(hosted_domain=None):\n \"\"\"Generate an authorization URL for Google's OAuth2 service.\n\n Args:\n hosted_domain: Optional GSuite domain to limit access to.\n\n Returns:\n Authorization URL.\n \"\"\"\n csrf_token = _generate_random_token()\n nonce = _generate_random_token()\n redirect_uri = url_for(\n 'user_views.google_openid_connect',\n _scheme='https',\n 
_external=True\n )\n scopes = ('openid', 'email', 'profile')\n\n # Add the generated CSRF token to the client session for later validation.\n session[CSRF_KEY] = csrf_token\n\n # Generate authorization URL\n params = dict(\n client_id=current_app.config.get('GOOGLE_OIDC_CLIENT_ID'),\n scope=' '.join(scopes),\n response_type='code',\n access_type='online', # Because we don't need a refresh token.\n state=csrf_token,\n nonce=nonce, # Enable replay attack protection attack.\n redirect_uri=redirect_uri\n )\n if hosted_domain:\n params['hd'] = hosted_domain\n\n urlencoded_params = urlparse.urlencode(params)\n google_authorization_url = '{}?{}'.format(AUTH_URI, urlencoded_params)\n return google_authorization_url\n\n\ndef get_encoded_jwt_over_https(code):\n \"\"\"Fetch a JSON Web Token (JWT) using a authentication code.\n\n Args:\n code: Authentication code obtained from an OAuth2 flow.\n\n Raises:\n JwtFetchError if JWT cannot be fetched.\n\n Returns:\n Encoded JWT.\n \"\"\"\n\n discovery_document = get_oauth2_discovery_document()\n redirect_uri = url_for(\n 'user_views.google_openid_connect',\n _scheme='https',\n _external=True\n )\n post_data = {\n 'code': code,\n 'client_id': current_app.config.get('GOOGLE_OIDC_CLIENT_ID'),\n 'client_secret': current_app.config.get('GOOGLE_OIDC_CLIENT_SECRET'),\n 'redirect_uri': redirect_uri,\n 'grant_type': 'authorization_code'\n }\n token_url = discovery_document.get('token_endpoint')\n try:\n response = requests.post(token_url, data=post_data)\n encoded_jwt = response.json().get('id_token')\n except requests.exceptions.RequestException as e:\n raise JwtFetchError(\n 'Cannot fetch JWT: {}'.format(e)) from e\n if response.status_code != HTTP_STATUS_CODE_OK:\n raise JwtFetchError(\n 'Cannot fetch JWT: {}'.format(response.status_code))\n\n if not encoded_jwt:\n raise JwtFetchError('Cannot fetch JWT: Missing id_token in response')\n\n return encoded_jwt\n\n\ndef decode_jwt(encoded_jwt, public_key, algorithm, expected_audience):\n \"\"\"Decode a JSON Web Token (JWT).\n\n Args:\n encoded_jwt: The contents of the X-Goog-IAP-JWT-Assertion header.\n public_key: Key to verify signature of the JWT.\n algorithm: Algorithm used for the key. E.g. 
ES256, RS256\n expected_audience: Expected audience in the JWT.\n\n Returns:\n Decoded JWT as a dict object.\n\n Raises:\n JwtValidationError: if the JWT token cannot be decoded.\n \"\"\"\n try:\n decoded_jwt = jwt.decode(\n jwt=encoded_jwt, key=public_key, algorithms=[algorithm],\n audience=expected_audience)\n return decoded_jwt\n except (jwt.exceptions.InvalidTokenError,\n jwt.exceptions.InvalidKeyError) as e:\n raise JwtValidationError('JWT validation error: {}'.format(e)) from e\n\n return None\n\n\ndef validate_jwt(decoded_jwt, expected_issuer, expected_domain=None):\n \"\"\"Decode and validate a JSON Web token (JWT).\n\n Cloud IAP:\n https://cloud.google.com/iap/docs/signed-headers-howto\n\n Google OpenID Connect:\n https://developers.google.com/identity/protocols/OpenIDConnect\n\n Args:\n decoded_jwt: A dict object containing the decoded JWT token.\n expected_issuer: Expected issuer of the JWT.\n expected_domain: Expected GSuite domain in the JWT (optional).\n\n Raises:\n JwtValidationError: If unable to validate the JWT.\n \"\"\"\n # Make sure the token is not created in the future or has expired.\n try:\n now = int(time.time())\n issued_at = decoded_jwt['iat']\n if isinstance(issued_at, six.string_types):\n issued_at = int(issued_at, 10)\n\n expires_at = decoded_jwt['exp']\n if isinstance(expires_at, six.string_types):\n expires_at = int(expires_at, 10)\n\n if issued_at > now:\n raise JwtValidationError('Token was issued in the future')\n if expires_at < now:\n raise JwtValidationError('Token has expired')\n except KeyError as e:\n raise JwtValidationError('Missing timestamp: {}'.format(e)) from e\n\n # Check that the issuer of the token is correct.\n try:\n issuer = decoded_jwt['iss']\n if issuer != expected_issuer:\n raise JwtValidationError('Wrong issuer: {}'.format(issuer))\n except KeyError as e:\n raise JwtValidationError('Missing issuer') from e\n\n if 'email' not in decoded_jwt:\n raise JwtValidationError('Missing email field in token')\n\n if expected_domain:\n try:\n domain = decoded_jwt['hd']\n if not domain == expected_domain:\n raise JwtValidationError('Wrong domain: {}'.format(domain))\n except KeyError as e:\n raise JwtValidationError('Missing domain: {}'.format(e)) from e\n\n\ndef get_public_key_for_jwt(encoded_jwt, url):\n \"\"\"Get public key for JWT in order to verify the signature.\n\n The keys get cached in order to limit the amount of network round trips.\n\n Args:\n encoded_jwt: Base64 encoded JWT.\n url: URL where keys can be fetched.\n\n Raises:\n JwTKeyError if keys cannot be fetched.\n\n Returns:\n Key as string.\n \"\"\"\n # Get the Key ID from the JWT header.\n key_id = jwt.get_unverified_header(encoded_jwt).get('kid')\n if not key_id:\n raise JwtKeyError('Missing key ID field in token header')\n key_cache = get_public_key_for_jwt.key_cache\n key = key_cache.get(key_id)\n if key:\n return key\n\n # Re-fetch the key file.\n keys_json = _fetch_public_keys(url)\n if 'keys' in keys_json:\n _new_keys_dict = {}\n for key_dict in keys_json['keys']:\n public_key = jwt.algorithms.RSAAlgorithm.from_jwk(\n json.dumps(key_dict))\n _new_keys_dict[key_dict['kid']] = public_key\n key_cache = _new_keys_dict\n else:\n key_cache = keys_json\n get_public_key_for_jwt.key_cache = key_cache\n key = key_cache.get(key_id)\n if not key:\n raise JwtKeyError('IAP public key {!r} not found'.format(key_id))\n\n return key\n\n\ndef get_oauth2_discovery_document():\n \"\"\"Get Google OAuth2 discovery document.\n\n The document is cached in order to limit the amount of network round 
trips\n and is set to expire in 12 hours from when it was fetched.\n\n Returns:\n Discovery document as a dictionary.\n \"\"\"\n now = int(time.time())\n cache = get_oauth2_discovery_document.cache\n discovery_document = cache.get('current')\n if discovery_document:\n # Check if the document has expired.\n created_at = discovery_document['created_at']\n expires_at = created_at + 12*60*60 # 12 hours in seconds\n if now < expires_at:\n return discovery_document['document']\n\n # Re-fetch the discovery document.\n new_discovery_document = _fetch_oauth2_discovery_document()\n cache = {\n 'current': {\n 'created_at': now,\n 'document': new_discovery_document\n }\n }\n get_oauth2_discovery_document.cache = cache\n return new_discovery_document\n\n\n# Used to cache public keys.\nget_public_key_for_jwt.key_cache = {}\n\n# Used to cache discovery documents.\nget_oauth2_discovery_document.cache = {}\n", "path": "timesketch/lib/google_auth.py"}], "after_files": [{"content": "# Copyright 2018 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nBased on the example code from the public Google IAP documentation:\nhttps://cloud.google.com/iap/docs/signed-headers-howto\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport time\nimport json\nimport hashlib\nimport os\nimport six\n\n# six.moves is a dynamically-created namespace that doesn't actually\n# exist and therefore pylint can't statically analyze it.\n# pylint: disable-msg=import-error\nfrom six.moves.urllib import parse as urlparse\n\nimport jwt\nimport requests\n\nfrom flask import url_for\nfrom flask import current_app\nfrom flask import session\n\nfrom timesketch.lib.definitions import HTTP_STATUS_CODE_OK\n\nCSRF_KEY = 'google_oauth2_csrf_token'\nAUTH_URI = 'https://accounts.google.com/o/oauth2/v2/auth'\nDISCOVERY_URL = 'https://accounts.google.com/.well-known/openid-configuration'\n\nclass JwtValidationError(Exception):\n \"\"\"Raised when a JSON Web Token cannot be validated.\"\"\"\n\n\nclass JwtKeyError(Exception):\n \"\"\"Raised when there is a problem with the public key used for signing.\"\"\"\n\n\nclass JwtFetchError(Exception):\n \"\"\"Raised when there is a problem with the public key used for signing.\"\"\"\n\n\nclass DiscoveryDocumentError(Exception):\n \"\"\"Raised when there is a problem with the discovery document.\"\"\"\n\n\ndef _fetch_public_keys(url):\n \"\"\"Fetch public keys used for verifying signatures.\n\n Args:\n url: URL where keys can be fetched.\n\n Raises:\n JwTKeyError if keys cannot be fetched.\n\n Returns:\n HTTP response.\n \"\"\"\n try:\n resp = requests.get(url)\n except requests.exceptions.RequestException as e:\n raise JwtKeyError('Cannot fetch public keys: {}'.format(e)) from e\n if resp.status_code != HTTP_STATUS_CODE_OK:\n raise JwtKeyError(\n 'Cannot fetch public keys: {}'.format(resp.status_code))\n return resp.json()\n\n\ndef _fetch_oauth2_discovery_document():\n \"\"\"Fetch Google OAuth2 discovery document.\n\n Raises:\n DiscoveryDocumentError if document 
cannot be fetched.\n\n Returns:\n HTTP response.\n \"\"\"\n discovery_url = current_app.config.get(\n 'GOOGLE_OIDC_DISCOVERY_URL', DISCOVERY_URL)\n try:\n resp = requests.get(discovery_url)\n except requests.exceptions.RequestException as e:\n raise DiscoveryDocumentError(\n 'Cannot fetch discovery document: {}'.format(e)) from e\n if resp.status_code != HTTP_STATUS_CODE_OK:\n raise DiscoveryDocumentError(\n 'Cannot fetch discovery_document: {}'.format(resp.status_code))\n return resp.json()\n\n\ndef _generate_random_token():\n \"\"\"Generate random string to use as CSRF and nonce tokens.\n\n Returns:\n Random string.\n \"\"\"\n return hashlib.sha256(os.urandom(1024)).hexdigest()\n\n\ndef get_oauth2_authorize_url(hosted_domain=None):\n \"\"\"Generate an authorization URL for Google's OAuth2 service.\n\n Args:\n hosted_domain: Optional GSuite domain to limit access to.\n\n Returns:\n Authorization URL.\n \"\"\"\n auth_uri = current_app.config.get('GOOGLE_OIDC_AUTH_URL', AUTH_URI)\n csrf_token = _generate_random_token()\n nonce = _generate_random_token()\n redirect_uri = url_for(\n 'user_views.google_openid_connect',\n _scheme='https',\n _external=True\n )\n scopes = ('openid', 'email', 'profile')\n\n # Add the generated CSRF token to the client session for later validation.\n session[CSRF_KEY] = csrf_token\n\n # Generate authorization URL\n params = dict(\n client_id=current_app.config.get('GOOGLE_OIDC_CLIENT_ID'),\n scope=' '.join(scopes),\n response_type='code',\n access_type='online', # Because we don't need a refresh token.\n state=csrf_token,\n nonce=nonce, # Enable replay attack protection attack.\n redirect_uri=redirect_uri\n )\n if hosted_domain:\n params['hd'] = hosted_domain\n\n urlencoded_params = urlparse.urlencode(params)\n google_authorization_url = '{}?{}'.format(auth_uri, urlencoded_params)\n return google_authorization_url\n\n\ndef get_encoded_jwt_over_https(code):\n \"\"\"Fetch a JSON Web Token (JWT) using a authentication code.\n\n Args:\n code: Authentication code obtained from an OAuth2 flow.\n\n Raises:\n JwtFetchError if JWT cannot be fetched.\n\n Returns:\n Encoded JWT.\n \"\"\"\n\n discovery_document = get_oauth2_discovery_document()\n redirect_uri = url_for(\n 'user_views.google_openid_connect',\n _scheme='https',\n _external=True\n )\n post_data = {\n 'code': code,\n 'client_id': current_app.config.get('GOOGLE_OIDC_CLIENT_ID'),\n 'client_secret': current_app.config.get('GOOGLE_OIDC_CLIENT_SECRET'),\n 'redirect_uri': redirect_uri,\n 'grant_type': 'authorization_code'\n }\n token_url = discovery_document.get('token_endpoint')\n try:\n response = requests.post(token_url, data=post_data)\n encoded_jwt = response.json().get('id_token')\n except requests.exceptions.RequestException as e:\n raise JwtFetchError(\n 'Cannot fetch JWT: {}'.format(e)) from e\n if response.status_code != HTTP_STATUS_CODE_OK:\n raise JwtFetchError(\n 'Cannot fetch JWT: {}'.format(response.status_code))\n\n if not encoded_jwt:\n raise JwtFetchError('Cannot fetch JWT: Missing id_token in response')\n\n return encoded_jwt\n\n\ndef decode_jwt(encoded_jwt, public_key, algorithm, expected_audience):\n \"\"\"Decode a JSON Web Token (JWT).\n\n Args:\n encoded_jwt: The contents of the X-Goog-IAP-JWT-Assertion header.\n public_key: Key to verify signature of the JWT.\n algorithm: Algorithm used for the key. E.g. ES256, RS256. 
If the\n GOOGLE_OIDC_ALGORITHM is set in the config, it will overwrite\n the algorithm used here.\n expected_audience: Expected audience in the JWT.\n\n Returns:\n Decoded JWT as a dict object.\n\n Raises:\n JwtValidationError: if the JWT token cannot be decoded.\n \"\"\"\n chosen_algorithm = current_app.config.get(\n 'GOOGLE_OIDC_ALGORITHM', algorithm)\n try:\n decoded_jwt = jwt.decode(\n jwt=encoded_jwt, key=public_key,\n algorithms=[chosen_algorithm],\n audience=expected_audience)\n return decoded_jwt\n except (jwt.exceptions.InvalidTokenError,\n jwt.exceptions.InvalidKeyError) as e:\n raise JwtValidationError('JWT validation error: {}'.format(e)) from e\n\n return None\n\n\ndef validate_jwt(decoded_jwt, expected_issuer, expected_domain=None):\n \"\"\"Decode and validate a JSON Web token (JWT).\n\n Cloud IAP:\n https://cloud.google.com/iap/docs/signed-headers-howto\n\n Google OpenID Connect:\n https://developers.google.com/identity/protocols/OpenIDConnect\n\n Args:\n decoded_jwt: A dict object containing the decoded JWT token.\n expected_issuer: Expected issuer of the JWT.\n expected_domain: Expected GSuite domain in the JWT (optional).\n\n Raises:\n JwtValidationError: If unable to validate the JWT.\n \"\"\"\n # Make sure the token is not created in the future or has expired.\n try:\n now = int(time.time())\n issued_at = decoded_jwt['iat']\n if isinstance(issued_at, six.string_types):\n issued_at = int(issued_at, 10)\n\n expires_at = decoded_jwt['exp']\n if isinstance(expires_at, six.string_types):\n expires_at = int(expires_at, 10)\n\n if issued_at > now:\n raise JwtValidationError('Token was issued in the future')\n if expires_at < now:\n raise JwtValidationError('Token has expired')\n except KeyError as e:\n raise JwtValidationError('Missing timestamp: {}'.format(e)) from e\n\n # Check that the issuer of the token is correct.\n try:\n issuer = decoded_jwt['iss']\n if issuer != expected_issuer:\n raise JwtValidationError('Wrong issuer: {}'.format(issuer))\n except KeyError as e:\n raise JwtValidationError('Missing issuer') from e\n\n if 'email' not in decoded_jwt:\n raise JwtValidationError('Missing email field in token')\n\n if expected_domain:\n try:\n domain = decoded_jwt['hd']\n if not domain == expected_domain:\n raise JwtValidationError('Wrong domain: {}'.format(domain))\n except KeyError as e:\n raise JwtValidationError('Missing domain: {}'.format(e)) from e\n\n\ndef get_public_key_for_jwt(encoded_jwt, url):\n \"\"\"Get public key for JWT in order to verify the signature.\n\n The keys get cached in order to limit the amount of network round trips.\n\n Args:\n encoded_jwt: Base64 encoded JWT.\n url: URL where keys can be fetched.\n\n Raises:\n JwTKeyError if keys cannot be fetched.\n\n Returns:\n Key as string.\n \"\"\"\n # Get the Key ID from the JWT header.\n key_id = jwt.get_unverified_header(encoded_jwt).get('kid')\n if not key_id:\n raise JwtKeyError('Missing key ID field in token header')\n key_cache = get_public_key_for_jwt.key_cache\n key = key_cache.get(key_id)\n if key:\n return key\n\n # Re-fetch the key file.\n keys_json = _fetch_public_keys(url)\n if 'keys' in keys_json:\n _new_keys_dict = {}\n for key_dict in keys_json['keys']:\n public_key = jwt.algorithms.RSAAlgorithm.from_jwk(\n json.dumps(key_dict))\n _new_keys_dict[key_dict['kid']] = public_key\n key_cache = _new_keys_dict\n else:\n key_cache = keys_json\n get_public_key_for_jwt.key_cache = key_cache\n key = key_cache.get(key_id)\n if not key:\n raise JwtKeyError('IAP public key {!r} not 
found'.format(key_id))\n\n return key\n\n\ndef get_oauth2_discovery_document():\n \"\"\"Get Google OAuth2 discovery document.\n\n The document is cached in order to limit the amount of network round trips\n and is set to expire in 12 hours from when it was fetched.\n\n Returns:\n Discovery document as a dictionary.\n \"\"\"\n now = int(time.time())\n cache = get_oauth2_discovery_document.cache\n discovery_document = cache.get('current')\n if discovery_document:\n # Check if the document has expired.\n created_at = discovery_document['created_at']\n expires_at = created_at + 12*60*60 # 12 hours in seconds\n if now < expires_at:\n return discovery_document['document']\n\n # Re-fetch the discovery document.\n new_discovery_document = _fetch_oauth2_discovery_document()\n cache = {\n 'current': {\n 'created_at': now,\n 'document': new_discovery_document\n }\n }\n get_oauth2_discovery_document.cache = cache\n return new_discovery_document\n\n\n# Used to cache public keys.\nget_public_key_for_jwt.key_cache = {}\n\n# Used to cache discovery documents.\nget_oauth2_discovery_document.cache = {}\n", "path": "timesketch/lib/google_auth.py"}]} | 3,768 | 641 |
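The patch above reduces to a single "config override with default" pattern: each hard-coded Google endpoint or algorithm gains a `current_app.config.get(<KEY>, <default>)` lookup, so a deployment can point the OIDC flow at another provider without code changes. Below is a minimal sketch of that pattern; the config key names (`GOOGLE_OIDC_DISCOVERY_URL`, `GOOGLE_OIDC_ALGORITHM`) are the ones introduced in the diff, while the Flask app and the example URLs are invented for illustration and are not Timesketch code.

```python
from flask import Flask, current_app

# Default used when no override is configured.
DEFAULT_DISCOVERY_URL = 'https://accounts.google.com/.well-known/openid-configuration'

app = Flask(__name__)
# Hypothetical overrides pointing the flow at a non-Google OIDC provider.
app.config['GOOGLE_OIDC_DISCOVERY_URL'] = (
    'https://idp.example.com/.well-known/openid-configuration')
app.config['GOOGLE_OIDC_ALGORITHM'] = 'ES256'


def discovery_url():
    # Falls back to the Google default when the key is absent.
    return current_app.config.get('GOOGLE_OIDC_DISCOVERY_URL', DEFAULT_DISCOVERY_URL)


with app.app_context():
    print(discovery_url())                                            # the example.com override
    print(current_app.config.get('GOOGLE_OIDC_ALGORITHM', 'RS256'))  # ES256
```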
gh_patches_debug_36393 | rasdani/github-patches | git_diff | ethereum__web3.py-1020 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rename middleware_stack to middleware_onion
### How can it be fixed?
see #1020
--- END ISSUE ---
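A rename like this is usually shipped with a thin compatibility alias so existing callers keep working through a deprecation period. The sketch below is not the actual web3.py change; it only illustrates the alias-plus-DeprecationWarning pattern the issue implies, with the attribute names reused from the issue title and a stand-in class in place of the real `Web3` object (which, as the file below shows, exposes the attribute via its request manager).

```python
import warnings


class Web3Like:
    """Stand-in for illustration only; not the real Web3 class."""

    def __init__(self, middleware_onion=None):
        self.middleware_onion = middleware_onion or []

    @property
    def middleware_stack(self):
        warnings.warn(
            "middleware_stack has been renamed to middleware_onion",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.middleware_onion


w3 = Web3Like(middleware_onion=["attrdict", "pythonic"])
assert w3.middleware_stack == w3.middleware_onion  # old name still works, but warns
```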
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `web3/__init__.py`
Content:
```
1 import pkg_resources
2 import sys
3
4 if sys.version_info < (3, 5):
5 raise EnvironmentError("Python 3.5 or above is required")
6
7 from eth_account import Account # noqa: E402
8 from web3.main import Web3 # noqa: E402
9 from web3.providers.rpc import ( # noqa: E402
10 HTTPProvider,
11 )
12 from web3.providers.eth_tester import ( # noqa: E402
13 EthereumTesterProvider,
14 )
15 from web3.providers.tester import ( # noqa: E402
16 TestRPCProvider,
17 )
18 from web3.providers.ipc import ( # noqa: E402
19 IPCProvider,
20 )
21 from web3.providers.websocket import ( # noqa: E402
22 WebsocketProvider,
23 )
24
25 __version__ = pkg_resources.get_distribution("web3").version
26
27 __all__ = [
28 "__version__",
29 "Web3",
30 "HTTPProvider",
31 "IPCProvider",
32 "WebsocketProvider",
33 "TestRPCProvider",
34 "EthereumTesterProvider",
35 "Account",
36 ]
37
```
Path: `web3/main.py`
Content:
```
1 from eth_utils import (
2 apply_to_return_value,
3 add_0x_prefix,
4 from_wei,
5 is_address,
6 is_checksum_address,
7 keccak,
8 remove_0x_prefix,
9 to_checksum_address,
10 to_wei,
11 )
12
13 from ens import ENS
14
15 from web3.admin import Admin
16 from web3.eth import Eth
17 from web3.iban import Iban
18 from web3.miner import Miner
19 from web3.net import Net
20 from web3.parity import Parity
21 from web3.personal import Personal
22 from web3.testing import Testing
23 from web3.txpool import TxPool
24 from web3.version import Version
25
26 from web3.providers.eth_tester import (
27 EthereumTesterProvider,
28 )
29 from web3.providers.ipc import (
30 IPCProvider,
31 )
32 from web3.providers.rpc import (
33 HTTPProvider,
34 )
35 from web3.providers.tester import (
36 TestRPCProvider,
37 )
38 from web3.providers.websocket import (
39 WebsocketProvider
40 )
41
42 from web3.manager import (
43 RequestManager,
44 )
45
46 from web3.utils.abi import (
47 map_abi_data,
48 )
49 from hexbytes import (
50 HexBytes,
51 )
52 from web3.utils.decorators import (
53 combomethod,
54 )
55 from web3.utils.empty import empty
56 from web3.utils.encoding import (
57 hex_encode_abi_type,
58 to_bytes,
59 to_int,
60 to_hex,
61 to_text,
62 )
63 from web3.utils.normalizers import (
64 abi_ens_resolver,
65 )
66
67
68 def get_default_modules():
69 return {
70 "eth": Eth,
71 "net": Net,
72 "personal": Personal,
73 "version": Version,
74 "txpool": TxPool,
75 "miner": Miner,
76 "admin": Admin,
77 "parity": Parity,
78 "testing": Testing,
79 }
80
81
82 class Web3:
83 # Providers
84 HTTPProvider = HTTPProvider
85 IPCProvider = IPCProvider
86 TestRPCProvider = TestRPCProvider
87 EthereumTesterProvider = EthereumTesterProvider
88 WebsocketProvider = WebsocketProvider
89
90 # Managers
91 RequestManager = RequestManager
92
93 # Iban
94 Iban = Iban
95
96 # Encoding and Decoding
97 toBytes = staticmethod(to_bytes)
98 toInt = staticmethod(to_int)
99 toHex = staticmethod(to_hex)
100 toText = staticmethod(to_text)
101
102 # Currency Utility
103 toWei = staticmethod(to_wei)
104 fromWei = staticmethod(from_wei)
105
106 # Address Utility
107 isAddress = staticmethod(is_address)
108 isChecksumAddress = staticmethod(is_checksum_address)
109 toChecksumAddress = staticmethod(to_checksum_address)
110
111 def __init__(self, providers=empty, middlewares=None, modules=None, ens=empty):
112 self.manager = RequestManager(self, providers, middlewares)
113
114 if modules is None:
115 modules = get_default_modules()
116
117 for module_name, module_class in modules.items():
118 module_class.attach(self, module_name)
119
120 self.ens = ens
121
122 @property
123 def middleware_stack(self):
124 return self.manager.middleware_stack
125
126 @property
127 def providers(self):
128 return self.manager.providers
129
130 @providers.setter
131 def providers(self, providers):
132 self.manager.providers = providers
133
134 @staticmethod
135 @apply_to_return_value(HexBytes)
136 def sha3(primitive=None, text=None, hexstr=None):
137 if isinstance(primitive, (bytes, int, type(None))):
138 input_bytes = to_bytes(primitive, hexstr=hexstr, text=text)
139 return keccak(input_bytes)
140
141 raise TypeError(
142 "You called sha3 with first arg %r and keywords %r. You must call it with one of "
143 "these approaches: sha3(text='txt'), sha3(hexstr='0x747874'), "
144 "sha3(b'\\x74\\x78\\x74'), or sha3(0x747874)." % (
145 primitive,
146 {'text': text, 'hexstr': hexstr}
147 )
148 )
149
150 @combomethod
151 def soliditySha3(cls, abi_types, values):
152 """
153 Executes sha3 (keccak256) exactly as Solidity does.
154 Takes list of abi_types as inputs -- `[uint24, int8[], bool]`
155 and list of corresponding values -- `[20, [-1, 5, 0], True]`
156 """
157 if len(abi_types) != len(values):
158 raise ValueError(
159 "Length mismatch between provided abi types and values. Got "
160 "{0} types and {1} values.".format(len(abi_types), len(values))
161 )
162
163 if isinstance(cls, type):
164 w3 = None
165 else:
166 w3 = cls
167 normalized_values = map_abi_data([abi_ens_resolver(w3)], abi_types, values)
168
169 hex_string = add_0x_prefix(''.join(
170 remove_0x_prefix(hex_encode_abi_type(abi_type, value))
171 for abi_type, value
172 in zip(abi_types, normalized_values)
173 ))
174 return cls.sha3(hexstr=hex_string)
175
176 def isConnected(self):
177 for provider in self.providers:
178 if provider.isConnected():
179 return True
180 else:
181 return False
182
183 @property
184 def ens(self):
185 if self._ens is empty:
186 return ENS.fromWeb3(self)
187 else:
188 return self._ens
189
190 @ens.setter
191 def ens(self, new_ens):
192 self._ens = new_ens
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/web3/__init__.py b/web3/__init__.py
--- a/web3/__init__.py
+++ b/web3/__init__.py
@@ -1,8 +1,17 @@
import pkg_resources
import sys
+import warnings
+
+if (3, 5) <= sys.version_info < (3, 6):
+ warnings.warn(
+ "Support for Python 3.5 will be removed in web3.py v5",
+ category=DeprecationWarning,
+ stacklevel=2)
if sys.version_info < (3, 5):
- raise EnvironmentError("Python 3.5 or above is required")
+ raise EnvironmentError(
+ "Python 3.5 or above is required. "
+ "Note that support for Python 3.5 will be remove in web3.py v5")
from eth_account import Account # noqa: E402
from web3.main import Web3 # noqa: E402
diff --git a/web3/main.py b/web3/main.py
--- a/web3/main.py
+++ b/web3/main.py
@@ -4,7 +4,7 @@
from_wei,
is_address,
is_checksum_address,
- keccak,
+ keccak as eth_utils_keccak,
remove_0x_prefix,
to_checksum_address,
to_wei,
@@ -51,6 +51,7 @@
)
from web3.utils.decorators import (
combomethod,
+ deprecated_for,
)
from web3.utils.empty import empty
from web3.utils.encoding import (
@@ -132,16 +133,22 @@
self.manager.providers = providers
@staticmethod
+ @deprecated_for("This method has been renamed to keccak")
@apply_to_return_value(HexBytes)
def sha3(primitive=None, text=None, hexstr=None):
+ return Web3.keccak(primitive, text, hexstr)
+
+ @staticmethod
+ @apply_to_return_value(HexBytes)
+ def keccak(primitive=None, text=None, hexstr=None):
if isinstance(primitive, (bytes, int, type(None))):
input_bytes = to_bytes(primitive, hexstr=hexstr, text=text)
- return keccak(input_bytes)
+ return eth_utils_keccak(input_bytes)
raise TypeError(
- "You called sha3 with first arg %r and keywords %r. You must call it with one of "
- "these approaches: sha3(text='txt'), sha3(hexstr='0x747874'), "
- "sha3(b'\\x74\\x78\\x74'), or sha3(0x747874)." % (
+ "You called keccak with first arg %r and keywords %r. You must call it with one of "
+ "these approaches: keccak(text='txt'), keccak(hexstr='0x747874'), "
+ "keccak(b'\\x74\\x78\\x74'), or keccak(0x747874)." % (
primitive,
{'text': text, 'hexstr': hexstr}
)
| {"golden_diff": "diff --git a/web3/__init__.py b/web3/__init__.py\n--- a/web3/__init__.py\n+++ b/web3/__init__.py\n@@ -1,8 +1,17 @@\n import pkg_resources\n import sys\n+import warnings\n+\n+if (3, 5) <= sys.version_info < (3, 6):\n+ warnings.warn(\n+ \"Support for Python 3.5 will be removed in web3.py v5\",\n+ category=DeprecationWarning,\n+ stacklevel=2)\n \n if sys.version_info < (3, 5):\n- raise EnvironmentError(\"Python 3.5 or above is required\")\n+ raise EnvironmentError(\n+ \"Python 3.5 or above is required. \"\n+ \"Note that support for Python 3.5 will be remove in web3.py v5\")\n \n from eth_account import Account # noqa: E402\n from web3.main import Web3 # noqa: E402\ndiff --git a/web3/main.py b/web3/main.py\n--- a/web3/main.py\n+++ b/web3/main.py\n@@ -4,7 +4,7 @@\n from_wei,\n is_address,\n is_checksum_address,\n- keccak,\n+ keccak as eth_utils_keccak,\n remove_0x_prefix,\n to_checksum_address,\n to_wei,\n@@ -51,6 +51,7 @@\n )\n from web3.utils.decorators import (\n combomethod,\n+ deprecated_for,\n )\n from web3.utils.empty import empty\n from web3.utils.encoding import (\n@@ -132,16 +133,22 @@\n self.manager.providers = providers\n \n @staticmethod\n+ @deprecated_for(\"This method has been renamed to keccak\")\n @apply_to_return_value(HexBytes)\n def sha3(primitive=None, text=None, hexstr=None):\n+ return Web3.keccak(primitive, text, hexstr)\n+\n+ @staticmethod\n+ @apply_to_return_value(HexBytes)\n+ def keccak(primitive=None, text=None, hexstr=None):\n if isinstance(primitive, (bytes, int, type(None))):\n input_bytes = to_bytes(primitive, hexstr=hexstr, text=text)\n- return keccak(input_bytes)\n+ return eth_utils_keccak(input_bytes)\n \n raise TypeError(\n- \"You called sha3 with first arg %r and keywords %r. You must call it with one of \"\n- \"these approaches: sha3(text='txt'), sha3(hexstr='0x747874'), \"\n- \"sha3(b'\\\\x74\\\\x78\\\\x74'), or sha3(0x747874).\" % (\n+ \"You called keccak with first arg %r and keywords %r. 
You must call it with one of \"\n+ \"these approaches: keccak(text='txt'), keccak(hexstr='0x747874'), \"\n+ \"keccak(b'\\\\x74\\\\x78\\\\x74'), or keccak(0x747874).\" % (\n primitive,\n {'text': text, 'hexstr': hexstr}\n )\n", "issue": "Rename middleware_stack to middleware_onion\n### How can it be fixed?\r\n\r\nsee #1020 \r\n\n", "before_files": [{"content": "import pkg_resources\nimport sys\n\nif sys.version_info < (3, 5):\n raise EnvironmentError(\"Python 3.5 or above is required\")\n\nfrom eth_account import Account # noqa: E402\nfrom web3.main import Web3 # noqa: E402\nfrom web3.providers.rpc import ( # noqa: E402\n HTTPProvider,\n)\nfrom web3.providers.eth_tester import ( # noqa: E402\n EthereumTesterProvider,\n)\nfrom web3.providers.tester import ( # noqa: E402\n TestRPCProvider,\n)\nfrom web3.providers.ipc import ( # noqa: E402\n IPCProvider,\n)\nfrom web3.providers.websocket import ( # noqa: E402\n WebsocketProvider,\n)\n\n__version__ = pkg_resources.get_distribution(\"web3\").version\n\n__all__ = [\n \"__version__\",\n \"Web3\",\n \"HTTPProvider\",\n \"IPCProvider\",\n \"WebsocketProvider\",\n \"TestRPCProvider\",\n \"EthereumTesterProvider\",\n \"Account\",\n]\n", "path": "web3/__init__.py"}, {"content": "from eth_utils import (\n apply_to_return_value,\n add_0x_prefix,\n from_wei,\n is_address,\n is_checksum_address,\n keccak,\n remove_0x_prefix,\n to_checksum_address,\n to_wei,\n)\n\nfrom ens import ENS\n\nfrom web3.admin import Admin\nfrom web3.eth import Eth\nfrom web3.iban import Iban\nfrom web3.miner import Miner\nfrom web3.net import Net\nfrom web3.parity import Parity\nfrom web3.personal import Personal\nfrom web3.testing import Testing\nfrom web3.txpool import TxPool\nfrom web3.version import Version\n\nfrom web3.providers.eth_tester import (\n EthereumTesterProvider,\n)\nfrom web3.providers.ipc import (\n IPCProvider,\n)\nfrom web3.providers.rpc import (\n HTTPProvider,\n)\nfrom web3.providers.tester import (\n TestRPCProvider,\n)\nfrom web3.providers.websocket import (\n WebsocketProvider\n)\n\nfrom web3.manager import (\n RequestManager,\n)\n\nfrom web3.utils.abi import (\n map_abi_data,\n)\nfrom hexbytes import (\n HexBytes,\n)\nfrom web3.utils.decorators import (\n combomethod,\n)\nfrom web3.utils.empty import empty\nfrom web3.utils.encoding import (\n hex_encode_abi_type,\n to_bytes,\n to_int,\n to_hex,\n to_text,\n)\nfrom web3.utils.normalizers import (\n abi_ens_resolver,\n)\n\n\ndef get_default_modules():\n return {\n \"eth\": Eth,\n \"net\": Net,\n \"personal\": Personal,\n \"version\": Version,\n \"txpool\": TxPool,\n \"miner\": Miner,\n \"admin\": Admin,\n \"parity\": Parity,\n \"testing\": Testing,\n }\n\n\nclass Web3:\n # Providers\n HTTPProvider = HTTPProvider\n IPCProvider = IPCProvider\n TestRPCProvider = TestRPCProvider\n EthereumTesterProvider = EthereumTesterProvider\n WebsocketProvider = WebsocketProvider\n\n # Managers\n RequestManager = RequestManager\n\n # Iban\n Iban = Iban\n\n # Encoding and Decoding\n toBytes = staticmethod(to_bytes)\n toInt = staticmethod(to_int)\n toHex = staticmethod(to_hex)\n toText = staticmethod(to_text)\n\n # Currency Utility\n toWei = staticmethod(to_wei)\n fromWei = staticmethod(from_wei)\n\n # Address Utility\n isAddress = staticmethod(is_address)\n isChecksumAddress = staticmethod(is_checksum_address)\n toChecksumAddress = staticmethod(to_checksum_address)\n\n def __init__(self, providers=empty, middlewares=None, modules=None, ens=empty):\n self.manager = RequestManager(self, providers, middlewares)\n\n if modules 
is None:\n modules = get_default_modules()\n\n for module_name, module_class in modules.items():\n module_class.attach(self, module_name)\n\n self.ens = ens\n\n @property\n def middleware_stack(self):\n return self.manager.middleware_stack\n\n @property\n def providers(self):\n return self.manager.providers\n\n @providers.setter\n def providers(self, providers):\n self.manager.providers = providers\n\n @staticmethod\n @apply_to_return_value(HexBytes)\n def sha3(primitive=None, text=None, hexstr=None):\n if isinstance(primitive, (bytes, int, type(None))):\n input_bytes = to_bytes(primitive, hexstr=hexstr, text=text)\n return keccak(input_bytes)\n\n raise TypeError(\n \"You called sha3 with first arg %r and keywords %r. You must call it with one of \"\n \"these approaches: sha3(text='txt'), sha3(hexstr='0x747874'), \"\n \"sha3(b'\\\\x74\\\\x78\\\\x74'), or sha3(0x747874).\" % (\n primitive,\n {'text': text, 'hexstr': hexstr}\n )\n )\n\n @combomethod\n def soliditySha3(cls, abi_types, values):\n \"\"\"\n Executes sha3 (keccak256) exactly as Solidity does.\n Takes list of abi_types as inputs -- `[uint24, int8[], bool]`\n and list of corresponding values -- `[20, [-1, 5, 0], True]`\n \"\"\"\n if len(abi_types) != len(values):\n raise ValueError(\n \"Length mismatch between provided abi types and values. Got \"\n \"{0} types and {1} values.\".format(len(abi_types), len(values))\n )\n\n if isinstance(cls, type):\n w3 = None\n else:\n w3 = cls\n normalized_values = map_abi_data([abi_ens_resolver(w3)], abi_types, values)\n\n hex_string = add_0x_prefix(''.join(\n remove_0x_prefix(hex_encode_abi_type(abi_type, value))\n for abi_type, value\n in zip(abi_types, normalized_values)\n ))\n return cls.sha3(hexstr=hex_string)\n\n def isConnected(self):\n for provider in self.providers:\n if provider.isConnected():\n return True\n else:\n return False\n\n @property\n def ens(self):\n if self._ens is empty:\n return ENS.fromWeb3(self)\n else:\n return self._ens\n\n @ens.setter\n def ens(self, new_ens):\n self._ens = new_ens\n", "path": "web3/main.py"}], "after_files": [{"content": "import pkg_resources\nimport sys\nimport warnings\n\nif (3, 5) <= sys.version_info < (3, 6):\n warnings.warn(\n \"Support for Python 3.5 will be removed in web3.py v5\",\n category=DeprecationWarning,\n stacklevel=2)\n\nif sys.version_info < (3, 5):\n raise EnvironmentError(\n \"Python 3.5 or above is required. 
\"\n \"Note that support for Python 3.5 will be remove in web3.py v5\")\n\nfrom eth_account import Account # noqa: E402\nfrom web3.main import Web3 # noqa: E402\nfrom web3.providers.rpc import ( # noqa: E402\n HTTPProvider,\n)\nfrom web3.providers.eth_tester import ( # noqa: E402\n EthereumTesterProvider,\n)\nfrom web3.providers.tester import ( # noqa: E402\n TestRPCProvider,\n)\nfrom web3.providers.ipc import ( # noqa: E402\n IPCProvider,\n)\nfrom web3.providers.websocket import ( # noqa: E402\n WebsocketProvider,\n)\n\n__version__ = pkg_resources.get_distribution(\"web3\").version\n\n__all__ = [\n \"__version__\",\n \"Web3\",\n \"HTTPProvider\",\n \"IPCProvider\",\n \"WebsocketProvider\",\n \"TestRPCProvider\",\n \"EthereumTesterProvider\",\n \"Account\",\n]\n", "path": "web3/__init__.py"}, {"content": "from eth_utils import (\n apply_to_return_value,\n add_0x_prefix,\n from_wei,\n is_address,\n is_checksum_address,\n keccak as eth_utils_keccak,\n remove_0x_prefix,\n to_checksum_address,\n to_wei,\n)\n\nfrom ens import ENS\n\nfrom web3.admin import Admin\nfrom web3.eth import Eth\nfrom web3.iban import Iban\nfrom web3.miner import Miner\nfrom web3.net import Net\nfrom web3.parity import Parity\nfrom web3.personal import Personal\nfrom web3.testing import Testing\nfrom web3.txpool import TxPool\nfrom web3.version import Version\n\nfrom web3.providers.eth_tester import (\n EthereumTesterProvider,\n)\nfrom web3.providers.ipc import (\n IPCProvider,\n)\nfrom web3.providers.rpc import (\n HTTPProvider,\n)\nfrom web3.providers.tester import (\n TestRPCProvider,\n)\nfrom web3.providers.websocket import (\n WebsocketProvider\n)\n\nfrom web3.manager import (\n RequestManager,\n)\n\nfrom web3.utils.abi import (\n map_abi_data,\n)\nfrom hexbytes import (\n HexBytes,\n)\nfrom web3.utils.decorators import (\n combomethod,\n deprecated_for,\n)\nfrom web3.utils.empty import empty\nfrom web3.utils.encoding import (\n hex_encode_abi_type,\n to_bytes,\n to_int,\n to_hex,\n to_text,\n)\nfrom web3.utils.normalizers import (\n abi_ens_resolver,\n)\n\n\ndef get_default_modules():\n return {\n \"eth\": Eth,\n \"net\": Net,\n \"personal\": Personal,\n \"version\": Version,\n \"txpool\": TxPool,\n \"miner\": Miner,\n \"admin\": Admin,\n \"parity\": Parity,\n \"testing\": Testing,\n }\n\n\nclass Web3:\n # Providers\n HTTPProvider = HTTPProvider\n IPCProvider = IPCProvider\n TestRPCProvider = TestRPCProvider\n EthereumTesterProvider = EthereumTesterProvider\n WebsocketProvider = WebsocketProvider\n\n # Managers\n RequestManager = RequestManager\n\n # Iban\n Iban = Iban\n\n # Encoding and Decoding\n toBytes = staticmethod(to_bytes)\n toInt = staticmethod(to_int)\n toHex = staticmethod(to_hex)\n toText = staticmethod(to_text)\n\n # Currency Utility\n toWei = staticmethod(to_wei)\n fromWei = staticmethod(from_wei)\n\n # Address Utility\n isAddress = staticmethod(is_address)\n isChecksumAddress = staticmethod(is_checksum_address)\n toChecksumAddress = staticmethod(to_checksum_address)\n\n def __init__(self, providers=empty, middlewares=None, modules=None, ens=empty):\n self.manager = RequestManager(self, providers, middlewares)\n\n if modules is None:\n modules = get_default_modules()\n\n for module_name, module_class in modules.items():\n module_class.attach(self, module_name)\n\n self.ens = ens\n\n @property\n def middleware_stack(self):\n return self.manager.middleware_stack\n\n @property\n def providers(self):\n return self.manager.providers\n\n @providers.setter\n def providers(self, providers):\n 
self.manager.providers = providers\n\n @staticmethod\n @deprecated_for(\"This method has been renamed to keccak\")\n @apply_to_return_value(HexBytes)\n def sha3(primitive=None, text=None, hexstr=None):\n return Web3.keccak(primitive, text, hexstr)\n\n @staticmethod\n @apply_to_return_value(HexBytes)\n def keccak(primitive=None, text=None, hexstr=None):\n if isinstance(primitive, (bytes, int, type(None))):\n input_bytes = to_bytes(primitive, hexstr=hexstr, text=text)\n return eth_utils_keccak(input_bytes)\n\n raise TypeError(\n \"You called keccak with first arg %r and keywords %r. You must call it with one of \"\n \"these approaches: keccak(text='txt'), keccak(hexstr='0x747874'), \"\n \"keccak(b'\\\\x74\\\\x78\\\\x74'), or keccak(0x747874).\" % (\n primitive,\n {'text': text, 'hexstr': hexstr}\n )\n )\n\n @combomethod\n def soliditySha3(cls, abi_types, values):\n \"\"\"\n Executes sha3 (keccak256) exactly as Solidity does.\n Takes list of abi_types as inputs -- `[uint24, int8[], bool]`\n and list of corresponding values -- `[20, [-1, 5, 0], True]`\n \"\"\"\n if len(abi_types) != len(values):\n raise ValueError(\n \"Length mismatch between provided abi types and values. Got \"\n \"{0} types and {1} values.\".format(len(abi_types), len(values))\n )\n\n if isinstance(cls, type):\n w3 = None\n else:\n w3 = cls\n normalized_values = map_abi_data([abi_ens_resolver(w3)], abi_types, values)\n\n hex_string = add_0x_prefix(''.join(\n remove_0x_prefix(hex_encode_abi_type(abi_type, value))\n for abi_type, value\n in zip(abi_types, normalized_values)\n ))\n return cls.sha3(hexstr=hex_string)\n\n def isConnected(self):\n for provider in self.providers:\n if provider.isConnected():\n return True\n else:\n return False\n\n @property\n def ens(self):\n if self._ens is empty:\n return ENS.fromWeb3(self)\n else:\n return self._ens\n\n @ens.setter\n def ens(self, new_ens):\n self._ens = new_ens\n", "path": "web3/main.py"}]} | 2,284 | 712 |
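The golden diff recorded for this row keeps `Web3.sha3` alive as a thin wrapper around the new `keccak` method and marks it with a `deprecated_for(...)` decorator. The snippet below sketches how a decorator with that call shape can be built; it assumes nothing about the real `web3.utils.decorators.deprecated_for` beyond the usage visible in the diff, and the decorated function body is a stand-in rather than a real hash.

```python
import functools
import warnings


def deprecated_for(replace_message):
    """Warn that the wrapped callable is deprecated, then call it anyway."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            warnings.warn(
                "%s is deprecated: %s" % (func.__name__, replace_message),
                category=DeprecationWarning,
                stacklevel=2,
            )
            return func(*args, **kwargs)
        return wrapper
    return decorator


@deprecated_for("This method has been renamed to keccak")
def sha3(text):
    return "hash-of-" + text  # stand-in body for the sketch


print(sha3("txt"))  # still callable, but emits a DeprecationWarning
```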
gh_patches_debug_26070 | rasdani/github-patches | git_diff | getnikola__nikola-1391 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SHOW_UNTRANSLATED_POSTS=False leads to 404s in archives
Even though SHOW_UNTRANSLATED_POSTS=False, archives still display posts that do not exist, leading to 404s everywhere.
--- END ISSUE ---
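The posts themselves exist in the default language; what the archive pages link to are translations that were never rendered, because with `SHOW_UNTRANSLATED_POSTS = False` Nikola skips building per-language pages for untranslated posts while the archives still list them. A hedged way to make the problem visible, assuming a multilingual site where at least one post exists only in the default language (the option names are standard Nikola `conf.py` settings, the paths are illustrative):

```python
# conf.py (excerpt)
DEFAULT_LANG = "en"
TRANSLATIONS = {
    "en": "",
    "de": "./de",
}
SHOW_UNTRANSLATED_POSTS = False  # English-only posts should vanish from /de/
```

With these settings the `/de/` archive pages still list English-only posts, and the generated links point at `/de/...` pages that were never built, hence the 404s.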
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/task/archive.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 import os
28
29 # for tearDown with _reload we cannot use 'import from' to access LocaleBorg
30 import nikola.utils
31 from nikola.plugin_categories import Task
32 from nikola.utils import config_changed
33
34
35 class Archive(Task):
36 """Render the post archives."""
37
38 name = "render_archive"
39
40 def set_site(self, site):
41 site.register_path_handler('archive', self.archive_path)
42 return super(Archive, self).set_site(site)
43
44 def _prepare_task(self, kw, name, lang, posts, items, template_name,
45 title, deps_translatable=None):
46 # name: used to build permalink and destination
47 # posts, items: posts or items; only one of them should be used,
48 # the other be None
49 # template_name: name of the template to use
50 # title: the (translated) title for the generated page
51 # deps_translatable: dependencies (None if not added)
52 assert posts is not None or items is not None
53
54 context = {}
55 context["lang"] = lang
56 context["title"] = title
57 context["permalink"] = self.site.link("archive", name, lang)
58 if posts is not None:
59 context["posts"] = posts
60 n = len(posts)
61 else:
62 context["items"] = items
63 n = len(items)
64 task = self.site.generic_post_list_renderer(
65 lang,
66 [],
67 os.path.join(kw['output_folder'], self.site.path("archive", name, lang)),
68 template_name,
69 kw['filters'],
70 context,
71 )
72
73 task_cfg = {1: task['uptodate'][0].config, 2: kw, 3: n}
74 if deps_translatable is not None:
75 task_cfg[4] = deps_translatable
76 task['uptodate'] = [config_changed(task_cfg)]
77 task['basename'] = self.name
78 return task
79
80 def _generate_posts_task(self, kw, name, lang, posts, title, deps_translatable=None):
81 posts = sorted(posts, key=lambda a: a.date)
82 posts.reverse()
83 yield self._prepare_task(kw, name, lang, posts, None, "list_post.tmpl", title, deps_translatable)
84
85 def gen_tasks(self):
86 kw = {
87 "messages": self.site.MESSAGES,
88 "translations": self.site.config['TRANSLATIONS'],
89 "output_folder": self.site.config['OUTPUT_FOLDER'],
90 "filters": self.site.config['FILTERS'],
91 "create_monthly_archive": self.site.config['CREATE_MONTHLY_ARCHIVE'],
92 "create_single_archive": self.site.config['CREATE_SINGLE_ARCHIVE'],
93 "create_full_archives": self.site.config['CREATE_FULL_ARCHIVES'],
94 "create_daily_archive": self.site.config['CREATE_DAILY_ARCHIVE'],
95 }
96 self.site.scan_posts()
97 yield self.group_task()
98 # TODO add next/prev links for years
99 if (kw['create_monthly_archive'] and kw['create_single_archive']) and not kw['create_full_archives']:
100 raise Exception('Cannot create monthly and single archives at the same time.')
101 for lang in kw["translations"]:
102 if kw['create_single_archive'] and not kw['create_full_archives']:
103 # if we are creating one single archive
104 archdata = {}
105 else:
106 # if we are not creating one single archive, start with all years
107 archdata = self.site.posts_per_year.copy()
108 if kw['create_single_archive'] or kw['create_full_archives']:
109 # if we are creating one single archive, or full archives
110 archdata[None] = self.site.posts # for create_single_archive
111
112 for year, posts in archdata.items():
113 # Add archive per year or total archive
114 if year:
115 title = kw["messages"][lang]["Posts for year %s"] % year
116 else:
117 title = kw["messages"][lang]["Archive"]
118 deps_translatable = {}
119 for k in self.site._GLOBAL_CONTEXT_TRANSLATABLE:
120 deps_translatable[k] = self.site.GLOBAL_CONTEXT[k](lang)
121 if not kw["create_monthly_archive"] or kw["create_full_archives"]:
122 yield self._generate_posts_task(kw, year, lang, posts, title, deps_translatable)
123 else:
124 months = set([(m.split('/')[1], self.site.link("archive", m, lang)) for m in self.site.posts_per_month.keys() if m.startswith(str(year))])
125 months = sorted(list(months))
126 months.reverse()
127 items = [[nikola.utils.LocaleBorg().get_month_name(int(month), lang), link] for month, link in months]
128 yield self._prepare_task(kw, year, lang, None, items, "list.tmpl", title, deps_translatable)
129
130 if not kw["create_monthly_archive"] and not kw["create_full_archives"] and not kw["create_daily_archive"]:
131 continue # Just to avoid nesting the other loop in this if
132 for yearmonth, posts in self.site.posts_per_month.items():
133 # Add archive per month
134 year, month = yearmonth.split('/')
135 if kw["create_monthly_archive"] or kw["create_full_archives"]:
136 title = kw["messages"][lang]["Posts for {month} {year}"].format(
137 year=year, month=nikola.utils.LocaleBorg().get_month_name(int(month), lang))
138 yield self._generate_posts_task(kw, yearmonth, lang, posts, title)
139
140 if not kw["create_full_archives"] and not kw["create_daily_archive"]:
141 continue # Just to avoid nesting the other loop in this if
142 # Add archive per day
143 days = dict()
144 for p in posts:
145 if p.date.day not in days:
146 days[p.date.day] = list()
147 days[p.date.day].append(p)
148 for day, posts in days.items():
149 title = kw["messages"][lang]["Posts for {month} {day}, {year}"].format(
150 year=year, month=nikola.utils.LocaleBorg().get_month_name(int(month), lang), day=day)
151 yield self._generate_posts_task(kw, yearmonth + '/{0:02d}'.format(day), lang, posts, title)
152
153 if not kw['create_single_archive'] and not kw['create_full_archives']:
154 # And an "all your years" page for yearly and monthly archives
155 years = list(self.site.posts_per_year.keys())
156 years.sort(reverse=True)
157 kw['years'] = years
158 for lang in kw["translations"]:
159 items = [(y, self.site.link("archive", y, lang)) for y in years]
160 yield self._prepare_task(kw, None, lang, None, items, "list.tmpl", kw["messages"][lang]["Archive"])
161
162 def archive_path(self, name, lang):
163 if name:
164 return [_f for _f in [self.site.config['TRANSLATIONS'][lang],
165 self.site.config['ARCHIVE_PATH'], name,
166 self.site.config['INDEX_FILE']] if _f]
167 else:
168 return [_f for _f in [self.site.config['TRANSLATIONS'][lang],
169 self.site.config['ARCHIVE_PATH'],
170 self.site.config['ARCHIVE_FILENAME']] if _f]
171
```
--- END FILES ---
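In `gen_tasks` above, `archdata` is copied straight from `self.site.posts_per_year` (or `self.site.posts`) with no per-language filtering, which is where untranslated posts leak into the archives. The standalone snippet below illustrates the kind of filter that is needed: keep only posts that have a translation for the language being rendered. The `FakePost` class and the data are made up for the demonstration; `translated_to` mirrors the attribute Nikola's post objects expose.

```python
class FakePost:
    def __init__(self, title, translated_to):
        self.title = title
        self.translated_to = translated_to


posts_per_year = {
    "2014": [FakePost("english-only", {"en"}), FakePost("bilingual", {"en", "de"})],
}

lang = "de"
show_untranslated_posts = False

archdata = {
    year: [p for p in posts
           if show_untranslated_posts or lang in p.translated_to]
    for year, posts in posts_per_year.items()
}

print([p.title for p in archdata["2014"]])  # ['bilingual']: the English-only post is dropped
```

In `archive.py` itself the equivalent filter would sit inside the `for lang in kw["translations"]` loop, after `archdata` is built and before the per-year tasks are generated.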
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nikola/plugins/task/archive.py b/nikola/plugins/task/archive.py
--- a/nikola/plugins/task/archive.py
+++ b/nikola/plugins/task/archive.py
@@ -90,6 +90,7 @@
"filters": self.site.config['FILTERS'],
"create_monthly_archive": self.site.config['CREATE_MONTHLY_ARCHIVE'],
"create_single_archive": self.site.config['CREATE_SINGLE_ARCHIVE'],
+ "show_untranslated_posts": self.site.config['SHOW_UNTRANSLATED_POSTS'],
"create_full_archives": self.site.config['CREATE_FULL_ARCHIVES'],
"create_daily_archive": self.site.config['CREATE_DAILY_ARCHIVE'],
}
@@ -109,6 +110,11 @@
# if we are creating one single archive, or full archives
archdata[None] = self.site.posts # for create_single_archive
+ # Filter untranslated posts (Issue #1360)
+ if not kw["show_untranslated_posts"]:
+ for year, posts in archdata.items():
+ archdata[year] = [p for p in posts if lang in p.translated_to]
+
for year, posts in archdata.items():
# Add archive per year or total archive
if year:
| {"golden_diff": "diff --git a/nikola/plugins/task/archive.py b/nikola/plugins/task/archive.py\n--- a/nikola/plugins/task/archive.py\n+++ b/nikola/plugins/task/archive.py\n@@ -90,6 +90,7 @@\n \"filters\": self.site.config['FILTERS'],\n \"create_monthly_archive\": self.site.config['CREATE_MONTHLY_ARCHIVE'],\n \"create_single_archive\": self.site.config['CREATE_SINGLE_ARCHIVE'],\n+ \"show_untranslated_posts\": self.site.config['SHOW_UNTRANSLATED_POSTS'],\n \"create_full_archives\": self.site.config['CREATE_FULL_ARCHIVES'],\n \"create_daily_archive\": self.site.config['CREATE_DAILY_ARCHIVE'],\n }\n@@ -109,6 +110,11 @@\n # if we are creating one single archive, or full archives\n archdata[None] = self.site.posts # for create_single_archive\n \n+ # Filter untranslated posts (Issue #1360)\n+ if not kw[\"show_untranslated_posts\"]:\n+ for year, posts in archdata.items():\n+ archdata[year] = [p for p in posts if lang in p.translated_to]\n+\n for year, posts in archdata.items():\n # Add archive per year or total archive\n if year:\n", "issue": "SHOW_UNTRANSLATED_POSTS=False leads to 404s in archives\nEven though SHOW_UNTRANSLATED_POSTS=False, archives still display posts that do not exist, leading to 404s everywhere.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport os\n\n# for tearDown with _reload we cannot use 'import from' to access LocaleBorg\nimport nikola.utils\nfrom nikola.plugin_categories import Task\nfrom nikola.utils import config_changed\n\n\nclass Archive(Task):\n \"\"\"Render the post archives.\"\"\"\n\n name = \"render_archive\"\n\n def set_site(self, site):\n site.register_path_handler('archive', self.archive_path)\n return super(Archive, self).set_site(site)\n\n def _prepare_task(self, kw, name, lang, posts, items, template_name,\n title, deps_translatable=None):\n # name: used to build permalink and destination\n # posts, items: posts or items; only one of them should be used,\n # the other be None\n # template_name: name of the template to use\n # title: the (translated) title for the generated page\n # deps_translatable: dependencies (None if not added)\n assert posts is not None or items is not None\n\n context = {}\n context[\"lang\"] = lang\n context[\"title\"] = title\n context[\"permalink\"] = self.site.link(\"archive\", name, lang)\n if posts is not None:\n context[\"posts\"] = posts\n n = len(posts)\n else:\n context[\"items\"] = items\n n = len(items)\n task = self.site.generic_post_list_renderer(\n lang,\n [],\n os.path.join(kw['output_folder'], self.site.path(\"archive\", name, lang)),\n template_name,\n kw['filters'],\n context,\n )\n\n task_cfg = {1: task['uptodate'][0].config, 2: kw, 3: n}\n if deps_translatable is not None:\n task_cfg[4] = deps_translatable\n task['uptodate'] = [config_changed(task_cfg)]\n task['basename'] = self.name\n return task\n\n def _generate_posts_task(self, kw, name, lang, posts, title, deps_translatable=None):\n posts = sorted(posts, key=lambda a: a.date)\n posts.reverse()\n yield self._prepare_task(kw, name, lang, posts, None, \"list_post.tmpl\", title, deps_translatable)\n\n def gen_tasks(self):\n kw = {\n \"messages\": self.site.MESSAGES,\n \"translations\": self.site.config['TRANSLATIONS'],\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n \"create_monthly_archive\": self.site.config['CREATE_MONTHLY_ARCHIVE'],\n \"create_single_archive\": self.site.config['CREATE_SINGLE_ARCHIVE'],\n \"create_full_archives\": self.site.config['CREATE_FULL_ARCHIVES'],\n \"create_daily_archive\": self.site.config['CREATE_DAILY_ARCHIVE'],\n }\n self.site.scan_posts()\n yield self.group_task()\n # TODO add next/prev links for years\n if (kw['create_monthly_archive'] and kw['create_single_archive']) and not kw['create_full_archives']:\n raise Exception('Cannot create monthly and single archives at the same time.')\n for lang in kw[\"translations\"]:\n if kw['create_single_archive'] and not kw['create_full_archives']:\n # if we are creating one single archive\n archdata = {}\n else:\n # if we are not creating one single archive, start with all years\n archdata = self.site.posts_per_year.copy()\n if kw['create_single_archive'] or kw['create_full_archives']:\n # if we are creating one single archive, or full archives\n archdata[None] = self.site.posts # for create_single_archive\n\n for year, posts in archdata.items():\n # Add archive per year or total archive\n if year:\n title = kw[\"messages\"][lang][\"Posts for year %s\"] % year\n else:\n title = 
kw[\"messages\"][lang][\"Archive\"]\n deps_translatable = {}\n for k in self.site._GLOBAL_CONTEXT_TRANSLATABLE:\n deps_translatable[k] = self.site.GLOBAL_CONTEXT[k](lang)\n if not kw[\"create_monthly_archive\"] or kw[\"create_full_archives\"]:\n yield self._generate_posts_task(kw, year, lang, posts, title, deps_translatable)\n else:\n months = set([(m.split('/')[1], self.site.link(\"archive\", m, lang)) for m in self.site.posts_per_month.keys() if m.startswith(str(year))])\n months = sorted(list(months))\n months.reverse()\n items = [[nikola.utils.LocaleBorg().get_month_name(int(month), lang), link] for month, link in months]\n yield self._prepare_task(kw, year, lang, None, items, \"list.tmpl\", title, deps_translatable)\n\n if not kw[\"create_monthly_archive\"] and not kw[\"create_full_archives\"] and not kw[\"create_daily_archive\"]:\n continue # Just to avoid nesting the other loop in this if\n for yearmonth, posts in self.site.posts_per_month.items():\n # Add archive per month\n year, month = yearmonth.split('/')\n if kw[\"create_monthly_archive\"] or kw[\"create_full_archives\"]:\n title = kw[\"messages\"][lang][\"Posts for {month} {year}\"].format(\n year=year, month=nikola.utils.LocaleBorg().get_month_name(int(month), lang))\n yield self._generate_posts_task(kw, yearmonth, lang, posts, title)\n\n if not kw[\"create_full_archives\"] and not kw[\"create_daily_archive\"]:\n continue # Just to avoid nesting the other loop in this if\n # Add archive per day\n days = dict()\n for p in posts:\n if p.date.day not in days:\n days[p.date.day] = list()\n days[p.date.day].append(p)\n for day, posts in days.items():\n title = kw[\"messages\"][lang][\"Posts for {month} {day}, {year}\"].format(\n year=year, month=nikola.utils.LocaleBorg().get_month_name(int(month), lang), day=day)\n yield self._generate_posts_task(kw, yearmonth + '/{0:02d}'.format(day), lang, posts, title)\n\n if not kw['create_single_archive'] and not kw['create_full_archives']:\n # And an \"all your years\" page for yearly and monthly archives\n years = list(self.site.posts_per_year.keys())\n years.sort(reverse=True)\n kw['years'] = years\n for lang in kw[\"translations\"]:\n items = [(y, self.site.link(\"archive\", y, lang)) for y in years]\n yield self._prepare_task(kw, None, lang, None, items, \"list.tmpl\", kw[\"messages\"][lang][\"Archive\"])\n\n def archive_path(self, name, lang):\n if name:\n return [_f for _f in [self.site.config['TRANSLATIONS'][lang],\n self.site.config['ARCHIVE_PATH'], name,\n self.site.config['INDEX_FILE']] if _f]\n else:\n return [_f for _f in [self.site.config['TRANSLATIONS'][lang],\n self.site.config['ARCHIVE_PATH'],\n self.site.config['ARCHIVE_FILENAME']] if _f]\n", "path": "nikola/plugins/task/archive.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR 
IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport os\n\n# for tearDown with _reload we cannot use 'import from' to access LocaleBorg\nimport nikola.utils\nfrom nikola.plugin_categories import Task\nfrom nikola.utils import config_changed\n\n\nclass Archive(Task):\n \"\"\"Render the post archives.\"\"\"\n\n name = \"render_archive\"\n\n def set_site(self, site):\n site.register_path_handler('archive', self.archive_path)\n return super(Archive, self).set_site(site)\n\n def _prepare_task(self, kw, name, lang, posts, items, template_name,\n title, deps_translatable=None):\n # name: used to build permalink and destination\n # posts, items: posts or items; only one of them should be used,\n # the other be None\n # template_name: name of the template to use\n # title: the (translated) title for the generated page\n # deps_translatable: dependencies (None if not added)\n assert posts is not None or items is not None\n\n context = {}\n context[\"lang\"] = lang\n context[\"title\"] = title\n context[\"permalink\"] = self.site.link(\"archive\", name, lang)\n if posts is not None:\n context[\"posts\"] = posts\n n = len(posts)\n else:\n context[\"items\"] = items\n n = len(items)\n task = self.site.generic_post_list_renderer(\n lang,\n [],\n os.path.join(kw['output_folder'], self.site.path(\"archive\", name, lang)),\n template_name,\n kw['filters'],\n context,\n )\n\n task_cfg = {1: task['uptodate'][0].config, 2: kw, 3: n}\n if deps_translatable is not None:\n task_cfg[4] = deps_translatable\n task['uptodate'] = [config_changed(task_cfg)]\n task['basename'] = self.name\n return task\n\n def _generate_posts_task(self, kw, name, lang, posts, title, deps_translatable=None):\n posts = sorted(posts, key=lambda a: a.date)\n posts.reverse()\n yield self._prepare_task(kw, name, lang, posts, None, \"list_post.tmpl\", title, deps_translatable)\n\n def gen_tasks(self):\n kw = {\n \"messages\": self.site.MESSAGES,\n \"translations\": self.site.config['TRANSLATIONS'],\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n \"create_monthly_archive\": self.site.config['CREATE_MONTHLY_ARCHIVE'],\n \"create_single_archive\": self.site.config['CREATE_SINGLE_ARCHIVE'],\n \"show_untranslated_posts\": self.site.config['SHOW_UNTRANSLATED_POSTS'],\n \"create_full_archives\": self.site.config['CREATE_FULL_ARCHIVES'],\n \"create_daily_archive\": self.site.config['CREATE_DAILY_ARCHIVE'],\n }\n self.site.scan_posts()\n yield self.group_task()\n # TODO add next/prev links for years\n if (kw['create_monthly_archive'] and kw['create_single_archive']) and not kw['create_full_archives']:\n raise Exception('Cannot create monthly and single archives at the same time.')\n for lang in kw[\"translations\"]:\n if kw['create_single_archive'] and not kw['create_full_archives']:\n # if we are creating one single archive\n archdata = {}\n else:\n # if we are not creating one single archive, start with all years\n archdata = self.site.posts_per_year.copy()\n if kw['create_single_archive'] or kw['create_full_archives']:\n # if we are creating one single archive, or full archives\n archdata[None] = self.site.posts # for 
create_single_archive\n\n # Filter untranslated posts (Issue #1360)\n if not kw[\"show_untranslated_posts\"]:\n for year, posts in archdata.items():\n archdata[year] = [p for p in posts if lang in p.translated_to]\n\n for year, posts in archdata.items():\n # Add archive per year or total archive\n if year:\n title = kw[\"messages\"][lang][\"Posts for year %s\"] % year\n else:\n title = kw[\"messages\"][lang][\"Archive\"]\n deps_translatable = {}\n for k in self.site._GLOBAL_CONTEXT_TRANSLATABLE:\n deps_translatable[k] = self.site.GLOBAL_CONTEXT[k](lang)\n if not kw[\"create_monthly_archive\"] or kw[\"create_full_archives\"]:\n yield self._generate_posts_task(kw, year, lang, posts, title, deps_translatable)\n else:\n months = set([(m.split('/')[1], self.site.link(\"archive\", m, lang)) for m in self.site.posts_per_month.keys() if m.startswith(str(year))])\n months = sorted(list(months))\n months.reverse()\n items = [[nikola.utils.LocaleBorg().get_month_name(int(month), lang), link] for month, link in months]\n yield self._prepare_task(kw, year, lang, None, items, \"list.tmpl\", title, deps_translatable)\n\n if not kw[\"create_monthly_archive\"] and not kw[\"create_full_archives\"] and not kw[\"create_daily_archive\"]:\n continue # Just to avoid nesting the other loop in this if\n for yearmonth, posts in self.site.posts_per_month.items():\n # Add archive per month\n year, month = yearmonth.split('/')\n if kw[\"create_monthly_archive\"] or kw[\"create_full_archives\"]:\n title = kw[\"messages\"][lang][\"Posts for {month} {year}\"].format(\n year=year, month=nikola.utils.LocaleBorg().get_month_name(int(month), lang))\n yield self._generate_posts_task(kw, yearmonth, lang, posts, title)\n\n if not kw[\"create_full_archives\"] and not kw[\"create_daily_archive\"]:\n continue # Just to avoid nesting the other loop in this if\n # Add archive per day\n days = dict()\n for p in posts:\n if p.date.day not in days:\n days[p.date.day] = list()\n days[p.date.day].append(p)\n for day, posts in days.items():\n title = kw[\"messages\"][lang][\"Posts for {month} {day}, {year}\"].format(\n year=year, month=nikola.utils.LocaleBorg().get_month_name(int(month), lang), day=day)\n yield self._generate_posts_task(kw, yearmonth + '/{0:02d}'.format(day), lang, posts, title)\n\n if not kw['create_single_archive'] and not kw['create_full_archives']:\n # And an \"all your years\" page for yearly and monthly archives\n years = list(self.site.posts_per_year.keys())\n years.sort(reverse=True)\n kw['years'] = years\n for lang in kw[\"translations\"]:\n items = [(y, self.site.link(\"archive\", y, lang)) for y in years]\n yield self._prepare_task(kw, None, lang, None, items, \"list.tmpl\", kw[\"messages\"][lang][\"Archive\"])\n\n def archive_path(self, name, lang):\n if name:\n return [_f for _f in [self.site.config['TRANSLATIONS'][lang],\n self.site.config['ARCHIVE_PATH'], name,\n self.site.config['INDEX_FILE']] if _f]\n else:\n return [_f for _f in [self.site.config['TRANSLATIONS'][lang],\n self.site.config['ARCHIVE_PATH'],\n self.site.config['ARCHIVE_FILENAME']] if _f]\n", "path": "nikola/plugins/task/archive.py"}]} | 2,529 | 275 |
gh_patches_debug_33126 | rasdani/github-patches | git_diff | Flexget__Flexget-1474 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unicode issues with exec plugin
Same as https://github.com/Flexget/Flexget/issues/1269, but with exec plugin
It was working well recently, but now it crashes if the given path contains unicode characters (e.g. "ñ").
Running flexget version 2.1.23 installed using pip, on Windows 8.1 x64 + Python 2.7.12
Relevant snippet of crash report (crash_report.2016.07.25.115814396000.log):
```
2016-07-25 11:58 CRITICAL task remove-folders BUG: Unhandled error in plugin exec: 'ascii' codec can't encode character u'\xf1' in position 124: ordinal not in range(128)
Traceback (most recent call last):
File "c:\python27\lib\site-packages\flexget\task.py", line 444, in __run_plugin
return method(*args, **kwargs)
File "c:\python27\lib\site-packages\flexget\event.py", line 23, in __call__
return self.func(*args, **kwargs)
File "c:\python27\lib\site-packages\flexget\plugins\output\exec.py", line 192, in phase_handler
self.execute(task, 'on_' + phase, config)
File "c:\python27\lib\site-packages\flexget\plugins\output\exec.py", line 163, in execute
if (self.execute_cmd(cmd, allow_background, config['encoding']) != 0 and
File "c:\python27\lib\site-packages\flexget\plugins\output\exec.py", line 109, in execute_cmd
stderr=subprocess.STDOUT, close_fds=False)
File "c:\python27\lib\subprocess.py", line 711, in __init__
errread, errwrite)
File "c:\python27\lib\subprocess.py", line 929, in _execute_child
args = '{} /c "{}"'.format (comspec, args)
UnicodeEncodeError: 'ascii' codec can't encode character u'\xf1' in position 124: ordinal not in range(128)
```
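For context, a minimal sketch of the failure mode and the usual workaround on Python 2 — the command string and the `cp1252` encoding below are illustrative assumptions, not values taken from the report or from the plugin's code:

```python
# -*- coding: utf-8 -*-
# On Python 2 / Windows, subprocess builds the final command line as a byte
# string, so a unicode cmd gets implicitly encoded with the ascii codec and
# any non-ASCII character raises UnicodeEncodeError before the process starts.
cmd = u'echo "espa\xf1a" > out.txt'   # hypothetical command containing "ñ"

try:
    cmd.encode('ascii')               # roughly what subprocess does implicitly
except UnicodeEncodeError as err:
    print('would crash inside Popen: %s' % err)

# Workaround sketch: hand Popen a native (byte) string encoded with the
# console/filesystem encoding instead of a unicode object.
native_cmd = cmd.encode('cp1252')     # assumed Windows console encoding
```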
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/plugins/output/exec.py`
Content:
```
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # pylint: disable=unused-import, redefined-builtin
3 from past.builtins import basestring
4
5 import logging
6 import subprocess
7
8 from flexget import plugin
9 from flexget.entry import Entry
10 from flexget.event import event
11 from flexget.config_schema import one_or_more
12 from flexget.utils.template import render_from_entry, render_from_task, RenderError
13 from flexget.utils.tools import io_encoding, native_str_to_text
14
15 log = logging.getLogger('exec')
16
17
18 class EscapingEntry(Entry):
19 """Helper class, same as a Entry, but returns all string value with quotes escaped."""
20
21 def __init__(self, entry):
22 super(EscapingEntry, self).__init__(entry)
23
24 def __getitem__(self, key):
25 value = super(EscapingEntry, self).__getitem__(key)
26 # TODO: May need to be different depending on OS
27 if isinstance(value, basestring):
28 value = value.replace('"', '\\"')
29 return value
30
31
32 class PluginExec(object):
33 """
34 Execute commands
35
36 Simple example, xecute command for entries that reach output::
37
38 exec: echo 'found {{title}} at {{url}}' > file
39
40 Advanced Example::
41
42 exec:
43 on_start:
44 phase: echo "Started"
45 on_input:
46 for_entries: echo 'got {{title}}'
47 on_output:
48 for_accepted: echo 'accepted {{title}} - {{url}} > file
49
50 You can use all (available) entry fields in the command.
51 """
52
53 NAME = 'exec'
54 HANDLED_PHASES = ['start', 'input', 'filter', 'output', 'exit']
55
56 schema = {
57 'oneOf': [
58 one_or_more({'type': 'string'}),
59 {
60 'type': 'object',
61 'properties': {
62 'on_start': {'$ref': '#/definitions/phaseSettings'},
63 'on_input': {'$ref': '#/definitions/phaseSettings'},
64 'on_filter': {'$ref': '#/definitions/phaseSettings'},
65 'on_output': {'$ref': '#/definitions/phaseSettings'},
66 'on_exit': {'$ref': '#/definitions/phaseSettings'},
67 'fail_entries': {'type': 'boolean'},
68 'auto_escape': {'type': 'boolean'},
69 'encoding': {'type': 'string'},
70 'allow_background': {'type': 'boolean'}
71 },
72 'additionalProperties': False
73 }
74 ],
75 'definitions': {
76 'phaseSettings': {
77 'type': 'object',
78 'properties': {
79 'phase': one_or_more({'type': 'string'}),
80 'for_entries': one_or_more({'type': 'string'}),
81 'for_accepted': one_or_more({'type': 'string'}),
82 'for_rejected': one_or_more({'type': 'string'}),
83 'for_failed': one_or_more({'type': 'string'})
84 },
85 'additionalProperties': False
86 }
87 }
88 }
89
90 def prepare_config(self, config):
91 if isinstance(config, basestring):
92 config = [config]
93 if isinstance(config, list):
94 config = {'on_output': {'for_accepted': config}}
95 if not config.get('encoding'):
96 config['encoding'] = io_encoding
97 for phase_name in config:
98 if phase_name.startswith('on_'):
99 for items_name in config[phase_name]:
100 if isinstance(config[phase_name][items_name], basestring):
101 config[phase_name][items_name] = [config[phase_name][items_name]]
102
103 return config
104
105 def execute_cmd(self, cmd, allow_background, encoding):
106 log.verbose('Executing: %s' % cmd)
107 # if PY2: cmd = cmd.encode(encoding) ?
108 p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
109 stderr=subprocess.STDOUT, close_fds=False)
110 if not allow_background:
111 (r, w) = (p.stdout, p.stdin)
112 response = native_str_to_text(r.read(), encoding=encoding, errors='replace')
113 r.close()
114 w.close()
115 if response:
116 log.info('Stdout: %s' % response)
117 return p.wait()
118
119 def execute(self, task, phase_name, config):
120 config = self.prepare_config(config)
121 if phase_name not in config:
122 log.debug('phase %s not configured' % phase_name)
123 return
124
125 name_map = {'for_entries': task.entries, 'for_accepted': task.accepted,
126 'for_rejected': task.rejected, 'for_failed': task.failed}
127
128 allow_background = config.get('allow_background')
129 for operation, entries in name_map.items():
130 if operation not in config[phase_name]:
131 continue
132
133 log.debug('running phase_name: %s operation: %s entries: %s' % (phase_name, operation, len(entries)))
134
135 for entry in entries:
136 for cmd in config[phase_name][operation]:
137 entrydict = EscapingEntry(entry) if config.get('auto_escape') else entry
138 # Do string replacement from entry, but make sure quotes get escaped
139 try:
140 cmd = render_from_entry(cmd, entrydict)
141 except RenderError as e:
142 log.error('Could not set exec command for %s: %s' % (entry['title'], e))
143 # fail the entry if configured to do so
144 if config.get('fail_entries'):
145 entry.fail('Entry `%s` does not have required fields for string replacement.' %
146 entry['title'])
147 continue
148
149 log.debug('phase_name: %s operation: %s cmd: %s' % (phase_name, operation, cmd))
150 if task.options.test:
151 log.info('Would execute: %s' % cmd)
152 else:
153 # Make sure the command can be encoded into appropriate encoding, don't actually encode yet,
154 # so logging continues to work.
155 try:
156 cmd.encode(config['encoding'])
157 except UnicodeEncodeError:
158 log.error('Unable to encode cmd `%s` to %s' % (cmd, config['encoding']))
159 if config.get('fail_entries'):
160 entry.fail('cmd `%s` could not be encoded to %s.' % (cmd, config['encoding']))
161 continue
162 # Run the command, fail entries with non-zero return code if configured to
163 if (self.execute_cmd(cmd, allow_background, config['encoding']) != 0 and
164 config.get('fail_entries')):
165 entry.fail('exec return code was non-zero')
166
167 # phase keyword in this
168 if 'phase' in config[phase_name]:
169 for cmd in config[phase_name]['phase']:
170 try:
171 cmd = render_from_task(cmd, task)
172 except RenderError as e:
173 log.error('Error rendering `%s`: %s' % (cmd, e))
174 else:
175 log.debug('phase cmd: %s' % cmd)
176 if task.options.test:
177 log.info('Would execute: %s' % cmd)
178 else:
179 self.execute_cmd(cmd, allow_background, config['encoding'])
180
181 def __getattr__(self, item):
182 """Creates methods to handle task phases."""
183 for phase in self.HANDLED_PHASES:
184 if item == plugin.phase_methods[phase]:
185 # A phase method we handle has been requested
186 break
187 else:
188 # We don't handle this phase
189 raise AttributeError(item)
190
191 def phase_handler(task, config):
192 self.execute(task, 'on_' + phase, config)
193
194 # Make sure we run after other plugins so exec can use their output
195 phase_handler.priority = 100
196 return phase_handler
197
198
199 @event('plugin.register')
200 def register_plugin():
201 plugin.register(PluginExec, 'exec', api_ver=2)
202
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flexget/plugins/output/exec.py b/flexget/plugins/output/exec.py
--- a/flexget/plugins/output/exec.py
+++ b/flexget/plugins/output/exec.py
@@ -1,6 +1,7 @@
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
+from future.utils import text_to_native_str
import logging
import subprocess
@@ -10,7 +11,7 @@
from flexget.event import event
from flexget.config_schema import one_or_more
from flexget.utils.template import render_from_entry, render_from_task, RenderError
-from flexget.utils.tools import io_encoding, native_str_to_text
+from flexget.utils.tools import io_encoding
log = logging.getLogger('exec')
@@ -103,17 +104,16 @@
return config
def execute_cmd(self, cmd, allow_background, encoding):
- log.verbose('Executing: %s' % cmd)
- # if PY2: cmd = cmd.encode(encoding) ?
- p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT, close_fds=False)
+ log.verbose('Executing: %s', cmd)
+ p = subprocess.Popen(text_to_native_str(cmd, encoding=io_encoding), shell=True, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=False)
if not allow_background:
- (r, w) = (p.stdout, p.stdin)
- response = native_str_to_text(r.read(), encoding=encoding, errors='replace')
+ r, w = (p.stdout, p.stdin)
+ response = r.read().decode(io_encoding)
r.close()
w.close()
if response:
- log.info('Stdout: %s' % response)
+ log.info('Stdout: %s', response.rstrip()) # rstrip to get rid of newlines
return p.wait()
def execute(self, task, phase_name, config):
| {"golden_diff": "diff --git a/flexget/plugins/output/exec.py b/flexget/plugins/output/exec.py\n--- a/flexget/plugins/output/exec.py\n+++ b/flexget/plugins/output/exec.py\n@@ -1,6 +1,7 @@\n from __future__ import unicode_literals, division, absolute_import\n from builtins import * # pylint: disable=unused-import, redefined-builtin\n from past.builtins import basestring\n+from future.utils import text_to_native_str\n \n import logging\n import subprocess\n@@ -10,7 +11,7 @@\n from flexget.event import event\n from flexget.config_schema import one_or_more\n from flexget.utils.template import render_from_entry, render_from_task, RenderError\n-from flexget.utils.tools import io_encoding, native_str_to_text\n+from flexget.utils.tools import io_encoding\n \n log = logging.getLogger('exec')\n \n@@ -103,17 +104,16 @@\n return config\n \n def execute_cmd(self, cmd, allow_background, encoding):\n- log.verbose('Executing: %s' % cmd)\n- # if PY2: cmd = cmd.encode(encoding) ?\n- p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n- stderr=subprocess.STDOUT, close_fds=False)\n+ log.verbose('Executing: %s', cmd)\n+ p = subprocess.Popen(text_to_native_str(cmd, encoding=io_encoding), shell=True, stdin=subprocess.PIPE,\n+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=False)\n if not allow_background:\n- (r, w) = (p.stdout, p.stdin)\n- response = native_str_to_text(r.read(), encoding=encoding, errors='replace')\n+ r, w = (p.stdout, p.stdin)\n+ response = r.read().decode(io_encoding)\n r.close()\n w.close()\n if response:\n- log.info('Stdout: %s' % response)\n+ log.info('Stdout: %s', response.rstrip()) # rstrip to get rid of newlines\n return p.wait()\n \n def execute(self, task, phase_name, config):\n", "issue": "Unicode issues with exec plugin\nSame as https://github.com/Flexget/Flexget/issues/1269, but with exec plugin\n\nIt was working well recently, but now if the given path contains unicode (e. g. 
\"\u00f1\") characters it crashes.\n\nRunning flexget version 2.1.23 installed using pip, on Windows 8.1 x64 + Python 2.7.12\n\nRelevant snippet of crash report (crash_report.2016.07.25.115814396000.log): \n\n```\n2016-07-25 11:58 CRITICAL task remove-folders BUG: Unhandled error in plugin exec: 'ascii' codec can't encode character u'\\xf1' in position 124: ordinal not in range(128)\nTraceback (most recent call last):\n File \"c:\\python27\\lib\\site-packages\\flexget\\task.py\", line 444, in __run_plugin\n return method(*args, **kwargs)\n File \"c:\\python27\\lib\\site-packages\\flexget\\event.py\", line 23, in __call__\n return self.func(*args, **kwargs)\n File \"c:\\python27\\lib\\site-packages\\flexget\\plugins\\output\\exec.py\", line 192, in phase_handler\n self.execute(task, 'on_' + phase, config)\n File \"c:\\python27\\lib\\site-packages\\flexget\\plugins\\output\\exec.py\", line 163, in execute\n if (self.execute_cmd(cmd, allow_background, config['encoding']) != 0 and\n File \"c:\\python27\\lib\\site-packages\\flexget\\plugins\\output\\exec.py\", line 109, in execute_cmd\n stderr=subprocess.STDOUT, close_fds=False)\n File \"c:\\python27\\lib\\subprocess.py\", line 711, in __init__\n errread, errwrite)\n File \"c:\\python27\\lib\\subprocess.py\", line 929, in _execute_child\n args = '{} /c \"{}\"'.format (comspec, args)\nUnicodeEncodeError: 'ascii' codec can't encode character u'\\xf1' in position 124: ordinal not in range(128)\n```\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # pylint: disable=unused-import, redefined-builtin\nfrom past.builtins import basestring\n\nimport logging\nimport subprocess\n\nfrom flexget import plugin\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.config_schema import one_or_more\nfrom flexget.utils.template import render_from_entry, render_from_task, RenderError\nfrom flexget.utils.tools import io_encoding, native_str_to_text\n\nlog = logging.getLogger('exec')\n\n\nclass EscapingEntry(Entry):\n \"\"\"Helper class, same as a Entry, but returns all string value with quotes escaped.\"\"\"\n\n def __init__(self, entry):\n super(EscapingEntry, self).__init__(entry)\n\n def __getitem__(self, key):\n value = super(EscapingEntry, self).__getitem__(key)\n # TODO: May need to be different depending on OS\n if isinstance(value, basestring):\n value = value.replace('\"', '\\\\\"')\n return value\n\n\nclass PluginExec(object):\n \"\"\"\n Execute commands\n\n Simple example, xecute command for entries that reach output::\n\n exec: echo 'found {{title}} at {{url}}' > file\n\n Advanced Example::\n\n exec:\n on_start:\n phase: echo \"Started\"\n on_input:\n for_entries: echo 'got {{title}}'\n on_output:\n for_accepted: echo 'accepted {{title}} - {{url}} > file\n\n You can use all (available) entry fields in the command.\n \"\"\"\n\n NAME = 'exec'\n HANDLED_PHASES = ['start', 'input', 'filter', 'output', 'exit']\n\n schema = {\n 'oneOf': [\n one_or_more({'type': 'string'}),\n {\n 'type': 'object',\n 'properties': {\n 'on_start': {'$ref': '#/definitions/phaseSettings'},\n 'on_input': {'$ref': '#/definitions/phaseSettings'},\n 'on_filter': {'$ref': '#/definitions/phaseSettings'},\n 'on_output': {'$ref': '#/definitions/phaseSettings'},\n 'on_exit': {'$ref': '#/definitions/phaseSettings'},\n 'fail_entries': {'type': 'boolean'},\n 'auto_escape': {'type': 'boolean'},\n 'encoding': {'type': 'string'},\n 'allow_background': {'type': 'boolean'}\n },\n 
'additionalProperties': False\n }\n ],\n 'definitions': {\n 'phaseSettings': {\n 'type': 'object',\n 'properties': {\n 'phase': one_or_more({'type': 'string'}),\n 'for_entries': one_or_more({'type': 'string'}),\n 'for_accepted': one_or_more({'type': 'string'}),\n 'for_rejected': one_or_more({'type': 'string'}),\n 'for_failed': one_or_more({'type': 'string'})\n },\n 'additionalProperties': False\n }\n }\n }\n\n def prepare_config(self, config):\n if isinstance(config, basestring):\n config = [config]\n if isinstance(config, list):\n config = {'on_output': {'for_accepted': config}}\n if not config.get('encoding'):\n config['encoding'] = io_encoding\n for phase_name in config:\n if phase_name.startswith('on_'):\n for items_name in config[phase_name]:\n if isinstance(config[phase_name][items_name], basestring):\n config[phase_name][items_name] = [config[phase_name][items_name]]\n\n return config\n\n def execute_cmd(self, cmd, allow_background, encoding):\n log.verbose('Executing: %s' % cmd)\n # if PY2: cmd = cmd.encode(encoding) ?\n p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, close_fds=False)\n if not allow_background:\n (r, w) = (p.stdout, p.stdin)\n response = native_str_to_text(r.read(), encoding=encoding, errors='replace')\n r.close()\n w.close()\n if response:\n log.info('Stdout: %s' % response)\n return p.wait()\n\n def execute(self, task, phase_name, config):\n config = self.prepare_config(config)\n if phase_name not in config:\n log.debug('phase %s not configured' % phase_name)\n return\n\n name_map = {'for_entries': task.entries, 'for_accepted': task.accepted,\n 'for_rejected': task.rejected, 'for_failed': task.failed}\n\n allow_background = config.get('allow_background')\n for operation, entries in name_map.items():\n if operation not in config[phase_name]:\n continue\n\n log.debug('running phase_name: %s operation: %s entries: %s' % (phase_name, operation, len(entries)))\n\n for entry in entries:\n for cmd in config[phase_name][operation]:\n entrydict = EscapingEntry(entry) if config.get('auto_escape') else entry\n # Do string replacement from entry, but make sure quotes get escaped\n try:\n cmd = render_from_entry(cmd, entrydict)\n except RenderError as e:\n log.error('Could not set exec command for %s: %s' % (entry['title'], e))\n # fail the entry if configured to do so\n if config.get('fail_entries'):\n entry.fail('Entry `%s` does not have required fields for string replacement.' %\n entry['title'])\n continue\n\n log.debug('phase_name: %s operation: %s cmd: %s' % (phase_name, operation, cmd))\n if task.options.test:\n log.info('Would execute: %s' % cmd)\n else:\n # Make sure the command can be encoded into appropriate encoding, don't actually encode yet,\n # so logging continues to work.\n try:\n cmd.encode(config['encoding'])\n except UnicodeEncodeError:\n log.error('Unable to encode cmd `%s` to %s' % (cmd, config['encoding']))\n if config.get('fail_entries'):\n entry.fail('cmd `%s` could not be encoded to %s.' 
% (cmd, config['encoding']))\n continue\n # Run the command, fail entries with non-zero return code if configured to\n if (self.execute_cmd(cmd, allow_background, config['encoding']) != 0 and\n config.get('fail_entries')):\n entry.fail('exec return code was non-zero')\n\n # phase keyword in this\n if 'phase' in config[phase_name]:\n for cmd in config[phase_name]['phase']:\n try:\n cmd = render_from_task(cmd, task)\n except RenderError as e:\n log.error('Error rendering `%s`: %s' % (cmd, e))\n else:\n log.debug('phase cmd: %s' % cmd)\n if task.options.test:\n log.info('Would execute: %s' % cmd)\n else:\n self.execute_cmd(cmd, allow_background, config['encoding'])\n\n def __getattr__(self, item):\n \"\"\"Creates methods to handle task phases.\"\"\"\n for phase in self.HANDLED_PHASES:\n if item == plugin.phase_methods[phase]:\n # A phase method we handle has been requested\n break\n else:\n # We don't handle this phase\n raise AttributeError(item)\n\n def phase_handler(task, config):\n self.execute(task, 'on_' + phase, config)\n\n # Make sure we run after other plugins so exec can use their output\n phase_handler.priority = 100\n return phase_handler\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(PluginExec, 'exec', api_ver=2)\n", "path": "flexget/plugins/output/exec.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # pylint: disable=unused-import, redefined-builtin\nfrom past.builtins import basestring\nfrom future.utils import text_to_native_str\n\nimport logging\nimport subprocess\n\nfrom flexget import plugin\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.config_schema import one_or_more\nfrom flexget.utils.template import render_from_entry, render_from_task, RenderError\nfrom flexget.utils.tools import io_encoding\n\nlog = logging.getLogger('exec')\n\n\nclass EscapingEntry(Entry):\n \"\"\"Helper class, same as a Entry, but returns all string value with quotes escaped.\"\"\"\n\n def __init__(self, entry):\n super(EscapingEntry, self).__init__(entry)\n\n def __getitem__(self, key):\n value = super(EscapingEntry, self).__getitem__(key)\n # TODO: May need to be different depending on OS\n if isinstance(value, basestring):\n value = value.replace('\"', '\\\\\"')\n return value\n\n\nclass PluginExec(object):\n \"\"\"\n Execute commands\n\n Simple example, xecute command for entries that reach output::\n\n exec: echo 'found {{title}} at {{url}}' > file\n\n Advanced Example::\n\n exec:\n on_start:\n phase: echo \"Started\"\n on_input:\n for_entries: echo 'got {{title}}'\n on_output:\n for_accepted: echo 'accepted {{title}} - {{url}} > file\n\n You can use all (available) entry fields in the command.\n \"\"\"\n\n NAME = 'exec'\n HANDLED_PHASES = ['start', 'input', 'filter', 'output', 'exit']\n\n schema = {\n 'oneOf': [\n one_or_more({'type': 'string'}),\n {\n 'type': 'object',\n 'properties': {\n 'on_start': {'$ref': '#/definitions/phaseSettings'},\n 'on_input': {'$ref': '#/definitions/phaseSettings'},\n 'on_filter': {'$ref': '#/definitions/phaseSettings'},\n 'on_output': {'$ref': '#/definitions/phaseSettings'},\n 'on_exit': {'$ref': '#/definitions/phaseSettings'},\n 'fail_entries': {'type': 'boolean'},\n 'auto_escape': {'type': 'boolean'},\n 'encoding': {'type': 'string'},\n 'allow_background': {'type': 'boolean'}\n },\n 'additionalProperties': False\n }\n ],\n 'definitions': {\n 'phaseSettings': {\n 'type': 'object',\n 'properties': {\n 'phase': 
one_or_more({'type': 'string'}),\n 'for_entries': one_or_more({'type': 'string'}),\n 'for_accepted': one_or_more({'type': 'string'}),\n 'for_rejected': one_or_more({'type': 'string'}),\n 'for_failed': one_or_more({'type': 'string'})\n },\n 'additionalProperties': False\n }\n }\n }\n\n def prepare_config(self, config):\n if isinstance(config, basestring):\n config = [config]\n if isinstance(config, list):\n config = {'on_output': {'for_accepted': config}}\n if not config.get('encoding'):\n config['encoding'] = io_encoding\n for phase_name in config:\n if phase_name.startswith('on_'):\n for items_name in config[phase_name]:\n if isinstance(config[phase_name][items_name], basestring):\n config[phase_name][items_name] = [config[phase_name][items_name]]\n\n return config\n\n def execute_cmd(self, cmd, allow_background, encoding):\n log.verbose('Executing: %s', cmd)\n p = subprocess.Popen(text_to_native_str(cmd, encoding=io_encoding), shell=True, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=False)\n if not allow_background:\n r, w = (p.stdout, p.stdin)\n response = r.read().decode(io_encoding)\n r.close()\n w.close()\n if response:\n log.info('Stdout: %s', response.rstrip()) # rstrip to get rid of newlines\n return p.wait()\n\n def execute(self, task, phase_name, config):\n config = self.prepare_config(config)\n if phase_name not in config:\n log.debug('phase %s not configured' % phase_name)\n return\n\n name_map = {'for_entries': task.entries, 'for_accepted': task.accepted,\n 'for_rejected': task.rejected, 'for_failed': task.failed}\n\n allow_background = config.get('allow_background')\n for operation, entries in name_map.items():\n if operation not in config[phase_name]:\n continue\n\n log.debug('running phase_name: %s operation: %s entries: %s' % (phase_name, operation, len(entries)))\n\n for entry in entries:\n for cmd in config[phase_name][operation]:\n entrydict = EscapingEntry(entry) if config.get('auto_escape') else entry\n # Do string replacement from entry, but make sure quotes get escaped\n try:\n cmd = render_from_entry(cmd, entrydict)\n except RenderError as e:\n log.error('Could not set exec command for %s: %s' % (entry['title'], e))\n # fail the entry if configured to do so\n if config.get('fail_entries'):\n entry.fail('Entry `%s` does not have required fields for string replacement.' %\n entry['title'])\n continue\n\n log.debug('phase_name: %s operation: %s cmd: %s' % (phase_name, operation, cmd))\n if task.options.test:\n log.info('Would execute: %s' % cmd)\n else:\n # Make sure the command can be encoded into appropriate encoding, don't actually encode yet,\n # so logging continues to work.\n try:\n cmd.encode(config['encoding'])\n except UnicodeEncodeError:\n log.error('Unable to encode cmd `%s` to %s' % (cmd, config['encoding']))\n if config.get('fail_entries'):\n entry.fail('cmd `%s` could not be encoded to %s.' 
% (cmd, config['encoding']))\n continue\n # Run the command, fail entries with non-zero return code if configured to\n if (self.execute_cmd(cmd, allow_background, config['encoding']) != 0 and\n config.get('fail_entries')):\n entry.fail('exec return code was non-zero')\n\n # phase keyword in this\n if 'phase' in config[phase_name]:\n for cmd in config[phase_name]['phase']:\n try:\n cmd = render_from_task(cmd, task)\n except RenderError as e:\n log.error('Error rendering `%s`: %s' % (cmd, e))\n else:\n log.debug('phase cmd: %s' % cmd)\n if task.options.test:\n log.info('Would execute: %s' % cmd)\n else:\n self.execute_cmd(cmd, allow_background, config['encoding'])\n\n def __getattr__(self, item):\n \"\"\"Creates methods to handle task phases.\"\"\"\n for phase in self.HANDLED_PHASES:\n if item == plugin.phase_methods[phase]:\n # A phase method we handle has been requested\n break\n else:\n # We don't handle this phase\n raise AttributeError(item)\n\n def phase_handler(task, config):\n self.execute(task, 'on_' + phase, config)\n\n # Make sure we run after other plugins so exec can use their output\n phase_handler.priority = 100\n return phase_handler\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(PluginExec, 'exec', api_ver=2)\n", "path": "flexget/plugins/output/exec.py"}]} | 2,970 | 461 |
gh_patches_debug_50420 | rasdani/github-patches | git_diff | litestar-org__litestar-2330 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
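For illustration, a minimal sketch of the kind of package-data loader the report describes — the class and method names are assumptions for this sketch and are not taken from Starlite's ``FileSystemProtocol``:

```python
# Hypothetical loader that serves static files bundled inside an installed
# package; importlib.resources resolves them even when the package is zipped,
# so there is never an on-disk directory for a DirectoryPath check to validate.
from importlib import resources


class PackageFileSystem:
    def __init__(self, package: str) -> None:
        self.package = package

    def read_bytes(self, path: str) -> bytes:
        return resources.files(self.package).joinpath(path).read_bytes()


# Usage sketch: PackageFileSystem("myapp.static").read_bytes("index.html")
```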
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/openapi/spec/enums.py`
Content:
```
1 from enum import Enum
2
3 __all__ = ("OpenAPIFormat", "OpenAPIType")
4
5
6 class OpenAPIFormat(str, Enum):
7 """Formats extracted from: https://datatracker.ietf.org/doc/html/draft-bhutton-json-schema-validation-00#page-13"""
8
9 DATE = "date"
10 DATE_TIME = "date-time"
11 TIME = "time"
12 DURATION = "duration"
13 URL = "url"
14 EMAIL = "email"
15 IDN_EMAIL = "idn-email"
16 HOST_NAME = "hostname"
17 IDN_HOST_NAME = "idn-hostname"
18 IPV4 = "ipv4"
19 IPV6 = "ipv6"
20 URI = "uri"
21 URI_REFERENCE = "uri-reference"
22 URI_TEMPLATE = "uri-template"
23 JSON_POINTER = "json-pointer"
24 RELATIVE_JSON_POINTER = "relative-json-pointer"
25 IRI = "iri-reference"
26 IRI_REFERENCE = "iri-reference" # noqa: PIE796
27 UUID = "uuid"
28 REGEX = "regex"
29
30
31 class OpenAPIType(str, Enum):
32 """An OopenAPI type."""
33
34 ARRAY = "array"
35 BOOLEAN = "boolean"
36 INTEGER = "integer"
37 NULL = "null"
38 NUMBER = "number"
39 OBJECT = "object"
40 STRING = "string"
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/litestar/openapi/spec/enums.py b/litestar/openapi/spec/enums.py
--- a/litestar/openapi/spec/enums.py
+++ b/litestar/openapi/spec/enums.py
@@ -26,6 +26,7 @@
IRI_REFERENCE = "iri-reference" # noqa: PIE796
UUID = "uuid"
REGEX = "regex"
+ BINARY = "binary"
class OpenAPIType(str, Enum):
| {"golden_diff": "diff --git a/litestar/openapi/spec/enums.py b/litestar/openapi/spec/enums.py\n--- a/litestar/openapi/spec/enums.py\n+++ b/litestar/openapi/spec/enums.py\n@@ -26,6 +26,7 @@\n IRI_REFERENCE = \"iri-reference\" # noqa: PIE796\n UUID = \"uuid\"\n REGEX = \"regex\"\n+ BINARY = \"binary\"\n \n \n class OpenAPIType(str, Enum):\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from enum import Enum\n\n__all__ = (\"OpenAPIFormat\", \"OpenAPIType\")\n\n\nclass OpenAPIFormat(str, Enum):\n \"\"\"Formats extracted from: https://datatracker.ietf.org/doc/html/draft-bhutton-json-schema-validation-00#page-13\"\"\"\n\n DATE = \"date\"\n DATE_TIME = \"date-time\"\n TIME = \"time\"\n DURATION = \"duration\"\n URL = \"url\"\n EMAIL = \"email\"\n IDN_EMAIL = \"idn-email\"\n HOST_NAME = \"hostname\"\n IDN_HOST_NAME = \"idn-hostname\"\n IPV4 = \"ipv4\"\n IPV6 = \"ipv6\"\n URI = \"uri\"\n URI_REFERENCE = \"uri-reference\"\n URI_TEMPLATE = \"uri-template\"\n JSON_POINTER = \"json-pointer\"\n RELATIVE_JSON_POINTER = \"relative-json-pointer\"\n IRI = \"iri-reference\"\n IRI_REFERENCE = \"iri-reference\" # noqa: PIE796\n UUID = \"uuid\"\n REGEX = \"regex\"\n\n\nclass OpenAPIType(str, Enum):\n \"\"\"An OopenAPI type.\"\"\"\n\n ARRAY = \"array\"\n BOOLEAN = \"boolean\"\n INTEGER = \"integer\"\n NULL = \"null\"\n NUMBER = \"number\"\n OBJECT = \"object\"\n STRING = \"string\"\n", "path": "litestar/openapi/spec/enums.py"}], "after_files": [{"content": "from enum import Enum\n\n__all__ = (\"OpenAPIFormat\", \"OpenAPIType\")\n\n\nclass OpenAPIFormat(str, Enum):\n \"\"\"Formats extracted from: https://datatracker.ietf.org/doc/html/draft-bhutton-json-schema-validation-00#page-13\"\"\"\n\n DATE = \"date\"\n DATE_TIME = \"date-time\"\n TIME = \"time\"\n DURATION = \"duration\"\n URL = \"url\"\n EMAIL = \"email\"\n IDN_EMAIL = \"idn-email\"\n HOST_NAME = \"hostname\"\n IDN_HOST_NAME = \"idn-hostname\"\n IPV4 = \"ipv4\"\n IPV6 = \"ipv6\"\n URI = \"uri\"\n URI_REFERENCE = \"uri-reference\"\n URI_TEMPLATE = \"uri-template\"\n JSON_POINTER = \"json-pointer\"\n RELATIVE_JSON_POINTER = \"relative-json-pointer\"\n IRI = \"iri-reference\"\n IRI_REFERENCE = \"iri-reference\" # noqa: PIE796\n UUID = \"uuid\"\n REGEX = \"regex\"\n BINARY = \"binary\"\n\n\nclass OpenAPIType(str, Enum):\n \"\"\"An OopenAPI type.\"\"\"\n\n ARRAY = \"array\"\n BOOLEAN = \"boolean\"\n INTEGER = \"integer\"\n NULL = \"null\"\n NUMBER = \"number\"\n OBJECT = \"object\"\n STRING = \"string\"\n", "path": "litestar/openapi/spec/enums.py"}]} | 791 | 108 |
gh_patches_debug_11843 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-14716 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot download certain Cartoon Network videos?
## Please follow the guide below
- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly
- Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`)
- Use the *Preview* tab to see what your issue will actually look like
---
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.10.29*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
- [x] I've **verified** and **I assure** that I'm running youtube-dl **2017.10.29**
### Before submitting an *issue* make sure you have:
- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
### What is the purpose of your *issue*?
- [x] Bug report (encountered problems with youtube-dl)
- [ ] Site support request (request for adding support for a new site)
- [ ] Feature request (request for a new functionality)
- [ ] Question
- [ ] Other
---
### The following sections concretize particular purposed issues, you can erase any section (the contents between triple ---) not applicable to your *issue*
---
### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows:
Add the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl -v <your command line>`), copy the **whole** output and insert it here. It should look similar to one below (replace it with **your** log inserted between triple ```):
```
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: ['http://www.cartoonnetwork.com/video/regularshow/fre
e-cake-episode.html', '-v']
[debug] Encodings: locale cp1252, fs mbcs, out cp437, pref cp1252
[debug] youtube-dl version 2017.10.29
[debug] Python version 3.4.4 - Windows-7-6.1.7601-SP1
[debug] exe versions: ffmpeg N-62162-gec8789a, ffprobe 3.2.4, rtmpdump 2.4
[debug] Proxy map: {}
[CartoonNetwork] free-cake: Downloading webpage
[CartoonNetwork] 42de6efafe3f038ba941f061981bb5b287521da0: Downloading XML
[CartoonNetwork] 42de6efafe3f038ba941f061981bb5b287521da0: Downloading f4m manif
est
WARNING: Unable to download f4m manifest: HTTP Error 403: Forbidden
[CartoonNetwork] 42de6efafe3f038ba941f061981bb5b287521da0: Downloading XML
ERROR: UNKNOWN
Traceback (most recent call last):
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpk63cqkyt\bu
ild\youtube_dl\YoutubeDL.py", line 784, in extract_info
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpk63cqkyt\bu
ild\youtube_dl\extractor\common.py", line 434, in extract
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpk63cqkyt\bu
ild\youtube_dl\extractor\cartoonnetwork.py", line 41, in _real_extract
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpk63cqkyt\bu
ild\youtube_dl\extractor\turner.py", line 84, in _extract_cvp_info
youtube_dl.utils.ExtractorError: UNKNOWN
```
---
### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**):
- Single video: http://www.cartoonnetwork.com/video/regularshow/free-cake-episode.html
Note that **youtube-dl does not support sites dedicated to [copyright infringement](https://github.com/rg3/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
---
### Description of your *issue*, suggested solution and other information
I'm trying to download a particular video and for some reason I can't. Other Cartoon Network videos work just fine, but this series(?) doesn't seem to work. I'm not sure why some work, but some don't. I'm probably missing something... Help please?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `youtube_dl/extractor/cartoonnetwork.py`
Content:
```
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5
6 from .turner import TurnerBaseIE
7
8
9 class CartoonNetworkIE(TurnerBaseIE):
10 _VALID_URL = r'https?://(?:www\.)?cartoonnetwork\.com/video/(?:[^/]+/)+(?P<id>[^/?#]+)-(?:clip|episode)\.html'
11 _TEST = {
12 'url': 'http://www.cartoonnetwork.com/video/teen-titans-go/starfire-the-cat-lady-clip.html',
13 'info_dict': {
14 'id': '8a250ab04ed07e6c014ef3f1e2f9016c',
15 'ext': 'mp4',
16 'title': 'Starfire the Cat Lady',
17 'description': 'Robin decides to become a cat so that Starfire will finally love him.',
18 },
19 'params': {
20 # m3u8 download
21 'skip_download': True,
22 },
23 }
24
25 def _real_extract(self, url):
26 display_id = self._match_id(url)
27 webpage = self._download_webpage(url, display_id)
28 id_type, video_id = re.search(r"_cnglobal\.cvp(Video|Title)Id\s*=\s*'([^']+)';", webpage).groups()
29 query = ('id' if id_type == 'Video' else 'titleId') + '=' + video_id
30 return self._extract_cvp_info(
31 'http://www.cartoonnetwork.com/video-seo-svc/episodeservices/getCvpPlaylist?networkName=CN2&' + query, video_id, {
32 'secure': {
33 'media_src': 'http://androidhls-secure.cdn.turner.com/toon/big',
34 'tokenizer_src': 'http://www.cartoonnetwork.com/cntv/mvpd/processors/services/token_ipadAdobe.do',
35 },
36 }, {
37 'url': url,
38 'site_name': 'CartoonNetwork',
39 'auth_required': self._search_regex(
40 r'_cnglobal\.cvpFullOrPreviewAuth\s*=\s*(true|false);',
41 webpage, 'auth required', default='false') == 'true',
42 })
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/youtube_dl/extractor/cartoonnetwork.py b/youtube_dl/extractor/cartoonnetwork.py
--- a/youtube_dl/extractor/cartoonnetwork.py
+++ b/youtube_dl/extractor/cartoonnetwork.py
@@ -31,7 +31,7 @@
'http://www.cartoonnetwork.com/video-seo-svc/episodeservices/getCvpPlaylist?networkName=CN2&' + query, video_id, {
'secure': {
'media_src': 'http://androidhls-secure.cdn.turner.com/toon/big',
- 'tokenizer_src': 'http://www.cartoonnetwork.com/cntv/mvpd/processors/services/token_ipadAdobe.do',
+ 'tokenizer_src': 'https://token.vgtf.net/token/token_mobile',
},
}, {
'url': url,
| {"golden_diff": "diff --git a/youtube_dl/extractor/cartoonnetwork.py b/youtube_dl/extractor/cartoonnetwork.py\n--- a/youtube_dl/extractor/cartoonnetwork.py\n+++ b/youtube_dl/extractor/cartoonnetwork.py\n@@ -31,7 +31,7 @@\n 'http://www.cartoonnetwork.com/video-seo-svc/episodeservices/getCvpPlaylist?networkName=CN2&' + query, video_id, {\n 'secure': {\n 'media_src': 'http://androidhls-secure.cdn.turner.com/toon/big',\n- 'tokenizer_src': 'http://www.cartoonnetwork.com/cntv/mvpd/processors/services/token_ipadAdobe.do',\n+ 'tokenizer_src': 'https://token.vgtf.net/token/token_mobile',\n },\n }, {\n 'url': url,\n", "issue": "Cannot download certain Cartoon Network videos?\n## Please follow the guide below\r\n\r\n- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly\r\n- Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`)\r\n- Use the *Preview* tab to see what your issue will actually look like\r\n\r\n---\r\n\r\n### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.10.29*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.\r\n- [x] I've **verified** and **I assure** that I'm running youtube-dl **2017.10.29**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections\r\n- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n\r\n### What is the purpose of your *issue*?\r\n- [x] Bug report (encountered problems with youtube-dl)\r\n- [ ] Site support request (request for adding support for a new site)\r\n- [ ] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n\r\n---\r\n\r\n### The following sections concretize particular purposed issues, you can erase any section (the contents between triple ---) not applicable to your *issue*\r\n\r\n---\r\n\r\n### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows:\r\n\r\nAdd the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl -v <your command line>`), copy the **whole** output and insert it here. 
It should look similar to one below (replace it with **your** log inserted between triple ```):\r\n\r\n```\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: ['http://www.cartoonnetwork.com/video/regularshow/fre\r\ne-cake-episode.html', '-v']\r\n[debug] Encodings: locale cp1252, fs mbcs, out cp437, pref cp1252\r\n[debug] youtube-dl version 2017.10.29\r\n[debug] Python version 3.4.4 - Windows-7-6.1.7601-SP1\r\n[debug] exe versions: ffmpeg N-62162-gec8789a, ffprobe 3.2.4, rtmpdump 2.4\r\n[debug] Proxy map: {}\r\n[CartoonNetwork] free-cake: Downloading webpage\r\n[CartoonNetwork] 42de6efafe3f038ba941f061981bb5b287521da0: Downloading XML\r\n[CartoonNetwork] 42de6efafe3f038ba941f061981bb5b287521da0: Downloading f4m manif\r\nest\r\nWARNING: Unable to download f4m manifest: HTTP Error 403: Forbidden\r\n[CartoonNetwork] 42de6efafe3f038ba941f061981bb5b287521da0: Downloading XML\r\nERROR: UNKNOWN\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\rg3\\tmpk63cqkyt\\bu\r\nild\\youtube_dl\\YoutubeDL.py\", line 784, in extract_info\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\rg3\\tmpk63cqkyt\\bu\r\nild\\youtube_dl\\extractor\\common.py\", line 434, in extract\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\rg3\\tmpk63cqkyt\\bu\r\nild\\youtube_dl\\extractor\\cartoonnetwork.py\", line 41, in _real_extract\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\rg3\\tmpk63cqkyt\\bu\r\nild\\youtube_dl\\extractor\\turner.py\", line 84, in _extract_cvp_info\r\nyoutube_dl.utils.ExtractorError: UNKNOWN\r\n```\r\n\r\n---\r\n\r\n### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**):\r\n- Single video: http://www.cartoonnetwork.com/video/regularshow/free-cake-episode.html\r\n\r\nNote that **youtube-dl does not support sites dedicated to [copyright infringement](https://github.com/rg3/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. In order for site support request to be accepted all provided example URLs should not violate any copyrights.\r\n\r\n---\r\n\r\n### Description of your *issue*, suggested solution and other information\r\n\r\nI'm trying to download a particular video and for some reason I can't. Other Cartoon Network videos work just fine, but this series(?) doesn't seem to work. I'm not sure why some work, but some don't. I'm probably missing something... 
Help please?\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .turner import TurnerBaseIE\n\n\nclass CartoonNetworkIE(TurnerBaseIE):\n _VALID_URL = r'https?://(?:www\\.)?cartoonnetwork\\.com/video/(?:[^/]+/)+(?P<id>[^/?#]+)-(?:clip|episode)\\.html'\n _TEST = {\n 'url': 'http://www.cartoonnetwork.com/video/teen-titans-go/starfire-the-cat-lady-clip.html',\n 'info_dict': {\n 'id': '8a250ab04ed07e6c014ef3f1e2f9016c',\n 'ext': 'mp4',\n 'title': 'Starfire the Cat Lady',\n 'description': 'Robin decides to become a cat so that Starfire will finally love him.',\n },\n 'params': {\n # m3u8 download\n 'skip_download': True,\n },\n }\n\n def _real_extract(self, url):\n display_id = self._match_id(url)\n webpage = self._download_webpage(url, display_id)\n id_type, video_id = re.search(r\"_cnglobal\\.cvp(Video|Title)Id\\s*=\\s*'([^']+)';\", webpage).groups()\n query = ('id' if id_type == 'Video' else 'titleId') + '=' + video_id\n return self._extract_cvp_info(\n 'http://www.cartoonnetwork.com/video-seo-svc/episodeservices/getCvpPlaylist?networkName=CN2&' + query, video_id, {\n 'secure': {\n 'media_src': 'http://androidhls-secure.cdn.turner.com/toon/big',\n 'tokenizer_src': 'http://www.cartoonnetwork.com/cntv/mvpd/processors/services/token_ipadAdobe.do',\n },\n }, {\n 'url': url,\n 'site_name': 'CartoonNetwork',\n 'auth_required': self._search_regex(\n r'_cnglobal\\.cvpFullOrPreviewAuth\\s*=\\s*(true|false);',\n webpage, 'auth required', default='false') == 'true',\n })\n", "path": "youtube_dl/extractor/cartoonnetwork.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .turner import TurnerBaseIE\n\n\nclass CartoonNetworkIE(TurnerBaseIE):\n _VALID_URL = r'https?://(?:www\\.)?cartoonnetwork\\.com/video/(?:[^/]+/)+(?P<id>[^/?#]+)-(?:clip|episode)\\.html'\n _TEST = {\n 'url': 'http://www.cartoonnetwork.com/video/teen-titans-go/starfire-the-cat-lady-clip.html',\n 'info_dict': {\n 'id': '8a250ab04ed07e6c014ef3f1e2f9016c',\n 'ext': 'mp4',\n 'title': 'Starfire the Cat Lady',\n 'description': 'Robin decides to become a cat so that Starfire will finally love him.',\n },\n 'params': {\n # m3u8 download\n 'skip_download': True,\n },\n }\n\n def _real_extract(self, url):\n display_id = self._match_id(url)\n webpage = self._download_webpage(url, display_id)\n id_type, video_id = re.search(r\"_cnglobal\\.cvp(Video|Title)Id\\s*=\\s*'([^']+)';\", webpage).groups()\n query = ('id' if id_type == 'Video' else 'titleId') + '=' + video_id\n return self._extract_cvp_info(\n 'http://www.cartoonnetwork.com/video-seo-svc/episodeservices/getCvpPlaylist?networkName=CN2&' + query, video_id, {\n 'secure': {\n 'media_src': 'http://androidhls-secure.cdn.turner.com/toon/big',\n 'tokenizer_src': 'https://token.vgtf.net/token/token_mobile',\n },\n }, {\n 'url': url,\n 'site_name': 'CartoonNetwork',\n 'auth_required': self._search_regex(\n r'_cnglobal\\.cvpFullOrPreviewAuth\\s*=\\s*(true|false);',\n webpage, 'auth required', default='false') == 'true',\n })\n", "path": "youtube_dl/extractor/cartoonnetwork.py"}]} | 2,147 | 186 |
gh_patches_debug_26780 | rasdani/github-patches | git_diff | ansible-collections__community.vmware-2034 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use toolsVersionStatus2 instead of toolsVersionStatus
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
As I saw in the Python module, you are looking for toolsVersionStatus instead of toolsVersionStatus2
##### ISSUE TYPE
- Bug Report
- https://developer.vmware.com/apis/1355/
- toolsVersionStatus is deprecated
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
https://docs.ansible.com/ansible/latest/collections/community/vmware/vmware_guest_tools_info_module.html#ansible-collections-community-vmware-vmware-guest-tools-info-module
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes -->
```paste below
Every
```
##### COLLECTION VERSION
<!--- Paste verbatim output from "ansible-galaxy collection list <namespace>.<collection>" between the quotes
for example: ansible-galaxy collection list community.general
-->
```paste below
https://docs.ansible.com/ansible/latest/collections/community/vmware/vmware_guest_tools_info_module.html#ansible-collections-community-vmware-vmware-guest-tools-info-module
```
##### CONFIGURATION
<!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes -->
```paste below
```
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```yaml
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- Describe what you expected to happen when running the steps above -->
##### ACTUAL RESULTS
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
<!--- Paste verbatim command output between quotes -->
```paste below
```
--- END ISSUE ---
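For reference, the requested change amounts to reading a different field from the same guest summary object. A minimal sketch of the difference (hypothetical snippet, not code from this module; `vm` is assumed to be an already-retrieved pyVmomi `vim.VirtualMachine` object):

```python
# Sketch only: compare the deprecated field with its replacement.
# Assumes `vm` is a pyVmomi vim.VirtualMachine object obtained elsewhere.
guest = vm.summary.guest

legacy_status = guest.toolsVersionStatus    # deprecated per the linked API docs
current_status = guest.toolsVersionStatus2  # replacement field requested in this issue

print("deprecated:", legacy_status)
print("replacement:", current_status)
```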
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/vmware_guest_tools_info.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright: (c) 2019, Ansible Project
5 # Copyright: (c) 2019, VMware, Inc. All Rights Reserved.
6 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
7 # SPDX-License-Identifier: GPL-3.0-or-later
8
9 from __future__ import absolute_import, division, print_function
10 __metaclass__ = type
11
12
13 DOCUMENTATION = r'''
14 ---
15 module: vmware_guest_tools_info
16 short_description: Gather info about VMware tools installed in VM
17 description:
18 - Gather information about the VMware tools installed in virtual machine.
19 author:
20 - Diane Wang (@Tomorrow9) <[email protected]>
21 options:
22 name:
23 description:
24 - Name of the VM to get VMware tools info.
25 - This is required if O(uuid) or O(moid) is not supplied.
26 type: str
27 name_match:
28 description:
29 - If multiple VMs matching the name, use the first or last found.
30 default: 'first'
31 choices: ['first', 'last']
32 type: str
33 uuid:
34 description:
35 - UUID of the instance to manage if known, this is VMware's unique identifier.
36 - This is required if O(name) or O(moid) is not supplied.
37 type: str
38 use_instance_uuid:
39 description:
40 - Whether to use the VMware instance UUID rather than the BIOS UUID.
41 default: false
42 type: bool
43 moid:
44 description:
45 - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
46 - This is required if O(name) or O(uuid) is not supplied.
47 type: str
48 folder:
49 description:
50 - Destination folder, absolute or relative path to find an existing guest.
51 - This is required if name is supplied.
52 - The folder should include the datacenter. ESXi server's datacenter is ha-datacenter.
53 - 'Examples:'
54 - ' folder: /ha-datacenter/vm'
55 - ' folder: ha-datacenter/vm'
56 - ' folder: /datacenter1/vm'
57 - ' folder: datacenter1/vm'
58 - ' folder: /datacenter1/vm/folder1'
59 - ' folder: datacenter1/vm/folder1'
60 - ' folder: /folder1/datacenter1/vm'
61 - ' folder: folder1/datacenter1/vm'
62 - ' folder: /folder1/datacenter1/vm/folder2'
63 type: str
64 datacenter:
65 description:
66 - The datacenter name to which virtual machine belongs to.
67 type: str
68 extends_documentation_fragment:
69 - community.vmware.vmware.documentation
70
71 '''
72
73 EXAMPLES = r'''
74 - name: Gather VMware tools info installed in VM specified by uuid
75 community.vmware.vmware_guest_tools_info:
76 hostname: "{{ vcenter_hostname }}"
77 username: "{{ vcenter_username }}"
78 password: "{{ vcenter_password }}"
79 datacenter: "{{ datacenter_name }}"
80 uuid: 421e4592-c069-924d-ce20-7e7533fab926
81 delegate_to: localhost
82 register: vmtools_info
83
84 - name: Gather VMware tools info installed in VM specified by name
85 community.vmware.vmware_guest_tools_info:
86 hostname: "{{ vcenter_hostname }}"
87 username: "{{ vcenter_username }}"
88 password: "{{ vcenter_password }}"
89 datacenter: "{{ datacenter_name }}"
90 name: "{{ vm_name }}"
91 delegate_to: localhost
92 register: vmtools_info
93 '''
94
95 RETURN = r'''
96 vmtools_info:
97 description: metadata about the VMware tools installed in virtual machine
98 returned: always
99 type: dict
100 sample: {
101 "vm_uuid": null,
102 "vm_moid": null,
103 "vm_use_instance_uuid": false,
104 "vm_guest_fullname": "Microsoft Windows 10 (64-bit)",
105 "vm_guest_hostname": "test",
106 "vm_guest_id": "windows9_64Guest",
107 "vm_hw_version": "vmx-14",
108 "vm_ipaddress": "10.10.10.10",
109 "vm_name": "test_vm",
110 "vm_tools_install_status": "toolsOk",
111 "vm_tools_install_type": "guestToolsTypeMSI",
112 "vm_tools_last_install_count": 0,
113 "vm_tools_running_status": "guestToolsRunning",
114 "vm_tools_upgrade_policy": "manual",
115 "vm_tools_version": 10341,
116 "vm_tools_version_status": "guestToolsCurrent"
117 }
118 '''
119
120
121 from ansible.module_utils.basic import AnsibleModule
122 from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
123
124
125 class PyVmomiHelper(PyVmomi):
126 def __init__(self, module):
127 super(PyVmomiHelper, self).__init__(module)
128 self.name = self.params['name']
129 self.uuid = self.params['uuid']
130 self.moid = self.params['moid']
131 self.use_instance_uuid = self.params['use_instance_uuid']
132
133 def gather_vmtools_info(self):
134 vmtools_info = dict(
135 vm_name=self.name,
136 vm_uuid=self.uuid,
137 vm_moid=self.moid,
138 vm_use_instance_uuid=self.use_instance_uuid,
139 vm_hw_version=self.current_vm_obj.config.version,
140 vm_guest_id=self.current_vm_obj.summary.guest.guestId,
141 vm_guest_fullname=self.current_vm_obj.summary.guest.guestFullName,
142 vm_guest_hostname=self.current_vm_obj.summary.guest.hostName,
143 vm_ipaddress=self.current_vm_obj.summary.guest.ipAddress,
144 vm_tools_running_status=self.current_vm_obj.summary.guest.toolsRunningStatus,
145 vm_tools_install_status=self.current_vm_obj.summary.guest.toolsStatus,
146 vm_tools_version_status=self.current_vm_obj.summary.guest.toolsVersionStatus,
147 vm_tools_install_type=self.current_vm_obj.config.tools.toolsInstallType,
148 vm_tools_version=self.current_vm_obj.config.tools.toolsVersion,
149 vm_tools_upgrade_policy=self.current_vm_obj.config.tools.toolsUpgradePolicy,
150 vm_tools_last_install_count=self.current_vm_obj.config.tools.lastInstallInfo.counter,
151 )
152
153 return {'changed': False, 'failed': False, 'vmtools_info': vmtools_info}
154
155
156 def main():
157 argument_spec = vmware_argument_spec()
158 argument_spec.update(
159 name=dict(type='str'),
160 uuid=dict(type='str'),
161 moid=dict(type='str'),
162 use_instance_uuid=dict(type='bool', default=False),
163 name_match=dict(
164 choices=['first', 'last'],
165 default='first',
166 type='str'
167 ),
168 folder=dict(type='str'),
169 datacenter=dict(type='str'),
170 )
171
172 module = AnsibleModule(
173 argument_spec=argument_spec,
174 required_one_of=[
175 ['name', 'uuid', 'moid']
176 ],
177 mutually_exclusive=[
178 ['name', 'uuid', 'moid']
179 ],
180 supports_check_mode=True,
181 )
182
183 pyv = PyVmomiHelper(module)
184 vm = pyv.get_vm()
185 if not vm:
186 vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid'))
187 module.fail_json(msg='Unable to find the specified virtual machine using: %s' % vm_id)
188 results = pyv.gather_vmtools_info()
189 module.exit_json(**results)
190
191
192 if __name__ == '__main__':
193 main()
194
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/modules/vmware_guest_tools_info.py b/plugins/modules/vmware_guest_tools_info.py
--- a/plugins/modules/vmware_guest_tools_info.py
+++ b/plugins/modules/vmware_guest_tools_info.py
@@ -143,13 +143,18 @@
vm_ipaddress=self.current_vm_obj.summary.guest.ipAddress,
vm_tools_running_status=self.current_vm_obj.summary.guest.toolsRunningStatus,
vm_tools_install_status=self.current_vm_obj.summary.guest.toolsStatus,
- vm_tools_version_status=self.current_vm_obj.summary.guest.toolsVersionStatus,
+ vm_tools_version_status=self.current_vm_obj.summary.guest.toolsVersionStatus2,
vm_tools_install_type=self.current_vm_obj.config.tools.toolsInstallType,
vm_tools_version=self.current_vm_obj.config.tools.toolsVersion,
vm_tools_upgrade_policy=self.current_vm_obj.config.tools.toolsUpgradePolicy,
vm_tools_last_install_count=self.current_vm_obj.config.tools.lastInstallInfo.counter,
)
+ self.module.deprecate(
+ msg="The API providing vm_tools_install_status has been deprecated by VMware; use vm_tools_running_status / vm_tools_version_status instead",
+ version="5.0.0",
+ collection_name="community.vmware"
+ )
return {'changed': False, 'failed': False, 'vmtools_info': vmtools_info}
| {"golden_diff": "diff --git a/plugins/modules/vmware_guest_tools_info.py b/plugins/modules/vmware_guest_tools_info.py\n--- a/plugins/modules/vmware_guest_tools_info.py\n+++ b/plugins/modules/vmware_guest_tools_info.py\n@@ -143,13 +143,18 @@\n vm_ipaddress=self.current_vm_obj.summary.guest.ipAddress,\n vm_tools_running_status=self.current_vm_obj.summary.guest.toolsRunningStatus,\n vm_tools_install_status=self.current_vm_obj.summary.guest.toolsStatus,\n- vm_tools_version_status=self.current_vm_obj.summary.guest.toolsVersionStatus,\n+ vm_tools_version_status=self.current_vm_obj.summary.guest.toolsVersionStatus2,\n vm_tools_install_type=self.current_vm_obj.config.tools.toolsInstallType,\n vm_tools_version=self.current_vm_obj.config.tools.toolsVersion,\n vm_tools_upgrade_policy=self.current_vm_obj.config.tools.toolsUpgradePolicy,\n vm_tools_last_install_count=self.current_vm_obj.config.tools.lastInstallInfo.counter,\n )\n \n+ self.module.deprecate(\n+ msg=\"The API providing vm_tools_install_status has been deprecated by VMware; use vm_tools_running_status / vm_tools_version_status instead\",\n+ version=\"5.0.0\",\n+ collection_name=\"community.vmware\"\n+ )\n return {'changed': False, 'failed': False, 'vmtools_info': vmtools_info}\n", "issue": "Use toolsVersionStatus2 instead of toolsVersionStatus\n<!--- Verify first that your issue is not already reported on GitHub -->\r\n<!--- Also test if the latest release and devel branch are affected too -->\r\n<!--- Complete *all* sections as described, this form is processed automatically -->\r\n\r\n##### SUMMARY\r\nAs i saw in the python module you are looking f\u00fcr toolsVersionStatus instead of toolsVersionStatus2\r\n\r\n##### ISSUE TYPE\r\n- Bug Report\r\n- https://developer.vmware.com/apis/1355/\r\n- toolsVersionStatus is depreacated\r\n\r\n##### COMPONENT NAME\r\n<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->\r\nhttps://docs.ansible.com/ansible/latest/collections/community/vmware/vmware_guest_tools_info_module.html#ansible-collections-community-vmware-vmware-guest-tools-info-module\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \"ansible --version\" between quotes -->\r\n```paste below\r\nEvery\r\n```\r\n\r\n##### COLLECTION VERSION\r\n<!--- Paste verbatim output from \"ansible-galaxy collection list <namespace>.<collection>\" between the quotes\r\nfor example: ansible-galaxy collection list community.general\r\n-->\r\n```paste below\r\nhttps://docs.ansible.com/ansible/latest/collections/community/vmware/vmware_guest_tools_info_module.html#ansible-collections-community-vmware-vmware-guest-tools-info-module\r\n```\r\n\r\n##### CONFIGURATION\r\n<!--- Paste verbatim output from \"ansible-config dump --only-changed\" between quotes -->\r\n```paste below\r\n\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\n<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->\r\n\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n\r\n```\r\n\r\n<!--- HINT: You can paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- Describe what you expected to happen when running the steps above -->\r\n\r\n\r\n##### ACTUAL RESULTS\r\n<!--- Describe what actually happened. 
If possible run with extra verbosity (-vvvv) -->\r\n\r\n<!--- Paste verbatim command output between quotes -->\r\n```paste below\r\n\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2019, Ansible Project\n# Copyright: (c) 2019, VMware, Inc. All Rights Reserved.\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_guest_tools_info\nshort_description: Gather info about VMware tools installed in VM\ndescription:\n - Gather information about the VMware tools installed in virtual machine.\nauthor:\n - Diane Wang (@Tomorrow9) <[email protected]>\noptions:\n name:\n description:\n - Name of the VM to get VMware tools info.\n - This is required if O(uuid) or O(moid) is not supplied.\n type: str\n name_match:\n description:\n - If multiple VMs matching the name, use the first or last found.\n default: 'first'\n choices: ['first', 'last']\n type: str\n uuid:\n description:\n - UUID of the instance to manage if known, this is VMware's unique identifier.\n - This is required if O(name) or O(moid) is not supplied.\n type: str\n use_instance_uuid:\n description:\n - Whether to use the VMware instance UUID rather than the BIOS UUID.\n default: false\n type: bool\n moid:\n description:\n - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.\n - This is required if O(name) or O(uuid) is not supplied.\n type: str\n folder:\n description:\n - Destination folder, absolute or relative path to find an existing guest.\n - This is required if name is supplied.\n - The folder should include the datacenter. 
ESXi server's datacenter is ha-datacenter.\n - 'Examples:'\n - ' folder: /ha-datacenter/vm'\n - ' folder: ha-datacenter/vm'\n - ' folder: /datacenter1/vm'\n - ' folder: datacenter1/vm'\n - ' folder: /datacenter1/vm/folder1'\n - ' folder: datacenter1/vm/folder1'\n - ' folder: /folder1/datacenter1/vm'\n - ' folder: folder1/datacenter1/vm'\n - ' folder: /folder1/datacenter1/vm/folder2'\n type: str\n datacenter:\n description:\n - The datacenter name to which virtual machine belongs to.\n type: str\nextends_documentation_fragment:\n- community.vmware.vmware.documentation\n\n'''\n\nEXAMPLES = r'''\n- name: Gather VMware tools info installed in VM specified by uuid\n community.vmware.vmware_guest_tools_info:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n datacenter: \"{{ datacenter_name }}\"\n uuid: 421e4592-c069-924d-ce20-7e7533fab926\n delegate_to: localhost\n register: vmtools_info\n\n- name: Gather VMware tools info installed in VM specified by name\n community.vmware.vmware_guest_tools_info:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n datacenter: \"{{ datacenter_name }}\"\n name: \"{{ vm_name }}\"\n delegate_to: localhost\n register: vmtools_info\n'''\n\nRETURN = r'''\nvmtools_info:\n description: metadata about the VMware tools installed in virtual machine\n returned: always\n type: dict\n sample: {\n \"vm_uuid\": null,\n \"vm_moid\": null,\n \"vm_use_instance_uuid\": false,\n \"vm_guest_fullname\": \"Microsoft Windows 10 (64-bit)\",\n \"vm_guest_hostname\": \"test\",\n \"vm_guest_id\": \"windows9_64Guest\",\n \"vm_hw_version\": \"vmx-14\",\n \"vm_ipaddress\": \"10.10.10.10\",\n \"vm_name\": \"test_vm\",\n \"vm_tools_install_status\": \"toolsOk\",\n \"vm_tools_install_type\": \"guestToolsTypeMSI\",\n \"vm_tools_last_install_count\": 0,\n \"vm_tools_running_status\": \"guestToolsRunning\",\n \"vm_tools_upgrade_policy\": \"manual\",\n \"vm_tools_version\": 10341,\n \"vm_tools_version_status\": \"guestToolsCurrent\"\n }\n'''\n\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec\n\n\nclass PyVmomiHelper(PyVmomi):\n def __init__(self, module):\n super(PyVmomiHelper, self).__init__(module)\n self.name = self.params['name']\n self.uuid = self.params['uuid']\n self.moid = self.params['moid']\n self.use_instance_uuid = self.params['use_instance_uuid']\n\n def gather_vmtools_info(self):\n vmtools_info = dict(\n vm_name=self.name,\n vm_uuid=self.uuid,\n vm_moid=self.moid,\n vm_use_instance_uuid=self.use_instance_uuid,\n vm_hw_version=self.current_vm_obj.config.version,\n vm_guest_id=self.current_vm_obj.summary.guest.guestId,\n vm_guest_fullname=self.current_vm_obj.summary.guest.guestFullName,\n vm_guest_hostname=self.current_vm_obj.summary.guest.hostName,\n vm_ipaddress=self.current_vm_obj.summary.guest.ipAddress,\n vm_tools_running_status=self.current_vm_obj.summary.guest.toolsRunningStatus,\n vm_tools_install_status=self.current_vm_obj.summary.guest.toolsStatus,\n vm_tools_version_status=self.current_vm_obj.summary.guest.toolsVersionStatus,\n vm_tools_install_type=self.current_vm_obj.config.tools.toolsInstallType,\n vm_tools_version=self.current_vm_obj.config.tools.toolsVersion,\n vm_tools_upgrade_policy=self.current_vm_obj.config.tools.toolsUpgradePolicy,\n vm_tools_last_install_count=self.current_vm_obj.config.tools.lastInstallInfo.counter,\n 
)\n\n return {'changed': False, 'failed': False, 'vmtools_info': vmtools_info}\n\n\ndef main():\n argument_spec = vmware_argument_spec()\n argument_spec.update(\n name=dict(type='str'),\n uuid=dict(type='str'),\n moid=dict(type='str'),\n use_instance_uuid=dict(type='bool', default=False),\n name_match=dict(\n choices=['first', 'last'],\n default='first',\n type='str'\n ),\n folder=dict(type='str'),\n datacenter=dict(type='str'),\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n required_one_of=[\n ['name', 'uuid', 'moid']\n ],\n mutually_exclusive=[\n ['name', 'uuid', 'moid']\n ],\n supports_check_mode=True,\n )\n\n pyv = PyVmomiHelper(module)\n vm = pyv.get_vm()\n if not vm:\n vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid'))\n module.fail_json(msg='Unable to find the specified virtual machine using: %s' % vm_id)\n results = pyv.gather_vmtools_info()\n module.exit_json(**results)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/vmware_guest_tools_info.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2019, Ansible Project\n# Copyright: (c) 2019, VMware, Inc. All Rights Reserved.\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_guest_tools_info\nshort_description: Gather info about VMware tools installed in VM\ndescription:\n - Gather information about the VMware tools installed in virtual machine.\nauthor:\n - Diane Wang (@Tomorrow9) <[email protected]>\noptions:\n name:\n description:\n - Name of the VM to get VMware tools info.\n - This is required if O(uuid) or O(moid) is not supplied.\n type: str\n name_match:\n description:\n - If multiple VMs matching the name, use the first or last found.\n default: 'first'\n choices: ['first', 'last']\n type: str\n uuid:\n description:\n - UUID of the instance to manage if known, this is VMware's unique identifier.\n - This is required if O(name) or O(moid) is not supplied.\n type: str\n use_instance_uuid:\n description:\n - Whether to use the VMware instance UUID rather than the BIOS UUID.\n default: false\n type: bool\n moid:\n description:\n - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.\n - This is required if O(name) or O(uuid) is not supplied.\n type: str\n folder:\n description:\n - Destination folder, absolute or relative path to find an existing guest.\n - This is required if name is supplied.\n - The folder should include the datacenter. 
ESXi server's datacenter is ha-datacenter.\n - 'Examples:'\n - ' folder: /ha-datacenter/vm'\n - ' folder: ha-datacenter/vm'\n - ' folder: /datacenter1/vm'\n - ' folder: datacenter1/vm'\n - ' folder: /datacenter1/vm/folder1'\n - ' folder: datacenter1/vm/folder1'\n - ' folder: /folder1/datacenter1/vm'\n - ' folder: folder1/datacenter1/vm'\n - ' folder: /folder1/datacenter1/vm/folder2'\n type: str\n datacenter:\n description:\n - The datacenter name to which virtual machine belongs to.\n type: str\nextends_documentation_fragment:\n- community.vmware.vmware.documentation\n\n'''\n\nEXAMPLES = r'''\n- name: Gather VMware tools info installed in VM specified by uuid\n community.vmware.vmware_guest_tools_info:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n datacenter: \"{{ datacenter_name }}\"\n uuid: 421e4592-c069-924d-ce20-7e7533fab926\n delegate_to: localhost\n register: vmtools_info\n\n- name: Gather VMware tools info installed in VM specified by name\n community.vmware.vmware_guest_tools_info:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n datacenter: \"{{ datacenter_name }}\"\n name: \"{{ vm_name }}\"\n delegate_to: localhost\n register: vmtools_info\n'''\n\nRETURN = r'''\nvmtools_info:\n description: metadata about the VMware tools installed in virtual machine\n returned: always\n type: dict\n sample: {\n \"vm_uuid\": null,\n \"vm_moid\": null,\n \"vm_use_instance_uuid\": false,\n \"vm_guest_fullname\": \"Microsoft Windows 10 (64-bit)\",\n \"vm_guest_hostname\": \"test\",\n \"vm_guest_id\": \"windows9_64Guest\",\n \"vm_hw_version\": \"vmx-14\",\n \"vm_ipaddress\": \"10.10.10.10\",\n \"vm_name\": \"test_vm\",\n \"vm_tools_install_status\": \"toolsOk\",\n \"vm_tools_install_type\": \"guestToolsTypeMSI\",\n \"vm_tools_last_install_count\": 0,\n \"vm_tools_running_status\": \"guestToolsRunning\",\n \"vm_tools_upgrade_policy\": \"manual\",\n \"vm_tools_version\": 10341,\n \"vm_tools_version_status\": \"guestToolsCurrent\"\n }\n'''\n\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec\n\n\nclass PyVmomiHelper(PyVmomi):\n def __init__(self, module):\n super(PyVmomiHelper, self).__init__(module)\n self.name = self.params['name']\n self.uuid = self.params['uuid']\n self.moid = self.params['moid']\n self.use_instance_uuid = self.params['use_instance_uuid']\n\n def gather_vmtools_info(self):\n vmtools_info = dict(\n vm_name=self.name,\n vm_uuid=self.uuid,\n vm_moid=self.moid,\n vm_use_instance_uuid=self.use_instance_uuid,\n vm_hw_version=self.current_vm_obj.config.version,\n vm_guest_id=self.current_vm_obj.summary.guest.guestId,\n vm_guest_fullname=self.current_vm_obj.summary.guest.guestFullName,\n vm_guest_hostname=self.current_vm_obj.summary.guest.hostName,\n vm_ipaddress=self.current_vm_obj.summary.guest.ipAddress,\n vm_tools_running_status=self.current_vm_obj.summary.guest.toolsRunningStatus,\n vm_tools_install_status=self.current_vm_obj.summary.guest.toolsStatus,\n vm_tools_version_status=self.current_vm_obj.summary.guest.toolsVersionStatus2,\n vm_tools_install_type=self.current_vm_obj.config.tools.toolsInstallType,\n vm_tools_version=self.current_vm_obj.config.tools.toolsVersion,\n vm_tools_upgrade_policy=self.current_vm_obj.config.tools.toolsUpgradePolicy,\n 
vm_tools_last_install_count=self.current_vm_obj.config.tools.lastInstallInfo.counter,\n )\n\n self.module.deprecate(\n msg=\"The API providing vm_tools_install_status has been deprecated by VMware; use vm_tools_running_status / vm_tools_version_status instead\",\n version=\"5.0.0\",\n collection_name=\"community.vmware\"\n )\n return {'changed': False, 'failed': False, 'vmtools_info': vmtools_info}\n\n\ndef main():\n argument_spec = vmware_argument_spec()\n argument_spec.update(\n name=dict(type='str'),\n uuid=dict(type='str'),\n moid=dict(type='str'),\n use_instance_uuid=dict(type='bool', default=False),\n name_match=dict(\n choices=['first', 'last'],\n default='first',\n type='str'\n ),\n folder=dict(type='str'),\n datacenter=dict(type='str'),\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n required_one_of=[\n ['name', 'uuid', 'moid']\n ],\n mutually_exclusive=[\n ['name', 'uuid', 'moid']\n ],\n supports_check_mode=True,\n )\n\n pyv = PyVmomiHelper(module)\n vm = pyv.get_vm()\n if not vm:\n vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid'))\n module.fail_json(msg='Unable to find the specified virtual machine using: %s' % vm_id)\n results = pyv.gather_vmtools_info()\n module.exit_json(**results)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/vmware_guest_tools_info.py"}]} | 2,874 | 287 |
gh_patches_debug_64580 | rasdani/github-patches | git_diff | kubeflow__pipelines-6691 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[sdk] dependency conflict with tensorflow 2.6.0 and seldon-core
### Environment
* KFP version: 1.7
* KFP SDK version: 1.8.3
* All dependencies version:
```
[~]$ pip list | grep kfp
kfp 1.8.3
kfp-pipeline-spec 0.1.11
kfp-server-api 1.7.0
```
kfp==1.8.3 collides with tensorflow==2.6 because it requires
https://github.com/kubeflow/pipelines/blob/220d79df66e31bbd93c409fb361e0463bde4aeac/sdk/python/setup.py#L56
while tensorflow needs
```
Warning!!! Possibly conflicting dependencies found:
* tensorflow==2.6.0
- typing-extensions [required: ~=3.7.4, installed: 3.10.0.2]
```
https://github.com/tensorflow/tensorflow/blob/421fba8888bb8f8724bc2e35ca2fdcde16e1bfe5/tensorflow/tools/pip_package/setup.py#L90
is `'typing-extensions>=3.7.4,<4;python_version<"3.9"'` not enough?
The same goes for seldon-core==1.11.* and the packages click and absl-py
```
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
kfp 1.8.2 requires absl-py<=0.11,>=0.9, but you have absl-py 0.13.0 which is incompatible.
kfp 1.8.2 requires click<8,>=7.1.1, but you have click 8.0.1 which is incompatible.
kfp 1.8.2 requires typing-extensions<4,>=3.10.0.2, but you have typing-extensions 3.7.4.3 which is incompatible.
```
--- END ISSUE ---
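The clash can be checked mechanically from the pins quoted above. A small illustrative script (assumption: the `packaging` library is available; all version numbers are copied verbatim from the issue):

```python
from packaging.specifiers import SpecifierSet

# kfp's pins, as reported in the issue's pip error output.
kfp_pins = {
    "typing-extensions": SpecifierSet(">=3.10.0.2,<4"),
    "click": SpecifierSet(">=7.1.1,<8"),
    "absl-py": SpecifierSet(">=0.9,<=0.11"),
}

# Versions pulled in by tensorflow 2.6.0 / seldon-core 1.11, also from the issue.
installed = {
    "typing-extensions": "3.7.4.3",
    "click": "8.0.1",
    "absl-py": "0.13.0",
}

for name, pin in kfp_pins.items():
    satisfied = installed[name] in pin
    print(f"{name}: {installed[name]} satisfies {pin}? {satisfied}")
# All three print False, which is exactly the conflict pip complains about.
```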
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/setup.py`
Content:
```
1 # Copyright 2018 The Kubeflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import re
17
18 from setuptools import setup
19
20 NAME = 'kfp'
21 #VERSION = .... Change the version in kfp/__init__.py
22
23 # NOTICE, after any updates to the following, ./requirements.in should be updated
24 # accordingly.
25 REQUIRES = [
26 'absl-py>=0.9,<=0.11',
27 'PyYAML>=5.3,<6',
28 # `Blob.from_string` was introduced in google-cloud-storage 1.20.0
29 # https://github.com/googleapis/python-storage/blob/master/CHANGELOG.md#1200
30 'google-cloud-storage>=1.20.0,<2',
31 'kubernetes>=8.0.0,<19',
32 # google-api-python-client v2 doesn't work for private dicovery by default:
33 # https://github.com/googleapis/google-api-python-client/issues/1225#issuecomment-791058235
34 'google-api-python-client>=1.7.8,<2',
35 'google-auth>=1.6.1,<2',
36 'requests-toolbelt>=0.8.0,<1',
37 'cloudpickle>=1.3.0,<2',
38 # Update the upper version whenever a new major version of the
39 # kfp-server-api package is released.
40 # Update the lower version when kfp sdk depends on new apis/fields in
41 # kfp-server-api.
42 # Note, please also update ./requirements.in
43 'kfp-server-api>=1.1.2,<2.0.0',
44 'jsonschema>=3.0.1,<4',
45 'tabulate>=0.8.6,<1',
46 'click>=7.1.1,<8',
47 'Deprecated>=1.2.7,<2',
48 'strip-hints>=0.1.8,<1',
49 'docstring-parser>=0.7.3,<1',
50 'kfp-pipeline-spec>=0.1.10,<0.2.0',
51 'fire>=0.3.1,<1',
52 'protobuf>=3.13.0,<4',
53 'uritemplate>=3.0.1,<4',
54 'pydantic>=1.8.2,<2',
55 # Standard library backports
56 'dataclasses;python_version<"3.7"',
57 'typing-extensions>=3.7.4,<4;python_version<"3.9"',
58 ]
59
60 TESTS_REQUIRE = [
61 'frozendict',
62 ]
63
64
65 def find_version(*file_path_parts):
66 here = os.path.abspath(os.path.dirname(__file__))
67 with open(os.path.join(here, *file_path_parts), 'r') as fp:
68 version_file_text = fp.read()
69
70 version_match = re.search(
71 r"^__version__ = ['\"]([^'\"]*)['\"]",
72 version_file_text,
73 re.M,
74 )
75 if version_match:
76 return version_match.group(1)
77
78 raise RuntimeError('Unable to find version string.')
79
80
81 setup(
82 name=NAME,
83 version=find_version('kfp', '__init__.py'),
84 description='KubeFlow Pipelines SDK',
85 author='The Kubeflow Authors',
86 url="https://github.com/kubeflow/pipelines",
87 project_urls={
88 "Documentation": "https://kubeflow-pipelines.readthedocs.io/en/stable/",
89 "Bug Tracker": "https://github.com/kubeflow/pipelines/issues",
90 "Source": "https://github.com/kubeflow/pipelines/tree/master/sdk",
91 "Changelog": "https://github.com/kubeflow/pipelines/blob/master/sdk/RELEASE.md",
92 },
93 install_requires=REQUIRES,
94 tests_require=TESTS_REQUIRE,
95 packages=[
96 'kfp',
97 'kfp.auth',
98 'kfp.cli',
99 'kfp.cli.diagnose_me',
100 'kfp.compiler',
101 'kfp.components',
102 'kfp.components.structures',
103 'kfp.containers',
104 'kfp.dsl',
105 'kfp.dsl.extensions',
106 'kfp.notebook',
107 'kfp.v2',
108 'kfp.v2.compiler',
109 'kfp.v2.components',
110 'kfp.v2.components.types',
111 'kfp.v2.components.experimental',
112 'kfp.v2.dsl',
113 'kfp.v2.google.client',
114 'kfp.v2.google.experimental',
115 ],
116 classifiers=[
117 'Intended Audience :: Developers',
118 'Intended Audience :: Education',
119 'Intended Audience :: Science/Research',
120 'License :: OSI Approved :: Apache Software License',
121 'Programming Language :: Python :: 3',
122 'Programming Language :: Python :: 3.6',
123 'Programming Language :: Python :: 3.7',
124 'Programming Language :: Python :: 3.8',
125 'Programming Language :: Python :: 3.9',
126 'Topic :: Scientific/Engineering',
127 'Topic :: Scientific/Engineering :: Artificial Intelligence',
128 'Topic :: Software Development',
129 'Topic :: Software Development :: Libraries',
130 'Topic :: Software Development :: Libraries :: Python Modules',
131 ],
132 python_requires='>=3.6.1',
133 include_package_data=True,
134 entry_points={
135 'console_scripts': [
136 'dsl-compile = kfp.compiler.main:main',
137 'dsl-compile-v2 = kfp.v2.compiler.main:main',
138 'kfp=kfp.__main__:main'
139 ]
140 })
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/setup.py b/sdk/python/setup.py
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -43,7 +43,7 @@
'kfp-server-api>=1.1.2,<2.0.0',
'jsonschema>=3.0.1,<4',
'tabulate>=0.8.6,<1',
- 'click>=7.1.1,<8',
+ 'click>=7.1.2,<9',
'Deprecated>=1.2.7,<2',
'strip-hints>=0.1.8,<1',
'docstring-parser>=0.7.3,<1',
| {"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -43,7 +43,7 @@\n 'kfp-server-api>=1.1.2,<2.0.0',\n 'jsonschema>=3.0.1,<4',\n 'tabulate>=0.8.6,<1',\n- 'click>=7.1.1,<8',\n+ 'click>=7.1.2,<9',\n 'Deprecated>=1.2.7,<2',\n 'strip-hints>=0.1.8,<1',\n 'docstring-parser>=0.7.3,<1',\n", "issue": "[sdk] dependency conflict with tensorflow 2.6.0 and seldon-core\n### Environment\r\n\r\n* KFP version: 1.7\r\n* KFP SDK version: 1.8.3\r\n\r\n\r\n* All dependencies version:\r\n```\r\n[~]$ pip list | grep kfp\r\nkfp 1.8.3\r\nkfp-pipeline-spec 0.1.11\r\nkfp-server-api 1.7.0\r\n```\r\nkfp==1.8.3 collides with tensorflow==2.6 because it requires \r\n\r\nhttps://github.com/kubeflow/pipelines/blob/220d79df66e31bbd93c409fb361e0463bde4aeac/sdk/python/setup.py#L56\r\n\r\nwhile tensorflow needs\r\n```\r\nWarning!!! Possibly conflicting dependencies found:\r\n* tensorflow==2.6.0\r\n - typing-extensions [required: ~=3.7.4, installed: 3.10.0.2]\r\n```\r\n\r\nhttps://github.com/tensorflow/tensorflow/blob/421fba8888bb8f8724bc2e35ca2fdcde16e1bfe5/tensorflow/tools/pip_package/setup.py#L90\r\n\r\nis `'typing-extensions>=3.7.4,<4;python_version<\"3.9\"'` not enough?\r\n\r\nThe same goes for seldon-core==1.11.* and package click and absl-py\r\n\r\n```\r\nERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\r\nkfp 1.8.2 requires absl-py<=0.11,>=0.9, but you have absl-py 0.13.0 which is incompatible.\r\nkfp 1.8.2 requires click<8,>=7.1.1, but you have click 8.0.1 which is incompatible.\r\nkfp 1.8.2 requires typing-extensions<4,>=3.10.0.2, but you have typing-extensions 3.7.4.3 which is incompatible.\r\n```\n", "before_files": [{"content": "# Copyright 2018 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\n\nfrom setuptools import setup\n\nNAME = 'kfp'\n#VERSION = .... 
Change the version in kfp/__init__.py\n\n# NOTICE, after any updates to the following, ./requirements.in should be updated\n# accordingly.\nREQUIRES = [\n 'absl-py>=0.9,<=0.11',\n 'PyYAML>=5.3,<6',\n # `Blob.from_string` was introduced in google-cloud-storage 1.20.0\n # https://github.com/googleapis/python-storage/blob/master/CHANGELOG.md#1200\n 'google-cloud-storage>=1.20.0,<2',\n 'kubernetes>=8.0.0,<19',\n # google-api-python-client v2 doesn't work for private dicovery by default:\n # https://github.com/googleapis/google-api-python-client/issues/1225#issuecomment-791058235\n 'google-api-python-client>=1.7.8,<2',\n 'google-auth>=1.6.1,<2',\n 'requests-toolbelt>=0.8.0,<1',\n 'cloudpickle>=1.3.0,<2',\n # Update the upper version whenever a new major version of the\n # kfp-server-api package is released.\n # Update the lower version when kfp sdk depends on new apis/fields in\n # kfp-server-api.\n # Note, please also update ./requirements.in\n 'kfp-server-api>=1.1.2,<2.0.0',\n 'jsonschema>=3.0.1,<4',\n 'tabulate>=0.8.6,<1',\n 'click>=7.1.1,<8',\n 'Deprecated>=1.2.7,<2',\n 'strip-hints>=0.1.8,<1',\n 'docstring-parser>=0.7.3,<1',\n 'kfp-pipeline-spec>=0.1.10,<0.2.0',\n 'fire>=0.3.1,<1',\n 'protobuf>=3.13.0,<4',\n 'uritemplate>=3.0.1,<4',\n 'pydantic>=1.8.2,<2',\n # Standard library backports\n 'dataclasses;python_version<\"3.7\"',\n 'typing-extensions>=3.7.4,<4;python_version<\"3.9\"',\n]\n\nTESTS_REQUIRE = [\n 'frozendict',\n]\n\n\ndef find_version(*file_path_parts):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *file_path_parts), 'r') as fp:\n version_file_text = fp.read()\n\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file_text,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError('Unable to find version string.')\n\n\nsetup(\n name=NAME,\n version=find_version('kfp', '__init__.py'),\n description='KubeFlow Pipelines SDK',\n author='The Kubeflow Authors',\n url=\"https://github.com/kubeflow/pipelines\",\n project_urls={\n \"Documentation\": \"https://kubeflow-pipelines.readthedocs.io/en/stable/\",\n \"Bug Tracker\": \"https://github.com/kubeflow/pipelines/issues\",\n \"Source\": \"https://github.com/kubeflow/pipelines/tree/master/sdk\",\n \"Changelog\": \"https://github.com/kubeflow/pipelines/blob/master/sdk/RELEASE.md\",\n },\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRE,\n packages=[\n 'kfp',\n 'kfp.auth',\n 'kfp.cli',\n 'kfp.cli.diagnose_me',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.containers',\n 'kfp.dsl',\n 'kfp.dsl.extensions',\n 'kfp.notebook',\n 'kfp.v2',\n 'kfp.v2.compiler',\n 'kfp.v2.components',\n 'kfp.v2.components.types',\n 'kfp.v2.components.experimental',\n 'kfp.v2.dsl',\n 'kfp.v2.google.client',\n 'kfp.v2.google.experimental',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.6.1',\n 
include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main',\n 'dsl-compile-v2 = kfp.v2.compiler.main:main',\n 'kfp=kfp.__main__:main'\n ]\n })\n", "path": "sdk/python/setup.py"}], "after_files": [{"content": "# Copyright 2018 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\n\nfrom setuptools import setup\n\nNAME = 'kfp'\n#VERSION = .... Change the version in kfp/__init__.py\n\n# NOTICE, after any updates to the following, ./requirements.in should be updated\n# accordingly.\nREQUIRES = [\n 'absl-py>=0.9,<=0.11',\n 'PyYAML>=5.3,<6',\n # `Blob.from_string` was introduced in google-cloud-storage 1.20.0\n # https://github.com/googleapis/python-storage/blob/master/CHANGELOG.md#1200\n 'google-cloud-storage>=1.20.0,<2',\n 'kubernetes>=8.0.0,<19',\n # google-api-python-client v2 doesn't work for private dicovery by default:\n # https://github.com/googleapis/google-api-python-client/issues/1225#issuecomment-791058235\n 'google-api-python-client>=1.7.8,<2',\n 'google-auth>=1.6.1,<2',\n 'requests-toolbelt>=0.8.0,<1',\n 'cloudpickle>=1.3.0,<2',\n # Update the upper version whenever a new major version of the\n # kfp-server-api package is released.\n # Update the lower version when kfp sdk depends on new apis/fields in\n # kfp-server-api.\n # Note, please also update ./requirements.in\n 'kfp-server-api>=1.1.2,<2.0.0',\n 'jsonschema>=3.0.1,<4',\n 'tabulate>=0.8.6,<1',\n 'click>=7.1.2,<9',\n 'Deprecated>=1.2.7,<2',\n 'strip-hints>=0.1.8,<1',\n 'docstring-parser>=0.7.3,<1',\n 'kfp-pipeline-spec>=0.1.10,<0.2.0',\n 'fire>=0.3.1,<1',\n 'protobuf>=3.13.0,<4',\n 'uritemplate>=3.0.1,<4',\n 'pydantic>=1.8.2,<2',\n # Standard library backports\n 'dataclasses;python_version<\"3.7\"',\n 'typing-extensions>=3.7.4,<4;python_version<\"3.9\"',\n]\n\nTESTS_REQUIRE = [\n 'frozendict',\n]\n\n\ndef find_version(*file_path_parts):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *file_path_parts), 'r') as fp:\n version_file_text = fp.read()\n\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file_text,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError('Unable to find version string.')\n\n\nsetup(\n name=NAME,\n version=find_version('kfp', '__init__.py'),\n description='KubeFlow Pipelines SDK',\n author='The Kubeflow Authors',\n url=\"https://github.com/kubeflow/pipelines\",\n project_urls={\n \"Documentation\": \"https://kubeflow-pipelines.readthedocs.io/en/stable/\",\n \"Bug Tracker\": \"https://github.com/kubeflow/pipelines/issues\",\n \"Source\": \"https://github.com/kubeflow/pipelines/tree/master/sdk\",\n \"Changelog\": \"https://github.com/kubeflow/pipelines/blob/master/sdk/RELEASE.md\",\n },\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRE,\n packages=[\n 'kfp',\n 'kfp.auth',\n 'kfp.cli',\n 'kfp.cli.diagnose_me',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.containers',\n 
'kfp.dsl',\n 'kfp.dsl.extensions',\n 'kfp.notebook',\n 'kfp.v2',\n 'kfp.v2.compiler',\n 'kfp.v2.components',\n 'kfp.v2.components.types',\n 'kfp.v2.components.experimental',\n 'kfp.v2.dsl',\n 'kfp.v2.google.client',\n 'kfp.v2.google.experimental',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.6.1',\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main',\n 'dsl-compile-v2 = kfp.v2.compiler.main:main',\n 'kfp=kfp.__main__:main'\n ]\n })\n", "path": "sdk/python/setup.py"}]} | 2,374 | 151 |
gh_patches_debug_19349 | rasdani/github-patches | git_diff | fossasia__open-event-server-4248 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Microlocations : GET requests return ERROR 500
**I'm submitting a ...** (check one with "x")
- [x] bug report
- [ ] feature request
- [ ] support request => Please do not submit support requests here, instead ask your query in our Gitter channel at https://gitter.im/fossasia/open-event-orga-server
Endpoint
```
GET v1/events/<event_id>/microlocations
```
Response
```
{
"errors":[
{
"detail":"Unknown error",
"source":{
"pointer":""
},
"status":500,
"title":"Unknown error"
}
],
"jsonapi":{
"version":"1.0"
}
}
```
Example URL
```
https://open-event-api.herokuapp.com/v1/events/173/microlocations
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/microlocations.py`
Content:
```
1 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
2 from marshmallow_jsonapi.flask import Schema, Relationship
3 from marshmallow_jsonapi import fields
4
5 from app.api.bootstrap import api
6 from app.api.helpers.utilities import dasherize
7 from app.models import db
8 from app.models.microlocation import Microlocation
9 from app.models.session import Session
10 from app.api.helpers.db import safe_query
11 from app.api.helpers.utilities import require_relationship
12 from app.api.helpers.permission_manager import has_access
13 from app.api.helpers.exceptions import ForbiddenException
14 from app.api.helpers.query import event_query
15
16
17 class MicrolocationSchema(Schema):
18 """
19 Api schema for Microlocation Model
20 """
21
22 class Meta:
23 """
24 Meta class for Microlocation Api Schema
25 """
26 type_ = 'microlocation'
27 self_view = 'v1.microlocation_detail'
28 self_view_kwargs = {'id': '<id>'}
29 self_view_many = 'v1.session_list'
30 inflect = dasherize
31
32 id = fields.Str(dump_only=True)
33 name = fields.Str(required=True)
34 latitude = fields.Float(validate=lambda n: -90 <= n <= 90, allow_none=True)
35 longitude = fields.Float(validate=lambda n: -180 <= n <= 180, allow_none=True)
36 floor = fields.Integer(allow_none=True)
37 room = fields.Str(allow_none=True)
38 sessions = Relationship(attribute='session',
39 self_view='v1.microlocation_session',
40 self_view_kwargs={'id': '<id>'},
41 related_view='v1.session_list',
42 related_view_kwargs={'microlocation_id': '<id>'},
43 schema='SessionSchema',
44 type_='session')
45 event = Relationship(attribute='event',
46 self_view='v1.microlocation_event',
47 self_view_kwargs={'id': '<id>'},
48 related_view='v1.event_detail',
49 related_view_kwargs={'microlocation_id': '<id>'},
50 schema='EventSchema',
51 type_='event')
52
53
54 class MicrolocationListPost(ResourceList):
55 """
56 List and create microlocations
57 """
58 def before_post(self, args, kwargs, data):
59 require_relationship(['event'], data)
60 if not has_access('is_coorganizer', event_id=data['event']):
61 raise ForbiddenException({'source': ''}, 'Co-organizer access is required.')
62
63 methods = ['POST', ]
64 schema = MicrolocationSchema
65 data_layer = {'session': db.session,
66 'model': Microlocation}
67
68
69 class MicrolocationList(ResourceList):
70 """
71 List Microlocations
72 """
73 def query(self, view_kwargs):
74 query_ = self.session.query(Microlocation)
75 query_ = event_query(self, query_, view_kwargs)
76 if view_kwargs.get('session_id'):
77 session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')
78 query_ = query_.join(Session).filter(Session.id == session.id)
79 return query_
80
81 view_kwargs = True
82 methods = ['GET']
83 schema = MicrolocationSchema
84 data_layer = {'session': db.session,
85 'model': Microlocation,
86 'methods': {
87 'query': query
88 }}
89
90
91 class MicrolocationDetail(ResourceDetail):
92 """
93 Microlocation detail by id
94 """
95
96 def before_get_object(self, view_kwargs):
97
98 if view_kwargs.get('session_id') is not None:
99 sessions = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')
100 if sessions.event_id is not None:
101 view_kwargs['id'] = sessions.event_id
102 else:
103 view_kwargs['id'] = None
104
105 decorators = (api.has_permission('is_coorganizer', methods="PATCH,DELETE", fetch="event_id", fetch_as="event_id",
106 model=Microlocation),)
107 schema = MicrolocationSchema
108 data_layer = {'session': db.session,
109 'model': Microlocation,
110 'methods': {'before_get_object': before_get_object}}
111
112
113 class MicrolocationRelationshipRequired(ResourceRelationship):
114 """
115 Microlocation Relationship for required entities
116 """
117 decorators = (api.has_permission('is_coorganizer', methods="PATCH", fetch="event_id", fetch_as="event_id",
118 model=Microlocation),)
119 methods = ['GET', 'PATCH']
120 schema = MicrolocationSchema
121 data_layer = {'session': db.session,
122 'model': Microlocation}
123
124
125 class MicrolocationRelationshipOptional(ResourceRelationship):
126 """
127 Microlocation Relationship
128 """
129 decorators = (api.has_permission('is_coorganizer', methods="PATCH,DELETE", fetch="event_id", fetch_as="event_id",
130 model=Microlocation),)
131 schema = MicrolocationSchema
132 data_layer = {'session': db.session,
133 'model': Microlocation}
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/microlocations.py b/app/api/microlocations.py
--- a/app/api/microlocations.py
+++ b/app/api/microlocations.py
@@ -26,7 +26,7 @@
type_ = 'microlocation'
self_view = 'v1.microlocation_detail'
self_view_kwargs = {'id': '<id>'}
- self_view_many = 'v1.session_list'
+ self_view_many = 'v1.microlocation_list_post'
inflect = dasherize
id = fields.Str(dump_only=True)
@@ -36,6 +36,7 @@
floor = fields.Integer(allow_none=True)
room = fields.Str(allow_none=True)
sessions = Relationship(attribute='session',
+ many=True,
self_view='v1.microlocation_session',
self_view_kwargs={'id': '<id>'},
related_view='v1.session_list',
| {"golden_diff": "diff --git a/app/api/microlocations.py b/app/api/microlocations.py\n--- a/app/api/microlocations.py\n+++ b/app/api/microlocations.py\n@@ -26,7 +26,7 @@\n type_ = 'microlocation'\n self_view = 'v1.microlocation_detail'\n self_view_kwargs = {'id': '<id>'}\n- self_view_many = 'v1.session_list'\n+ self_view_many = 'v1.microlocation_list_post'\n inflect = dasherize\n \n id = fields.Str(dump_only=True)\n@@ -36,6 +36,7 @@\n floor = fields.Integer(allow_none=True)\n room = fields.Str(allow_none=True)\n sessions = Relationship(attribute='session',\n+ many=True,\n self_view='v1.microlocation_session',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.session_list',\n", "issue": "Microlocations : GET requests return ERROR 500\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server\r\n\r\nEndpoint \r\n```\r\nGET v1/events/<event_id>/microlocations \r\n```\r\n\r\nResponse\r\n```\r\n{\r\n \"errors\":[\r\n {\r\n \"detail\":\"Unknown error\",\r\n \"source\":{\r\n \"pointer\":\"\"\r\n },\r\n \"status\":500,\r\n \"title\":\"Unknown error\"\r\n }\r\n ],\r\n \"jsonapi\":{\r\n \"version\":\"1.0\"\r\n }\r\n}\r\n```\r\n\r\nExample URL\r\n```\r\nhttps://open-event-api.herokuapp.com/v1/events/173/microlocations\r\n```\r\n\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom marshmallow_jsonapi.flask import Schema, Relationship\nfrom marshmallow_jsonapi import fields\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.utilities import dasherize\nfrom app.models import db\nfrom app.models.microlocation import Microlocation\nfrom app.models.session import Session\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.query import event_query\n\n\nclass MicrolocationSchema(Schema):\n \"\"\"\n Api schema for Microlocation Model\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class for Microlocation Api Schema\n \"\"\"\n type_ = 'microlocation'\n self_view = 'v1.microlocation_detail'\n self_view_kwargs = {'id': '<id>'}\n self_view_many = 'v1.session_list'\n inflect = dasherize\n\n id = fields.Str(dump_only=True)\n name = fields.Str(required=True)\n latitude = fields.Float(validate=lambda n: -90 <= n <= 90, allow_none=True)\n longitude = fields.Float(validate=lambda n: -180 <= n <= 180, allow_none=True)\n floor = fields.Integer(allow_none=True)\n room = fields.Str(allow_none=True)\n sessions = Relationship(attribute='session',\n self_view='v1.microlocation_session',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.session_list',\n related_view_kwargs={'microlocation_id': '<id>'},\n schema='SessionSchema',\n type_='session')\n event = Relationship(attribute='event',\n self_view='v1.microlocation_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'microlocation_id': '<id>'},\n schema='EventSchema',\n type_='event')\n\n\nclass MicrolocationListPost(ResourceList):\n \"\"\"\n List and create microlocations\n \"\"\"\n def before_post(self, args, kwargs, data):\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise 
ForbiddenException({'source': ''}, 'Co-organizer access is required.')\n\n methods = ['POST', ]\n schema = MicrolocationSchema\n data_layer = {'session': db.session,\n 'model': Microlocation}\n\n\nclass MicrolocationList(ResourceList):\n \"\"\"\n List Microlocations\n \"\"\"\n def query(self, view_kwargs):\n query_ = self.session.query(Microlocation)\n query_ = event_query(self, query_, view_kwargs)\n if view_kwargs.get('session_id'):\n session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n query_ = query_.join(Session).filter(Session.id == session.id)\n return query_\n\n view_kwargs = True\n methods = ['GET']\n schema = MicrolocationSchema\n data_layer = {'session': db.session,\n 'model': Microlocation,\n 'methods': {\n 'query': query\n }}\n\n\nclass MicrolocationDetail(ResourceDetail):\n \"\"\"\n Microlocation detail by id\n \"\"\"\n\n def before_get_object(self, view_kwargs):\n\n if view_kwargs.get('session_id') is not None:\n sessions = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n if sessions.event_id is not None:\n view_kwargs['id'] = sessions.event_id\n else:\n view_kwargs['id'] = None\n\n decorators = (api.has_permission('is_coorganizer', methods=\"PATCH,DELETE\", fetch=\"event_id\", fetch_as=\"event_id\",\n model=Microlocation),)\n schema = MicrolocationSchema\n data_layer = {'session': db.session,\n 'model': Microlocation,\n 'methods': {'before_get_object': before_get_object}}\n\n\nclass MicrolocationRelationshipRequired(ResourceRelationship):\n \"\"\"\n Microlocation Relationship for required entities\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', methods=\"PATCH\", fetch=\"event_id\", fetch_as=\"event_id\",\n model=Microlocation),)\n methods = ['GET', 'PATCH']\n schema = MicrolocationSchema\n data_layer = {'session': db.session,\n 'model': Microlocation}\n\n\nclass MicrolocationRelationshipOptional(ResourceRelationship):\n \"\"\"\n Microlocation Relationship\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', methods=\"PATCH,DELETE\", fetch=\"event_id\", fetch_as=\"event_id\",\n model=Microlocation),)\n schema = MicrolocationSchema\n data_layer = {'session': db.session,\n 'model': Microlocation}\n", "path": "app/api/microlocations.py"}], "after_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom marshmallow_jsonapi.flask import Schema, Relationship\nfrom marshmallow_jsonapi import fields\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.utilities import dasherize\nfrom app.models import db\nfrom app.models.microlocation import Microlocation\nfrom app.models.session import Session\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.query import event_query\n\n\nclass MicrolocationSchema(Schema):\n \"\"\"\n Api schema for Microlocation Model\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class for Microlocation Api Schema\n \"\"\"\n type_ = 'microlocation'\n self_view = 'v1.microlocation_detail'\n self_view_kwargs = {'id': '<id>'}\n self_view_many = 'v1.microlocation_list_post'\n inflect = dasherize\n\n id = fields.Str(dump_only=True)\n name = fields.Str(required=True)\n latitude = fields.Float(validate=lambda n: -90 <= n <= 90, allow_none=True)\n longitude = fields.Float(validate=lambda n: -180 <= n <= 180, allow_none=True)\n floor = 
fields.Integer(allow_none=True)\n room = fields.Str(allow_none=True)\n sessions = Relationship(attribute='session',\n many=True,\n self_view='v1.microlocation_session',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.session_list',\n related_view_kwargs={'microlocation_id': '<id>'},\n schema='SessionSchema',\n type_='session')\n event = Relationship(attribute='event',\n self_view='v1.microlocation_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'microlocation_id': '<id>'},\n schema='EventSchema',\n type_='event')\n\n\nclass MicrolocationListPost(ResourceList):\n \"\"\"\n List and create microlocations\n \"\"\"\n def before_post(self, args, kwargs, data):\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ForbiddenException({'source': ''}, 'Co-organizer access is required.')\n\n methods = ['POST', ]\n schema = MicrolocationSchema\n data_layer = {'session': db.session,\n 'model': Microlocation}\n\n\nclass MicrolocationList(ResourceList):\n \"\"\"\n List Microlocations\n \"\"\"\n def query(self, view_kwargs):\n query_ = self.session.query(Microlocation)\n query_ = event_query(self, query_, view_kwargs)\n if view_kwargs.get('session_id'):\n session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n query_ = query_.join(Session).filter(Session.id == session.id)\n return query_\n\n view_kwargs = True\n methods = ['GET']\n schema = MicrolocationSchema\n data_layer = {'session': db.session,\n 'model': Microlocation,\n 'methods': {\n 'query': query\n }}\n\n\nclass MicrolocationDetail(ResourceDetail):\n \"\"\"\n Microlocation detail by id\n \"\"\"\n\n def before_get_object(self, view_kwargs):\n\n if view_kwargs.get('session_id') is not None:\n sessions = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n if sessions.event_id is not None:\n view_kwargs['id'] = sessions.event_id\n else:\n view_kwargs['id'] = None\n\n decorators = (api.has_permission('is_coorganizer', methods=\"PATCH,DELETE\", fetch=\"event_id\", fetch_as=\"event_id\",\n model=Microlocation),)\n schema = MicrolocationSchema\n data_layer = {'session': db.session,\n 'model': Microlocation,\n 'methods': {'before_get_object': before_get_object}}\n\n\nclass MicrolocationRelationshipRequired(ResourceRelationship):\n \"\"\"\n Microlocation Relationship for required entities\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', methods=\"PATCH\", fetch=\"event_id\", fetch_as=\"event_id\",\n model=Microlocation),)\n methods = ['GET', 'PATCH']\n schema = MicrolocationSchema\n data_layer = {'session': db.session,\n 'model': Microlocation}\n\n\nclass MicrolocationRelationshipOptional(ResourceRelationship):\n \"\"\"\n Microlocation Relationship\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', methods=\"PATCH,DELETE\", fetch=\"event_id\", fetch_as=\"event_id\",\n model=Microlocation),)\n schema = MicrolocationSchema\n data_layer = {'session': db.session,\n 'model': Microlocation}\n", "path": "app/api/microlocations.py"}]} | 1,768 | 198 |
gh_patches_debug_8814 | rasdani/github-patches | git_diff | CTPUG__wafer-243 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tickets should be decoded on python 3
As seen from the recent quicket hook posts
TypeError at /tickets/quicket_hook/
the JSON object must be str, not 'bytes'
--- END ISSUE ---
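As an editorial aside, here is a minimal sketch of the failure mode described in the issue above, assuming a Django-style request whose `body` attribute is `bytes`; the portable fix mirrors what the patch below ends up doing:

```python
import json

raw_body = b'{"tickets": [{"barcode": 12345}]}'  # request.body is bytes under Python 3

# json.load(request) / json.loads(bytes) raised
#   TypeError: the JSON object must be str, not 'bytes'
# on the Python 3 versions current at the time (before 3.6 started accepting bytes).

payload = json.loads(raw_body.decode('utf8'))  # decode first, then parse
print(payload['tickets'][0]['barcode'])
```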
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wafer/tickets/views.py`
Content:
```
1 import json
2 import logging
3
4 from django.conf import settings
5 from django.contrib.auth import get_user_model
6 from django.core.exceptions import PermissionDenied, ValidationError
7 from django.core.urlresolvers import reverse
8 from django.http import HttpResponse, Http404
9 from django.views.decorators.csrf import csrf_exempt
10 from django.views.decorators.http import require_POST
11 from django.views.generic.edit import FormView
12
13 from wafer.tickets.models import Ticket, TicketType
14 from wafer.tickets.forms import TicketForm
15
16 log = logging.getLogger(__name__)
17
18
19 class ClaimView(FormView):
20 template_name = 'wafer.tickets/claim.html'
21 form_class = TicketForm
22
23 def get_context_data(self, **kwargs):
24 context = super(ClaimView, self).get_context_data(**kwargs)
25 context['can_claim'] = self.can_claim()
26 return context
27
28 def can_claim(self):
29 if settings.WAFER_REGISTRATION_MODE != 'ticket':
30 raise Http404('Ticket-based registration is not in use')
31 if not settings.WAFER_REGISTRATION_OPEN:
32 return False
33 return not self.request.user.userprofile.is_registered()
34
35 def form_valid(self, form):
36 if not self.can_claim():
37 raise ValidationError('User may not claim a ticket')
38 ticket = Ticket.objects.get(barcode=form.cleaned_data['barcode'])
39 ticket.user = self.request.user
40 ticket.save()
41 return super(ClaimView, self).form_valid(form)
42
43 def get_success_url(self):
44 return reverse(
45 'wafer_user_profile', args=(self.request.user.username,))
46
47
48 @csrf_exempt
49 @require_POST
50 def quicket_hook(request):
51 '''
52 Quicket.co.za can POST something like this when tickets are bought:
53 {
54 "reference": "REF00123456",
55 "event_id": 123,
56 "event_name": "My Big Event",
57 "amount": 0.00,
58 "email": "[email protected]",
59 "action": "checkout_started",
60 // Options are "checkout_started","checkout_cancelled","eft_pending",
61 // "checkout_completed"
62 "tickets": [
63 {
64 "id": 122,
65 "attendee_name": "",
66 "attendee_email": "",
67 "ticket_type": "Free Ticket",
68 "price": 0.00,
69 "barcode": 12345,
70 },
71 ...
72 ],
73 }
74 '''
75 if request.GET.get('secret') != settings.WAFER_TICKETS_SECRET:
76 raise PermissionDenied('Incorrect secret')
77
78 payload = json.load(request)
79 for ticket in payload['tickets']:
80 import_ticket(ticket['barcode'], ticket['ticket_type'],
81 ticket['attendee_email'])
82
83 return HttpResponse("Noted\n", content_type='text/plain')
84
85
86 def import_ticket(ticket_barcode, ticket_type, email):
87 if Ticket.objects.filter(barcode=ticket_barcode).exists():
88 log.debug('Ticket already registered: %s', ticket_barcode)
89 return
90
91 # truncate long ticket type names to length allowed by database
92 ticket_type = ticket_type[:TicketType.MAX_NAME_LENGTH]
93 type_, created = TicketType.objects.get_or_create(name=ticket_type)
94
95 UserModel = get_user_model()
96
97 try:
98 user = UserModel.objects.get(email=email, ticket=None)
99 except UserModel.DoesNotExist:
100 user = None
101 except UserModel.MultipleObjectsReturned:
102 # We're can't uniquely identify the user to associate this ticket
103 # with, so leave it for them to figure out via the 'claim ticket'
104 # interface
105 user = None
106
107 ticket = Ticket.objects.create(
108 barcode=ticket_barcode,
109 email=email,
110 type=type_,
111 user=user,
112 )
113 ticket.save()
114
115 if user:
116 log.info('Ticket registered: %s and linked to user', ticket)
117 else:
118 log.info('Ticket registered: %s. Unclaimed', ticket)
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wafer/tickets/views.py b/wafer/tickets/views.py
--- a/wafer/tickets/views.py
+++ b/wafer/tickets/views.py
@@ -75,7 +75,8 @@
if request.GET.get('secret') != settings.WAFER_TICKETS_SECRET:
raise PermissionDenied('Incorrect secret')
- payload = json.load(request)
+ # This is required for python 3, and in theory fine on python 2
+ payload = json.loads(request.body.decode('utf8'))
for ticket in payload['tickets']:
import_ticket(ticket['barcode'], ticket['ticket_type'],
ticket['attendee_email'])
| {"golden_diff": "diff --git a/wafer/tickets/views.py b/wafer/tickets/views.py\n--- a/wafer/tickets/views.py\n+++ b/wafer/tickets/views.py\n@@ -75,7 +75,8 @@\n if request.GET.get('secret') != settings.WAFER_TICKETS_SECRET:\n raise PermissionDenied('Incorrect secret')\n \n- payload = json.load(request)\n+ # This is required for python 3, and in theory fine on python 2\n+ payload = json.loads(request.body.decode('utf8'))\n for ticket in payload['tickets']:\n import_ticket(ticket['barcode'], ticket['ticket_type'],\n ticket['attendee_email'])\n", "issue": "tickets should be decoded on python 3\nAs seen from the recent quicket hook posts\n\nTypeError at /tickets/quicket_hook/\nthe JSON object must be str, not 'bytes'\n\n", "before_files": [{"content": "import json\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import PermissionDenied, ValidationError\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, Http404\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_POST\nfrom django.views.generic.edit import FormView\n\nfrom wafer.tickets.models import Ticket, TicketType\nfrom wafer.tickets.forms import TicketForm\n\nlog = logging.getLogger(__name__)\n\n\nclass ClaimView(FormView):\n template_name = 'wafer.tickets/claim.html'\n form_class = TicketForm\n\n def get_context_data(self, **kwargs):\n context = super(ClaimView, self).get_context_data(**kwargs)\n context['can_claim'] = self.can_claim()\n return context\n\n def can_claim(self):\n if settings.WAFER_REGISTRATION_MODE != 'ticket':\n raise Http404('Ticket-based registration is not in use')\n if not settings.WAFER_REGISTRATION_OPEN:\n return False\n return not self.request.user.userprofile.is_registered()\n\n def form_valid(self, form):\n if not self.can_claim():\n raise ValidationError('User may not claim a ticket')\n ticket = Ticket.objects.get(barcode=form.cleaned_data['barcode'])\n ticket.user = self.request.user\n ticket.save()\n return super(ClaimView, self).form_valid(form)\n\n def get_success_url(self):\n return reverse(\n 'wafer_user_profile', args=(self.request.user.username,))\n\n\n@csrf_exempt\n@require_POST\ndef quicket_hook(request):\n '''\n Quicket.co.za can POST something like this when tickets are bought:\n {\n \"reference\": \"REF00123456\",\n \"event_id\": 123,\n \"event_name\": \"My Big Event\",\n \"amount\": 0.00,\n \"email\": \"[email protected]\",\n \"action\": \"checkout_started\",\n // Options are \"checkout_started\",\"checkout_cancelled\",\"eft_pending\",\n // \"checkout_completed\"\n \"tickets\": [\n {\n \"id\": 122,\n \"attendee_name\": \"\",\n \"attendee_email\": \"\",\n \"ticket_type\": \"Free Ticket\",\n \"price\": 0.00,\n \"barcode\": 12345,\n },\n ...\n ],\n }\n '''\n if request.GET.get('secret') != settings.WAFER_TICKETS_SECRET:\n raise PermissionDenied('Incorrect secret')\n\n payload = json.load(request)\n for ticket in payload['tickets']:\n import_ticket(ticket['barcode'], ticket['ticket_type'],\n ticket['attendee_email'])\n\n return HttpResponse(\"Noted\\n\", content_type='text/plain')\n\n\ndef import_ticket(ticket_barcode, ticket_type, email):\n if Ticket.objects.filter(barcode=ticket_barcode).exists():\n log.debug('Ticket already registered: %s', ticket_barcode)\n return\n\n # truncate long ticket type names to length allowed by database\n ticket_type = ticket_type[:TicketType.MAX_NAME_LENGTH]\n type_, created = 
TicketType.objects.get_or_create(name=ticket_type)\n\n UserModel = get_user_model()\n\n try:\n user = UserModel.objects.get(email=email, ticket=None)\n except UserModel.DoesNotExist:\n user = None\n except UserModel.MultipleObjectsReturned:\n # We're can't uniquely identify the user to associate this ticket\n # with, so leave it for them to figure out via the 'claim ticket'\n # interface\n user = None\n\n ticket = Ticket.objects.create(\n barcode=ticket_barcode,\n email=email,\n type=type_,\n user=user,\n )\n ticket.save()\n\n if user:\n log.info('Ticket registered: %s and linked to user', ticket)\n else:\n log.info('Ticket registered: %s. Unclaimed', ticket)\n", "path": "wafer/tickets/views.py"}], "after_files": [{"content": "import json\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import PermissionDenied, ValidationError\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, Http404\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_POST\nfrom django.views.generic.edit import FormView\n\nfrom wafer.tickets.models import Ticket, TicketType\nfrom wafer.tickets.forms import TicketForm\n\nlog = logging.getLogger(__name__)\n\n\nclass ClaimView(FormView):\n template_name = 'wafer.tickets/claim.html'\n form_class = TicketForm\n\n def get_context_data(self, **kwargs):\n context = super(ClaimView, self).get_context_data(**kwargs)\n context['can_claim'] = self.can_claim()\n return context\n\n def can_claim(self):\n if settings.WAFER_REGISTRATION_MODE != 'ticket':\n raise Http404('Ticket-based registration is not in use')\n if not settings.WAFER_REGISTRATION_OPEN:\n return False\n return not self.request.user.userprofile.is_registered()\n\n def form_valid(self, form):\n if not self.can_claim():\n raise ValidationError('User may not claim a ticket')\n ticket = Ticket.objects.get(barcode=form.cleaned_data['barcode'])\n ticket.user = self.request.user\n ticket.save()\n return super(ClaimView, self).form_valid(form)\n\n def get_success_url(self):\n return reverse(\n 'wafer_user_profile', args=(self.request.user.username,))\n\n\n@csrf_exempt\n@require_POST\ndef quicket_hook(request):\n '''\n Quicket.co.za can POST something like this when tickets are bought:\n {\n \"reference\": \"REF00123456\",\n \"event_id\": 123,\n \"event_name\": \"My Big Event\",\n \"amount\": 0.00,\n \"email\": \"[email protected]\",\n \"action\": \"checkout_started\",\n // Options are \"checkout_started\",\"checkout_cancelled\",\"eft_pending\",\n // \"checkout_completed\"\n \"tickets\": [\n {\n \"id\": 122,\n \"attendee_name\": \"\",\n \"attendee_email\": \"\",\n \"ticket_type\": \"Free Ticket\",\n \"price\": 0.00,\n \"barcode\": 12345,\n },\n ...\n ],\n }\n '''\n if request.GET.get('secret') != settings.WAFER_TICKETS_SECRET:\n raise PermissionDenied('Incorrect secret')\n\n # This is required for python 3, and in theory fine on python 2\n payload = json.loads(request.body.decode('utf8'))\n for ticket in payload['tickets']:\n import_ticket(ticket['barcode'], ticket['ticket_type'],\n ticket['attendee_email'])\n\n return HttpResponse(\"Noted\\n\", content_type='text/plain')\n\n\ndef import_ticket(ticket_barcode, ticket_type, email):\n if Ticket.objects.filter(barcode=ticket_barcode).exists():\n log.debug('Ticket already registered: %s', ticket_barcode)\n return\n\n # truncate long ticket type names to length allowed by database\n ticket_type = 
ticket_type[:TicketType.MAX_NAME_LENGTH]\n type_, created = TicketType.objects.get_or_create(name=ticket_type)\n\n UserModel = get_user_model()\n\n try:\n user = UserModel.objects.get(email=email, ticket=None)\n except UserModel.DoesNotExist:\n user = None\n except UserModel.MultipleObjectsReturned:\n # We're can't uniquely identify the user to associate this ticket\n # with, so leave it for them to figure out via the 'claim ticket'\n # interface\n user = None\n\n ticket = Ticket.objects.create(\n barcode=ticket_barcode,\n email=email,\n type=type_,\n user=user,\n )\n ticket.save()\n\n if user:\n log.info('Ticket registered: %s and linked to user', ticket)\n else:\n log.info('Ticket registered: %s. Unclaimed', ticket)\n", "path": "wafer/tickets/views.py"}]} | 1,390 | 146 |
gh_patches_debug_3467 | rasdani/github-patches | git_diff | getmoto__moto-1739 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[SES] Does not properly verify mailbox with display name
https://tools.ietf.org/html/rfc2822.html#section-3.4 defines two forms of valid mailbox:
* `[email protected]`
* `"Foo Bar" <[email protected]>`
SES supports both of these forms. Per https://github.com/spulec/moto/blob/master/moto/ses/models.py#L55, only the first form is supported by moto.
--- END ISSUE ---
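As an illustration (not moto's shipped code), both RFC 2822 mailbox forms can be normalised with the stdlib `email.utils.parseaddr` that `models.py` already imports, so the verification check can operate on the bare address:

```python
from email.utils import parseaddr

for source in ("foo@example.com", '"Foo Bar" <foo@example.com>'):
    display_name, address = parseaddr(source)
    print(repr(display_name), repr(address))
# '' 'foo@example.com'
# 'Foo Bar' 'foo@example.com'
# Verifying `address` rather than the raw `source` string accepts both forms.
```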
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `moto/ses/models.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import email
4 from email.utils import parseaddr
5
6 from moto.core import BaseBackend, BaseModel
7 from .exceptions import MessageRejectedError
8 from .utils import get_random_message_id
9
10
11 RECIPIENT_LIMIT = 50
12
13
14 class Message(BaseModel):
15
16 def __init__(self, message_id, source, subject, body, destinations):
17 self.id = message_id
18 self.source = source
19 self.subject = subject
20 self.body = body
21 self.destinations = destinations
22
23
24 class RawMessage(BaseModel):
25
26 def __init__(self, message_id, source, destinations, raw_data):
27 self.id = message_id
28 self.source = source
29 self.destinations = destinations
30 self.raw_data = raw_data
31
32
33 class SESQuota(BaseModel):
34
35 def __init__(self, sent):
36 self.sent = sent
37
38 @property
39 def sent_past_24(self):
40 return self.sent
41
42
43 class SESBackend(BaseBackend):
44
45 def __init__(self):
46 self.addresses = []
47 self.email_addresses = []
48 self.domains = []
49 self.sent_messages = []
50 self.sent_message_count = 0
51
52 def _is_verified_address(self, address):
53 if address in self.addresses:
54 return True
55 user, host = address.split('@', 1)
56 return host in self.domains
57
58 def verify_email_identity(self, address):
59 self.addresses.append(address)
60
61 def verify_email_address(self, address):
62 self.email_addresses.append(address)
63
64 def verify_domain(self, domain):
65 self.domains.append(domain)
66
67 def list_identities(self):
68 return self.domains + self.addresses
69
70 def list_verified_email_addresses(self):
71 return self.email_addresses
72
73 def delete_identity(self, identity):
74 if '@' in identity:
75 self.addresses.remove(identity)
76 else:
77 self.domains.remove(identity)
78
79 def send_email(self, source, subject, body, destinations):
80 recipient_count = sum(map(len, destinations.values()))
81 if recipient_count > RECIPIENT_LIMIT:
82 raise MessageRejectedError('Too many recipients.')
83 if not self._is_verified_address(source):
84 raise MessageRejectedError(
85 "Email address not verified %s" % source
86 )
87
88 message_id = get_random_message_id()
89 message = Message(message_id, source, subject, body, destinations)
90 self.sent_messages.append(message)
91 self.sent_message_count += recipient_count
92 return message
93
94 def send_raw_email(self, source, destinations, raw_data):
95 if source is not None:
96 _, source_email_address = parseaddr(source)
97 if source_email_address not in self.addresses:
98 raise MessageRejectedError(
99 "Did not have authority to send from email %s" % source_email_address
100 )
101
102 recipient_count = len(destinations)
103 message = email.message_from_string(raw_data)
104 if source is None:
105 if message['from'] is None:
106 raise MessageRejectedError(
107 "Source not specified"
108 )
109
110 _, source_email_address = parseaddr(message['from'])
111 if source_email_address not in self.addresses:
112 raise MessageRejectedError(
113 "Did not have authority to send from email %s" % source_email_address
114 )
115
116 for header in 'TO', 'CC', 'BCC':
117 recipient_count += sum(
118 d.strip() and 1 or 0
119 for d in message.get(header, '').split(',')
120 )
121 if recipient_count > RECIPIENT_LIMIT:
122 raise MessageRejectedError('Too many recipients.')
123
124 self.sent_message_count += recipient_count
125 message_id = get_random_message_id()
126 message = RawMessage(message_id, source, destinations, raw_data)
127 self.sent_messages.append(message)
128 return message
129
130 def get_send_quota(self):
131 return SESQuota(self.sent_message_count)
132
133
134 ses_backend = SESBackend()
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/moto/ses/models.py b/moto/ses/models.py
--- a/moto/ses/models.py
+++ b/moto/ses/models.py
@@ -49,7 +49,8 @@
self.sent_messages = []
self.sent_message_count = 0
- def _is_verified_address(self, address):
+ def _is_verified_address(self, source):
+ _, address = parseaddr(source)
if address in self.addresses:
return True
user, host = address.split('@', 1)
| {"golden_diff": "diff --git a/moto/ses/models.py b/moto/ses/models.py\n--- a/moto/ses/models.py\n+++ b/moto/ses/models.py\n@@ -49,7 +49,8 @@\n self.sent_messages = []\n self.sent_message_count = 0\n \n- def _is_verified_address(self, address):\n+ def _is_verified_address(self, source):\n+ _, address = parseaddr(source)\n if address in self.addresses:\n return True\n user, host = address.split('@', 1)\n", "issue": "[SES] Does not properly verify mailbox with display name\nhttps://tools.ietf.org/html/rfc2822.html#section-3.4 defines two forms of valid mailbox:\r\n\r\n* `[email protected]`\r\n* `\"Foo Bar\" <[email protected]>`\r\n\r\nSES supports both of these forms. Per https://github.com/spulec/moto/blob/master/moto/ses/models.py#L55, only the first form is supported by moto.\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport email\nfrom email.utils import parseaddr\n\nfrom moto.core import BaseBackend, BaseModel\nfrom .exceptions import MessageRejectedError\nfrom .utils import get_random_message_id\n\n\nRECIPIENT_LIMIT = 50\n\n\nclass Message(BaseModel):\n\n def __init__(self, message_id, source, subject, body, destinations):\n self.id = message_id\n self.source = source\n self.subject = subject\n self.body = body\n self.destinations = destinations\n\n\nclass RawMessage(BaseModel):\n\n def __init__(self, message_id, source, destinations, raw_data):\n self.id = message_id\n self.source = source\n self.destinations = destinations\n self.raw_data = raw_data\n\n\nclass SESQuota(BaseModel):\n\n def __init__(self, sent):\n self.sent = sent\n\n @property\n def sent_past_24(self):\n return self.sent\n\n\nclass SESBackend(BaseBackend):\n\n def __init__(self):\n self.addresses = []\n self.email_addresses = []\n self.domains = []\n self.sent_messages = []\n self.sent_message_count = 0\n\n def _is_verified_address(self, address):\n if address in self.addresses:\n return True\n user, host = address.split('@', 1)\n return host in self.domains\n\n def verify_email_identity(self, address):\n self.addresses.append(address)\n\n def verify_email_address(self, address):\n self.email_addresses.append(address)\n\n def verify_domain(self, domain):\n self.domains.append(domain)\n\n def list_identities(self):\n return self.domains + self.addresses\n\n def list_verified_email_addresses(self):\n return self.email_addresses\n\n def delete_identity(self, identity):\n if '@' in identity:\n self.addresses.remove(identity)\n else:\n self.domains.remove(identity)\n\n def send_email(self, source, subject, body, destinations):\n recipient_count = sum(map(len, destinations.values()))\n if recipient_count > RECIPIENT_LIMIT:\n raise MessageRejectedError('Too many recipients.')\n if not self._is_verified_address(source):\n raise MessageRejectedError(\n \"Email address not verified %s\" % source\n )\n\n message_id = get_random_message_id()\n message = Message(message_id, source, subject, body, destinations)\n self.sent_messages.append(message)\n self.sent_message_count += recipient_count\n return message\n\n def send_raw_email(self, source, destinations, raw_data):\n if source is not None:\n _, source_email_address = parseaddr(source)\n if source_email_address not in self.addresses:\n raise MessageRejectedError(\n \"Did not have authority to send from email %s\" % source_email_address\n )\n\n recipient_count = len(destinations)\n message = email.message_from_string(raw_data)\n if source is None:\n if message['from'] is None:\n raise MessageRejectedError(\n \"Source not specified\"\n )\n\n _, 
source_email_address = parseaddr(message['from'])\n if source_email_address not in self.addresses:\n raise MessageRejectedError(\n \"Did not have authority to send from email %s\" % source_email_address\n )\n\n for header in 'TO', 'CC', 'BCC':\n recipient_count += sum(\n d.strip() and 1 or 0\n for d in message.get(header, '').split(',')\n )\n if recipient_count > RECIPIENT_LIMIT:\n raise MessageRejectedError('Too many recipients.')\n\n self.sent_message_count += recipient_count\n message_id = get_random_message_id()\n message = RawMessage(message_id, source, destinations, raw_data)\n self.sent_messages.append(message)\n return message\n\n def get_send_quota(self):\n return SESQuota(self.sent_message_count)\n\n\nses_backend = SESBackend()\n", "path": "moto/ses/models.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport email\nfrom email.utils import parseaddr\n\nfrom moto.core import BaseBackend, BaseModel\nfrom .exceptions import MessageRejectedError\nfrom .utils import get_random_message_id\n\n\nRECIPIENT_LIMIT = 50\n\n\nclass Message(BaseModel):\n\n def __init__(self, message_id, source, subject, body, destinations):\n self.id = message_id\n self.source = source\n self.subject = subject\n self.body = body\n self.destinations = destinations\n\n\nclass RawMessage(BaseModel):\n\n def __init__(self, message_id, source, destinations, raw_data):\n self.id = message_id\n self.source = source\n self.destinations = destinations\n self.raw_data = raw_data\n\n\nclass SESQuota(BaseModel):\n\n def __init__(self, sent):\n self.sent = sent\n\n @property\n def sent_past_24(self):\n return self.sent\n\n\nclass SESBackend(BaseBackend):\n\n def __init__(self):\n self.addresses = []\n self.email_addresses = []\n self.domains = []\n self.sent_messages = []\n self.sent_message_count = 0\n\n def _is_verified_address(self, source):\n _, address = parseaddr(source)\n if address in self.addresses:\n return True\n user, host = address.split('@', 1)\n return host in self.domains\n\n def verify_email_identity(self, address):\n self.addresses.append(address)\n\n def verify_email_address(self, address):\n self.email_addresses.append(address)\n\n def verify_domain(self, domain):\n self.domains.append(domain)\n\n def list_identities(self):\n return self.domains + self.addresses\n\n def list_verified_email_addresses(self):\n return self.email_addresses\n\n def delete_identity(self, identity):\n if '@' in identity:\n self.addresses.remove(identity)\n else:\n self.domains.remove(identity)\n\n def send_email(self, source, subject, body, destinations):\n recipient_count = sum(map(len, destinations.values()))\n if recipient_count > RECIPIENT_LIMIT:\n raise MessageRejectedError('Too many recipients.')\n if not self._is_verified_address(source):\n raise MessageRejectedError(\n \"Email address not verified %s\" % source\n )\n\n message_id = get_random_message_id()\n message = Message(message_id, source, subject, body, destinations)\n self.sent_messages.append(message)\n self.sent_message_count += recipient_count\n return message\n\n def send_raw_email(self, source, destinations, raw_data):\n if source is not None:\n _, source_email_address = parseaddr(source)\n if source_email_address not in self.addresses:\n raise MessageRejectedError(\n \"Did not have authority to send from email %s\" % source_email_address\n )\n\n recipient_count = len(destinations)\n message = email.message_from_string(raw_data)\n if source is None:\n if message['from'] is None:\n raise MessageRejectedError(\n \"Source not 
specified\"\n )\n\n _, source_email_address = parseaddr(message['from'])\n if source_email_address not in self.addresses:\n raise MessageRejectedError(\n \"Did not have authority to send from email %s\" % source_email_address\n )\n\n for header in 'TO', 'CC', 'BCC':\n recipient_count += sum(\n d.strip() and 1 or 0\n for d in message.get(header, '').split(',')\n )\n if recipient_count > RECIPIENT_LIMIT:\n raise MessageRejectedError('Too many recipients.')\n\n self.sent_message_count += recipient_count\n message_id = get_random_message_id()\n message = RawMessage(message_id, source, destinations, raw_data)\n self.sent_messages.append(message)\n return message\n\n def get_send_quota(self):\n return SESQuota(self.sent_message_count)\n\n\nses_backend = SESBackend()\n", "path": "moto/ses/models.py"}]} | 1,499 | 118 |
gh_patches_debug_31477 | rasdani/github-patches | git_diff | talonhub__community-179 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Windows switching could use fuzzier matching
Right now when you want to switch window focus you have to say a word that matches a generated list of running applications, and this list has an override that lets you more easily say complex application names, for instance `term` is `iTerm2` or whatever.
I propose we change the way the list works so that actually we just use `focus <user.text>` for the command, and then `switcher_focus()` does the lookup for whatever text it gets inside the running application list. Doing it this way allows us to leverage any additional vocabulary that's been added to `vocabulary.py` without necessarily having to hardcode it again inside of `switcher.py`.
in addition I propose that we introduce slightly fuzzer matching. IFor example, my `vocabulary.py` as a rule for "key pass" which becomes "keepass", and this is helpful when I am just talking to other people in chat or whatever. But the actual name of the running process is `KeePassXC`. I don't necessarily want to always be typing that exact word into chat with other people when I say `key pass` though. I suspect this type of thing will be increasingly common the more people are using it with different applications
As it stands if I wanted to match the app with current switcher I'd have to put "key pass":"KeePassXC" override in `switcher.py`, instead of just matching the result I already have in `vocabulary.py`.
In addition to using `vocabulary.py`, I propose we check for a partial match using `startswith()`, so then I would be able to say something like "focus key pass" and it would actually come through as `focus keepass`, which would in turn match against `keepassxc`.
We can of course keep the overrides for certain things that are a bit harder to say in `switcher.py`, for instance I think people aren't going to have `term` becoming `iTerm2` in `vocabulary.py`.
This can mostly all be done with a simple `switcher_focus()` change like:
```
running = ctx.lists["self.running"]
wanted_app = None
for running_name in running.keys():
if running_name == name or running_name.lower().startswith(name):
wanted_app = running[running_name]
break
if wanted_app is None:
return
for app in ui.apps():
if app.name == wanted_app and not app.background:
#os.system("i3-msg '[class=\"(?)%s\"] focus'" % app.name)
app.focus()
break
```
Not sending a PR for now as I figured I'd run it past others first.
One possible change could be that it does two passes: the first pass matches on explicit names so you don't accidentally match against something that has a more explicit match, and then, if there are no explicit matches, it tries a second pass with fuzzy matching.
--- END ISSUE ---
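A hedged sketch of the two-pass idea floated at the end of the issue; the names here (`running`, `resolve_app`) follow the snippet above rather than any shipped code:

```python
def resolve_app(name, running):
    # First pass: exact key match wins, so explicit entries are never shadowed.
    for running_name, app_name in running.items():
        if running_name == name:
            return app_name
    # Second pass: fall back to fuzzy prefix matching.
    for running_name, app_name in running.items():
        if running_name.lower().startswith(name):
            return app_name
    return None

running = {"keepassxc": "KeePassXC", "term": "iTerm2"}
print(resolve_app("keepass", running))  # -> "KeePassXC" via the prefix pass
print(resolve_app("term", running))     # -> "iTerm2" via the exact pass
```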
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `code/switcher.py`
Content:
```
1 from talon import app, Module, Context, actions, ui, imgui
2 from talon.voice import Capture
3 import re
4 import time
5 import os
6
7 # Construct at startup a list of overides for application names (similar to how homophone list is managed)
8 # ie for a given talon recognition word set `one note`, recognized this in these switcher functions as `ONENOTE`
9 # the list is a comma seperated `<Recognized Words>, <Overide>`
10 # TODO: Consider put list csv's (homophones.csv, app_name_overrides.csv) files together in a seperate directory,`knausj_talon/lists`
11 cwd = os.path.dirname(os.path.realpath(__file__))
12 overrides_file = os.path.join(
13 cwd, "app_names", f"app_name_overrides.{app.platform}.csv"
14 )
15 overrides = {}
16 with open(overrides_file, "r") as f:
17 for line in f:
18 line = line.rstrip()
19 line = line.split(",")
20 overrides[line[0].lower()] = line[1].strip()
21
22 print(f"knausj_talon.switcher------------ app name overrides:{overrides}")
23
24 app_cache = {}
25
26
27 mod = Module()
28 mod.list("running", desc="all running applications")
29 mod.list("launch", desc="all launchable applications")
30
31
32 @mod.capture
33 def running_applications(m) -> str:
34 "Returns a single application name"
35
36
37 @mod.capture
38 def launch_applications(m) -> Capture:
39 "Returns a single application name"
40
41
42 ctx = Context()
43
44
45 @ctx.capture(rule="{self.running}")
46 def running_applications(m):
47 return m.running
48
49
50 @ctx.capture(rule="{self.launch}")
51 def launch_applications(m):
52 return m.launch
53
54
55 def split_camel(word):
56 return re.findall(r"[0-9A-Z]*[a-z]+(?=[A-Z]|$)", word)
57
58
59 def get_words(name):
60 words = re.findall(r"[0-9A-Za-z]+", name)
61 out = []
62 for word in words:
63 out += split_camel(word)
64 return out
65
66
67 @mod.action_class
68 class Actions:
69 def switcher_focus(name: str):
70 """Focus a new application by name"""
71 for app in ui.apps():
72 # print(f"--------- app.name:{app.name} app.bundler:{app.bundle}")
73 if name in app.name and not app.background:
74 app.focus()
75 break
76
77 def switcher_launch(path: str):
78 """Launch a new application by path"""
79 ui.launch(path=path)
80
81 def switcher_list_running():
82 """Lists all running applications"""
83 gui.show()
84
85 def switcher_hide_running():
86 """Hides list of running applications"""
87 gui.hide()
88
89
90 @imgui.open(software=False)
91 def gui(gui: imgui.GUI):
92 gui.text("Names of running applications")
93 gui.line()
94 for line in ctx.lists["self.running"]:
95 gui.text(line)
96
97
98 def update_lists():
99 running = {}
100 launch = {}
101
102 for cur_app in ui.apps(background=False):
103 name = cur_app.name
104 if name.endswith(".exe"):
105 name = name.rsplit(".", 1)[0]
106 words = get_words(name)
107 for word in words:
108 if word and not word in running:
109 running[word.lower()] = cur_app.name
110 running[name.lower()] = cur_app.name
111 for override in overrides:
112 running[override] = overrides[override]
113
114 if app.platform == "mac":
115 for base in "/Applications", "/Applications/Utilities":
116 for name in os.listdir(base):
117 path = os.path.join(base, name)
118 name = name.rsplit(".", 1)[0].lower()
119 launch[name] = path
120 words = name.split(" ")
121 for word in words:
122 if word and word not in launch:
123 if len(name) > 6 and len(word) < 3:
124 continue
125 launch[word] = path
126
127 lists = {
128 "self.running": running,
129 "self.launch": launch,
130 }
131
132 # batch update lists
133 ctx.lists.update(lists)
134
135
136 def ui_event(event, arg):
137 if event in ("app_activate", "app_launch", "app_close", "win_open", "win_close"):
138 # print(f'------------------ event:{event} arg:{arg}')
139 update_lists()
140
141
142 ui.register("", ui_event)
143 update_lists()
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/code/switcher.py b/code/switcher.py
--- a/code/switcher.py
+++ b/code/switcher.py
@@ -1,8 +1,9 @@
-from talon import app, Module, Context, actions, ui, imgui
-from talon.voice import Capture
+import os
import re
import time
-import os
+
+from talon import Context, Module, app, imgui, ui
+from talon.voice import Capture
# Construct at startup a list of overides for application names (similar to how homophone list is managed)
# ie for a given talon recognition word set `one note`, recognized this in these switcher functions as `ONENOTE`
@@ -68,10 +69,18 @@
class Actions:
def switcher_focus(name: str):
"""Focus a new application by name"""
- for app in ui.apps():
- # print(f"--------- app.name:{app.name} app.bundler:{app.bundle}")
- if name in app.name and not app.background:
- app.focus()
+ running = ctx.lists["self.running"]
+ wanted_app = None
+ for running_name in running.keys():
+ if running_name == name or running_name.lower().startswith(name):
+ wanted_app = running[running_name]
+ break
+ if wanted_app is None:
+ return
+
+ for cur_app in ui.apps():
+ if cur_app.name == wanted_app and not cur_app.background:
+ cur_app.focus()
break
def switcher_launch(path: str):
@@ -105,7 +114,7 @@
name = name.rsplit(".", 1)[0]
words = get_words(name)
for word in words:
- if word and not word in running:
+ if word and word not in running:
running[word.lower()] = cur_app.name
running[name.lower()] = cur_app.name
for override in overrides:
| {"golden_diff": "diff --git a/code/switcher.py b/code/switcher.py\n--- a/code/switcher.py\n+++ b/code/switcher.py\n@@ -1,8 +1,9 @@\n-from talon import app, Module, Context, actions, ui, imgui\n-from talon.voice import Capture\n+import os\n import re\n import time\n-import os\n+\n+from talon import Context, Module, app, imgui, ui\n+from talon.voice import Capture\n \n # Construct at startup a list of overides for application names (similar to how homophone list is managed)\n # ie for a given talon recognition word set `one note`, recognized this in these switcher functions as `ONENOTE`\n@@ -68,10 +69,18 @@\n class Actions:\n def switcher_focus(name: str):\n \"\"\"Focus a new application by name\"\"\"\n- for app in ui.apps():\n- # print(f\"--------- app.name:{app.name} app.bundler:{app.bundle}\")\n- if name in app.name and not app.background:\n- app.focus()\n+ running = ctx.lists[\"self.running\"]\n+ wanted_app = None\n+ for running_name in running.keys():\n+ if running_name == name or running_name.lower().startswith(name):\n+ wanted_app = running[running_name]\n+ break\n+ if wanted_app is None:\n+ return\n+\n+ for cur_app in ui.apps():\n+ if cur_app.name == wanted_app and not cur_app.background:\n+ cur_app.focus()\n break\n \n def switcher_launch(path: str):\n@@ -105,7 +114,7 @@\n name = name.rsplit(\".\", 1)[0]\n words = get_words(name)\n for word in words:\n- if word and not word in running:\n+ if word and word not in running:\n running[word.lower()] = cur_app.name\n running[name.lower()] = cur_app.name\n for override in overrides:\n", "issue": "Windows switching could use fuzzier matching\nRight now when you want to switch window focus you have to say a word the matches a generated list of running applications, and this list has an override that lets you more easily say the names of complex application names, for instance `term` is `iTerm2` or whatever.\r\n\r\nI propose we change the way the list works so that actually we just use `focus <user.text>` for the command, and then `switcher_focus()` does the lookup for whatever text it gets inside the running application list. Doing it this way allows us to leverage any additional vocabulary that's been added to `vocabulary.py` without necessarily having to hardcode it again inside of `switcher.py`. \r\n\r\nin addition I propose that we introduce slightly fuzzer matching. IFor example, my `vocabulary.py` as a rule for \"key pass\" which becomes \"keepass\", and this is helpful when I am just talking to other people in chat or whatever. But the actual name of the running process is `KeePassXC`. I don't necessarily want to always be typing that exact word into chat with other people when I say `key pass` though. I suspect this type of thing will be increasingly common the more people are using it with different applications \r\n\r\nAs it stands if I wanted to match the app with current switcher I'd have to put \"key pass\":\"KeePassXC\" override in `switcher.py`, instead of just matching the result I already have in `vocabulary.py`. \r\n\r\nIn addition to using `vocabulary.py`, I propose we check for a partial match using `startswith()`, so then I would be able to say something like \"focus key pass\" and it would actually come through as `focus keepass`, which would in turn match against `keepassxc`. 
\r\n\r\nWe can of course keep the overrides for certain things that are a bit harder to say in `switcher.py`, for instance I think people aren't going to have `term` becoming `iTerm2` in `vocabulary.py`.\r\n\r\nThis can mostly all be done with a simple `switcher_focus()` change like:\r\n\r\n```\r\n running = ctx.lists[\"self.running\"]\r\n wanted_app = None\r\n for running_name in running.keys():\r\n if running_name == name or running_name.lower().startswith(name):\r\n wanted_app = running[running_name]\r\n break\r\n if wanted_app is None:\r\n return\r\n\r\n for app in ui.apps():\r\n if app.name == wanted_app and not app.background:\r\n #os.system(\"i3-msg '[class=\\\"(?)%s\\\"] focus'\" % app.name)\r\n app.focus()\r\n break\r\n```\r\n\r\nNot sending a PR for now as I figured I'd run it past others first.\r\n\r\nOne possible change could be that it does two passes, the first pass matches on explicit name matches so you don't accidentally match against something that has a more explicit match, and then if there are no explicit matches, it tries a second pass with fuzzy matching.\n", "before_files": [{"content": "from talon import app, Module, Context, actions, ui, imgui\nfrom talon.voice import Capture\nimport re\nimport time\nimport os\n\n# Construct at startup a list of overides for application names (similar to how homophone list is managed)\n# ie for a given talon recognition word set `one note`, recognized this in these switcher functions as `ONENOTE`\n# the list is a comma seperated `<Recognized Words>, <Overide>`\n# TODO: Consider put list csv's (homophones.csv, app_name_overrides.csv) files together in a seperate directory,`knausj_talon/lists`\ncwd = os.path.dirname(os.path.realpath(__file__))\noverrides_file = os.path.join(\n cwd, \"app_names\", f\"app_name_overrides.{app.platform}.csv\"\n)\noverrides = {}\nwith open(overrides_file, \"r\") as f:\n for line in f:\n line = line.rstrip()\n line = line.split(\",\")\n overrides[line[0].lower()] = line[1].strip()\n\nprint(f\"knausj_talon.switcher------------ app name overrides:{overrides}\")\n\napp_cache = {}\n\n\nmod = Module()\nmod.list(\"running\", desc=\"all running applications\")\nmod.list(\"launch\", desc=\"all launchable applications\")\n\n\[email protected]\ndef running_applications(m) -> str:\n \"Returns a single application name\"\n\n\[email protected]\ndef launch_applications(m) -> Capture:\n \"Returns a single application name\"\n\n\nctx = Context()\n\n\[email protected](rule=\"{self.running}\")\ndef running_applications(m):\n return m.running\n\n\[email protected](rule=\"{self.launch}\")\ndef launch_applications(m):\n return m.launch\n\n\ndef split_camel(word):\n return re.findall(r\"[0-9A-Z]*[a-z]+(?=[A-Z]|$)\", word)\n\n\ndef get_words(name):\n words = re.findall(r\"[0-9A-Za-z]+\", name)\n out = []\n for word in words:\n out += split_camel(word)\n return out\n\n\[email protected]_class\nclass Actions:\n def switcher_focus(name: str):\n \"\"\"Focus a new application by name\"\"\"\n for app in ui.apps():\n # print(f\"--------- app.name:{app.name} app.bundler:{app.bundle}\")\n if name in app.name and not app.background:\n app.focus()\n break\n\n def switcher_launch(path: str):\n \"\"\"Launch a new application by path\"\"\"\n ui.launch(path=path)\n\n def switcher_list_running():\n \"\"\"Lists all running applications\"\"\"\n gui.show()\n\n def switcher_hide_running():\n \"\"\"Hides list of running applications\"\"\"\n gui.hide()\n\n\[email protected](software=False)\ndef gui(gui: imgui.GUI):\n gui.text(\"Names of running 
applications\")\n gui.line()\n for line in ctx.lists[\"self.running\"]:\n gui.text(line)\n\n\ndef update_lists():\n running = {}\n launch = {}\n\n for cur_app in ui.apps(background=False):\n name = cur_app.name\n if name.endswith(\".exe\"):\n name = name.rsplit(\".\", 1)[0]\n words = get_words(name)\n for word in words:\n if word and not word in running:\n running[word.lower()] = cur_app.name\n running[name.lower()] = cur_app.name\n for override in overrides:\n running[override] = overrides[override]\n\n if app.platform == \"mac\":\n for base in \"/Applications\", \"/Applications/Utilities\":\n for name in os.listdir(base):\n path = os.path.join(base, name)\n name = name.rsplit(\".\", 1)[0].lower()\n launch[name] = path\n words = name.split(\" \")\n for word in words:\n if word and word not in launch:\n if len(name) > 6 and len(word) < 3:\n continue\n launch[word] = path\n\n lists = {\n \"self.running\": running,\n \"self.launch\": launch,\n }\n\n # batch update lists\n ctx.lists.update(lists)\n\n\ndef ui_event(event, arg):\n if event in (\"app_activate\", \"app_launch\", \"app_close\", \"win_open\", \"win_close\"):\n # print(f'------------------ event:{event} arg:{arg}')\n update_lists()\n\n\nui.register(\"\", ui_event)\nupdate_lists()\n", "path": "code/switcher.py"}], "after_files": [{"content": "import os\nimport re\nimport time\n\nfrom talon import Context, Module, app, imgui, ui\nfrom talon.voice import Capture\n\n# Construct at startup a list of overides for application names (similar to how homophone list is managed)\n# ie for a given talon recognition word set `one note`, recognized this in these switcher functions as `ONENOTE`\n# the list is a comma seperated `<Recognized Words>, <Overide>`\n# TODO: Consider put list csv's (homophones.csv, app_name_overrides.csv) files together in a seperate directory,`knausj_talon/lists`\ncwd = os.path.dirname(os.path.realpath(__file__))\noverrides_file = os.path.join(\n cwd, \"app_names\", f\"app_name_overrides.{app.platform}.csv\"\n)\noverrides = {}\nwith open(overrides_file, \"r\") as f:\n for line in f:\n line = line.rstrip()\n line = line.split(\",\")\n overrides[line[0].lower()] = line[1].strip()\n\nprint(f\"knausj_talon.switcher------------ app name overrides:{overrides}\")\n\napp_cache = {}\n\n\nmod = Module()\nmod.list(\"running\", desc=\"all running applications\")\nmod.list(\"launch\", desc=\"all launchable applications\")\n\n\[email protected]\ndef running_applications(m) -> str:\n \"Returns a single application name\"\n\n\[email protected]\ndef launch_applications(m) -> Capture:\n \"Returns a single application name\"\n\n\nctx = Context()\n\n\[email protected](rule=\"{self.running}\")\ndef running_applications(m):\n return m.running\n\n\[email protected](rule=\"{self.launch}\")\ndef launch_applications(m):\n return m.launch\n\n\ndef split_camel(word):\n return re.findall(r\"[0-9A-Z]*[a-z]+(?=[A-Z]|$)\", word)\n\n\ndef get_words(name):\n words = re.findall(r\"[0-9A-Za-z]+\", name)\n out = []\n for word in words:\n out += split_camel(word)\n return out\n\n\[email protected]_class\nclass Actions:\n def switcher_focus(name: str):\n \"\"\"Focus a new application by name\"\"\"\n running = ctx.lists[\"self.running\"]\n wanted_app = None\n for running_name in running.keys():\n if running_name == name or running_name.lower().startswith(name):\n wanted_app = running[running_name]\n break\n if wanted_app is None:\n return\n\n for cur_app in ui.apps():\n if cur_app.name == wanted_app and not cur_app.background:\n cur_app.focus()\n break\n\n def 
switcher_launch(path: str):\n \"\"\"Launch a new application by path\"\"\"\n ui.launch(path=path)\n\n def switcher_list_running():\n \"\"\"Lists all running applications\"\"\"\n gui.show()\n\n def switcher_hide_running():\n \"\"\"Hides list of running applications\"\"\"\n gui.hide()\n\n\[email protected](software=False)\ndef gui(gui: imgui.GUI):\n gui.text(\"Names of running applications\")\n gui.line()\n for line in ctx.lists[\"self.running\"]:\n gui.text(line)\n\n\ndef update_lists():\n running = {}\n launch = {}\n\n for cur_app in ui.apps(background=False):\n name = cur_app.name\n if name.endswith(\".exe\"):\n name = name.rsplit(\".\", 1)[0]\n words = get_words(name)\n for word in words:\n if word and word not in running:\n running[word.lower()] = cur_app.name\n running[name.lower()] = cur_app.name\n for override in overrides:\n running[override] = overrides[override]\n\n if app.platform == \"mac\":\n for base in \"/Applications\", \"/Applications/Utilities\":\n for name in os.listdir(base):\n path = os.path.join(base, name)\n name = name.rsplit(\".\", 1)[0].lower()\n launch[name] = path\n words = name.split(\" \")\n for word in words:\n if word and word not in launch:\n if len(name) > 6 and len(word) < 3:\n continue\n launch[word] = path\n\n lists = {\n \"self.running\": running,\n \"self.launch\": launch,\n }\n\n # batch update lists\n ctx.lists.update(lists)\n\n\ndef ui_event(event, arg):\n if event in (\"app_activate\", \"app_launch\", \"app_close\", \"win_open\", \"win_close\"):\n # print(f'------------------ event:{event} arg:{arg}')\n update_lists()\n\n\nui.register(\"\", ui_event)\nupdate_lists()\n", "path": "code/switcher.py"}]} | 2,152 | 431 |
gh_patches_debug_22587 | rasdani/github-patches | git_diff | buildbot__buildbot-3623 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Running with --nodaemon and PID files
This ticket is a migrated Trac ticket [3608](http://trac.buildbot.net/ticket/3608)
People contributed to the original ticket: @unknown_contributor
Ticket created on: `Sep 12 2016`
Ticket last modified on: `Sep 12 2016`
---
Both the master and the worker still use PID files while running with --nodaemon. I've had issues with workers running with --nodaemon not starting because of an old, stale PID file, corresponding to an actual process belonging to another user.
This can be thought of as inconsistent, and it harms the writing of really simple non-forking configuration files to manage the process through an external service manager (systemd, supervisord, etc.).
It seems that the cousin ``twistd`` executable has a --pidfile argument that can be used to mean "no pid file". This is explained notably in https://twistedmatrix.com/documents/current/core/howto/systemd.html.
We could expose that, or we could decide that --nodaemon implies no pid file.
I believe this is true in Eight and Nine, and that a fix could be easily backported
(marking this as minor, because it's easy to circumvent by putting appropriate `rm`s in unit files or other confs)
---
--- END ISSUE ---
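For illustration only (an assumption about one possible fix, not the shipped patch): if `--nodaemon` is to imply no pid file, the `twistd` argv built in `launch()`/`launchNoDaemon()` could pass an empty `--pidfile`, which `twistd` treats as "don't write a pid file", per the systemd howto linked above:

```python
nodaemon = True

argv = ["twistd",
        "--no_save",
        "--logfile=twistd.log",
        "--python=buildbot.tac"]
if nodaemon:
    # An empty value tells twistd not to create (or check) any pid file.
    argv.extend(["--nodaemon", "--pidfile="])

print(argv)
```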
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `worker/buildbot_worker/scripts/start.py`
Content:
```
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 from __future__ import absolute_import
17 from __future__ import division
18 from __future__ import print_function
19
20 import os
21 import sys
22 import time
23
24 from buildbot_worker.scripts import base
25 from buildbot_worker.util import rewrap
26
27
28 class Follower(object):
29
30 def follow(self):
31 from twisted.internet import reactor
32 from buildbot_worker.scripts.logwatcher import LogWatcher
33 self.rc = 0
34 print("Following twistd.log until startup finished..")
35 lw = LogWatcher("twistd.log")
36 d = lw.start()
37 d.addCallbacks(self._success, self._failure)
38 reactor.run()
39 return self.rc
40
41 def _success(self, processtype):
42 from twisted.internet import reactor
43 print("The %s appears to have (re)started correctly." % processtype)
44 self.rc = 0
45 reactor.stop()
46
47 def _failure(self, why):
48 from twisted.internet import reactor
49 from buildbot_worker.scripts.logwatcher import WorkerTimeoutError
50 if why.check(WorkerTimeoutError):
51 print(rewrap("""\
52 The worker took more than 10 seconds to start and/or connect
53 to the buildmaster, so we were unable to confirm that it
54 started and connected correctly.
55 Please 'tail twistd.log' and look for a line that says
56 'message from master: attached' to verify correct startup.
57 If you see a bunch of messages like 'will retry in 6 seconds',
58 your worker might not have the correct hostname or portnumber
59 for the buildmaster, or the buildmaster might not be running.
60 If you see messages like
61 'Failure: twisted.cred.error.UnauthorizedLogin'
62 then your worker might be using the wrong botname or password.
63 Please correct these problems and then restart the worker.
64 """))
65 else:
66 print(rewrap("""\
67 Unable to confirm that the worker started correctly.
68 You may need to stop it, fix the config file, and restart.
69 """))
70 print(why)
71 self.rc = 1
72 reactor.stop()
73
74
75 def startCommand(config):
76 basedir = config['basedir']
77 if not base.isWorkerDir(basedir):
78 return 1
79
80 return startWorker(basedir, config['quiet'], config['nodaemon'])
81
82
83 def startWorker(basedir, quiet, nodaemon):
84 """
85 Start worker process.
86
87 Fork and start twisted application described in basedir buildbot.tac file.
88 Print it's log messages to stdout for a while and try to figure out if
89 start was successful.
90
91 If quiet or nodaemon parameters are True, or we are running on a win32
92 system, will not fork and log will not be printed to stdout.
93
94 @param basedir: worker's basedir path
95 @param quiet: don't display startup log messages
96 @param nodaemon: don't daemonize (stay in foreground)
97 @return: 0 if worker was successfully started,
98 1 if we are not sure that worker started successfully
99 """
100
101 os.chdir(basedir)
102 if quiet or nodaemon:
103 return launch(nodaemon)
104
105 # we probably can't do this os.fork under windows
106 from twisted.python.runtime import platformType
107 if platformType == "win32":
108 return launch(nodaemon)
109
110 # fork a child to launch the daemon, while the parent process tails the
111 # logfile
112 if os.fork():
113 # this is the parent
114 rc = Follower().follow()
115 return rc
116 # this is the child: give the logfile-watching parent a chance to start
117 # watching it before we start the daemon
118 time.sleep(0.2)
119 launch(nodaemon)
120
121
122 def launch(nodaemon):
123 sys.path.insert(0, os.path.abspath(os.getcwd()))
124
125 # see if we can launch the application without actually having to
126 # spawn twistd, since spawning processes correctly is a real hassle
127 # on windows.
128 from twisted.python.runtime import platformType
129 argv = ["twistd",
130 "--no_save",
131 "--logfile=twistd.log", # windows doesn't use the same default
132 "--python=buildbot.tac"]
133 if nodaemon:
134 argv.extend(['--nodaemon'])
135 sys.argv = argv
136
137 # this is copied from bin/twistd. twisted-2.0.0 through 2.4.0 use
138 # _twistw.run . Twisted-2.5.0 and later use twistd.run, even for
139 # windows.
140 from twisted import __version__
141 major, minor, ignored = __version__.split(".", 2)
142 major = int(major)
143 minor = int(minor)
144 if (platformType == "win32" and (major == 2 and minor < 5)):
145 from twisted.scripts import _twistw
146 run = _twistw.run
147 else:
148 from twisted.scripts import twistd
149 run = twistd.run
150 run()
151
```
Path: `master/buildbot/scripts/start.py`
Content:
```
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 from __future__ import absolute_import
17 from __future__ import division
18 from __future__ import print_function
19
20 import os
21 import sys
22
23 from twisted.internet import protocol
24 from twisted.internet import reactor
25 from twisted.python.runtime import platformType
26
27 from buildbot.scripts import base
28 from buildbot.scripts.logwatcher import BuildmasterStartupError
29 from buildbot.scripts.logwatcher import BuildmasterTimeoutError
30 from buildbot.scripts.logwatcher import LogWatcher
31 from buildbot.scripts.logwatcher import ReconfigError
32 from buildbot.util import rewrap
33
34
35 class Follower:
36
37 def follow(self, basedir):
38 self.rc = 0
39 print("Following twistd.log until startup finished..")
40 lw = LogWatcher(os.path.join(basedir, "twistd.log"))
41 d = lw.start()
42 d.addCallbacks(self._success, self._failure)
43 reactor.run()
44 return self.rc
45
46 def _success(self, _):
47 print("The buildmaster appears to have (re)started correctly.")
48 self.rc = 0
49 reactor.stop()
50
51 def _failure(self, why):
52 if why.check(BuildmasterTimeoutError):
53 print(rewrap("""\
54 The buildmaster took more than 10 seconds to start, so we were
55 unable to confirm that it started correctly.
56 Please 'tail twistd.log' and look for a line that says
57 'BuildMaster is running' to verify correct startup.
58 """))
59 elif why.check(ReconfigError):
60 print(rewrap("""\
61 The buildmaster appears to have encountered an error in the
62 master.cfg config file during startup.
63 Please inspect and fix master.cfg, then restart the
64 buildmaster.
65 """))
66 elif why.check(BuildmasterStartupError):
67 print(rewrap("""\
68 The buildmaster startup failed. Please see 'twistd.log' for
69 possible reason.
70 """))
71 else:
72 print(rewrap("""\
73 Unable to confirm that the buildmaster started correctly.
74 You may need to stop it, fix the config file, and restart.
75 """))
76 print(why)
77 self.rc = 1
78 reactor.stop()
79
80
81 def launchNoDaemon(config):
82 os.chdir(config['basedir'])
83 sys.path.insert(0, os.path.abspath(config['basedir']))
84
85 argv = ["twistd",
86 "--no_save",
87 '--nodaemon',
88 "--logfile=twistd.log", # windows doesn't use the same default
89 "--python=buildbot.tac"]
90 sys.argv = argv
91
92 # this is copied from bin/twistd. twisted-2.0.0 through 2.4.0 use
93 # _twistw.run . Twisted-2.5.0 and later use twistd.run, even for
94 # windows.
95 from twisted.scripts import twistd
96 twistd.run()
97
98
99 def launch(config):
100 os.chdir(config['basedir'])
101 sys.path.insert(0, os.path.abspath(config['basedir']))
102
103 # see if we can launch the application without actually having to
104 # spawn twistd, since spawning processes correctly is a real hassle
105 # on windows.
106 argv = [sys.executable,
107 "-c",
108 # this is copied from bin/twistd. twisted-2.0.0 through 2.4.0 use
109 # _twistw.run . Twisted-2.5.0 and later use twistd.run, even for
110 # windows.
111 "from twisted.scripts import twistd; twistd.run()",
112 "--no_save",
113 "--logfile=twistd.log", # windows doesn't use the same default
114 "--python=buildbot.tac"]
115
116 # ProcessProtocol just ignores all output
117 proc = reactor.spawnProcess(
118 protocol.ProcessProtocol(), sys.executable, argv, env=os.environ)
119
120 if platformType == "win32":
121 with open("twistd.pid", "w") as pidfile:
122 pidfile.write("{0}".format(proc.pid))
123
124
125 def start(config):
126 if not base.isBuildmasterDir(config['basedir']):
127 return 1
128
129 if config['nodaemon']:
130 launchNoDaemon(config)
131 return 0
132
133 launch(config)
134
135 # We don't have tail on windows
136 if platformType == "win32" or config['quiet']:
137 return 0
138
139 # this is the parent
140 rc = Follower().follow(config['basedir'])
141 return rc
142
```
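Both `launch()` helpers above hand a fixed argv to `twistd`, which writes `twistd.pid` by default. Twisted accepts an empty `--pidfile=` value to mean "do not write a pid file", which is usually what a non-forking service manager (systemd, supervisord) wants. A minimal sketch of such an argv, illustrative only and not the repository's code:

```python
# Sketch: a twistd argv that skips the PID file on platforms where twistd
# actually writes one.  "--pidfile=" with an empty value disables the file.
from twisted.python.runtime import platformType

argv = ["twistd",
        "--no_save",
        "--nodaemon",
        "--logfile=twistd.log",
        "--python=buildbot.tac"]
if platformType != "win32":   # twistd does not manage a pidfile on Windows
    argv.append("--pidfile=")
```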
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/master/buildbot/scripts/start.py b/master/buildbot/scripts/start.py
--- a/master/buildbot/scripts/start.py
+++ b/master/buildbot/scripts/start.py
@@ -84,9 +84,14 @@
argv = ["twistd",
"--no_save",
- '--nodaemon',
+ "--nodaemon",
"--logfile=twistd.log", # windows doesn't use the same default
"--python=buildbot.tac"]
+
+ if platformType != 'win32':
+ # windows doesn't use pidfile option.
+ argv.extend(["--pidfile="])
+
sys.argv = argv
# this is copied from bin/twistd. twisted-2.0.0 through 2.4.0 use
diff --git a/worker/buildbot_worker/scripts/start.py b/worker/buildbot_worker/scripts/start.py
--- a/worker/buildbot_worker/scripts/start.py
+++ b/worker/buildbot_worker/scripts/start.py
@@ -131,7 +131,11 @@
"--logfile=twistd.log", # windows doesn't use the same default
"--python=buildbot.tac"]
if nodaemon:
- argv.extend(['--nodaemon'])
+ argv.extend(["--nodaemon"])
+ if platformType != 'win32':
+ # windows doesn't use pidfile option.
+ argv.extend(["--pidfile="])
+
sys.argv = argv
# this is copied from bin/twistd. twisted-2.0.0 through 2.4.0 use
| {"golden_diff": "diff --git a/master/buildbot/scripts/start.py b/master/buildbot/scripts/start.py\n--- a/master/buildbot/scripts/start.py\n+++ b/master/buildbot/scripts/start.py\n@@ -84,9 +84,14 @@\n \n argv = [\"twistd\",\n \"--no_save\",\n- '--nodaemon',\n+ \"--nodaemon\",\n \"--logfile=twistd.log\", # windows doesn't use the same default\n \"--python=buildbot.tac\"]\n+\n+ if platformType != 'win32':\n+ # windows doesn't use pidfile option.\n+ argv.extend([\"--pidfile=\"])\n+\n sys.argv = argv\n \n # this is copied from bin/twistd. twisted-2.0.0 through 2.4.0 use\ndiff --git a/worker/buildbot_worker/scripts/start.py b/worker/buildbot_worker/scripts/start.py\n--- a/worker/buildbot_worker/scripts/start.py\n+++ b/worker/buildbot_worker/scripts/start.py\n@@ -131,7 +131,11 @@\n \"--logfile=twistd.log\", # windows doesn't use the same default\n \"--python=buildbot.tac\"]\n if nodaemon:\n- argv.extend(['--nodaemon'])\n+ argv.extend([\"--nodaemon\"])\n+ if platformType != 'win32':\n+ # windows doesn't use pidfile option.\n+ argv.extend([\"--pidfile=\"])\n+\n sys.argv = argv\n \n # this is copied from bin/twistd. twisted-2.0.0 through 2.4.0 use\n", "issue": "Running with --nodaemon and PID files\nThis ticket is a migrated Trac ticket [3608](http://trac.buildbot.net/ticket/3608)\n\nPeople contributed to the original ticket: @unknown_contributor\nTicket created on: `Sep 12 2016`\nTicket last modified on: `Sep 12 2016`\n\n---\n\nBoth the master and the worker still use PID files while running with --nodaemon. I've had issues with workers running with --nodaemon not starting because of an old, stale PID file, corresponding to an actual process belonging to another user.\n\nThis can be thought to be inconsistent, and harms the writing ofreally simple non forking configuration files to manage the process through an external service manager (systemd, supervisord etc.).\n\nIt seems that the cousin ``twistd`` executable has a --pidfile argument, that can be used to mean \"no pid file\". This is explained notably in https://twistedmatrix.com/documents/current/core/howto/systemd.html.\n\nWe could expose that, or we could decide that --nodaemon implies no pid file.\n\nI believe this is true in Eight and Nine, and that a fix could be easily backported\n(marking this as minor, because it's easy to circumvent by putting appropriate rms in unit files or other confs)\n\n\n---\n\n\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\n\nfrom buildbot_worker.scripts import base\nfrom buildbot_worker.util import rewrap\n\n\nclass Follower(object):\n\n def follow(self):\n from twisted.internet import reactor\n from buildbot_worker.scripts.logwatcher import LogWatcher\n self.rc = 0\n print(\"Following twistd.log until startup finished..\")\n lw = LogWatcher(\"twistd.log\")\n d = lw.start()\n d.addCallbacks(self._success, self._failure)\n reactor.run()\n return self.rc\n\n def _success(self, processtype):\n from twisted.internet import reactor\n print(\"The %s appears to have (re)started correctly.\" % processtype)\n self.rc = 0\n reactor.stop()\n\n def _failure(self, why):\n from twisted.internet import reactor\n from buildbot_worker.scripts.logwatcher import WorkerTimeoutError\n if why.check(WorkerTimeoutError):\n print(rewrap(\"\"\"\\\n The worker took more than 10 seconds to start and/or connect\n to the buildmaster, so we were unable to confirm that it\n started and connected correctly.\n Please 'tail twistd.log' and look for a line that says\n 'message from master: attached' to verify correct startup.\n If you see a bunch of messages like 'will retry in 6 seconds',\n your worker might not have the correct hostname or portnumber\n for the buildmaster, or the buildmaster might not be running.\n If you see messages like\n 'Failure: twisted.cred.error.UnauthorizedLogin'\n then your worker might be using the wrong botname or password.\n Please correct these problems and then restart the worker.\n \"\"\"))\n else:\n print(rewrap(\"\"\"\\\n Unable to confirm that the worker started correctly.\n You may need to stop it, fix the config file, and restart.\n \"\"\"))\n print(why)\n self.rc = 1\n reactor.stop()\n\n\ndef startCommand(config):\n basedir = config['basedir']\n if not base.isWorkerDir(basedir):\n return 1\n\n return startWorker(basedir, config['quiet'], config['nodaemon'])\n\n\ndef startWorker(basedir, quiet, nodaemon):\n \"\"\"\n Start worker process.\n\n Fork and start twisted application described in basedir buildbot.tac file.\n Print it's log messages to stdout for a while and try to figure out if\n start was successful.\n\n If quiet or nodaemon parameters are True, or we are running on a win32\n system, will not fork and log will not be printed to stdout.\n\n @param basedir: worker's basedir path\n @param quiet: don't display startup log messages\n @param nodaemon: don't daemonize (stay in foreground)\n @return: 0 if worker was successfully started,\n 1 if we are not sure that worker started successfully\n \"\"\"\n\n os.chdir(basedir)\n if quiet or nodaemon:\n return launch(nodaemon)\n\n # we probably can't do this os.fork under windows\n from twisted.python.runtime import platformType\n if platformType == \"win32\":\n return launch(nodaemon)\n\n # fork a child to launch the daemon, while the parent process tails the\n # logfile\n if os.fork():\n # this is the parent\n rc = Follower().follow()\n return rc\n # this is the child: give the logfile-watching parent a chance to start\n # watching it before we start the daemon\n time.sleep(0.2)\n 
launch(nodaemon)\n\n\ndef launch(nodaemon):\n sys.path.insert(0, os.path.abspath(os.getcwd()))\n\n # see if we can launch the application without actually having to\n # spawn twistd, since spawning processes correctly is a real hassle\n # on windows.\n from twisted.python.runtime import platformType\n argv = [\"twistd\",\n \"--no_save\",\n \"--logfile=twistd.log\", # windows doesn't use the same default\n \"--python=buildbot.tac\"]\n if nodaemon:\n argv.extend(['--nodaemon'])\n sys.argv = argv\n\n # this is copied from bin/twistd. twisted-2.0.0 through 2.4.0 use\n # _twistw.run . Twisted-2.5.0 and later use twistd.run, even for\n # windows.\n from twisted import __version__\n major, minor, ignored = __version__.split(\".\", 2)\n major = int(major)\n minor = int(minor)\n if (platformType == \"win32\" and (major == 2 and minor < 5)):\n from twisted.scripts import _twistw\n run = _twistw.run\n else:\n from twisted.scripts import twistd\n run = twistd.run\n run()\n", "path": "worker/buildbot_worker/scripts/start.py"}, {"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nfrom twisted.internet import protocol\nfrom twisted.internet import reactor\nfrom twisted.python.runtime import platformType\n\nfrom buildbot.scripts import base\nfrom buildbot.scripts.logwatcher import BuildmasterStartupError\nfrom buildbot.scripts.logwatcher import BuildmasterTimeoutError\nfrom buildbot.scripts.logwatcher import LogWatcher\nfrom buildbot.scripts.logwatcher import ReconfigError\nfrom buildbot.util import rewrap\n\n\nclass Follower:\n\n def follow(self, basedir):\n self.rc = 0\n print(\"Following twistd.log until startup finished..\")\n lw = LogWatcher(os.path.join(basedir, \"twistd.log\"))\n d = lw.start()\n d.addCallbacks(self._success, self._failure)\n reactor.run()\n return self.rc\n\n def _success(self, _):\n print(\"The buildmaster appears to have (re)started correctly.\")\n self.rc = 0\n reactor.stop()\n\n def _failure(self, why):\n if why.check(BuildmasterTimeoutError):\n print(rewrap(\"\"\"\\\n The buildmaster took more than 10 seconds to start, so we were\n unable to confirm that it started correctly.\n Please 'tail twistd.log' and look for a line that says\n 'BuildMaster is running' to verify correct startup.\n \"\"\"))\n elif why.check(ReconfigError):\n print(rewrap(\"\"\"\\\n The buildmaster appears to have encountered an error in the\n master.cfg config file during startup.\n Please inspect and fix master.cfg, then restart the\n buildmaster.\n \"\"\"))\n elif why.check(BuildmasterStartupError):\n print(rewrap(\"\"\"\\\n The buildmaster startup failed. 
Please see 'twistd.log' for\n possible reason.\n \"\"\"))\n else:\n print(rewrap(\"\"\"\\\n Unable to confirm that the buildmaster started correctly.\n You may need to stop it, fix the config file, and restart.\n \"\"\"))\n print(why)\n self.rc = 1\n reactor.stop()\n\n\ndef launchNoDaemon(config):\n os.chdir(config['basedir'])\n sys.path.insert(0, os.path.abspath(config['basedir']))\n\n argv = [\"twistd\",\n \"--no_save\",\n '--nodaemon',\n \"--logfile=twistd.log\", # windows doesn't use the same default\n \"--python=buildbot.tac\"]\n sys.argv = argv\n\n # this is copied from bin/twistd. twisted-2.0.0 through 2.4.0 use\n # _twistw.run . Twisted-2.5.0 and later use twistd.run, even for\n # windows.\n from twisted.scripts import twistd\n twistd.run()\n\n\ndef launch(config):\n os.chdir(config['basedir'])\n sys.path.insert(0, os.path.abspath(config['basedir']))\n\n # see if we can launch the application without actually having to\n # spawn twistd, since spawning processes correctly is a real hassle\n # on windows.\n argv = [sys.executable,\n \"-c\",\n # this is copied from bin/twistd. twisted-2.0.0 through 2.4.0 use\n # _twistw.run . Twisted-2.5.0 and later use twistd.run, even for\n # windows.\n \"from twisted.scripts import twistd; twistd.run()\",\n \"--no_save\",\n \"--logfile=twistd.log\", # windows doesn't use the same default\n \"--python=buildbot.tac\"]\n\n # ProcessProtocol just ignores all output\n proc = reactor.spawnProcess(\n protocol.ProcessProtocol(), sys.executable, argv, env=os.environ)\n\n if platformType == \"win32\":\n with open(\"twistd.pid\", \"w\") as pidfile:\n pidfile.write(\"{0}\".format(proc.pid))\n\n\ndef start(config):\n if not base.isBuildmasterDir(config['basedir']):\n return 1\n\n if config['nodaemon']:\n launchNoDaemon(config)\n return 0\n\n launch(config)\n\n # We don't have tail on windows\n if platformType == \"win32\" or config['quiet']:\n return 0\n\n # this is the parent\n rc = Follower().follow(config['basedir'])\n return rc\n", "path": "master/buildbot/scripts/start.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\n\nfrom buildbot_worker.scripts import base\nfrom buildbot_worker.util import rewrap\n\n\nclass Follower(object):\n\n def follow(self):\n from twisted.internet import reactor\n from buildbot_worker.scripts.logwatcher import LogWatcher\n self.rc = 0\n print(\"Following twistd.log until startup finished..\")\n lw = LogWatcher(\"twistd.log\")\n d = lw.start()\n d.addCallbacks(self._success, self._failure)\n reactor.run()\n return self.rc\n\n def _success(self, processtype):\n from twisted.internet import reactor\n print(\"The %s appears to have (re)started correctly.\" % processtype)\n self.rc = 0\n reactor.stop()\n\n def _failure(self, why):\n from twisted.internet import reactor\n from buildbot_worker.scripts.logwatcher import WorkerTimeoutError\n if why.check(WorkerTimeoutError):\n print(rewrap(\"\"\"\\\n The worker took more than 10 seconds to start and/or connect\n to the buildmaster, so we were unable to confirm that it\n started and connected correctly.\n Please 'tail twistd.log' and look for a line that says\n 'message from master: attached' to verify correct startup.\n If you see a bunch of messages like 'will retry in 6 seconds',\n your worker might not have the correct hostname or portnumber\n for the buildmaster, or the buildmaster might not be running.\n If you see messages like\n 'Failure: twisted.cred.error.UnauthorizedLogin'\n then your worker might be using the wrong botname or password.\n Please correct these problems and then restart the worker.\n \"\"\"))\n else:\n print(rewrap(\"\"\"\\\n Unable to confirm that the worker started correctly.\n You may need to stop it, fix the config file, and restart.\n \"\"\"))\n print(why)\n self.rc = 1\n reactor.stop()\n\n\ndef startCommand(config):\n basedir = config['basedir']\n if not base.isWorkerDir(basedir):\n return 1\n\n return startWorker(basedir, config['quiet'], config['nodaemon'])\n\n\ndef startWorker(basedir, quiet, nodaemon):\n \"\"\"\n Start worker process.\n\n Fork and start twisted application described in basedir buildbot.tac file.\n Print it's log messages to stdout for a while and try to figure out if\n start was successful.\n\n If quiet or nodaemon parameters are True, or we are running on a win32\n system, will not fork and log will not be printed to stdout.\n\n @param basedir: worker's basedir path\n @param quiet: don't display startup log messages\n @param nodaemon: don't daemonize (stay in foreground)\n @return: 0 if worker was successfully started,\n 1 if we are not sure that worker started successfully\n \"\"\"\n\n os.chdir(basedir)\n if quiet or nodaemon:\n return launch(nodaemon)\n\n # we probably can't do this os.fork under windows\n from twisted.python.runtime import platformType\n if platformType == \"win32\":\n return launch(nodaemon)\n\n # fork a child to launch the daemon, while the parent process tails the\n # logfile\n if os.fork():\n # this is the parent\n rc = Follower().follow()\n return rc\n # this is the child: give the logfile-watching parent a chance to start\n # watching it before we start the daemon\n time.sleep(0.2)\n 
launch(nodaemon)\n\n\ndef launch(nodaemon):\n sys.path.insert(0, os.path.abspath(os.getcwd()))\n\n # see if we can launch the application without actually having to\n # spawn twistd, since spawning processes correctly is a real hassle\n # on windows.\n from twisted.python.runtime import platformType\n argv = [\"twistd\",\n \"--no_save\",\n \"--logfile=twistd.log\", # windows doesn't use the same default\n \"--python=buildbot.tac\"]\n if nodaemon:\n argv.extend([\"--nodaemon\"])\n if platformType != 'win32':\n # windows doesn't use pidfile option.\n argv.extend([\"--pidfile=\"])\n\n sys.argv = argv\n\n # this is copied from bin/twistd. twisted-2.0.0 through 2.4.0 use\n # _twistw.run . Twisted-2.5.0 and later use twistd.run, even for\n # windows.\n from twisted import __version__\n major, minor, ignored = __version__.split(\".\", 2)\n major = int(major)\n minor = int(minor)\n if (platformType == \"win32\" and (major == 2 and minor < 5)):\n from twisted.scripts import _twistw\n run = _twistw.run\n else:\n from twisted.scripts import twistd\n run = twistd.run\n run()\n", "path": "worker/buildbot_worker/scripts/start.py"}, {"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nfrom twisted.internet import protocol\nfrom twisted.internet import reactor\nfrom twisted.python.runtime import platformType\n\nfrom buildbot.scripts import base\nfrom buildbot.scripts.logwatcher import BuildmasterStartupError\nfrom buildbot.scripts.logwatcher import BuildmasterTimeoutError\nfrom buildbot.scripts.logwatcher import LogWatcher\nfrom buildbot.scripts.logwatcher import ReconfigError\nfrom buildbot.util import rewrap\n\n\nclass Follower:\n\n def follow(self, basedir):\n self.rc = 0\n print(\"Following twistd.log until startup finished..\")\n lw = LogWatcher(os.path.join(basedir, \"twistd.log\"))\n d = lw.start()\n d.addCallbacks(self._success, self._failure)\n reactor.run()\n return self.rc\n\n def _success(self, _):\n print(\"The buildmaster appears to have (re)started correctly.\")\n self.rc = 0\n reactor.stop()\n\n def _failure(self, why):\n if why.check(BuildmasterTimeoutError):\n print(rewrap(\"\"\"\\\n The buildmaster took more than 10 seconds to start, so we were\n unable to confirm that it started correctly.\n Please 'tail twistd.log' and look for a line that says\n 'BuildMaster is running' to verify correct startup.\n \"\"\"))\n elif why.check(ReconfigError):\n print(rewrap(\"\"\"\\\n The buildmaster appears to have encountered an error in the\n master.cfg config file during startup.\n Please inspect and fix master.cfg, then restart the\n buildmaster.\n \"\"\"))\n elif why.check(BuildmasterStartupError):\n print(rewrap(\"\"\"\\\n The buildmaster startup failed. 
Please see 'twistd.log' for\n possible reason.\n \"\"\"))\n else:\n print(rewrap(\"\"\"\\\n Unable to confirm that the buildmaster started correctly.\n You may need to stop it, fix the config file, and restart.\n \"\"\"))\n print(why)\n self.rc = 1\n reactor.stop()\n\n\ndef launchNoDaemon(config):\n os.chdir(config['basedir'])\n sys.path.insert(0, os.path.abspath(config['basedir']))\n\n argv = [\"twistd\",\n \"--no_save\",\n \"--nodaemon\",\n \"--logfile=twistd.log\", # windows doesn't use the same default\n \"--python=buildbot.tac\"]\n\n if platformType != 'win32':\n # windows doesn't use pidfile option.\n argv.extend([\"--pidfile=\"])\n\n sys.argv = argv\n\n # this is copied from bin/twistd. twisted-2.0.0 through 2.4.0 use\n # _twistw.run . Twisted-2.5.0 and later use twistd.run, even for\n # windows.\n from twisted.scripts import twistd\n twistd.run()\n\n\ndef launch(config):\n os.chdir(config['basedir'])\n sys.path.insert(0, os.path.abspath(config['basedir']))\n\n # see if we can launch the application without actually having to\n # spawn twistd, since spawning processes correctly is a real hassle\n # on windows.\n argv = [sys.executable,\n \"-c\",\n # this is copied from bin/twistd. twisted-2.0.0 through 2.4.0 use\n # _twistw.run . Twisted-2.5.0 and later use twistd.run, even for\n # windows.\n \"from twisted.scripts import twistd; twistd.run()\",\n \"--no_save\",\n \"--logfile=twistd.log\", # windows doesn't use the same default\n \"--python=buildbot.tac\"]\n\n # ProcessProtocol just ignores all output\n proc = reactor.spawnProcess(\n protocol.ProcessProtocol(), sys.executable, argv, env=os.environ)\n\n if platformType == \"win32\":\n with open(\"twistd.pid\", \"w\") as pidfile:\n pidfile.write(\"{0}\".format(proc.pid))\n\n\ndef start(config):\n if not base.isBuildmasterDir(config['basedir']):\n return 1\n\n if config['nodaemon']:\n launchNoDaemon(config)\n return 0\n\n launch(config)\n\n # We don't have tail on windows\n if platformType == \"win32\" or config['quiet']:\n return 0\n\n # this is the parent\n rc = Follower().follow(config['basedir'])\n return rc\n", "path": "master/buildbot/scripts/start.py"}]} | 3,660 | 353 |
gh_patches_debug_31263 | rasdani/github-patches | git_diff | svthalia__concrexit-1749 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
FieldError: Cannot resolve keyword 'pizza_event' into field. Choices are: food_event, food_event_id, id, memb...
Sentry Issue: [CONCREXIT-6G](https://sentry.io/organizations/thalia/issues/2468308255/?referrer=github_integration)
```
FieldError: Cannot resolve keyword 'pizza_event' into field. Choices are: food_event, food_event_id, id, member, member_id, name, payment, payment_id, product, product_id
(9 additional frame(s) were not displayed)
...
File "django/db/models/sql/query.py", line 1391, in add_q
clause, _ = self._add_q(q_object, self.used_aliases)
File "django/db/models/sql/query.py", line 1410, in _add_q
child_clause, needed_inner = self.build_filter(
File "django/db/models/sql/query.py", line 1284, in build_filter
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
File "django/db/models/sql/query.py", line 1110, in solve_lookup_type
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
File "django/db/models/sql/query.py", line 1537, in names_to_path
raise FieldError("Cannot resolve keyword '%s' into field. "
```
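The traceback shows that the `FoodOrder` model no longer exposes a `pizza_event` relation; the valid lookup keywords Django lists include `food_event`. A minimal sketch of a lookup using the renamed keyword follows; the import path and helper name are assumptions for illustration only:

```python
# Sketch: filter on food_event, the relation name Django reports as valid.
from pizzas.models import FoodEvent, FoodOrder  # import path assumed

def current_event_order_count(product):
    event = FoodEvent.current()
    if not event:
        return 0
    return FoodOrder.objects.filter(product=product, food_event=event).count()
```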
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/pizzas/services.py`
Content:
```
1 from events.services import is_organiser
2 from .models import Product, FoodOrder, FoodEvent
3
4
5 def gen_stats_pizza_orders():
6 """Generate statistics about number of orders per product.
7
8 :return: Dict with key, value being resp. name, order count of a product.
9 """
10 total = {}
11
12 for product in Product.objects.all():
13 total.update(
14 {product.name: FoodOrder.objects.filter(product=product).count(),}
15 )
16
17 return {
18 i[0]: i[1]
19 for i in sorted(total.items(), key=lambda x: x[1], reverse=True)[:5]
20 if i[1] > 0
21 }
22
23
24 def gen_stats_current_pizza_orders():
25 """Generate statistics about number of orders per product of the active pizza event.
26
27 :return: Dict with key, value being resp. name, order count of a product.
28 """
29 total = {}
30
31 current_pizza_event = FoodEvent.current()
32 if not current_pizza_event:
33 return None
34
35 for product in Product.objects.filter():
36 total.update(
37 {
38 product.name: FoodOrder.objects.filter(
39 product=product, pizza_event=current_pizza_event,
40 ).count(),
41 }
42 )
43
44 return {
45 i[0]: i[1]
46 for i in sorted(total.items(), key=lambda x: x[1], reverse=True)[:5]
47 if i[1] > 0
48 }
49
50
51 def can_change_order(member, food_event):
52 """Determine if a certain member can edit orders of an event.
53
54 :param member: Member who wants to change and order
55 :param food_event: The event for which we want to change an order
56 :return: True if we can change an order else False
57 """
58 return (
59 food_event
60 and member.has_perm("pizzas.change_foodorder")
61 and is_organiser(member, food_event.event)
62 )
63
```
Path: `website/pizzas/views.py`
Content:
```
1 """Views provided by the pizzas package."""
2 from django.contrib import messages
3 from django.contrib.auth.decorators import login_required
4 from django.http import Http404
5 from django.shortcuts import get_object_or_404, render, redirect
6 from django.utils.translation import gettext_lazy as _
7 from django.views.decorators.http import require_http_methods
8
9 from .models import FoodOrder, FoodEvent, Product
10
11
12 @login_required
13 def index(request):
14 """Overview of user order for a pizza event."""
15 products = Product.available_products.order_by("name")
16 if not request.user.has_perm("pizzas.order_restricted_products"):
17 products = products.exclude(restricted=True)
18 event = FoodEvent.current()
19 try:
20 obj = FoodOrder.objects.get(pizza_event=event, member=request.member)
21 except FoodOrder.DoesNotExist:
22 obj = None
23 context = {"event": event, "products": products, "order": obj}
24 return render(request, "pizzas/index.html", context)
25
26
27 @require_http_methods(["POST"])
28 def cancel_order(request):
29 """View that cancels a user's order."""
30 if "order" in request.POST:
31 try:
32 order = get_object_or_404(FoodOrder, pk=int(request.POST["order"]))
33 if not order.can_be_changed:
34 messages.error(request, _("You can no longer cancel."))
35 elif order.member == request.member:
36 order.delete()
37 messages.success(request, _("Your order has been cancelled."))
38 except Http404:
39 messages.error(request, _("Your order could not be found."))
40 return redirect("pizzas:index")
41
42
43 @login_required
44 def place_order(request):
45 """View that shows the detail of the current order."""
46 event = FoodEvent.current()
47 if not event:
48 return redirect("pizzas:index")
49
50 try:
51 obj = FoodOrder.objects.get(pizza_event=event, member=request.member)
52 current_order_locked = not obj.can_be_changed
53 except FoodOrder.DoesNotExist:
54 obj = None
55 current_order_locked = False
56
57 if "product" in request.POST and not current_order_locked:
58 productset = Product.available_products.all()
59 if not request.user.has_perm("pizzas.order_restricted_products"):
60 productset = productset.exclude(restricted=True)
61 try:
62 product = productset.get(pk=int(request.POST["product"]))
63 except Product.DoesNotExist as e:
64 raise Http404("Pizza does not exist") from e
65 if not obj:
66 obj = FoodOrder(pizza_event=event, member=request.member)
67 obj.product = product
68 obj.save()
69 return redirect("pizzas:index")
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/pizzas/services.py b/website/pizzas/services.py
--- a/website/pizzas/services.py
+++ b/website/pizzas/services.py
@@ -36,7 +36,7 @@
total.update(
{
product.name: FoodOrder.objects.filter(
- product=product, pizza_event=current_pizza_event,
+ product=product, food_event=current_pizza_event,
).count(),
}
)
diff --git a/website/pizzas/views.py b/website/pizzas/views.py
--- a/website/pizzas/views.py
+++ b/website/pizzas/views.py
@@ -17,7 +17,7 @@
products = products.exclude(restricted=True)
event = FoodEvent.current()
try:
- obj = FoodOrder.objects.get(pizza_event=event, member=request.member)
+ obj = FoodOrder.objects.get(food_event=event, member=request.member)
except FoodOrder.DoesNotExist:
obj = None
context = {"event": event, "products": products, "order": obj}
@@ -48,7 +48,7 @@
return redirect("pizzas:index")
try:
- obj = FoodOrder.objects.get(pizza_event=event, member=request.member)
+ obj = FoodOrder.objects.get(food_event=event, member=request.member)
current_order_locked = not obj.can_be_changed
except FoodOrder.DoesNotExist:
obj = None
@@ -63,7 +63,7 @@
except Product.DoesNotExist as e:
raise Http404("Pizza does not exist") from e
if not obj:
- obj = FoodOrder(pizza_event=event, member=request.member)
+ obj = FoodOrder(food_event=event, member=request.member)
obj.product = product
obj.save()
return redirect("pizzas:index")
| {"golden_diff": "diff --git a/website/pizzas/services.py b/website/pizzas/services.py\n--- a/website/pizzas/services.py\n+++ b/website/pizzas/services.py\n@@ -36,7 +36,7 @@\n total.update(\n {\n product.name: FoodOrder.objects.filter(\n- product=product, pizza_event=current_pizza_event,\n+ product=product, food_event=current_pizza_event,\n ).count(),\n }\n )\ndiff --git a/website/pizzas/views.py b/website/pizzas/views.py\n--- a/website/pizzas/views.py\n+++ b/website/pizzas/views.py\n@@ -17,7 +17,7 @@\n products = products.exclude(restricted=True)\n event = FoodEvent.current()\n try:\n- obj = FoodOrder.objects.get(pizza_event=event, member=request.member)\n+ obj = FoodOrder.objects.get(food_event=event, member=request.member)\n except FoodOrder.DoesNotExist:\n obj = None\n context = {\"event\": event, \"products\": products, \"order\": obj}\n@@ -48,7 +48,7 @@\n return redirect(\"pizzas:index\")\n \n try:\n- obj = FoodOrder.objects.get(pizza_event=event, member=request.member)\n+ obj = FoodOrder.objects.get(food_event=event, member=request.member)\n current_order_locked = not obj.can_be_changed\n except FoodOrder.DoesNotExist:\n obj = None\n@@ -63,7 +63,7 @@\n except Product.DoesNotExist as e:\n raise Http404(\"Pizza does not exist\") from e\n if not obj:\n- obj = FoodOrder(pizza_event=event, member=request.member)\n+ obj = FoodOrder(food_event=event, member=request.member)\n obj.product = product\n obj.save()\n return redirect(\"pizzas:index\")\n", "issue": "FieldError: Cannot resolve keyword 'pizza_event' into field. Choices are: food_event, food_event_id, id, memb...\nSentry Issue: [CONCREXIT-6G](https://sentry.io/organizations/thalia/issues/2468308255/?referrer=github_integration)\n\n```\nFieldError: Cannot resolve keyword 'pizza_event' into field. Choices are: food_event, food_event_id, id, member, member_id, name, payment, payment_id, product, product_id\n(9 additional frame(s) were not displayed)\n...\n File \"django/db/models/sql/query.py\", line 1391, in add_q\n clause, _ = self._add_q(q_object, self.used_aliases)\n File \"django/db/models/sql/query.py\", line 1410, in _add_q\n child_clause, needed_inner = self.build_filter(\n File \"django/db/models/sql/query.py\", line 1284, in build_filter\n lookups, parts, reffed_expression = self.solve_lookup_type(arg)\n File \"django/db/models/sql/query.py\", line 1110, in solve_lookup_type\n _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())\n File \"django/db/models/sql/query.py\", line 1537, in names_to_path\n raise FieldError(\"Cannot resolve keyword '%s' into field. \"\n```\n", "before_files": [{"content": "from events.services import is_organiser\nfrom .models import Product, FoodOrder, FoodEvent\n\n\ndef gen_stats_pizza_orders():\n \"\"\"Generate statistics about number of orders per product.\n\n :return: Dict with key, value being resp. name, order count of a product.\n \"\"\"\n total = {}\n\n for product in Product.objects.all():\n total.update(\n {product.name: FoodOrder.objects.filter(product=product).count(),}\n )\n\n return {\n i[0]: i[1]\n for i in sorted(total.items(), key=lambda x: x[1], reverse=True)[:5]\n if i[1] > 0\n }\n\n\ndef gen_stats_current_pizza_orders():\n \"\"\"Generate statistics about number of orders per product of the active pizza event.\n\n :return: Dict with key, value being resp. 
name, order count of a product.\n \"\"\"\n total = {}\n\n current_pizza_event = FoodEvent.current()\n if not current_pizza_event:\n return None\n\n for product in Product.objects.filter():\n total.update(\n {\n product.name: FoodOrder.objects.filter(\n product=product, pizza_event=current_pizza_event,\n ).count(),\n }\n )\n\n return {\n i[0]: i[1]\n for i in sorted(total.items(), key=lambda x: x[1], reverse=True)[:5]\n if i[1] > 0\n }\n\n\ndef can_change_order(member, food_event):\n \"\"\"Determine if a certain member can edit orders of an event.\n\n :param member: Member who wants to change and order\n :param food_event: The event for which we want to change an order\n :return: True if we can change an order else False\n \"\"\"\n return (\n food_event\n and member.has_perm(\"pizzas.change_foodorder\")\n and is_organiser(member, food_event.event)\n )\n", "path": "website/pizzas/services.py"}, {"content": "\"\"\"Views provided by the pizzas package.\"\"\"\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.decorators.http import require_http_methods\n\nfrom .models import FoodOrder, FoodEvent, Product\n\n\n@login_required\ndef index(request):\n \"\"\"Overview of user order for a pizza event.\"\"\"\n products = Product.available_products.order_by(\"name\")\n if not request.user.has_perm(\"pizzas.order_restricted_products\"):\n products = products.exclude(restricted=True)\n event = FoodEvent.current()\n try:\n obj = FoodOrder.objects.get(pizza_event=event, member=request.member)\n except FoodOrder.DoesNotExist:\n obj = None\n context = {\"event\": event, \"products\": products, \"order\": obj}\n return render(request, \"pizzas/index.html\", context)\n\n\n@require_http_methods([\"POST\"])\ndef cancel_order(request):\n \"\"\"View that cancels a user's order.\"\"\"\n if \"order\" in request.POST:\n try:\n order = get_object_or_404(FoodOrder, pk=int(request.POST[\"order\"]))\n if not order.can_be_changed:\n messages.error(request, _(\"You can no longer cancel.\"))\n elif order.member == request.member:\n order.delete()\n messages.success(request, _(\"Your order has been cancelled.\"))\n except Http404:\n messages.error(request, _(\"Your order could not be found.\"))\n return redirect(\"pizzas:index\")\n\n\n@login_required\ndef place_order(request):\n \"\"\"View that shows the detail of the current order.\"\"\"\n event = FoodEvent.current()\n if not event:\n return redirect(\"pizzas:index\")\n\n try:\n obj = FoodOrder.objects.get(pizza_event=event, member=request.member)\n current_order_locked = not obj.can_be_changed\n except FoodOrder.DoesNotExist:\n obj = None\n current_order_locked = False\n\n if \"product\" in request.POST and not current_order_locked:\n productset = Product.available_products.all()\n if not request.user.has_perm(\"pizzas.order_restricted_products\"):\n productset = productset.exclude(restricted=True)\n try:\n product = productset.get(pk=int(request.POST[\"product\"]))\n except Product.DoesNotExist as e:\n raise Http404(\"Pizza does not exist\") from e\n if not obj:\n obj = FoodOrder(pizza_event=event, member=request.member)\n obj.product = product\n obj.save()\n return redirect(\"pizzas:index\")\n", "path": "website/pizzas/views.py"}], "after_files": [{"content": "from events.services import is_organiser\nfrom .models import Product, FoodOrder, 
FoodEvent\n\n\ndef gen_stats_pizza_orders():\n \"\"\"Generate statistics about number of orders per product.\n\n :return: Dict with key, value being resp. name, order count of a product.\n \"\"\"\n total = {}\n\n for product in Product.objects.all():\n total.update(\n {product.name: FoodOrder.objects.filter(product=product).count(),}\n )\n\n return {\n i[0]: i[1]\n for i in sorted(total.items(), key=lambda x: x[1], reverse=True)[:5]\n if i[1] > 0\n }\n\n\ndef gen_stats_current_pizza_orders():\n \"\"\"Generate statistics about number of orders per product of the active pizza event.\n\n :return: Dict with key, value being resp. name, order count of a product.\n \"\"\"\n total = {}\n\n current_pizza_event = FoodEvent.current()\n if not current_pizza_event:\n return None\n\n for product in Product.objects.filter():\n total.update(\n {\n product.name: FoodOrder.objects.filter(\n product=product, food_event=current_pizza_event,\n ).count(),\n }\n )\n\n return {\n i[0]: i[1]\n for i in sorted(total.items(), key=lambda x: x[1], reverse=True)[:5]\n if i[1] > 0\n }\n\n\ndef can_change_order(member, food_event):\n \"\"\"Determine if a certain member can edit orders of an event.\n\n :param member: Member who wants to change and order\n :param food_event: The event for which we want to change an order\n :return: True if we can change an order else False\n \"\"\"\n return (\n food_event\n and member.has_perm(\"pizzas.change_foodorder\")\n and is_organiser(member, food_event.event)\n )\n", "path": "website/pizzas/services.py"}, {"content": "\"\"\"Views provided by the pizzas package.\"\"\"\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.decorators.http import require_http_methods\n\nfrom .models import FoodOrder, FoodEvent, Product\n\n\n@login_required\ndef index(request):\n \"\"\"Overview of user order for a pizza event.\"\"\"\n products = Product.available_products.order_by(\"name\")\n if not request.user.has_perm(\"pizzas.order_restricted_products\"):\n products = products.exclude(restricted=True)\n event = FoodEvent.current()\n try:\n obj = FoodOrder.objects.get(food_event=event, member=request.member)\n except FoodOrder.DoesNotExist:\n obj = None\n context = {\"event\": event, \"products\": products, \"order\": obj}\n return render(request, \"pizzas/index.html\", context)\n\n\n@require_http_methods([\"POST\"])\ndef cancel_order(request):\n \"\"\"View that cancels a user's order.\"\"\"\n if \"order\" in request.POST:\n try:\n order = get_object_or_404(FoodOrder, pk=int(request.POST[\"order\"]))\n if not order.can_be_changed:\n messages.error(request, _(\"You can no longer cancel.\"))\n elif order.member == request.member:\n order.delete()\n messages.success(request, _(\"Your order has been cancelled.\"))\n except Http404:\n messages.error(request, _(\"Your order could not be found.\"))\n return redirect(\"pizzas:index\")\n\n\n@login_required\ndef place_order(request):\n \"\"\"View that shows the detail of the current order.\"\"\"\n event = FoodEvent.current()\n if not event:\n return redirect(\"pizzas:index\")\n\n try:\n obj = FoodOrder.objects.get(food_event=event, member=request.member)\n current_order_locked = not obj.can_be_changed\n except FoodOrder.DoesNotExist:\n obj = None\n current_order_locked = False\n\n if \"product\" in request.POST and not current_order_locked:\n 
productset = Product.available_products.all()\n if not request.user.has_perm(\"pizzas.order_restricted_products\"):\n productset = productset.exclude(restricted=True)\n try:\n product = productset.get(pk=int(request.POST[\"product\"]))\n except Product.DoesNotExist as e:\n raise Http404(\"Pizza does not exist\") from e\n if not obj:\n obj = FoodOrder(food_event=event, member=request.member)\n obj.product = product\n obj.save()\n return redirect(\"pizzas:index\")\n", "path": "website/pizzas/views.py"}]} | 1,807 | 391 |
gh_patches_debug_16658 | rasdani/github-patches | git_diff | ansible-collections__community.general-4056 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Emails sent by the `mail` callback plugin are missing the `Date` header.
### Summary
When the `mail` callback plugin successfully sends an email, the receiver cannot find it in his mailbox.
This is because mailboxes are usually sorted by origination date and this field is missing in the emails sent by this plugin.
According to [RFC 5322, the `Date` header is mandatory](https://datatracker.ietf.org/doc/html/rfc5322#section-3.6.1) but the `mail` callback plugin sends emails without it.
> The only required header fields are the origination date field and
the originator address field(s).
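For reference, the Python standard library can produce a compliant origination date directly; a short sketch (not the plugin's code) of what such a header looks like:

```python
# Sketch: email.utils.formatdate() yields an RFC 5322 date-time string,
# e.g. "Mon, 24 Jan 2022 10:32:05 -0000".
import email.utils

date_header = 'Date: %s' % email.utils.formatdate()
```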
### Issue Type
Bug Report
### Component Name
`community.general.mail` callback plugin
### Ansible Version
```console (paste below)
$ ansible --version
ansible [core 2.12.1]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/lenaic/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.10/site-packages/ansible
ansible collection location = /home/lenaic/.ansible/collections:/usr/share/ansible/collections
executable location = /bin/ansible
python version = 3.10.1 (main, Dec 18 2021, 23:53:45) [GCC 11.1.0]
jinja version = 3.0.3
libyaml = True
```
### Community.general Version
```console (paste below)
$ ansible-galaxy collection list community.general
# /usr/lib/python3.10/site-packages/ansible_collections
Collection Version
----------------- -------
community.general 4.3.0
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
CACHE_PLUGIN(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = jsonfile
CACHE_PLUGIN_CONNECTION(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = ~/.ansible/tmp/facts_cache
CACHE_PLUGIN_TIMEOUT(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = 86400
CALLBACKS_ENABLED(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = ['community.general.mail']
DEFAULT_FORCE_HANDLERS(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = True
DEFAULT_GATHERING(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = smart
DEFAULT_HOST_LIST(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = ['/home/lenaic/doc/devel/perso/lenaic_config/inventory-libvirt.yaml']
DEFAULT_VAULT_PASSWORD_FILE(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = /home/lenaic/doc/devel/perso/lenaic_config/.vault_password
INVENTORY_ENABLED(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = ['community.libvirt.libvirt']
RETRY_FILES_ENABLED(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = False
```
### OS / Environment
```console
$ cat /etc/os-release
NAME="Arch Linux"
PRETTY_NAME="Arch Linux"
ID=arch
BUILD_ID=rolling
ANSI_COLOR="38;2;23;147;209"
HOME_URL="https://archlinux.org/"
DOCUMENTATION_URL="https://wiki.archlinux.org/"
SUPPORT_URL="https://bbs.archlinux.org/"
BUG_REPORT_URL="https://bugs.archlinux.org/"
LOGO=archlinux-logo
```
### Steps to Reproduce
Enable the `community.general.mail` callback plugin with the following configuration options:
```ini
[defaults]
callbacks_enabled = community.general.mail
[callback_mail]
to = me <[email protected]>
sender = Ansible <[email protected]>
```
Then launch a playbook in which some tasks fail.
### Expected Results
The email containing the Ansible failures appears at the top of the receiver's mailbox, dated at the time the playbook execution finished.
### Actual Results
The received email is effectively invisible because it is sorted as if it had been received on 1 January 1970 at 00:00:00.
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/callback/mail.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright: (c) 2012, Dag Wieers <[email protected]>
4 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
5
6 from __future__ import (absolute_import, division, print_function)
7 __metaclass__ = type
8
9 DOCUMENTATION = '''
10 name: mail
11 type: notification
12 short_description: Sends failure events via email
13 description:
14 - This callback will report failures via email
15 author:
16 - Dag Wieers (@dagwieers)
17 requirements:
18 - whitelisting in configuration
19 options:
20 mta:
21 description: Mail Transfer Agent, server that accepts SMTP
22 env:
23 - name: SMTPHOST
24 ini:
25 - section: callback_mail
26 key: smtphost
27 default: localhost
28 mtaport:
29 description: Mail Transfer Agent Port, port at which server SMTP
30 ini:
31 - section: callback_mail
32 key: smtpport
33 default: 25
34 to:
35 description: Mail recipient
36 ini:
37 - section: callback_mail
38 key: to
39 default: root
40 sender:
41 description: Mail sender
42 ini:
43 - section: callback_mail
44 key: sender
45 cc:
46 description: CC'd recipient
47 ini:
48 - section: callback_mail
49 key: cc
50 bcc:
51 description: BCC'd recipient
52 ini:
53 - section: callback_mail
54 key: bcc
55 notes:
56 - "TODO: expand configuration options now that plugins can leverage Ansible's configuration"
57 '''
58
59 import json
60 import os
61 import re
62 import smtplib
63
64 from ansible.module_utils.six import string_types
65 from ansible.module_utils.common.text.converters import to_bytes
66 from ansible.parsing.ajson import AnsibleJSONEncoder
67 from ansible.plugins.callback import CallbackBase
68
69
70 class CallbackModule(CallbackBase):
71 ''' This Ansible callback plugin mails errors to interested parties. '''
72 CALLBACK_VERSION = 2.0
73 CALLBACK_TYPE = 'notification'
74 CALLBACK_NAME = 'community.general.mail'
75 CALLBACK_NEEDS_WHITELIST = True
76
77 def __init__(self, display=None):
78 super(CallbackModule, self).__init__(display=display)
79 self.sender = None
80 self.to = 'root'
81 self.smtphost = os.getenv('SMTPHOST', 'localhost')
82 self.smtpport = 25
83 self.cc = None
84 self.bcc = None
85
86 def set_options(self, task_keys=None, var_options=None, direct=None):
87
88 super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
89
90 self.sender = self.get_option('sender')
91 self.to = self.get_option('to')
92 self.smtphost = self.get_option('mta')
93 self.smtpport = int(self.get_option('mtaport'))
94 self.cc = self.get_option('cc')
95 self.bcc = self.get_option('bcc')
96
97 def mail(self, subject='Ansible error mail', body=None):
98 if body is None:
99 body = subject
100
101 smtp = smtplib.SMTP(self.smtphost, port=self.smtpport)
102
103 content = 'From: %s\n' % self.sender
104 content += 'To: %s\n' % self.to
105 if self.cc:
106 content += 'Cc: %s\n' % self.cc
107 content += 'Subject: %s\n\n' % subject.strip()
108 content += body
109
110 addresses = self.to.split(',')
111 if self.cc:
112 addresses += self.cc.split(',')
113 if self.bcc:
114 addresses += self.bcc.split(',')
115
116 for address in addresses:
117 smtp.sendmail(self.sender, address, to_bytes(content))
118
119 smtp.quit()
120
121 def subject_msg(self, multiline, failtype, linenr):
122 return '%s: %s' % (failtype, multiline.strip('\r\n').splitlines()[linenr])
123
124 def indent(self, multiline, indent=8):
125 return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE)
126
127 def body_blob(self, multiline, texttype):
128 ''' Turn some text output in a well-indented block for sending in a mail body '''
129 intro = 'with the following %s:\n\n' % texttype
130 blob = ''
131 for line in multiline.strip('\r\n').splitlines():
132 blob += '%s\n' % line
133 return intro + self.indent(blob) + '\n'
134
135 def mail_result(self, result, failtype):
136 host = result._host.get_name()
137 if not self.sender:
138 self.sender = '"Ansible: %s" <root>' % host
139
140 # Add subject
141 if self.itembody:
142 subject = self.itemsubject
143 elif result._result.get('failed_when_result') is True:
144 subject = "Failed due to 'failed_when' condition"
145 elif result._result.get('msg'):
146 subject = self.subject_msg(result._result['msg'], failtype, 0)
147 elif result._result.get('stderr'):
148 subject = self.subject_msg(result._result['stderr'], failtype, -1)
149 elif result._result.get('stdout'):
150 subject = self.subject_msg(result._result['stdout'], failtype, -1)
151 elif result._result.get('exception'): # Unrelated exceptions are added to output :-/
152 subject = self.subject_msg(result._result['exception'], failtype, -1)
153 else:
154 subject = '%s: %s' % (failtype, result._task.name or result._task.action)
155
156 # Make playbook name visible (e.g. in Outlook/Gmail condensed view)
157 body = 'Playbook: %s\n' % os.path.basename(self.playbook._file_name)
158 if result._task.name:
159 body += 'Task: %s\n' % result._task.name
160 body += 'Module: %s\n' % result._task.action
161 body += 'Host: %s\n' % host
162 body += '\n'
163
164 # Add task information (as much as possible)
165 body += 'The following task failed:\n\n'
166 if 'invocation' in result._result:
167 body += self.indent('%s: %s\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4)))
168 elif result._task.name:
169 body += self.indent('%s (%s)\n' % (result._task.name, result._task.action))
170 else:
171 body += self.indent('%s\n' % result._task.action)
172 body += '\n'
173
174 # Add item / message
175 if self.itembody:
176 body += self.itembody
177 elif result._result.get('failed_when_result') is True:
178 body += "due to the following condition:\n\n" + self.indent('failed_when:\n- ' + '\n- '.join(result._task.failed_when)) + '\n\n'
179 elif result._result.get('msg'):
180 body += self.body_blob(result._result['msg'], 'message')
181
182 # Add stdout / stderr / exception / warnings / deprecations
183 if result._result.get('stdout'):
184 body += self.body_blob(result._result['stdout'], 'standard output')
185 if result._result.get('stderr'):
186 body += self.body_blob(result._result['stderr'], 'error output')
187 if result._result.get('exception'): # Unrelated exceptions are added to output :-/
188 body += self.body_blob(result._result['exception'], 'exception')
189 if result._result.get('warnings'):
190 for i in range(len(result._result.get('warnings'))):
191 body += self.body_blob(result._result['warnings'][i], 'exception %d' % (i + 1))
192 if result._result.get('deprecations'):
193 for i in range(len(result._result.get('deprecations'))):
194 body += self.body_blob(result._result['deprecations'][i], 'exception %d' % (i + 1))
195
196 body += 'and a complete dump of the error:\n\n'
197 body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)))
198
199 self.mail(subject=subject, body=body)
200
201 def v2_playbook_on_start(self, playbook):
202 self.playbook = playbook
203 self.itembody = ''
204
205 def v2_runner_on_failed(self, result, ignore_errors=False):
206 if ignore_errors:
207 return
208
209 self.mail_result(result, 'Failed')
210
211 def v2_runner_on_unreachable(self, result):
212 self.mail_result(result, 'Unreachable')
213
214 def v2_runner_on_async_failed(self, result):
215 self.mail_result(result, 'Async failure')
216
217 def v2_runner_item_on_failed(self, result):
218 # Pass item information to task failure
219 self.itemsubject = result._result['msg']
220 self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), "failed item dump '%(item)s'" % result._result)
221
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/callback/mail.py b/plugins/callback/mail.py
--- a/plugins/callback/mail.py
+++ b/plugins/callback/mail.py
@@ -59,6 +59,7 @@
import json
import os
import re
+import email.utils
import smtplib
from ansible.module_utils.six import string_types
@@ -100,10 +101,12 @@
smtp = smtplib.SMTP(self.smtphost, port=self.smtpport)
- content = 'From: %s\n' % self.sender
+ content = 'Date: %s\n' % email.utils.formatdate()
+ content += 'From: %s\n' % self.sender
content += 'To: %s\n' % self.to
if self.cc:
content += 'Cc: %s\n' % self.cc
+ content += 'Message-ID: %s\n' % email.utils.make_msgid()
content += 'Subject: %s\n\n' % subject.strip()
content += body
| {"golden_diff": "diff --git a/plugins/callback/mail.py b/plugins/callback/mail.py\n--- a/plugins/callback/mail.py\n+++ b/plugins/callback/mail.py\n@@ -59,6 +59,7 @@\n import json\n import os\n import re\n+import email.utils\n import smtplib\n \n from ansible.module_utils.six import string_types\n@@ -100,10 +101,12 @@\n \n smtp = smtplib.SMTP(self.smtphost, port=self.smtpport)\n \n- content = 'From: %s\\n' % self.sender\n+ content = 'Date: %s\\n' % email.utils.formatdate()\n+ content += 'From: %s\\n' % self.sender\n content += 'To: %s\\n' % self.to\n if self.cc:\n content += 'Cc: %s\\n' % self.cc\n+ content += 'Message-ID: %s\\n' % email.utils.make_msgid()\n content += 'Subject: %s\\n\\n' % subject.strip()\n content += body\n", "issue": "Emails sent by the `mail` callback pluging are missing the `Date` header.\n### Summary\n\nWhen the `mail` callback plugin successfully sends an email, the receiver cannot find it in his mailbox.\r\nThis is because mailboxes are usually sorted by origination date and this field is missing in the emails sent by this plugin.\r\n\r\nAccording to [RFC 5322, the `Date` header is mandatory](https://datatracker.ietf.org/doc/html/rfc5322#section-3.6.1) but the `mail` callback plugin sends emails without it.\r\n\r\n> The only required header fields are the origination date field and\r\n the originator address field(s).\n\n### Issue Type\n\nBug Report\n\n### Component Name\n\n`community.general.mail` callback plugin\n\n### Ansible Version\n\n```console (paste below)\r\n$ ansible --version\r\nansible [core 2.12.1]\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = ['/home/lenaic/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python3.10/site-packages/ansible\r\n ansible collection location = /home/lenaic/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /bin/ansible\r\n python version = 3.10.1 (main, Dec 18 2021, 23:53:45) [GCC 11.1.0]\r\n jinja version = 3.0.3\r\n libyaml = True\r\n```\r\n\n\n### Community.general Version\n\n```console (paste below)\r\n$ ansible-galaxy collection list community.general\r\n\r\n# /usr/lib/python3.10/site-packages/ansible_collections\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 4.3.0 \r\n```\r\n\n\n### Configuration\n\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\nCACHE_PLUGIN(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = jsonfile\r\nCACHE_PLUGIN_CONNECTION(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = ~/.ansible/tmp/facts_cache\r\nCACHE_PLUGIN_TIMEOUT(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = 86400\r\nCALLBACKS_ENABLED(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = ['community.general.mail']\r\nDEFAULT_FORCE_HANDLERS(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = True\r\nDEFAULT_GATHERING(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = smart\r\nDEFAULT_HOST_LIST(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = ['/home/lenaic/doc/devel/perso/lenaic_config/inventory-libvirt.yaml']\r\nDEFAULT_VAULT_PASSWORD_FILE(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = /home/lenaic/doc/devel/perso/lenaic_config/.vault_password\r\nINVENTORY_ENABLED(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = ['community.libvirt.libvirt']\r\nRETRY_FILES_ENABLED(/home/lenaic/doc/devel/perso/lenaic_config/ansible-libvirt.cfg) = 
False\r\n```\r\n\n\n### OS / Environment\n\n```console\r\n$ cat /etc/os-release \r\nNAME=\"Arch Linux\"\r\nPRETTY_NAME=\"Arch Linux\"\r\nID=arch\r\nBUILD_ID=rolling\r\nANSI_COLOR=\"38;2;23;147;209\"\r\nHOME_URL=\"https://archlinux.org/\"\r\nDOCUMENTATION_URL=\"https://wiki.archlinux.org/\"\r\nSUPPORT_URL=\"https://bbs.archlinux.org/\"\r\nBUG_REPORT_URL=\"https://bugs.archlinux.org/\"\r\nLOGO=archlinux-logo\r\n```\n\n### Steps to Reproduce\n\nEnable the `community.general.mail` callback plugin thanks to the following configuration options\r\n\r\n```ini\r\n[defaults]\r\ncallbacks_enabled = community.general.mail\r\n\r\n[callback_mail]\r\nto = me <[email protected]>\r\nsender = Ansible <[email protected]>\r\n```\r\n\r\nAnd try to launch a playbook some tasks of which are failing.\n\n### Expected Results\n\nThe email containing the Ansible failures appears in the receiver mailbox at the top with the date at which the playbook execution finished.\n\n### Actual Results\n\nThe received email is invisible because it is sorted as if it had been received the 1st January 1970 00:00:00.\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2012, Dag Wieers <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\nname: mail\ntype: notification\nshort_description: Sends failure events via email\ndescription:\n- This callback will report failures via email\nauthor:\n- Dag Wieers (@dagwieers)\nrequirements:\n- whitelisting in configuration\noptions:\n mta:\n description: Mail Transfer Agent, server that accepts SMTP\n env:\n - name: SMTPHOST\n ini:\n - section: callback_mail\n key: smtphost\n default: localhost\n mtaport:\n description: Mail Transfer Agent Port, port at which server SMTP\n ini:\n - section: callback_mail\n key: smtpport\n default: 25\n to:\n description: Mail recipient\n ini:\n - section: callback_mail\n key: to\n default: root\n sender:\n description: Mail sender\n ini:\n - section: callback_mail\n key: sender\n cc:\n description: CC'd recipient\n ini:\n - section: callback_mail\n key: cc\n bcc:\n description: BCC'd recipient\n ini:\n - section: callback_mail\n key: bcc\nnotes:\n- \"TODO: expand configuration options now that plugins can leverage Ansible's configuration\"\n'''\n\nimport json\nimport os\nimport re\nimport smtplib\n\nfrom ansible.module_utils.six import string_types\nfrom ansible.module_utils.common.text.converters import to_bytes\nfrom ansible.parsing.ajson import AnsibleJSONEncoder\nfrom ansible.plugins.callback import CallbackBase\n\n\nclass CallbackModule(CallbackBase):\n ''' This Ansible callback plugin mails errors to interested parties. 
'''\n CALLBACK_VERSION = 2.0\n CALLBACK_TYPE = 'notification'\n CALLBACK_NAME = 'community.general.mail'\n CALLBACK_NEEDS_WHITELIST = True\n\n def __init__(self, display=None):\n super(CallbackModule, self).__init__(display=display)\n self.sender = None\n self.to = 'root'\n self.smtphost = os.getenv('SMTPHOST', 'localhost')\n self.smtpport = 25\n self.cc = None\n self.bcc = None\n\n def set_options(self, task_keys=None, var_options=None, direct=None):\n\n super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)\n\n self.sender = self.get_option('sender')\n self.to = self.get_option('to')\n self.smtphost = self.get_option('mta')\n self.smtpport = int(self.get_option('mtaport'))\n self.cc = self.get_option('cc')\n self.bcc = self.get_option('bcc')\n\n def mail(self, subject='Ansible error mail', body=None):\n if body is None:\n body = subject\n\n smtp = smtplib.SMTP(self.smtphost, port=self.smtpport)\n\n content = 'From: %s\\n' % self.sender\n content += 'To: %s\\n' % self.to\n if self.cc:\n content += 'Cc: %s\\n' % self.cc\n content += 'Subject: %s\\n\\n' % subject.strip()\n content += body\n\n addresses = self.to.split(',')\n if self.cc:\n addresses += self.cc.split(',')\n if self.bcc:\n addresses += self.bcc.split(',')\n\n for address in addresses:\n smtp.sendmail(self.sender, address, to_bytes(content))\n\n smtp.quit()\n\n def subject_msg(self, multiline, failtype, linenr):\n return '%s: %s' % (failtype, multiline.strip('\\r\\n').splitlines()[linenr])\n\n def indent(self, multiline, indent=8):\n return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE)\n\n def body_blob(self, multiline, texttype):\n ''' Turn some text output in a well-indented block for sending in a mail body '''\n intro = 'with the following %s:\\n\\n' % texttype\n blob = ''\n for line in multiline.strip('\\r\\n').splitlines():\n blob += '%s\\n' % line\n return intro + self.indent(blob) + '\\n'\n\n def mail_result(self, result, failtype):\n host = result._host.get_name()\n if not self.sender:\n self.sender = '\"Ansible: %s\" <root>' % host\n\n # Add subject\n if self.itembody:\n subject = self.itemsubject\n elif result._result.get('failed_when_result') is True:\n subject = \"Failed due to 'failed_when' condition\"\n elif result._result.get('msg'):\n subject = self.subject_msg(result._result['msg'], failtype, 0)\n elif result._result.get('stderr'):\n subject = self.subject_msg(result._result['stderr'], failtype, -1)\n elif result._result.get('stdout'):\n subject = self.subject_msg(result._result['stdout'], failtype, -1)\n elif result._result.get('exception'): # Unrelated exceptions are added to output :-/\n subject = self.subject_msg(result._result['exception'], failtype, -1)\n else:\n subject = '%s: %s' % (failtype, result._task.name or result._task.action)\n\n # Make playbook name visible (e.g. 
in Outlook/Gmail condensed view)\n body = 'Playbook: %s\\n' % os.path.basename(self.playbook._file_name)\n if result._task.name:\n body += 'Task: %s\\n' % result._task.name\n body += 'Module: %s\\n' % result._task.action\n body += 'Host: %s\\n' % host\n body += '\\n'\n\n # Add task information (as much as possible)\n body += 'The following task failed:\\n\\n'\n if 'invocation' in result._result:\n body += self.indent('%s: %s\\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4)))\n elif result._task.name:\n body += self.indent('%s (%s)\\n' % (result._task.name, result._task.action))\n else:\n body += self.indent('%s\\n' % result._task.action)\n body += '\\n'\n\n # Add item / message\n if self.itembody:\n body += self.itembody\n elif result._result.get('failed_when_result') is True:\n body += \"due to the following condition:\\n\\n\" + self.indent('failed_when:\\n- ' + '\\n- '.join(result._task.failed_when)) + '\\n\\n'\n elif result._result.get('msg'):\n body += self.body_blob(result._result['msg'], 'message')\n\n # Add stdout / stderr / exception / warnings / deprecations\n if result._result.get('stdout'):\n body += self.body_blob(result._result['stdout'], 'standard output')\n if result._result.get('stderr'):\n body += self.body_blob(result._result['stderr'], 'error output')\n if result._result.get('exception'): # Unrelated exceptions are added to output :-/\n body += self.body_blob(result._result['exception'], 'exception')\n if result._result.get('warnings'):\n for i in range(len(result._result.get('warnings'))):\n body += self.body_blob(result._result['warnings'][i], 'exception %d' % (i + 1))\n if result._result.get('deprecations'):\n for i in range(len(result._result.get('deprecations'))):\n body += self.body_blob(result._result['deprecations'][i], 'exception %d' % (i + 1))\n\n body += 'and a complete dump of the error:\\n\\n'\n body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)))\n\n self.mail(subject=subject, body=body)\n\n def v2_playbook_on_start(self, playbook):\n self.playbook = playbook\n self.itembody = ''\n\n def v2_runner_on_failed(self, result, ignore_errors=False):\n if ignore_errors:\n return\n\n self.mail_result(result, 'Failed')\n\n def v2_runner_on_unreachable(self, result):\n self.mail_result(result, 'Unreachable')\n\n def v2_runner_on_async_failed(self, result):\n self.mail_result(result, 'Async failure')\n\n def v2_runner_item_on_failed(self, result):\n # Pass item information to task failure\n self.itemsubject = result._result['msg']\n self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), \"failed item dump '%(item)s'\" % result._result)\n", "path": "plugins/callback/mail.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2012, Dag Wieers <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\nname: mail\ntype: notification\nshort_description: Sends failure events via email\ndescription:\n- This callback will report failures via email\nauthor:\n- Dag Wieers (@dagwieers)\nrequirements:\n- whitelisting in configuration\noptions:\n mta:\n description: Mail Transfer Agent, server that accepts SMTP\n env:\n - name: SMTPHOST\n ini:\n - section: callback_mail\n key: smtphost\n default: localhost\n mtaport:\n description: Mail Transfer Agent 
Port, port at which server SMTP\n ini:\n - section: callback_mail\n key: smtpport\n default: 25\n to:\n description: Mail recipient\n ini:\n - section: callback_mail\n key: to\n default: root\n sender:\n description: Mail sender\n ini:\n - section: callback_mail\n key: sender\n cc:\n description: CC'd recipient\n ini:\n - section: callback_mail\n key: cc\n bcc:\n description: BCC'd recipient\n ini:\n - section: callback_mail\n key: bcc\nnotes:\n- \"TODO: expand configuration options now that plugins can leverage Ansible's configuration\"\n'''\n\nimport json\nimport os\nimport re\nimport email.utils\nimport smtplib\n\nfrom ansible.module_utils.six import string_types\nfrom ansible.module_utils.common.text.converters import to_bytes\nfrom ansible.parsing.ajson import AnsibleJSONEncoder\nfrom ansible.plugins.callback import CallbackBase\n\n\nclass CallbackModule(CallbackBase):\n ''' This Ansible callback plugin mails errors to interested parties. '''\n CALLBACK_VERSION = 2.0\n CALLBACK_TYPE = 'notification'\n CALLBACK_NAME = 'community.general.mail'\n CALLBACK_NEEDS_WHITELIST = True\n\n def __init__(self, display=None):\n super(CallbackModule, self).__init__(display=display)\n self.sender = None\n self.to = 'root'\n self.smtphost = os.getenv('SMTPHOST', 'localhost')\n self.smtpport = 25\n self.cc = None\n self.bcc = None\n\n def set_options(self, task_keys=None, var_options=None, direct=None):\n\n super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)\n\n self.sender = self.get_option('sender')\n self.to = self.get_option('to')\n self.smtphost = self.get_option('mta')\n self.smtpport = int(self.get_option('mtaport'))\n self.cc = self.get_option('cc')\n self.bcc = self.get_option('bcc')\n\n def mail(self, subject='Ansible error mail', body=None):\n if body is None:\n body = subject\n\n smtp = smtplib.SMTP(self.smtphost, port=self.smtpport)\n\n content = 'Date: %s\\n' % email.utils.formatdate()\n content += 'From: %s\\n' % self.sender\n content += 'To: %s\\n' % self.to\n if self.cc:\n content += 'Cc: %s\\n' % self.cc\n content += 'Message-ID: %s\\n' % email.utils.make_msgid()\n content += 'Subject: %s\\n\\n' % subject.strip()\n content += body\n\n addresses = self.to.split(',')\n if self.cc:\n addresses += self.cc.split(',')\n if self.bcc:\n addresses += self.bcc.split(',')\n\n for address in addresses:\n smtp.sendmail(self.sender, address, to_bytes(content))\n\n smtp.quit()\n\n def subject_msg(self, multiline, failtype, linenr):\n return '%s: %s' % (failtype, multiline.strip('\\r\\n').splitlines()[linenr])\n\n def indent(self, multiline, indent=8):\n return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE)\n\n def body_blob(self, multiline, texttype):\n ''' Turn some text output in a well-indented block for sending in a mail body '''\n intro = 'with the following %s:\\n\\n' % texttype\n blob = ''\n for line in multiline.strip('\\r\\n').splitlines():\n blob += '%s\\n' % line\n return intro + self.indent(blob) + '\\n'\n\n def mail_result(self, result, failtype):\n host = result._host.get_name()\n if not self.sender:\n self.sender = '\"Ansible: %s\" <root>' % host\n\n # Add subject\n if self.itembody:\n subject = self.itemsubject\n elif result._result.get('failed_when_result') is True:\n subject = \"Failed due to 'failed_when' condition\"\n elif result._result.get('msg'):\n subject = self.subject_msg(result._result['msg'], failtype, 0)\n elif result._result.get('stderr'):\n subject = self.subject_msg(result._result['stderr'], failtype, -1)\n 
elif result._result.get('stdout'):\n subject = self.subject_msg(result._result['stdout'], failtype, -1)\n elif result._result.get('exception'): # Unrelated exceptions are added to output :-/\n subject = self.subject_msg(result._result['exception'], failtype, -1)\n else:\n subject = '%s: %s' % (failtype, result._task.name or result._task.action)\n\n # Make playbook name visible (e.g. in Outlook/Gmail condensed view)\n body = 'Playbook: %s\\n' % os.path.basename(self.playbook._file_name)\n if result._task.name:\n body += 'Task: %s\\n' % result._task.name\n body += 'Module: %s\\n' % result._task.action\n body += 'Host: %s\\n' % host\n body += '\\n'\n\n # Add task information (as much as possible)\n body += 'The following task failed:\\n\\n'\n if 'invocation' in result._result:\n body += self.indent('%s: %s\\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4)))\n elif result._task.name:\n body += self.indent('%s (%s)\\n' % (result._task.name, result._task.action))\n else:\n body += self.indent('%s\\n' % result._task.action)\n body += '\\n'\n\n # Add item / message\n if self.itembody:\n body += self.itembody\n elif result._result.get('failed_when_result') is True:\n body += \"due to the following condition:\\n\\n\" + self.indent('failed_when:\\n- ' + '\\n- '.join(result._task.failed_when)) + '\\n\\n'\n elif result._result.get('msg'):\n body += self.body_blob(result._result['msg'], 'message')\n\n # Add stdout / stderr / exception / warnings / deprecations\n if result._result.get('stdout'):\n body += self.body_blob(result._result['stdout'], 'standard output')\n if result._result.get('stderr'):\n body += self.body_blob(result._result['stderr'], 'error output')\n if result._result.get('exception'): # Unrelated exceptions are added to output :-/\n body += self.body_blob(result._result['exception'], 'exception')\n if result._result.get('warnings'):\n for i in range(len(result._result.get('warnings'))):\n body += self.body_blob(result._result['warnings'][i], 'exception %d' % (i + 1))\n if result._result.get('deprecations'):\n for i in range(len(result._result.get('deprecations'))):\n body += self.body_blob(result._result['deprecations'][i], 'exception %d' % (i + 1))\n\n body += 'and a complete dump of the error:\\n\\n'\n body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)))\n\n self.mail(subject=subject, body=body)\n\n def v2_playbook_on_start(self, playbook):\n self.playbook = playbook\n self.itembody = ''\n\n def v2_runner_on_failed(self, result, ignore_errors=False):\n if ignore_errors:\n return\n\n self.mail_result(result, 'Failed')\n\n def v2_runner_on_unreachable(self, result):\n self.mail_result(result, 'Unreachable')\n\n def v2_runner_on_async_failed(self, result):\n self.mail_result(result, 'Async failure')\n\n def v2_runner_item_on_failed(self, result):\n # Pass item information to task failure\n self.itemsubject = result._result['msg']\n self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), \"failed item dump '%(item)s'\" % result._result)\n", "path": "plugins/callback/mail.py"}]} | 3,832 | 223 |
gh_patches_debug_1786 | rasdani/github-patches | git_diff | mozilla__telemetry-analysis-service-413 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImportError: No module named 'atmo.clusters.jobs'
```
app@a898b116953a:~$ ./manage.py update_clusters
Traceback (most recent call last):
File "./manage.py", line 11, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.5/site-packages/django/core/management/__init__.py", line 353, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.5/site-packages/django/core/management/__init__.py", line 345, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python3.5/site-packages/django/core/management/__init__.py", line 195, in fetch_command
klass = load_command_class(app_name, subcommand)
File "/usr/local/lib/python3.5/site-packages/django/core/management/__init__.py", line 39, in load_command_class
module = import_module('%s.management.commands.%s' % (app_name, name))
File "/usr/local/lib/python3.5/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 986, in _gcd_import
File "<frozen importlib._bootstrap>", line 969, in _find_and_load
File "<frozen importlib._bootstrap>", line 958, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 673, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 673, in exec_module
File "<frozen importlib._bootstrap>", line 222, in _call_with_frames_removed
File "/app/atmo/clusters/management/commands/update_clusters.py", line 6, in <module>
from ...jobs import update_clusters
ImportError: No module named 'atmo.clusters.jobs'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `atmo/clusters/management/commands/update_clusters.py`
Content:
```
1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, you can obtain one at http://mozilla.org/MPL/2.0/.
4 from django.core.management.base import BaseCommand
5
6 from ...jobs import update_clusters
7
8
9 class Command(BaseCommand):
10 help = 'Go through active clusters and update their status'
11
12 def handle(self, *args, **options):
13 self.stdout.write('Updating cluster info...', ending='')
14 update_clusters()
15 self.stdout.write('done.')
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/atmo/clusters/management/commands/update_clusters.py b/atmo/clusters/management/commands/update_clusters.py
--- a/atmo/clusters/management/commands/update_clusters.py
+++ b/atmo/clusters/management/commands/update_clusters.py
@@ -3,7 +3,7 @@
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
from django.core.management.base import BaseCommand
-from ...jobs import update_clusters
+from ...tasks import update_clusters
class Command(BaseCommand):
| {"golden_diff": "diff --git a/atmo/clusters/management/commands/update_clusters.py b/atmo/clusters/management/commands/update_clusters.py\n--- a/atmo/clusters/management/commands/update_clusters.py\n+++ b/atmo/clusters/management/commands/update_clusters.py\n@@ -3,7 +3,7 @@\n # file, you can obtain one at http://mozilla.org/MPL/2.0/.\n from django.core.management.base import BaseCommand\n \n-from ...jobs import update_clusters\n+from ...tasks import update_clusters\n \n \n class Command(BaseCommand):\n", "issue": "ImportError: No module named 'atmo.clusters.jobs'\n```\r\napp@a898b116953a:~$ ./manage.py update_clusters\r\nTraceback (most recent call last):\r\n File \"./manage.py\", line 11, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python3.5/site-packages/django/core/management/__init__.py\", line 353, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python3.5/site-packages/django/core/management/__init__.py\", line 345, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/usr/local/lib/python3.5/site-packages/django/core/management/__init__.py\", line 195, in fetch_command\r\n klass = load_command_class(app_name, subcommand)\r\n File \"/usr/local/lib/python3.5/site-packages/django/core/management/__init__.py\", line 39, in load_command_class\r\n module = import_module('%s.management.commands.%s' % (app_name, name))\r\n File \"/usr/local/lib/python3.5/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 986, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 969, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 958, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 673, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 673, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 222, in _call_with_frames_removed\r\n File \"/app/atmo/clusters/management/commands/update_clusters.py\", line 6, in <module>\r\n from ...jobs import update_clusters\r\nImportError: No module named 'atmo.clusters.jobs'\r\n```\n", "before_files": [{"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom django.core.management.base import BaseCommand\n\nfrom ...jobs import update_clusters\n\n\nclass Command(BaseCommand):\n help = 'Go through active clusters and update their status'\n\n def handle(self, *args, **options):\n self.stdout.write('Updating cluster info...', ending='')\n update_clusters()\n self.stdout.write('done.')\n", "path": "atmo/clusters/management/commands/update_clusters.py"}], "after_files": [{"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom django.core.management.base import BaseCommand\n\nfrom ...tasks import update_clusters\n\n\nclass Command(BaseCommand):\n help = 'Go through active clusters and update their status'\n\n def handle(self, *args, **options):\n self.stdout.write('Updating cluster info...', ending='')\n update_clusters()\n self.stdout.write('done.')\n", "path": "atmo/clusters/management/commands/update_clusters.py"}]} | 880 | 114 |
gh_patches_debug_1821 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1713 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG - hooks not working on windows 10, when user account name contains non-ascii characters
When user account name contains non-ascii characters such as 'š', such that python executable ends up for example in C:\Users\john.š\\.cache\pre-commit\repo\py_env-python3.8\Scripts\python.exe, when committing to the git repository following message appears:
An unexpected error has occurred: AssertionError: BUG: expected environment for python to be healthy() immediately after install, please open an issue describing your environment.
PS: fucntion os.path.isfile() in parse_shebang.normexe() returns False, even though the executable exists there and is a file.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/python.py`
Content:
```
1 import contextlib
2 import functools
3 import os
4 import sys
5 from typing import Dict
6 from typing import Generator
7 from typing import Optional
8 from typing import Sequence
9 from typing import Tuple
10
11 import pre_commit.constants as C
12 from pre_commit.envcontext import envcontext
13 from pre_commit.envcontext import PatchesT
14 from pre_commit.envcontext import UNSET
15 from pre_commit.envcontext import Var
16 from pre_commit.hook import Hook
17 from pre_commit.languages import helpers
18 from pre_commit.parse_shebang import find_executable
19 from pre_commit.prefix import Prefix
20 from pre_commit.util import CalledProcessError
21 from pre_commit.util import clean_path_on_failure
22 from pre_commit.util import cmd_output
23 from pre_commit.util import cmd_output_b
24
25 ENVIRONMENT_DIR = 'py_env'
26
27
28 @functools.lru_cache(maxsize=None)
29 def _version_info(exe: str) -> str:
30 prog = 'import sys;print(".".join(str(p) for p in sys.version_info))'
31 try:
32 return cmd_output(exe, '-S', '-c', prog)[1].strip()
33 except CalledProcessError:
34 return f'<<error retrieving version from {exe}>>'
35
36
37 def _read_pyvenv_cfg(filename: str) -> Dict[str, str]:
38 ret = {}
39 with open(filename) as f:
40 for line in f:
41 try:
42 k, v = line.split('=')
43 except ValueError: # blank line / comment / etc.
44 continue
45 else:
46 ret[k.strip()] = v.strip()
47 return ret
48
49
50 def bin_dir(venv: str) -> str:
51 """On windows there's a different directory for the virtualenv"""
52 bin_part = 'Scripts' if os.name == 'nt' else 'bin'
53 return os.path.join(venv, bin_part)
54
55
56 def get_env_patch(venv: str) -> PatchesT:
57 return (
58 ('PIP_DISABLE_PIP_VERSION_CHECK', '1'),
59 ('PYTHONHOME', UNSET),
60 ('VIRTUAL_ENV', venv),
61 ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),
62 )
63
64
65 def _find_by_py_launcher(
66 version: str,
67 ) -> Optional[str]: # pragma: no cover (windows only)
68 if version.startswith('python'):
69 num = version[len('python'):]
70 cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')
71 env = dict(os.environ, PYTHONIOENCODING='UTF-8')
72 try:
73 return cmd_output(*cmd, env=env)[1].strip()
74 except CalledProcessError:
75 pass
76 return None
77
78
79 def _find_by_sys_executable() -> Optional[str]:
80 def _norm(path: str) -> Optional[str]:
81 _, exe = os.path.split(path.lower())
82 exe, _, _ = exe.partition('.exe')
83 if exe not in {'python', 'pythonw'} and find_executable(exe):
84 return exe
85 return None
86
87 # On linux, I see these common sys.executables:
88 #
89 # system `python`: /usr/bin/python -> python2.7
90 # system `python2`: /usr/bin/python2 -> python2.7
91 # virtualenv v: v/bin/python (will not return from this loop)
92 # virtualenv v -ppython2: v/bin/python -> python2
93 # virtualenv v -ppython2.7: v/bin/python -> python2.7
94 # virtualenv v -ppypy: v/bin/python -> v/bin/pypy
95 for path in (sys.executable, os.path.realpath(sys.executable)):
96 exe = _norm(path)
97 if exe:
98 return exe
99 return None
100
101
102 @functools.lru_cache(maxsize=1)
103 def get_default_version() -> str: # pragma: no cover (platform dependent)
104 # First attempt from `sys.executable` (or the realpath)
105 exe = _find_by_sys_executable()
106 if exe:
107 return exe
108
109 # Next try the `pythonX.X` executable
110 exe = f'python{sys.version_info[0]}.{sys.version_info[1]}'
111 if find_executable(exe):
112 return exe
113
114 if _find_by_py_launcher(exe):
115 return exe
116
117 # We tried!
118 return C.DEFAULT
119
120
121 def _sys_executable_matches(version: str) -> bool:
122 if version == 'python':
123 return True
124 elif not version.startswith('python'):
125 return False
126
127 try:
128 info = tuple(int(p) for p in version[len('python'):].split('.'))
129 except ValueError:
130 return False
131
132 return sys.version_info[:len(info)] == info
133
134
135 def norm_version(version: str) -> Optional[str]:
136 if version == C.DEFAULT: # use virtualenv's default
137 return None
138 elif _sys_executable_matches(version): # virtualenv defaults to our exe
139 return None
140
141 if os.name == 'nt': # pragma: no cover (windows)
142 version_exec = _find_by_py_launcher(version)
143 if version_exec:
144 return version_exec
145
146 # Try looking up by name
147 version_exec = find_executable(version)
148 if version_exec and version_exec != version:
149 return version_exec
150
151 # Otherwise assume it is a path
152 return os.path.expanduser(version)
153
154
155 @contextlib.contextmanager
156 def in_env(
157 prefix: Prefix,
158 language_version: str,
159 ) -> Generator[None, None, None]:
160 directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)
161 envdir = prefix.path(directory)
162 with envcontext(get_env_patch(envdir)):
163 yield
164
165
166 def healthy(prefix: Prefix, language_version: str) -> bool:
167 directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)
168 envdir = prefix.path(directory)
169 pyvenv_cfg = os.path.join(envdir, 'pyvenv.cfg')
170
171 # created with "old" virtualenv
172 if not os.path.exists(pyvenv_cfg):
173 return False
174
175 exe_name = 'python.exe' if sys.platform == 'win32' else 'python'
176 py_exe = prefix.path(bin_dir(envdir), exe_name)
177 cfg = _read_pyvenv_cfg(pyvenv_cfg)
178
179 return (
180 'version_info' in cfg and
181 # always use uncached lookup here in case we replaced an unhealthy env
182 _version_info.__wrapped__(py_exe) == cfg['version_info'] and (
183 'base-executable' not in cfg or
184 _version_info(cfg['base-executable']) == cfg['version_info']
185 )
186 )
187
188
189 def install_environment(
190 prefix: Prefix,
191 version: str,
192 additional_dependencies: Sequence[str],
193 ) -> None:
194 envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))
195 venv_cmd = [sys.executable, '-mvirtualenv', envdir]
196 python = norm_version(version)
197 if python is not None:
198 venv_cmd.extend(('-p', python))
199 install_cmd = ('python', '-mpip', 'install', '.', *additional_dependencies)
200
201 with clean_path_on_failure(envdir):
202 cmd_output_b(*venv_cmd, cwd='/')
203 with in_env(prefix, version):
204 helpers.run_setup_cmd(prefix, install_cmd)
205
206
207 def run_hook(
208 hook: Hook,
209 file_args: Sequence[str],
210 color: bool,
211 ) -> Tuple[int, bytes]:
212 with in_env(hook.prefix, hook.language_version):
213 return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py
--- a/pre_commit/languages/python.py
+++ b/pre_commit/languages/python.py
@@ -36,7 +36,7 @@
def _read_pyvenv_cfg(filename: str) -> Dict[str, str]:
ret = {}
- with open(filename) as f:
+ with open(filename, encoding='UTF-8') as f:
for line in f:
try:
k, v = line.split('=')
| {"golden_diff": "diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py\n--- a/pre_commit/languages/python.py\n+++ b/pre_commit/languages/python.py\n@@ -36,7 +36,7 @@\n \n def _read_pyvenv_cfg(filename: str) -> Dict[str, str]:\n ret = {}\n- with open(filename) as f:\n+ with open(filename, encoding='UTF-8') as f:\n for line in f:\n try:\n k, v = line.split('=')\n", "issue": "BUG - hooks not working on windows 10, when user account name contains non-ascii characters\nWhen user account name contains non-ascii characters such as '\u0161', such that python executable ends up for example in C:\\Users\\john.\u0161\\\\.cache\\pre-commit\\repo\\py_env-python3.8\\Scripts\\python.exe, when committing to the git repository following message appears:\r\n\r\nAn unexpected error has occurred: AssertionError: BUG: expected environment for python to be healthy() immediately after install, please open an issue describing your environment.\r\n\r\nPS: fucntion os.path.isfile() in parse_shebang.normexe() returns False, even though the executable exists there and is a file.\n", "before_files": [{"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.parse_shebang import find_executable\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'py_env'\n\n\[email protected]_cache(maxsize=None)\ndef _version_info(exe: str) -> str:\n prog = 'import sys;print(\".\".join(str(p) for p in sys.version_info))'\n try:\n return cmd_output(exe, '-S', '-c', prog)[1].strip()\n except CalledProcessError:\n return f'<<error retrieving version from {exe}>>'\n\n\ndef _read_pyvenv_cfg(filename: str) -> Dict[str, str]:\n ret = {}\n with open(filename) as f:\n for line in f:\n try:\n k, v = line.split('=')\n except ValueError: # blank line / comment / etc.\n continue\n else:\n ret[k.strip()] = v.strip()\n return ret\n\n\ndef bin_dir(venv: str) -> str:\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n bin_part = 'Scripts' if os.name == 'nt' else 'bin'\n return os.path.join(venv, bin_part)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PIP_DISABLE_PIP_VERSION_CHECK', '1'),\n ('PYTHONHOME', UNSET),\n ('VIRTUAL_ENV', venv),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\ndef _find_by_py_launcher(\n version: str,\n) -> Optional[str]: # pragma: no cover (windows only)\n if version.startswith('python'):\n num = version[len('python'):]\n cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')\n env = dict(os.environ, PYTHONIOENCODING='UTF-8')\n try:\n return cmd_output(*cmd, env=env)[1].strip()\n except CalledProcessError:\n pass\n return None\n\n\ndef _find_by_sys_executable() -> Optional[str]:\n def _norm(path: str) -> Optional[str]:\n _, exe = os.path.split(path.lower())\n exe, _, _ = exe.partition('.exe')\n if exe not in {'python', 'pythonw'} and find_executable(exe):\n return exe\n return None\n\n # On linux, I see these 
common sys.executables:\n #\n # system `python`: /usr/bin/python -> python2.7\n # system `python2`: /usr/bin/python2 -> python2.7\n # virtualenv v: v/bin/python (will not return from this loop)\n # virtualenv v -ppython2: v/bin/python -> python2\n # virtualenv v -ppython2.7: v/bin/python -> python2.7\n # virtualenv v -ppypy: v/bin/python -> v/bin/pypy\n for path in (sys.executable, os.path.realpath(sys.executable)):\n exe = _norm(path)\n if exe:\n return exe\n return None\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str: # pragma: no cover (platform dependent)\n # First attempt from `sys.executable` (or the realpath)\n exe = _find_by_sys_executable()\n if exe:\n return exe\n\n # Next try the `pythonX.X` executable\n exe = f'python{sys.version_info[0]}.{sys.version_info[1]}'\n if find_executable(exe):\n return exe\n\n if _find_by_py_launcher(exe):\n return exe\n\n # We tried!\n return C.DEFAULT\n\n\ndef _sys_executable_matches(version: str) -> bool:\n if version == 'python':\n return True\n elif not version.startswith('python'):\n return False\n\n try:\n info = tuple(int(p) for p in version[len('python'):].split('.'))\n except ValueError:\n return False\n\n return sys.version_info[:len(info)] == info\n\n\ndef norm_version(version: str) -> Optional[str]:\n if version == C.DEFAULT: # use virtualenv's default\n return None\n elif _sys_executable_matches(version): # virtualenv defaults to our exe\n return None\n\n if os.name == 'nt': # pragma: no cover (windows)\n version_exec = _find_by_py_launcher(version)\n if version_exec:\n return version_exec\n\n # Try looking up by name\n version_exec = find_executable(version)\n if version_exec and version_exec != version:\n return version_exec\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)\n envdir = prefix.path(directory)\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef healthy(prefix: Prefix, language_version: str) -> bool:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)\n envdir = prefix.path(directory)\n pyvenv_cfg = os.path.join(envdir, 'pyvenv.cfg')\n\n # created with \"old\" virtualenv\n if not os.path.exists(pyvenv_cfg):\n return False\n\n exe_name = 'python.exe' if sys.platform == 'win32' else 'python'\n py_exe = prefix.path(bin_dir(envdir), exe_name)\n cfg = _read_pyvenv_cfg(pyvenv_cfg)\n\n return (\n 'version_info' in cfg and\n # always use uncached lookup here in case we replaced an unhealthy env\n _version_info.__wrapped__(py_exe) == cfg['version_info'] and (\n 'base-executable' not in cfg or\n _version_info(cfg['base-executable']) == cfg['version_info']\n )\n )\n\n\ndef install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) -> None:\n envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))\n venv_cmd = [sys.executable, '-mvirtualenv', envdir]\n python = norm_version(version)\n if python is not None:\n venv_cmd.extend(('-p', python))\n install_cmd = ('python', '-mpip', 'install', '.', *additional_dependencies)\n\n with clean_path_on_failure(envdir):\n cmd_output_b(*venv_cmd, cwd='/')\n with in_env(prefix, version):\n helpers.run_setup_cmd(prefix, install_cmd)\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n 
return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/python.py"}], "after_files": [{"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.parse_shebang import find_executable\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'py_env'\n\n\[email protected]_cache(maxsize=None)\ndef _version_info(exe: str) -> str:\n prog = 'import sys;print(\".\".join(str(p) for p in sys.version_info))'\n try:\n return cmd_output(exe, '-S', '-c', prog)[1].strip()\n except CalledProcessError:\n return f'<<error retrieving version from {exe}>>'\n\n\ndef _read_pyvenv_cfg(filename: str) -> Dict[str, str]:\n ret = {}\n with open(filename, encoding='UTF-8') as f:\n for line in f:\n try:\n k, v = line.split('=')\n except ValueError: # blank line / comment / etc.\n continue\n else:\n ret[k.strip()] = v.strip()\n return ret\n\n\ndef bin_dir(venv: str) -> str:\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n bin_part = 'Scripts' if os.name == 'nt' else 'bin'\n return os.path.join(venv, bin_part)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PIP_DISABLE_PIP_VERSION_CHECK', '1'),\n ('PYTHONHOME', UNSET),\n ('VIRTUAL_ENV', venv),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\ndef _find_by_py_launcher(\n version: str,\n) -> Optional[str]: # pragma: no cover (windows only)\n if version.startswith('python'):\n num = version[len('python'):]\n cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')\n env = dict(os.environ, PYTHONIOENCODING='UTF-8')\n try:\n return cmd_output(*cmd, env=env)[1].strip()\n except CalledProcessError:\n pass\n return None\n\n\ndef _find_by_sys_executable() -> Optional[str]:\n def _norm(path: str) -> Optional[str]:\n _, exe = os.path.split(path.lower())\n exe, _, _ = exe.partition('.exe')\n if exe not in {'python', 'pythonw'} and find_executable(exe):\n return exe\n return None\n\n # On linux, I see these common sys.executables:\n #\n # system `python`: /usr/bin/python -> python2.7\n # system `python2`: /usr/bin/python2 -> python2.7\n # virtualenv v: v/bin/python (will not return from this loop)\n # virtualenv v -ppython2: v/bin/python -> python2\n # virtualenv v -ppython2.7: v/bin/python -> python2.7\n # virtualenv v -ppypy: v/bin/python -> v/bin/pypy\n for path in (sys.executable, os.path.realpath(sys.executable)):\n exe = _norm(path)\n if exe:\n return exe\n return None\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str: # pragma: no cover (platform dependent)\n # First attempt from `sys.executable` (or the realpath)\n exe = _find_by_sys_executable()\n if exe:\n return exe\n\n # Next try the `pythonX.X` executable\n exe = f'python{sys.version_info[0]}.{sys.version_info[1]}'\n if find_executable(exe):\n return exe\n\n if _find_by_py_launcher(exe):\n return exe\n\n # We tried!\n return C.DEFAULT\n\n\ndef 
_sys_executable_matches(version: str) -> bool:\n if version == 'python':\n return True\n elif not version.startswith('python'):\n return False\n\n try:\n info = tuple(int(p) for p in version[len('python'):].split('.'))\n except ValueError:\n return False\n\n return sys.version_info[:len(info)] == info\n\n\ndef norm_version(version: str) -> Optional[str]:\n if version == C.DEFAULT: # use virtualenv's default\n return None\n elif _sys_executable_matches(version): # virtualenv defaults to our exe\n return None\n\n if os.name == 'nt': # pragma: no cover (windows)\n version_exec = _find_by_py_launcher(version)\n if version_exec:\n return version_exec\n\n # Try looking up by name\n version_exec = find_executable(version)\n if version_exec and version_exec != version:\n return version_exec\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)\n envdir = prefix.path(directory)\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef healthy(prefix: Prefix, language_version: str) -> bool:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, language_version)\n envdir = prefix.path(directory)\n pyvenv_cfg = os.path.join(envdir, 'pyvenv.cfg')\n\n # created with \"old\" virtualenv\n if not os.path.exists(pyvenv_cfg):\n return False\n\n exe_name = 'python.exe' if sys.platform == 'win32' else 'python'\n py_exe = prefix.path(bin_dir(envdir), exe_name)\n cfg = _read_pyvenv_cfg(pyvenv_cfg)\n\n return (\n 'version_info' in cfg and\n # always use uncached lookup here in case we replaced an unhealthy env\n _version_info.__wrapped__(py_exe) == cfg['version_info'] and (\n 'base-executable' not in cfg or\n _version_info(cfg['base-executable']) == cfg['version_info']\n )\n )\n\n\ndef install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) -> None:\n envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))\n venv_cmd = [sys.executable, '-mvirtualenv', envdir]\n python = norm_version(version)\n if python is not None:\n venv_cmd.extend(('-p', python))\n install_cmd = ('python', '-mpip', 'install', '.', *additional_dependencies)\n\n with clean_path_on_failure(envdir):\n cmd_output_b(*venv_cmd, cwd='/')\n with in_env(prefix, version):\n helpers.run_setup_cmd(prefix, install_cmd)\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/python.py"}]} | 2,592 | 112 |
gh_patches_debug_39196 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-3705 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
testing 4295: 402 error for poll export
**URL:** https://meinberlin-dev.liqd.net/dashboard/modules/umfrage-24/poll/export/
**user:** initiator, moderator, group member
**expected behaviour:** download export
**behaviour:** 403 error
**important screensize:**
**device & browser:** big sur, firefox
**Comment/Question:**
Screenshot?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/polls/exports.py`
Content:
```
1 from django.utils.translation import ugettext as _
2 from rules.contrib.views import PermissionRequiredMixin
3
4 from adhocracy4.comments.models import Comment
5 from adhocracy4.exports import mixins
6 from adhocracy4.exports import views as export_views
7 from adhocracy4.polls import models as poll_models
8 from meinberlin.apps.users.models import User
9
10
11 class PollCommentExportView(
12 PermissionRequiredMixin,
13 mixins.ExportModelFieldsMixin,
14 mixins.UserGeneratedContentExportMixin,
15 mixins.ItemExportWithLinkMixin,
16 mixins.CommentExportWithRepliesToMixin,
17 export_views.BaseItemExportView
18 ):
19
20 model = Comment
21
22 fields = ['id', 'comment', 'created']
23 permission_required = 'a4projects.change_project'
24
25 def get_permission_object(self):
26 return self.module.project
27
28 def get_queryset(self):
29 comments = (
30 Comment.objects.filter(poll__module=self.module) |
31 Comment.objects.filter(parent_comment__poll__module=self.module)
32 )
33 return comments
34
35 def get_virtual_fields(self, virtual):
36 virtual.setdefault('id', _('ID'))
37 virtual.setdefault('comment', _('Comment'))
38 virtual.setdefault('created', _('Created'))
39 return super().get_virtual_fields(virtual)
40
41 @property
42 def raise_exception(self):
43 return self.request.user.is_authenticated
44
45
46 class PollExportView(
47 PermissionRequiredMixin,
48 export_views.BaseItemExportView
49 ):
50
51 permission_required = 'a4projects.change_project'
52
53 def get_queryset(self):
54 creators_vote = poll_models.Vote.objects.filter(
55 choice__question__poll=self.poll).values_list('creator', flat=True)
56 creators_answer = poll_models.Answer.objects.filter(
57 question__poll=self.poll).values_list('creator', flat=True)
58 creator_ids = list(set(creators_vote).union(set(creators_answer)))
59 return User.objects.filter(pk__in=creator_ids)
60
61 @property
62 def poll(self):
63 return poll_models.Poll.objects.get(module=self.module)
64
65 @property
66 def single_choice_questions(self):
67 return self.poll.questions.filter(
68 multiple_choice=False,
69 is_open=False).order_by('id')
70
71 @property
72 def multiple_choice_questions(self):
73 return self.poll.questions.filter(multiple_choice=True).order_by('id')
74
75 @property
76 def open_questions(self):
77 return self.poll.questions.filter(is_open=True).order_by('id')
78
79 def get_virtual_fields(self, virtual):
80 virtual = super().get_virtual_fields(virtual)
81 virtual = self.get_virtual_fields_choice_questions(
82 virtual, self.single_choice_questions)
83 virtual = self.get_virtual_fields_choice_questions(
84 virtual, self.multiple_choice_questions)
85 virtual = self.get_virtual_fields_open_questions(
86 virtual, self.open_questions)
87
88 return virtual
89
90 def get_virtual_fields_choice_questions(self, virtual, choice_questions):
91 for question in choice_questions.all():
92 for choice in question.choices.all():
93 identifier = 'Q' + str(question.pk) + '_A' + str(choice.pk)
94 virtual[(choice, False)] = identifier
95 if choice.is_other_choice:
96 identifier_answer = identifier + '_text'
97 virtual[(choice, True)] = identifier_answer
98
99 return virtual
100
101 def get_virtual_fields_open_questions(self, virtual, open_questions):
102 for question in open_questions.all():
103 identifier = 'Q' + str(question.pk)
104 virtual[(question, False)] = identifier
105 identifier_answer = identifier + '_text'
106 virtual[(question, True)] = identifier_answer
107
108 return virtual
109
110 def get_field_data(self, user, field):
111 field_object, is_text_field = field
112
113 if type(field_object) == poll_models.Choice:
114 votes_qs = poll_models.Vote.objects.filter(
115 choice=field_object,
116 creator=user)
117 if not is_text_field:
118 value = int(votes_qs.exists())
119 else:
120 vote = votes_qs.first()
121 if vote:
122 value = poll_models.OtherVote.objects.get(vote=vote).answer
123 else:
124 value = ''
125 else: # field_object is question
126 answers_qs = poll_models.Answer.objects.filter(
127 question=field_object,
128 creator=user)
129 if not is_text_field:
130 value = int(answers_qs.exists())
131 else:
132 answer = answers_qs.first()
133 if answer:
134 value = answer.answer
135 else:
136 value = ''
137
138 return value
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/polls/exports.py b/meinberlin/apps/polls/exports.py
--- a/meinberlin/apps/polls/exports.py
+++ b/meinberlin/apps/polls/exports.py
@@ -50,6 +50,9 @@
permission_required = 'a4projects.change_project'
+ def get_permission_object(self):
+ return self.module.project
+
def get_queryset(self):
creators_vote = poll_models.Vote.objects.filter(
choice__question__poll=self.poll).values_list('creator', flat=True)
@@ -63,47 +66,37 @@
return poll_models.Poll.objects.get(module=self.module)
@property
- def single_choice_questions(self):
- return self.poll.questions.filter(
- multiple_choice=False,
- is_open=False).order_by('id')
-
- @property
- def multiple_choice_questions(self):
- return self.poll.questions.filter(multiple_choice=True).order_by('id')
-
- @property
- def open_questions(self):
- return self.poll.questions.filter(is_open=True).order_by('id')
+ def questions(self):
+ return self.poll.questions.all()
def get_virtual_fields(self, virtual):
virtual = super().get_virtual_fields(virtual)
- virtual = self.get_virtual_fields_choice_questions(
- virtual, self.single_choice_questions)
- virtual = self.get_virtual_fields_choice_questions(
- virtual, self.multiple_choice_questions)
- virtual = self.get_virtual_fields_open_questions(
- virtual, self.open_questions)
+
+ for question in self.questions:
+ if question.is_open:
+ virtual = \
+ self.get_virtual_field_open_question(virtual, question)
+ else:
+ virtual = \
+ self.get_virtual_field_choice_question(virtual, question)
return virtual
- def get_virtual_fields_choice_questions(self, virtual, choice_questions):
- for question in choice_questions.all():
- for choice in question.choices.all():
- identifier = 'Q' + str(question.pk) + '_A' + str(choice.pk)
- virtual[(choice, False)] = identifier
- if choice.is_other_choice:
- identifier_answer = identifier + '_text'
- virtual[(choice, True)] = identifier_answer
+ def get_virtual_field_choice_question(self, virtual, choice_question):
+ for choice in choice_question.choices.all():
+ identifier = 'Q' + str(choice_question.pk) + '_A' + str(choice.pk)
+ virtual[(choice, False)] = identifier
+ if choice.is_other_choice:
+ identifier_answer = identifier + '_text'
+ virtual[(choice, True)] = identifier_answer
return virtual
- def get_virtual_fields_open_questions(self, virtual, open_questions):
- for question in open_questions.all():
- identifier = 'Q' + str(question.pk)
- virtual[(question, False)] = identifier
- identifier_answer = identifier + '_text'
- virtual[(question, True)] = identifier_answer
+ def get_virtual_field_open_question(self, virtual, open_question):
+ identifier = 'Q' + str(open_question.pk)
+ virtual[(open_question, False)] = identifier
+ identifier_answer = identifier + '_text'
+ virtual[(open_question, True)] = identifier_answer
return virtual
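
Taken together, the patch above makes two changes to `PollExportView`: it adds a `get_permission_object()` that returns `self.module.project`, so the `a4projects.change_project` check is evaluated against the project (which addresses the 403 for initiators, moderators and group members described in this record's issue field), and it replaces the three question properties with a single `questions` property that dispatches per question on `is_open`. A minimal illustrative sketch of the patched view — not the repository's verbatim code, and assuming the same imports as the file listing above:

```python
# Simplified sketch of the patched PollExportView (names follow the diff above).
class PollExportView(PermissionRequiredMixin, export_views.BaseItemExportView):

    permission_required = 'a4projects.change_project'

    def get_permission_object(self):
        # Run the permission check against the project that owns the module.
        return self.module.project

    @property
    def questions(self):
        return self.poll.questions.all()

    def get_virtual_fields(self, virtual):
        virtual = super().get_virtual_fields(virtual)
        for question in self.questions:
            if question.is_open:
                virtual = self.get_virtual_field_open_question(virtual, question)
            else:
                virtual = self.get_virtual_field_choice_question(virtual, question)
        return virtual
```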
| {"golden_diff": "diff --git a/meinberlin/apps/polls/exports.py b/meinberlin/apps/polls/exports.py\n--- a/meinberlin/apps/polls/exports.py\n+++ b/meinberlin/apps/polls/exports.py\n@@ -50,6 +50,9 @@\n \n permission_required = 'a4projects.change_project'\n \n+ def get_permission_object(self):\n+ return self.module.project\n+\n def get_queryset(self):\n creators_vote = poll_models.Vote.objects.filter(\n choice__question__poll=self.poll).values_list('creator', flat=True)\n@@ -63,47 +66,37 @@\n return poll_models.Poll.objects.get(module=self.module)\n \n @property\n- def single_choice_questions(self):\n- return self.poll.questions.filter(\n- multiple_choice=False,\n- is_open=False).order_by('id')\n-\n- @property\n- def multiple_choice_questions(self):\n- return self.poll.questions.filter(multiple_choice=True).order_by('id')\n-\n- @property\n- def open_questions(self):\n- return self.poll.questions.filter(is_open=True).order_by('id')\n+ def questions(self):\n+ return self.poll.questions.all()\n \n def get_virtual_fields(self, virtual):\n virtual = super().get_virtual_fields(virtual)\n- virtual = self.get_virtual_fields_choice_questions(\n- virtual, self.single_choice_questions)\n- virtual = self.get_virtual_fields_choice_questions(\n- virtual, self.multiple_choice_questions)\n- virtual = self.get_virtual_fields_open_questions(\n- virtual, self.open_questions)\n+\n+ for question in self.questions:\n+ if question.is_open:\n+ virtual = \\\n+ self.get_virtual_field_open_question(virtual, question)\n+ else:\n+ virtual = \\\n+ self.get_virtual_field_choice_question(virtual, question)\n \n return virtual\n \n- def get_virtual_fields_choice_questions(self, virtual, choice_questions):\n- for question in choice_questions.all():\n- for choice in question.choices.all():\n- identifier = 'Q' + str(question.pk) + '_A' + str(choice.pk)\n- virtual[(choice, False)] = identifier\n- if choice.is_other_choice:\n- identifier_answer = identifier + '_text'\n- virtual[(choice, True)] = identifier_answer\n+ def get_virtual_field_choice_question(self, virtual, choice_question):\n+ for choice in choice_question.choices.all():\n+ identifier = 'Q' + str(choice_question.pk) + '_A' + str(choice.pk)\n+ virtual[(choice, False)] = identifier\n+ if choice.is_other_choice:\n+ identifier_answer = identifier + '_text'\n+ virtual[(choice, True)] = identifier_answer\n \n return virtual\n \n- def get_virtual_fields_open_questions(self, virtual, open_questions):\n- for question in open_questions.all():\n- identifier = 'Q' + str(question.pk)\n- virtual[(question, False)] = identifier\n- identifier_answer = identifier + '_text'\n- virtual[(question, True)] = identifier_answer\n+ def get_virtual_field_open_question(self, virtual, open_question):\n+ identifier = 'Q' + str(open_question.pk)\n+ virtual[(open_question, False)] = identifier\n+ identifier_answer = identifier + '_text'\n+ virtual[(open_question, True)] = identifier_answer\n \n return virtual\n", "issue": "testing 4295: 402 error for poll export\n**URL:** https://meinberlin-dev.liqd.net/dashboard/modules/umfrage-24/poll/export/\r\n**user:** initiator, moderator, group member\r\n**expected behaviour:** download export\r\n**behaviour:** 403 error\r\n**important screensize:**\r\n**device & browser:** big sur, firefox\r\n**Comment/Question:** \r\n\r\nScreenshot?\r\n\n", "before_files": [{"content": "from django.utils.translation import ugettext as _\nfrom rules.contrib.views import PermissionRequiredMixin\n\nfrom adhocracy4.comments.models import Comment\nfrom adhocracy4.exports 
import mixins\nfrom adhocracy4.exports import views as export_views\nfrom adhocracy4.polls import models as poll_models\nfrom meinberlin.apps.users.models import User\n\n\nclass PollCommentExportView(\n PermissionRequiredMixin,\n mixins.ExportModelFieldsMixin,\n mixins.UserGeneratedContentExportMixin,\n mixins.ItemExportWithLinkMixin,\n mixins.CommentExportWithRepliesToMixin,\n export_views.BaseItemExportView\n):\n\n model = Comment\n\n fields = ['id', 'comment', 'created']\n permission_required = 'a4projects.change_project'\n\n def get_permission_object(self):\n return self.module.project\n\n def get_queryset(self):\n comments = (\n Comment.objects.filter(poll__module=self.module) |\n Comment.objects.filter(parent_comment__poll__module=self.module)\n )\n return comments\n\n def get_virtual_fields(self, virtual):\n virtual.setdefault('id', _('ID'))\n virtual.setdefault('comment', _('Comment'))\n virtual.setdefault('created', _('Created'))\n return super().get_virtual_fields(virtual)\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated\n\n\nclass PollExportView(\n PermissionRequiredMixin,\n export_views.BaseItemExportView\n):\n\n permission_required = 'a4projects.change_project'\n\n def get_queryset(self):\n creators_vote = poll_models.Vote.objects.filter(\n choice__question__poll=self.poll).values_list('creator', flat=True)\n creators_answer = poll_models.Answer.objects.filter(\n question__poll=self.poll).values_list('creator', flat=True)\n creator_ids = list(set(creators_vote).union(set(creators_answer)))\n return User.objects.filter(pk__in=creator_ids)\n\n @property\n def poll(self):\n return poll_models.Poll.objects.get(module=self.module)\n\n @property\n def single_choice_questions(self):\n return self.poll.questions.filter(\n multiple_choice=False,\n is_open=False).order_by('id')\n\n @property\n def multiple_choice_questions(self):\n return self.poll.questions.filter(multiple_choice=True).order_by('id')\n\n @property\n def open_questions(self):\n return self.poll.questions.filter(is_open=True).order_by('id')\n\n def get_virtual_fields(self, virtual):\n virtual = super().get_virtual_fields(virtual)\n virtual = self.get_virtual_fields_choice_questions(\n virtual, self.single_choice_questions)\n virtual = self.get_virtual_fields_choice_questions(\n virtual, self.multiple_choice_questions)\n virtual = self.get_virtual_fields_open_questions(\n virtual, self.open_questions)\n\n return virtual\n\n def get_virtual_fields_choice_questions(self, virtual, choice_questions):\n for question in choice_questions.all():\n for choice in question.choices.all():\n identifier = 'Q' + str(question.pk) + '_A' + str(choice.pk)\n virtual[(choice, False)] = identifier\n if choice.is_other_choice:\n identifier_answer = identifier + '_text'\n virtual[(choice, True)] = identifier_answer\n\n return virtual\n\n def get_virtual_fields_open_questions(self, virtual, open_questions):\n for question in open_questions.all():\n identifier = 'Q' + str(question.pk)\n virtual[(question, False)] = identifier\n identifier_answer = identifier + '_text'\n virtual[(question, True)] = identifier_answer\n\n return virtual\n\n def get_field_data(self, user, field):\n field_object, is_text_field = field\n\n if type(field_object) == poll_models.Choice:\n votes_qs = poll_models.Vote.objects.filter(\n choice=field_object,\n creator=user)\n if not is_text_field:\n value = int(votes_qs.exists())\n else:\n vote = votes_qs.first()\n if vote:\n value = poll_models.OtherVote.objects.get(vote=vote).answer\n 
else:\n value = ''\n else: # field_object is question\n answers_qs = poll_models.Answer.objects.filter(\n question=field_object,\n creator=user)\n if not is_text_field:\n value = int(answers_qs.exists())\n else:\n answer = answers_qs.first()\n if answer:\n value = answer.answer\n else:\n value = ''\n\n return value\n", "path": "meinberlin/apps/polls/exports.py"}], "after_files": [{"content": "from django.utils.translation import ugettext as _\nfrom rules.contrib.views import PermissionRequiredMixin\n\nfrom adhocracy4.comments.models import Comment\nfrom adhocracy4.exports import mixins\nfrom adhocracy4.exports import views as export_views\nfrom adhocracy4.polls import models as poll_models\nfrom meinberlin.apps.users.models import User\n\n\nclass PollCommentExportView(\n PermissionRequiredMixin,\n mixins.ExportModelFieldsMixin,\n mixins.UserGeneratedContentExportMixin,\n mixins.ItemExportWithLinkMixin,\n mixins.CommentExportWithRepliesToMixin,\n export_views.BaseItemExportView\n):\n\n model = Comment\n\n fields = ['id', 'comment', 'created']\n permission_required = 'a4projects.change_project'\n\n def get_permission_object(self):\n return self.module.project\n\n def get_queryset(self):\n comments = (\n Comment.objects.filter(poll__module=self.module) |\n Comment.objects.filter(parent_comment__poll__module=self.module)\n )\n return comments\n\n def get_virtual_fields(self, virtual):\n virtual.setdefault('id', _('ID'))\n virtual.setdefault('comment', _('Comment'))\n virtual.setdefault('created', _('Created'))\n return super().get_virtual_fields(virtual)\n\n @property\n def raise_exception(self):\n return self.request.user.is_authenticated\n\n\nclass PollExportView(\n PermissionRequiredMixin,\n export_views.BaseItemExportView\n):\n\n permission_required = 'a4projects.change_project'\n\n def get_permission_object(self):\n return self.module.project\n\n def get_queryset(self):\n creators_vote = poll_models.Vote.objects.filter(\n choice__question__poll=self.poll).values_list('creator', flat=True)\n creators_answer = poll_models.Answer.objects.filter(\n question__poll=self.poll).values_list('creator', flat=True)\n creator_ids = list(set(creators_vote).union(set(creators_answer)))\n return User.objects.filter(pk__in=creator_ids)\n\n @property\n def poll(self):\n return poll_models.Poll.objects.get(module=self.module)\n\n @property\n def questions(self):\n return self.poll.questions.all()\n\n def get_virtual_fields(self, virtual):\n virtual = super().get_virtual_fields(virtual)\n\n for question in self.questions:\n if question.is_open:\n virtual = \\\n self.get_virtual_field_open_question(virtual, question)\n else:\n virtual = \\\n self.get_virtual_field_choice_question(virtual, question)\n\n return virtual\n\n def get_virtual_field_choice_question(self, virtual, choice_question):\n for choice in choice_question.choices.all():\n identifier = 'Q' + str(choice_question.pk) + '_A' + str(choice.pk)\n virtual[(choice, False)] = identifier\n if choice.is_other_choice:\n identifier_answer = identifier + '_text'\n virtual[(choice, True)] = identifier_answer\n\n return virtual\n\n def get_virtual_field_open_question(self, virtual, open_question):\n identifier = 'Q' + str(open_question.pk)\n virtual[(open_question, False)] = identifier\n identifier_answer = identifier + '_text'\n virtual[(open_question, True)] = identifier_answer\n\n return virtual\n\n def get_field_data(self, user, field):\n field_object, is_text_field = field\n\n if type(field_object) == poll_models.Choice:\n votes_qs = 
poll_models.Vote.objects.filter(\n choice=field_object,\n creator=user)\n if not is_text_field:\n value = int(votes_qs.exists())\n else:\n vote = votes_qs.first()\n if vote:\n value = poll_models.OtherVote.objects.get(vote=vote).answer\n else:\n value = ''\n else: # field_object is question\n answers_qs = poll_models.Answer.objects.filter(\n question=field_object,\n creator=user)\n if not is_text_field:\n value = int(answers_qs.exists())\n else:\n answer = answers_qs.first()\n if answer:\n value = answer.answer\n else:\n value = ''\n\n return value\n", "path": "meinberlin/apps/polls/exports.py"}]} | 1,608 | 730 |
gh_patches_debug_11535 | rasdani/github-patches | git_diff | Qiskit__qiskit-5613 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Avoid dependencies duplicity
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. -->
### What is the expected enhancement?
Currently, when you want to add or update a dependency, you need to do that in the `requirements.txt` and `setup.py` files. That is really error-prone.
It would be nice to avoid that situation and make changes only in one of the files when a dependency is added or updated.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # This code is part of Qiskit.
2 #
3 # (C) Copyright IBM 2017.
4 #
5 # This code is licensed under the Apache License, Version 2.0. You may
6 # obtain a copy of this license in the LICENSE.txt file in the root directory
7 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
8 #
9 # Any modifications or derivative works of this code must retain this
10 # copyright notice, and modified files need to carry a notice indicating
11 # that they have been altered from the originals.
12
13 "The Qiskit Terra setup file."
14
15 import os
16 import sys
17 from setuptools import setup, find_packages, Extension
18 try:
19 from Cython.Build import cythonize
20 except ImportError:
21 import subprocess
22 subprocess.call([sys.executable, '-m', 'pip', 'install', 'Cython>=0.27.1'])
23 from Cython.Build import cythonize
24
25 REQUIREMENTS = [
26 "contextvars>=2.4;python_version<'3.7'",
27 "jsonschema>=2.6",
28 "retworkx>=0.7.0",
29 "numpy>=1.17",
30 "ply>=3.10",
31 "psutil>=5",
32 "scipy>=1.4",
33 "sympy>=1.3",
34 "dill>=0.3",
35 "fastjsonschema>=2.10",
36 "python-constraint>=1.4",
37 "python-dateutil>=2.8.0",
38 ]
39
40 # Add Cython extensions here
41 CYTHON_EXTS = ['utils', 'swap_trial']
42 CYTHON_MODULE = 'qiskit.transpiler.passes.routing.cython.stochastic_swap'
43 CYTHON_SOURCE_DIR = 'qiskit/transpiler/passes/routing/cython/stochastic_swap'
44
45 INCLUDE_DIRS = []
46 # Extra link args
47 LINK_FLAGS = []
48 # If on Win and not in MSYS2 (i.e. Visual studio compile)
49 if (sys.platform == 'win32' and os.environ.get('MSYSTEM') is None):
50 COMPILER_FLAGS = ['/O2']
51 # Everything else
52 else:
53 COMPILER_FLAGS = ['-O2', '-funroll-loops', '-std=c++11']
54 if sys.platform == 'darwin':
55 # These are needed for compiling on OSX 10.14+
56 COMPILER_FLAGS.append('-mmacosx-version-min=10.9')
57 LINK_FLAGS.append('-mmacosx-version-min=10.9')
58
59
60 EXT_MODULES = []
61 # Add Cython Extensions
62 for ext in CYTHON_EXTS:
63 mod = Extension(CYTHON_MODULE + '.' + ext,
64 sources=[CYTHON_SOURCE_DIR + '/' + ext + '.pyx'],
65 include_dirs=INCLUDE_DIRS,
66 extra_compile_args=COMPILER_FLAGS,
67 extra_link_args=LINK_FLAGS,
68 language='c++')
69 EXT_MODULES.append(mod)
70
71 # Read long description from README.
72 README_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)),
73 'README.md')
74 with open(README_PATH) as readme_file:
75 README = readme_file.read()
76
77 setup(
78 name="qiskit-terra",
79 version="0.17.0",
80 description="Software for developing quantum computing programs",
81 long_description=README,
82 long_description_content_type='text/markdown',
83 url="https://github.com/Qiskit/qiskit-terra",
84 author="Qiskit Development Team",
85 author_email="[email protected]",
86 license="Apache 2.0",
87 classifiers=[
88 "Environment :: Console",
89 "License :: OSI Approved :: Apache Software License",
90 "Intended Audience :: Developers",
91 "Intended Audience :: Science/Research",
92 "Operating System :: Microsoft :: Windows",
93 "Operating System :: MacOS",
94 "Operating System :: POSIX :: Linux",
95 "Programming Language :: Python :: 3 :: Only",
96 "Programming Language :: Python :: 3.6",
97 "Programming Language :: Python :: 3.7",
98 "Programming Language :: Python :: 3.8",
99 "Programming Language :: Python :: 3.9",
100 "Topic :: Scientific/Engineering",
101 ],
102 keywords="qiskit sdk quantum",
103 packages=find_packages(exclude=['test*']),
104 install_requires=REQUIREMENTS,
105 setup_requires=['Cython>=0.27.1'],
106 include_package_data=True,
107 python_requires=">=3.6",
108 extras_require={
109 'visualization': ['matplotlib>=2.1', 'ipywidgets>=7.3.0',
110 'pydot', "pillow>=4.2.1", "pylatexenc>=1.4",
111 "seaborn>=0.9.0", "pygments>=2.4"],
112 'classical-function-compiler': ['tweedledum'],
113 'full-featured-simulators': ['qiskit-aer>=0.1'],
114 'crosstalk-pass': ['z3-solver>=4.7'],
115 },
116 project_urls={
117 "Bug Tracker": "https://github.com/Qiskit/qiskit-terra/issues",
118 "Documentation": "https://qiskit.org/documentation/",
119 "Source Code": "https://github.com/Qiskit/qiskit-terra",
120 },
121 ext_modules=cythonize(EXT_MODULES),
122 zip_safe=False
123 )
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -22,20 +22,8 @@
subprocess.call([sys.executable, '-m', 'pip', 'install', 'Cython>=0.27.1'])
from Cython.Build import cythonize
-REQUIREMENTS = [
- "contextvars>=2.4;python_version<'3.7'",
- "jsonschema>=2.6",
- "retworkx>=0.7.0",
- "numpy>=1.17",
- "ply>=3.10",
- "psutil>=5",
- "scipy>=1.4",
- "sympy>=1.3",
- "dill>=0.3",
- "fastjsonschema>=2.10",
- "python-constraint>=1.4",
- "python-dateutil>=2.8.0",
-]
+with open('requirements.txt') as f:
+ REQUIREMENTS = f.read().splitlines()
# Add Cython extensions here
CYTHON_EXTS = ['utils', 'swap_trial']
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,20 +22,8 @@\n subprocess.call([sys.executable, '-m', 'pip', 'install', 'Cython>=0.27.1'])\n from Cython.Build import cythonize\n \n-REQUIREMENTS = [\n- \"contextvars>=2.4;python_version<'3.7'\",\n- \"jsonschema>=2.6\",\n- \"retworkx>=0.7.0\",\n- \"numpy>=1.17\",\n- \"ply>=3.10\",\n- \"psutil>=5\",\n- \"scipy>=1.4\",\n- \"sympy>=1.3\",\n- \"dill>=0.3\",\n- \"fastjsonschema>=2.10\",\n- \"python-constraint>=1.4\",\n- \"python-dateutil>=2.8.0\",\n-]\n+with open('requirements.txt') as f:\n+ REQUIREMENTS = f.read().splitlines()\n \n # Add Cython extensions here\n CYTHON_EXTS = ['utils', 'swap_trial']\n", "issue": "Avoid dependencies duplicity\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\nCurrently, when you want to add or update a dependency, you need to do that in the `requirements.txt` and `setup.py` files. That is really error-prone.\r\n\r\nIt would be nice to avoid that situation and make changes only in one of the files when a dependency is added or updated.\r\n\n", "before_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"The Qiskit Terra setup file.\"\n\nimport os\nimport sys\nfrom setuptools import setup, find_packages, Extension\ntry:\n from Cython.Build import cythonize\nexcept ImportError:\n import subprocess\n subprocess.call([sys.executable, '-m', 'pip', 'install', 'Cython>=0.27.1'])\n from Cython.Build import cythonize\n\nREQUIREMENTS = [\n \"contextvars>=2.4;python_version<'3.7'\",\n \"jsonschema>=2.6\",\n \"retworkx>=0.7.0\",\n \"numpy>=1.17\",\n \"ply>=3.10\",\n \"psutil>=5\",\n \"scipy>=1.4\",\n \"sympy>=1.3\",\n \"dill>=0.3\",\n \"fastjsonschema>=2.10\",\n \"python-constraint>=1.4\",\n \"python-dateutil>=2.8.0\",\n]\n\n# Add Cython extensions here\nCYTHON_EXTS = ['utils', 'swap_trial']\nCYTHON_MODULE = 'qiskit.transpiler.passes.routing.cython.stochastic_swap'\nCYTHON_SOURCE_DIR = 'qiskit/transpiler/passes/routing/cython/stochastic_swap'\n\nINCLUDE_DIRS = []\n# Extra link args\nLINK_FLAGS = []\n# If on Win and not in MSYS2 (i.e. Visual studio compile)\nif (sys.platform == 'win32' and os.environ.get('MSYSTEM') is None):\n COMPILER_FLAGS = ['/O2']\n# Everything else\nelse:\n COMPILER_FLAGS = ['-O2', '-funroll-loops', '-std=c++11']\n if sys.platform == 'darwin':\n # These are needed for compiling on OSX 10.14+\n COMPILER_FLAGS.append('-mmacosx-version-min=10.9')\n LINK_FLAGS.append('-mmacosx-version-min=10.9')\n\n\nEXT_MODULES = []\n# Add Cython Extensions\nfor ext in CYTHON_EXTS:\n mod = Extension(CYTHON_MODULE + '.' 
+ ext,\n sources=[CYTHON_SOURCE_DIR + '/' + ext + '.pyx'],\n include_dirs=INCLUDE_DIRS,\n extra_compile_args=COMPILER_FLAGS,\n extra_link_args=LINK_FLAGS,\n language='c++')\n EXT_MODULES.append(mod)\n\n# Read long description from README.\nREADME_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n 'README.md')\nwith open(README_PATH) as readme_file:\n README = readme_file.read()\n\nsetup(\n name=\"qiskit-terra\",\n version=\"0.17.0\",\n description=\"Software for developing quantum computing programs\",\n long_description=README,\n long_description_content_type='text/markdown',\n url=\"https://github.com/Qiskit/qiskit-terra\",\n author=\"Qiskit Development Team\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n classifiers=[\n \"Environment :: Console\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n ],\n keywords=\"qiskit sdk quantum\",\n packages=find_packages(exclude=['test*']),\n install_requires=REQUIREMENTS,\n setup_requires=['Cython>=0.27.1'],\n include_package_data=True,\n python_requires=\">=3.6\",\n extras_require={\n 'visualization': ['matplotlib>=2.1', 'ipywidgets>=7.3.0',\n 'pydot', \"pillow>=4.2.1\", \"pylatexenc>=1.4\",\n \"seaborn>=0.9.0\", \"pygments>=2.4\"],\n 'classical-function-compiler': ['tweedledum'],\n 'full-featured-simulators': ['qiskit-aer>=0.1'],\n 'crosstalk-pass': ['z3-solver>=4.7'],\n },\n project_urls={\n \"Bug Tracker\": \"https://github.com/Qiskit/qiskit-terra/issues\",\n \"Documentation\": \"https://qiskit.org/documentation/\",\n \"Source Code\": \"https://github.com/Qiskit/qiskit-terra\",\n },\n ext_modules=cythonize(EXT_MODULES),\n zip_safe=False\n)\n", "path": "setup.py"}], "after_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"The Qiskit Terra setup file.\"\n\nimport os\nimport sys\nfrom setuptools import setup, find_packages, Extension\ntry:\n from Cython.Build import cythonize\nexcept ImportError:\n import subprocess\n subprocess.call([sys.executable, '-m', 'pip', 'install', 'Cython>=0.27.1'])\n from Cython.Build import cythonize\n\nwith open('requirements.txt') as f:\n REQUIREMENTS = f.read().splitlines()\n\n# Add Cython extensions here\nCYTHON_EXTS = ['utils', 'swap_trial']\nCYTHON_MODULE = 'qiskit.transpiler.passes.routing.cython.stochastic_swap'\nCYTHON_SOURCE_DIR = 'qiskit/transpiler/passes/routing/cython/stochastic_swap'\n\nINCLUDE_DIRS = []\n# Extra link args\nLINK_FLAGS = []\n# If on Win and not in MSYS2 (i.e. 
Visual studio compile)\nif (sys.platform == 'win32' and os.environ.get('MSYSTEM') is None):\n COMPILER_FLAGS = ['/O2']\n# Everything else\nelse:\n COMPILER_FLAGS = ['-O2', '-funroll-loops', '-std=c++11']\n if sys.platform == 'darwin':\n # These are needed for compiling on OSX 10.14+\n COMPILER_FLAGS.append('-mmacosx-version-min=10.9')\n LINK_FLAGS.append('-mmacosx-version-min=10.9')\n\n\nEXT_MODULES = []\n# Add Cython Extensions\nfor ext in CYTHON_EXTS:\n mod = Extension(CYTHON_MODULE + '.' + ext,\n sources=[CYTHON_SOURCE_DIR + '/' + ext + '.pyx'],\n include_dirs=INCLUDE_DIRS,\n extra_compile_args=COMPILER_FLAGS,\n extra_link_args=LINK_FLAGS,\n language='c++')\n EXT_MODULES.append(mod)\n\n# Read long description from README.\nREADME_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n 'README.md')\nwith open(README_PATH) as readme_file:\n README = readme_file.read()\n\nsetup(\n name=\"qiskit-terra\",\n version=\"0.17.0\",\n description=\"Software for developing quantum computing programs\",\n long_description=README,\n long_description_content_type='text/markdown',\n url=\"https://github.com/Qiskit/qiskit-terra\",\n author=\"Qiskit Development Team\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n classifiers=[\n \"Environment :: Console\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n ],\n keywords=\"qiskit sdk quantum\",\n packages=find_packages(exclude=['test*']),\n install_requires=REQUIREMENTS,\n setup_requires=['Cython>=0.27.1'],\n include_package_data=True,\n python_requires=\">=3.6\",\n extras_require={\n 'visualization': ['matplotlib>=2.1', 'ipywidgets>=7.3.0',\n 'pydot', \"pillow>=4.2.1\", \"pylatexenc>=1.4\",\n \"seaborn>=0.9.0\", \"pygments>=2.4\"],\n 'classical-function-compiler': ['tweedledum'],\n 'full-featured-simulators': ['qiskit-aer>=0.1'],\n 'crosstalk-pass': ['z3-solver>=4.7'],\n },\n project_urls={\n \"Bug Tracker\": \"https://github.com/Qiskit/qiskit-terra/issues\",\n \"Documentation\": \"https://qiskit.org/documentation/\",\n \"Source Code\": \"https://github.com/Qiskit/qiskit-terra\",\n },\n ext_modules=cythonize(EXT_MODULES),\n zip_safe=False\n)\n", "path": "setup.py"}]} | 1,784 | 255 |
gh_patches_debug_387 | rasdani/github-patches | git_diff | chainer__chainer-1568 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Inconsistency between cupy.dstack and numpy.dstack
```
In [10]: import cupy, numpy
In [11]: a = cupy.arange(24).reshape(2, 3, 4)
In [12]: numpy.dstack((a.get(),))
Out[12]:
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
In [13]: cupy.dstack((a,))
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-13-aa573685da21> in <module>()
----> 1 cupy.dstack((a,))
/home/delta/dev/chainer/cupy/manipulation/join.py in dstack(tup)
101
102 """
--> 103 return concatenate(cupy.atleast_3d(*tup), 2)
104
105
/home/delta/dev/chainer/cupy/manipulation/join.py in concatenate(tup, axis)
59 ndim = a.ndim
60 shape = list(a.shape)
---> 61 axis = _get_positive_axis(a.ndim, axis)
62 continue
63
/home/delta/dev/chainer/cupy/manipulation/join.py in _get_positive_axis(ndim, axis)
167 a += ndim
168 if a < 0 or a >= ndim:
--> 169 raise IndexError('axis {} out of bounds [0, {})'.format(axis, ndim))
170 return a
IndexError: axis 2 out of bounds [0, 2)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/manipulation/join.py`
Content:
```
1 import numpy
2 import six
3
4 import cupy
5
6
7 def column_stack(tup):
8 """Stacks 1-D and 2-D arrays as columns into a 2-D array.
9
10 A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays
11 are concatenated along the second axis.
12
13 Args:
14 tup (sequence of arrays): 1-D or 2-D arrays to be stacked.
15
16 Returns:
17 cupy.ndarray: A new 2-D array of stacked columns.
18
19 .. seealso:: :func:`numpy.column_stack`
20
21 """
22 if any(not isinstance(a, cupy.ndarray) for a in tup):
23 raise TypeError('Only cupy arrays can be column stacked')
24
25 lst = list(tup)
26 for i, a in enumerate(lst):
27 if a.ndim == 1:
28 a = a[:, cupy.newaxis]
29 lst[i] = a
30 elif a.ndim != 2:
31 raise ValueError(
32 'Only 1 or 2 dimensional arrays can be column stacked')
33
34 return concatenate(lst, axis=1)
35
36
37 def concatenate(tup, axis=0):
38 """Joins arrays along an axis.
39
40 Args:
41 tup (sequence of arrays): Arrays to be joined. All of these should have
42 same dimensionalities except the specified axis.
43 axis (int): The axis to join arrays along.
44
45 Returns:
46 cupy.ndarray: Joined array.
47
48 .. seealso:: :func:`numpy.concatenate`
49
50 """
51 ndim = None
52 shape = None
53 for a in tup:
54 if not isinstance(a, cupy.ndarray):
55 raise TypeError('Only cupy arrays can be concatenated')
56 if a.ndim == 0:
57 raise TypeError('zero-dimensional arrays cannot be concatenated')
58 if ndim is None:
59 ndim = a.ndim
60 shape = list(a.shape)
61 axis = _get_positive_axis(a.ndim, axis)
62 continue
63
64 if a.ndim != ndim:
65 raise ValueError(
66 'All arrays to concatenate must have the same ndim')
67 if any(i != axis and shape[i] != a.shape[i]
68 for i in six.moves.range(ndim)):
69 raise ValueError(
70 'All arrays must have same shape except the axis to '
71 'concatenate')
72 shape[axis] += a.shape[axis]
73
74 if ndim is None:
75 raise ValueError('Cannot concatenate from empty tuple')
76
77 dtype = numpy.find_common_type([a.dtype for a in tup], [])
78 ret = cupy.empty(shape, dtype=dtype)
79
80 skip = (slice(None),) * axis
81 i = 0
82 for a in tup:
83 aw = a.shape[axis]
84 ret[skip + (slice(i, i + aw),)] = a
85 i += aw
86
87 return ret
88
89
90 def dstack(tup):
91 """Stacks arrays along the third axis.
92
93 Args:
94 tup (sequence of arrays): Arrays to be stacked. Each array is converted
95 by :func:`cupy.atleast_3d` before stacking.
96
97 Returns:
98 cupy.ndarray: Stacked array.
99
100 .. seealso:: :func:`numpy.dstack`
101
102 """
103 return concatenate(cupy.atleast_3d(*tup), 2)
104
105
106 def hstack(tup):
107 """Stacks arrays horizontally.
108
109 If an input array has one dimension, then the array is treated as a
110 horizontal vector and stacked along the first axis. Otherwise, the array is
111 stacked along the second axis.
112
113 Args:
114 tup (sequence of arrays): Arrays to be stacked.
115
116 Returns:
117 cupy.ndarray: Stacked array.
118
119 .. seealso:: :func:`numpy.hstack`
120
121 """
122 arrs = [cupy.atleast_1d(a) for a in tup]
123 axis = 1
124 if arrs[0].ndim == 1:
125 axis = 0
126 return concatenate(arrs, axis)
127
128
129 def vstack(tup):
130 """Stacks arrays vertically.
131
132 If an input array has one dimension, then the array is treated as a
133 horizontal vector and stacked along the additional axis at the head.
134 Otherwise, the array is stacked along the first axis.
135
136 Args:
137 tup (sequence of arrays): Arrays to be stacked. Each array is converted
138 by :func:`cupy.atleast_2d` before stacking.
139
140 Returns:
141 cupy.ndarray: Stacked array.
142
143 .. seealso:: :func:`numpy.dstack`
144
145 """
146 return concatenate([cupy.atleast_2d(m) for m in tup], 0)
147
148
149 def stack(tup, axis=0):
150 """Stacks arrays along a new axis.
151
152 Args:
153 tup (sequence of arrays): Arrays to be stacked.
154 axis (int): Axis along which the arrays are stacked.
155
156 Returns:
157 cupy.ndarray: Stacked array.
158
159 .. seealso:: :func:`numpy.stack`
160 """
161 return concatenate([cupy.expand_dims(x, axis) for x in tup], axis)
162
163
164 def _get_positive_axis(ndim, axis):
165 a = axis
166 if a < 0:
167 a += ndim
168 if a < 0 or a >= ndim:
169 raise IndexError('axis {} out of bounds [0, {})'.format(axis, ndim))
170 return a
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/manipulation/join.py b/cupy/manipulation/join.py
--- a/cupy/manipulation/join.py
+++ b/cupy/manipulation/join.py
@@ -100,7 +100,7 @@
.. seealso:: :func:`numpy.dstack`
"""
- return concatenate(cupy.atleast_3d(*tup), 2)
+ return concatenate([cupy.atleast_3d(m) for m in tup], 2)
def hstack(tup):
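
The traceback in the issue points at the root cause: with a single input, `cupy.atleast_3d(*tup)` returns the array itself rather than a list, so `concatenate` ends up iterating over that array's first axis and sees 2-D slices — hence `axis 2 out of bounds [0, 2)`. Building an explicit list, as the patch does, keeps the argument a sequence of arrays regardless of how many inputs were given. A small illustrative comparison (hypothetical shapes, not repository code):

```python
import cupy

a = cupy.arange(24).reshape(2, 3, 4)

# With one argument, atleast_3d returns the array itself, not a one-element list.
single = cupy.atleast_3d(a)

# The patched dstack always builds a list, so concatenate receives a sequence
# of 3-D arrays even when only one array is passed in.
as_list = [cupy.atleast_3d(m) for m in (a,)]
```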
| {"golden_diff": "diff --git a/cupy/manipulation/join.py b/cupy/manipulation/join.py\n--- a/cupy/manipulation/join.py\n+++ b/cupy/manipulation/join.py\n@@ -100,7 +100,7 @@\n .. seealso:: :func:`numpy.dstack`\n \n \"\"\"\n- return concatenate(cupy.atleast_3d(*tup), 2)\n+ return concatenate([cupy.atleast_3d(m) for m in tup], 2)\n \n \n def hstack(tup):\n", "issue": "Inconsistency between cupy.dstack and numpy.dstack\n```\nIn [10]: import cupy, numpy\nIn [11]: a = cupy.arange(24).reshape(2, 3, 4)\nIn [12]: numpy.dstack((a.get(),))\nOut[12]: \narray([[[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]],\n\n [[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]]])\n\nIn [13]: cupy.dstack((a,))\n---------------------------------------------------------------------------\nIndexError Traceback (most recent call last)\n<ipython-input-13-aa573685da21> in <module>()\n----> 1 cupy.dstack((a,))\n\n/home/delta/dev/chainer/cupy/manipulation/join.py in dstack(tup)\n 101 \n 102 \"\"\"\n--> 103 return concatenate(cupy.atleast_3d(*tup), 2)\n 104 \n 105 \n\n/home/delta/dev/chainer/cupy/manipulation/join.py in concatenate(tup, axis)\n 59 ndim = a.ndim\n 60 shape = list(a.shape)\n---> 61 axis = _get_positive_axis(a.ndim, axis)\n 62 continue\n 63 \n\n/home/delta/dev/chainer/cupy/manipulation/join.py in _get_positive_axis(ndim, axis)\n 167 a += ndim\n 168 if a < 0 or a >= ndim:\n--> 169 raise IndexError('axis {} out of bounds [0, {})'.format(axis, ndim))\n 170 return a\n\nIndexError: axis 2 out of bounds [0, 2)\n```\n\n", "before_files": [{"content": "import numpy\nimport six\n\nimport cupy\n\n\ndef column_stack(tup):\n \"\"\"Stacks 1-D and 2-D arrays as columns into a 2-D array.\n\n A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays\n are concatenated along the second axis.\n\n Args:\n tup (sequence of arrays): 1-D or 2-D arrays to be stacked.\n\n Returns:\n cupy.ndarray: A new 2-D array of stacked columns.\n\n .. seealso:: :func:`numpy.column_stack`\n\n \"\"\"\n if any(not isinstance(a, cupy.ndarray) for a in tup):\n raise TypeError('Only cupy arrays can be column stacked')\n\n lst = list(tup)\n for i, a in enumerate(lst):\n if a.ndim == 1:\n a = a[:, cupy.newaxis]\n lst[i] = a\n elif a.ndim != 2:\n raise ValueError(\n 'Only 1 or 2 dimensional arrays can be column stacked')\n\n return concatenate(lst, axis=1)\n\n\ndef concatenate(tup, axis=0):\n \"\"\"Joins arrays along an axis.\n\n Args:\n tup (sequence of arrays): Arrays to be joined. All of these should have\n same dimensionalities except the specified axis.\n axis (int): The axis to join arrays along.\n\n Returns:\n cupy.ndarray: Joined array.\n\n .. 
seealso:: :func:`numpy.concatenate`\n\n \"\"\"\n ndim = None\n shape = None\n for a in tup:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be concatenated')\n if a.ndim == 0:\n raise TypeError('zero-dimensional arrays cannot be concatenated')\n if ndim is None:\n ndim = a.ndim\n shape = list(a.shape)\n axis = _get_positive_axis(a.ndim, axis)\n continue\n\n if a.ndim != ndim:\n raise ValueError(\n 'All arrays to concatenate must have the same ndim')\n if any(i != axis and shape[i] != a.shape[i]\n for i in six.moves.range(ndim)):\n raise ValueError(\n 'All arrays must have same shape except the axis to '\n 'concatenate')\n shape[axis] += a.shape[axis]\n\n if ndim is None:\n raise ValueError('Cannot concatenate from empty tuple')\n\n dtype = numpy.find_common_type([a.dtype for a in tup], [])\n ret = cupy.empty(shape, dtype=dtype)\n\n skip = (slice(None),) * axis\n i = 0\n for a in tup:\n aw = a.shape[axis]\n ret[skip + (slice(i, i + aw),)] = a\n i += aw\n\n return ret\n\n\ndef dstack(tup):\n \"\"\"Stacks arrays along the third axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_3d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate(cupy.atleast_3d(*tup), 2)\n\n\ndef hstack(tup):\n \"\"\"Stacks arrays horizontally.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the first axis. Otherwise, the array is\n stacked along the second axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.hstack`\n\n \"\"\"\n arrs = [cupy.atleast_1d(a) for a in tup]\n axis = 1\n if arrs[0].ndim == 1:\n axis = 0\n return concatenate(arrs, axis)\n\n\ndef vstack(tup):\n \"\"\"Stacks arrays vertically.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the additional axis at the head.\n Otherwise, the array is stacked along the first axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_2d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate([cupy.atleast_2d(m) for m in tup], 0)\n\n\ndef stack(tup, axis=0):\n \"\"\"Stacks arrays along a new axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n axis (int): Axis along which the arrays are stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.stack`\n \"\"\"\n return concatenate([cupy.expand_dims(x, axis) for x in tup], axis)\n\n\ndef _get_positive_axis(ndim, axis):\n a = axis\n if a < 0:\n a += ndim\n if a < 0 or a >= ndim:\n raise IndexError('axis {} out of bounds [0, {})'.format(axis, ndim))\n return a\n", "path": "cupy/manipulation/join.py"}], "after_files": [{"content": "import numpy\nimport six\n\nimport cupy\n\n\ndef column_stack(tup):\n \"\"\"Stacks 1-D and 2-D arrays as columns into a 2-D array.\n\n A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays\n are concatenated along the second axis.\n\n Args:\n tup (sequence of arrays): 1-D or 2-D arrays to be stacked.\n\n Returns:\n cupy.ndarray: A new 2-D array of stacked columns.\n\n .. 
seealso:: :func:`numpy.column_stack`\n\n \"\"\"\n if any(not isinstance(a, cupy.ndarray) for a in tup):\n raise TypeError('Only cupy arrays can be column stacked')\n\n lst = list(tup)\n for i, a in enumerate(lst):\n if a.ndim == 1:\n a = a[:, cupy.newaxis]\n lst[i] = a\n elif a.ndim != 2:\n raise ValueError(\n 'Only 1 or 2 dimensional arrays can be column stacked')\n\n return concatenate(lst, axis=1)\n\n\ndef concatenate(tup, axis=0):\n \"\"\"Joins arrays along an axis.\n\n Args:\n tup (sequence of arrays): Arrays to be joined. All of these should have\n same dimensionalities except the specified axis.\n axis (int): The axis to join arrays along.\n\n Returns:\n cupy.ndarray: Joined array.\n\n .. seealso:: :func:`numpy.concatenate`\n\n \"\"\"\n ndim = None\n shape = None\n for a in tup:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be concatenated')\n if a.ndim == 0:\n raise TypeError('zero-dimensional arrays cannot be concatenated')\n if ndim is None:\n ndim = a.ndim\n shape = list(a.shape)\n axis = _get_positive_axis(a.ndim, axis)\n continue\n\n if a.ndim != ndim:\n raise ValueError(\n 'All arrays to concatenate must have the same ndim')\n if any(i != axis and shape[i] != a.shape[i]\n for i in six.moves.range(ndim)):\n raise ValueError(\n 'All arrays must have same shape except the axis to '\n 'concatenate')\n shape[axis] += a.shape[axis]\n\n if ndim is None:\n raise ValueError('Cannot concatenate from empty tuple')\n\n dtype = numpy.find_common_type([a.dtype for a in tup], [])\n ret = cupy.empty(shape, dtype=dtype)\n\n skip = (slice(None),) * axis\n i = 0\n for a in tup:\n aw = a.shape[axis]\n ret[skip + (slice(i, i + aw),)] = a\n i += aw\n\n return ret\n\n\ndef dstack(tup):\n \"\"\"Stacks arrays along the third axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_3d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate([cupy.atleast_3d(m) for m in tup], 2)\n\n\ndef hstack(tup):\n \"\"\"Stacks arrays horizontally.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the first axis. Otherwise, the array is\n stacked along the second axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.hstack`\n\n \"\"\"\n arrs = [cupy.atleast_1d(a) for a in tup]\n axis = 1\n if arrs[0].ndim == 1:\n axis = 0\n return concatenate(arrs, axis)\n\n\ndef vstack(tup):\n \"\"\"Stacks arrays vertically.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the additional axis at the head.\n Otherwise, the array is stacked along the first axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_2d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate([cupy.atleast_2d(m) for m in tup], 0)\n\n\ndef stack(tup, axis=0):\n \"\"\"Stacks arrays along a new axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n axis (int): Axis along which the arrays are stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. 
seealso:: :func:`numpy.stack`\n \"\"\"\n return concatenate([cupy.expand_dims(x, axis) for x in tup], axis)\n\n\ndef _get_positive_axis(ndim, axis):\n a = axis\n if a < 0:\n a += ndim\n if a < 0 or a >= ndim:\n raise IndexError('axis {} out of bounds [0, {})'.format(axis, ndim))\n return a\n", "path": "cupy/manipulation/join.py"}]} | 2,313 | 120 |
gh_patches_debug_40989 | rasdani/github-patches | git_diff | deepset-ai__haystack-1309 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TransformersSummarizer crashes if given long input
If the TransformersSummarizer is given an input that is longer than the model's max_seq_len, an error will be thrown. Instead, I think a warning message should be printed to console and the input text should be truncated so that the Node can still run.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/summarizer/transformers.py`
Content:
```
1 import logging
2 from typing import List, Optional
3
4 from transformers import pipeline
5 from transformers.models.auto.modeling_auto import AutoModelForSeq2SeqLM
6
7 from haystack import Document
8 from haystack.summarizer.base import BaseSummarizer
9
10 logger = logging.getLogger(__name__)
11
12
13 class TransformersSummarizer(BaseSummarizer):
14 """
15 Transformer based model to summarize the documents using the HuggingFace's transformers framework
16
17 You can use any model that has been fine-tuned on a summarization task. For example:
18 '`bart-large-cnn`', '`t5-small`', '`t5-base`', '`t5-large`', '`t5-3b`', '`t5-11b`'.
19 See the up-to-date list of available models on
20 `huggingface.co/models <https://huggingface.co/models?filter=summarization>`__
21
22 **Example**
23
24 ```python
25 | docs = [Document(text="PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions."
26 | "The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by"
27 | "the shutoffs which were expected to last through at least midday tomorrow.")]
28 |
29 | # Summarize
30 | summary = summarizer.predict(
31 | documents=docs,
32 | generate_single_summary=True
33 | )
34 |
35 | # Show results (List of Documents, containing summary and original text)
36 | print(summary)
37 |
38 | [
39 | {
40 | "text": "California's largest electricity provider has turned off power to hundreds of thousands of customers.",
41 | ...
42 | "meta": {
43 | "context": "PGE stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. ..."
44 | },
45 | ...
46 | },
47 ```
48 """
49
50 def __init__(
51 self,
52 model_name_or_path: str = "google/pegasus-xsum",
53 model_version: Optional[str] = None,
54 tokenizer: Optional[str] = None,
55 max_length: int = 200,
56 min_length: int = 5,
57 use_gpu: int = 0,
58 clean_up_tokenization_spaces: bool = True,
59 separator_for_single_summary: str = " ",
60 generate_single_summary: bool = False,
61 ):
62 """
63 Load a Summarization model from Transformers.
64 See the up-to-date list of available models at
65 https://huggingface.co/models?filter=summarization
66
67 :param model_name_or_path: Directory of a saved model or the name of a public model e.g.
68 'facebook/rag-token-nq', 'facebook/rag-sequence-nq'.
69 See https://huggingface.co/models?filter=summarization for full list of available models.
70 :param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.
71 :param tokenizer: Name of the tokenizer (usually the same as model)
72 :param max_length: Maximum length of summarized text
73 :param min_length: Minimum length of summarized text
74 :param use_gpu: If < 0, then use cpu. If >= 0, this is the ordinal of the gpu to use
75 :param clean_up_tokenization_spaces: Whether or not to clean up the potential extra spaces in the text output
76 :param separator_for_single_summary: If `generate_single_summary=True` in `predict()`, we need to join all docs
77 into a single text. This separator appears between those subsequent docs.
78 :param generate_single_summary: Whether to generate a single summary for all documents or one summary per document.
79 If set to "True", all docs will be joined to a single string that will then
80 be summarized.
81 Important: The summary will depend on the order of the supplied documents!
82 """
83
84 # save init parameters to enable export of component config as YAML
85 self.set_config(
86 model_name_or_path=model_name_or_path, model_version=model_version, tokenizer=tokenizer,
87 max_length=max_length, min_length=min_length, use_gpu=use_gpu,
88 clean_up_tokenization_spaces=clean_up_tokenization_spaces,
89 separator_for_single_summary=separator_for_single_summary, generate_single_summary=generate_single_summary,
90 )
91
92 # TODO AutoModelForSeq2SeqLM is only necessary with transformers==4.1.1, with newer versions use the pipeline directly
93 if tokenizer is None:
94 tokenizer = model_name_or_path
95 model = AutoModelForSeq2SeqLM.from_pretrained(pretrained_model_name_or_path=model_name_or_path, revision=model_version)
96 self.summarizer = pipeline("summarization", model=model, tokenizer=tokenizer, device=use_gpu)
97 self.max_length = max_length
98 self.min_length = min_length
99 self.clean_up_tokenization_spaces = clean_up_tokenization_spaces
100 self.separator_for_single_summary = separator_for_single_summary
101 self.generate_single_summary = generate_single_summary
102
103 def predict(self, documents: List[Document], generate_single_summary: Optional[bool] = None) -> List[Document]:
104 """
105 Produce the summarization from the supplied documents.
106 These document can for example be retrieved via the Retriever.
107
108 :param documents: Related documents (e.g. coming from a retriever) that the answer shall be conditioned on.
109 :param generate_single_summary: Whether to generate a single summary for all documents or one summary per document.
110 If set to "True", all docs will be joined to a single string that will then
111 be summarized.
112 Important: The summary will depend on the order of the supplied documents!
113 :return: List of Documents, where Document.text contains the summarization and Document.meta["context"]
114 the original, not summarized text
115 """
116
117 if self.min_length > self.max_length:
118 raise AttributeError("min_length cannot be greater than max_length")
119
120 if len(documents) == 0:
121 raise AttributeError("Summarizer needs at least one document to produce a summary.")
122
123 if generate_single_summary is None:
124 generate_single_summary = self.generate_single_summary
125
126 contexts: List[str] = [doc.text for doc in documents]
127
128 if generate_single_summary:
129 # Documents order is very important to produce summary.
130 # Different order of same documents produce different summary.
131 contexts = [self.separator_for_single_summary.join(contexts)]
132
133 summaries = self.summarizer(
134 contexts,
135 min_length=self.min_length,
136 max_length=self.max_length,
137 return_text=True,
138 clean_up_tokenization_spaces=self.clean_up_tokenization_spaces,
139 )
140
141 result: List[Document] = []
142
143 for context, summarized_answer in zip(contexts, summaries):
144 cur_doc = Document(text=summarized_answer['summary_text'], meta={"context": context})
145 result.append(cur_doc)
146
147 return result
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/summarizer/transformers.py b/haystack/summarizer/transformers.py
--- a/haystack/summarizer/transformers.py
+++ b/haystack/summarizer/transformers.py
@@ -1,5 +1,5 @@
import logging
-from typing import List, Optional
+from typing import List, Optional, Set
from transformers import pipeline
from transformers.models.auto.modeling_auto import AutoModelForSeq2SeqLM
@@ -99,8 +99,10 @@
self.clean_up_tokenization_spaces = clean_up_tokenization_spaces
self.separator_for_single_summary = separator_for_single_summary
self.generate_single_summary = generate_single_summary
+ self.print_log: Set[str] = set()
- def predict(self, documents: List[Document], generate_single_summary: Optional[bool] = None) -> List[Document]:
+ def predict(self, documents: List[Document], generate_single_summary: Optional[bool] = None,
+ truncation: bool = True) -> List[Document]:
"""
Produce the summarization from the supplied documents.
These document can for example be retrieved via the Retriever.
@@ -110,6 +112,7 @@
If set to "True", all docs will be joined to a single string that will then
be summarized.
Important: The summary will depend on the order of the supplied documents!
+ :param truncation: Truncate to a maximum length accepted by the model
:return: List of Documents, where Document.text contains the summarization and Document.meta["context"]
the original, not summarized text
"""
@@ -130,12 +133,25 @@
# Different order of same documents produce different summary.
contexts = [self.separator_for_single_summary.join(contexts)]
+ encoded_input = self.summarizer.tokenizer(contexts, verbose=False)
+ for input_id in encoded_input['input_ids']:
+ tokens_count: int = len(input_id)
+ if tokens_count > self.summarizer.tokenizer.model_max_length:
+ truncation_warning = "One or more of your input document texts is longer than the specified " \
+ f"maximum sequence length for this summarizer model. "\
+ f"Generating summary from first {self.summarizer.tokenizer.model_max_length}"\
+ f" tokens."
+ if truncation_warning not in self.print_log:
+ logger.warning(truncation_warning)
+ self.print_log.add(truncation_warning)
+
summaries = self.summarizer(
contexts,
min_length=self.min_length,
max_length=self.max_length,
return_text=True,
clean_up_tokenization_spaces=self.clean_up_tokenization_spaces,
+ truncation=True,
)
result: List[Document] = []
| {"golden_diff": "diff --git a/haystack/summarizer/transformers.py b/haystack/summarizer/transformers.py\n--- a/haystack/summarizer/transformers.py\n+++ b/haystack/summarizer/transformers.py\n@@ -1,5 +1,5 @@\n import logging\n-from typing import List, Optional\n+from typing import List, Optional, Set\n \n from transformers import pipeline\n from transformers.models.auto.modeling_auto import AutoModelForSeq2SeqLM\n@@ -99,8 +99,10 @@\n self.clean_up_tokenization_spaces = clean_up_tokenization_spaces\n self.separator_for_single_summary = separator_for_single_summary\n self.generate_single_summary = generate_single_summary\n+ self.print_log: Set[str] = set()\n \n- def predict(self, documents: List[Document], generate_single_summary: Optional[bool] = None) -> List[Document]:\n+ def predict(self, documents: List[Document], generate_single_summary: Optional[bool] = None,\n+ truncation: bool = True) -> List[Document]:\n \"\"\"\n Produce the summarization from the supplied documents.\n These document can for example be retrieved via the Retriever.\n@@ -110,6 +112,7 @@\n If set to \"True\", all docs will be joined to a single string that will then\n be summarized.\n Important: The summary will depend on the order of the supplied documents!\n+ :param truncation: Truncate to a maximum length accepted by the model\n :return: List of Documents, where Document.text contains the summarization and Document.meta[\"context\"]\n the original, not summarized text\n \"\"\"\n@@ -130,12 +133,25 @@\n # Different order of same documents produce different summary.\n contexts = [self.separator_for_single_summary.join(contexts)]\n \n+ encoded_input = self.summarizer.tokenizer(contexts, verbose=False)\n+ for input_id in encoded_input['input_ids']:\n+ tokens_count: int = len(input_id)\n+ if tokens_count > self.summarizer.tokenizer.model_max_length:\n+ truncation_warning = \"One or more of your input document texts is longer than the specified \" \\\n+ f\"maximum sequence length for this summarizer model. \"\\\n+ f\"Generating summary from first {self.summarizer.tokenizer.model_max_length}\"\\\n+ f\" tokens.\"\n+ if truncation_warning not in self.print_log:\n+ logger.warning(truncation_warning)\n+ self.print_log.add(truncation_warning)\n+\n summaries = self.summarizer(\n contexts,\n min_length=self.min_length,\n max_length=self.max_length,\n return_text=True,\n clean_up_tokenization_spaces=self.clean_up_tokenization_spaces,\n+ truncation=True,\n )\n \n result: List[Document] = []\n", "issue": "TransformersSummarizer crashes if given long input\nIf the TransformersSummarizer is given an input that is longer than the model's max_seq_len, an error will be thrown. Instead, I think a warning message should be printed to console and the input text should be truncated so that the Node can still run.\nTransformersSummarizer crashes if given long input\nIf the TransformersSummarizer is given an input that is longer than the model's max_seq_len, an error will be thrown. 
Instead, I think a warning message should be printed to console and the input text should be truncated so that the Node can still run.\n", "before_files": [{"content": "import logging\nfrom typing import List, Optional\n\nfrom transformers import pipeline\nfrom transformers.models.auto.modeling_auto import AutoModelForSeq2SeqLM\n\nfrom haystack import Document\nfrom haystack.summarizer.base import BaseSummarizer\n\nlogger = logging.getLogger(__name__)\n\n\nclass TransformersSummarizer(BaseSummarizer):\n \"\"\"\n Transformer based model to summarize the documents using the HuggingFace's transformers framework\n\n You can use any model that has been fine-tuned on a summarization task. For example:\n '`bart-large-cnn`', '`t5-small`', '`t5-base`', '`t5-large`', '`t5-3b`', '`t5-11b`'.\n See the up-to-date list of available models on\n `huggingface.co/models <https://huggingface.co/models?filter=summarization>`__\n\n **Example**\n\n ```python\n | docs = [Document(text=\"PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions.\"\n | \"The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by\"\n | \"the shutoffs which were expected to last through at least midday tomorrow.\")]\n |\n | # Summarize\n | summary = summarizer.predict(\n | documents=docs,\n | generate_single_summary=True\n | )\n |\n | # Show results (List of Documents, containing summary and original text)\n | print(summary)\n |\n | [\n | {\n | \"text\": \"California's largest electricity provider has turned off power to hundreds of thousands of customers.\",\n | ...\n | \"meta\": {\n | \"context\": \"PGE stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. ...\"\n | },\n | ...\n | },\n ```\n \"\"\"\n\n def __init__(\n self,\n model_name_or_path: str = \"google/pegasus-xsum\",\n model_version: Optional[str] = None,\n tokenizer: Optional[str] = None,\n max_length: int = 200,\n min_length: int = 5,\n use_gpu: int = 0,\n clean_up_tokenization_spaces: bool = True,\n separator_for_single_summary: str = \" \",\n generate_single_summary: bool = False,\n ):\n \"\"\"\n Load a Summarization model from Transformers.\n See the up-to-date list of available models at\n https://huggingface.co/models?filter=summarization\n\n :param model_name_or_path: Directory of a saved model or the name of a public model e.g.\n 'facebook/rag-token-nq', 'facebook/rag-sequence-nq'.\n See https://huggingface.co/models?filter=summarization for full list of available models.\n :param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.\n :param tokenizer: Name of the tokenizer (usually the same as model)\n :param max_length: Maximum length of summarized text\n :param min_length: Minimum length of summarized text\n :param use_gpu: If < 0, then use cpu. If >= 0, this is the ordinal of the gpu to use\n :param clean_up_tokenization_spaces: Whether or not to clean up the potential extra spaces in the text output\n :param separator_for_single_summary: If `generate_single_summary=True` in `predict()`, we need to join all docs\n into a single text. 
This separator appears between those subsequent docs.\n :param generate_single_summary: Whether to generate a single summary for all documents or one summary per document.\n If set to \"True\", all docs will be joined to a single string that will then\n be summarized.\n Important: The summary will depend on the order of the supplied documents!\n \"\"\"\n\n # save init parameters to enable export of component config as YAML\n self.set_config(\n model_name_or_path=model_name_or_path, model_version=model_version, tokenizer=tokenizer,\n max_length=max_length, min_length=min_length, use_gpu=use_gpu,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n separator_for_single_summary=separator_for_single_summary, generate_single_summary=generate_single_summary,\n )\n\n # TODO AutoModelForSeq2SeqLM is only necessary with transformers==4.1.1, with newer versions use the pipeline directly\n if tokenizer is None:\n tokenizer = model_name_or_path\n model = AutoModelForSeq2SeqLM.from_pretrained(pretrained_model_name_or_path=model_name_or_path, revision=model_version)\n self.summarizer = pipeline(\"summarization\", model=model, tokenizer=tokenizer, device=use_gpu)\n self.max_length = max_length\n self.min_length = min_length\n self.clean_up_tokenization_spaces = clean_up_tokenization_spaces\n self.separator_for_single_summary = separator_for_single_summary\n self.generate_single_summary = generate_single_summary\n\n def predict(self, documents: List[Document], generate_single_summary: Optional[bool] = None) -> List[Document]:\n \"\"\"\n Produce the summarization from the supplied documents.\n These document can for example be retrieved via the Retriever.\n\n :param documents: Related documents (e.g. coming from a retriever) that the answer shall be conditioned on.\n :param generate_single_summary: Whether to generate a single summary for all documents or one summary per document.\n If set to \"True\", all docs will be joined to a single string that will then\n be summarized.\n Important: The summary will depend on the order of the supplied documents!\n :return: List of Documents, where Document.text contains the summarization and Document.meta[\"context\"]\n the original, not summarized text\n \"\"\"\n\n if self.min_length > self.max_length:\n raise AttributeError(\"min_length cannot be greater than max_length\")\n\n if len(documents) == 0:\n raise AttributeError(\"Summarizer needs at least one document to produce a summary.\")\n\n if generate_single_summary is None:\n generate_single_summary = self.generate_single_summary\n\n contexts: List[str] = [doc.text for doc in documents]\n\n if generate_single_summary:\n # Documents order is very important to produce summary.\n # Different order of same documents produce different summary.\n contexts = [self.separator_for_single_summary.join(contexts)]\n\n summaries = self.summarizer(\n contexts,\n min_length=self.min_length,\n max_length=self.max_length,\n return_text=True,\n clean_up_tokenization_spaces=self.clean_up_tokenization_spaces,\n )\n\n result: List[Document] = []\n\n for context, summarized_answer in zip(contexts, summaries):\n cur_doc = Document(text=summarized_answer['summary_text'], meta={\"context\": context})\n result.append(cur_doc)\n\n return result\n", "path": "haystack/summarizer/transformers.py"}], "after_files": [{"content": "import logging\nfrom typing import List, Optional, Set\n\nfrom transformers import pipeline\nfrom transformers.models.auto.modeling_auto import AutoModelForSeq2SeqLM\n\nfrom haystack import Document\nfrom 
haystack.summarizer.base import BaseSummarizer\n\nlogger = logging.getLogger(__name__)\n\n\nclass TransformersSummarizer(BaseSummarizer):\n \"\"\"\n Transformer based model to summarize the documents using the HuggingFace's transformers framework\n\n You can use any model that has been fine-tuned on a summarization task. For example:\n '`bart-large-cnn`', '`t5-small`', '`t5-base`', '`t5-large`', '`t5-3b`', '`t5-11b`'.\n See the up-to-date list of available models on\n `huggingface.co/models <https://huggingface.co/models?filter=summarization>`__\n\n **Example**\n\n ```python\n | docs = [Document(text=\"PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions.\"\n | \"The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by\"\n | \"the shutoffs which were expected to last through at least midday tomorrow.\")]\n |\n | # Summarize\n | summary = summarizer.predict(\n | documents=docs,\n | generate_single_summary=True\n | )\n |\n | # Show results (List of Documents, containing summary and original text)\n | print(summary)\n |\n | [\n | {\n | \"text\": \"California's largest electricity provider has turned off power to hundreds of thousands of customers.\",\n | ...\n | \"meta\": {\n | \"context\": \"PGE stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. ...\"\n | },\n | ...\n | },\n ```\n \"\"\"\n\n def __init__(\n self,\n model_name_or_path: str = \"google/pegasus-xsum\",\n model_version: Optional[str] = None,\n tokenizer: Optional[str] = None,\n max_length: int = 200,\n min_length: int = 5,\n use_gpu: int = 0,\n clean_up_tokenization_spaces: bool = True,\n separator_for_single_summary: str = \" \",\n generate_single_summary: bool = False,\n ):\n \"\"\"\n Load a Summarization model from Transformers.\n See the up-to-date list of available models at\n https://huggingface.co/models?filter=summarization\n\n :param model_name_or_path: Directory of a saved model or the name of a public model e.g.\n 'facebook/rag-token-nq', 'facebook/rag-sequence-nq'.\n See https://huggingface.co/models?filter=summarization for full list of available models.\n :param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.\n :param tokenizer: Name of the tokenizer (usually the same as model)\n :param max_length: Maximum length of summarized text\n :param min_length: Minimum length of summarized text\n :param use_gpu: If < 0, then use cpu. If >= 0, this is the ordinal of the gpu to use\n :param clean_up_tokenization_spaces: Whether or not to clean up the potential extra spaces in the text output\n :param separator_for_single_summary: If `generate_single_summary=True` in `predict()`, we need to join all docs\n into a single text. 
This separator appears between those subsequent docs.\n :param generate_single_summary: Whether to generate a single summary for all documents or one summary per document.\n If set to \"True\", all docs will be joined to a single string that will then\n be summarized.\n Important: The summary will depend on the order of the supplied documents!\n \"\"\"\n\n # save init parameters to enable export of component config as YAML\n self.set_config(\n model_name_or_path=model_name_or_path, model_version=model_version, tokenizer=tokenizer,\n max_length=max_length, min_length=min_length, use_gpu=use_gpu,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n separator_for_single_summary=separator_for_single_summary, generate_single_summary=generate_single_summary,\n )\n\n # TODO AutoModelForSeq2SeqLM is only necessary with transformers==4.1.1, with newer versions use the pipeline directly\n if tokenizer is None:\n tokenizer = model_name_or_path\n model = AutoModelForSeq2SeqLM.from_pretrained(pretrained_model_name_or_path=model_name_or_path, revision=model_version)\n self.summarizer = pipeline(\"summarization\", model=model, tokenizer=tokenizer, device=use_gpu)\n self.max_length = max_length\n self.min_length = min_length\n self.clean_up_tokenization_spaces = clean_up_tokenization_spaces\n self.separator_for_single_summary = separator_for_single_summary\n self.generate_single_summary = generate_single_summary\n self.print_log: Set[str] = set()\n\n def predict(self, documents: List[Document], generate_single_summary: Optional[bool] = None,\n truncation: bool = True) -> List[Document]:\n \"\"\"\n Produce the summarization from the supplied documents.\n These document can for example be retrieved via the Retriever.\n\n :param documents: Related documents (e.g. coming from a retriever) that the answer shall be conditioned on.\n :param generate_single_summary: Whether to generate a single summary for all documents or one summary per document.\n If set to \"True\", all docs will be joined to a single string that will then\n be summarized.\n Important: The summary will depend on the order of the supplied documents!\n :param truncation: Truncate to a maximum length accepted by the model\n :return: List of Documents, where Document.text contains the summarization and Document.meta[\"context\"]\n the original, not summarized text\n \"\"\"\n\n if self.min_length > self.max_length:\n raise AttributeError(\"min_length cannot be greater than max_length\")\n\n if len(documents) == 0:\n raise AttributeError(\"Summarizer needs at least one document to produce a summary.\")\n\n if generate_single_summary is None:\n generate_single_summary = self.generate_single_summary\n\n contexts: List[str] = [doc.text for doc in documents]\n\n if generate_single_summary:\n # Documents order is very important to produce summary.\n # Different order of same documents produce different summary.\n contexts = [self.separator_for_single_summary.join(contexts)]\n\n encoded_input = self.summarizer.tokenizer(contexts, verbose=False)\n for input_id in encoded_input['input_ids']:\n tokens_count: int = len(input_id)\n if tokens_count > self.summarizer.tokenizer.model_max_length:\n truncation_warning = \"One or more of your input document texts is longer than the specified \" \\\n f\"maximum sequence length for this summarizer model. 
\"\\\n f\"Generating summary from first {self.summarizer.tokenizer.model_max_length}\"\\\n f\" tokens.\"\n if truncation_warning not in self.print_log:\n logger.warning(truncation_warning)\n self.print_log.add(truncation_warning)\n\n summaries = self.summarizer(\n contexts,\n min_length=self.min_length,\n max_length=self.max_length,\n return_text=True,\n clean_up_tokenization_spaces=self.clean_up_tokenization_spaces,\n truncation=True,\n )\n\n result: List[Document] = []\n\n for context, summarized_answer in zip(contexts, summaries):\n cur_doc = Document(text=summarized_answer['summary_text'], meta={\"context\": context})\n result.append(cur_doc)\n\n return result\n", "path": "haystack/summarizer/transformers.py"}]} | 2,232 | 615 |
gh_patches_debug_3482 | rasdani/github-patches | git_diff | docker__docker-py-1528 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot wait blocking generator output from events()
Since upgrading to `docker` 2.2.0, the `events()` API call made through the same API client times out,
so I get a `UnixHTTPConnectionPool(host='localhost', port=None): Read timed out.` exception message.
In my use case it is reasonable for `containers()` and the other client APIs to use a default timeout,
but `events()` should have a separate timeout setting for users.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/api/daemon.py`
Content:
```
1 import os
2 import warnings
3 from datetime import datetime
4
5 from .. import auth, utils
6 from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
7
8
9 class DaemonApiMixin(object):
10 @utils.minimum_version('1.25')
11 def df(self):
12 """
13 Get data usage information.
14
15 Returns:
16 (dict): A dictionary representing different resource categories
17 and their respective data usage.
18
19 Raises:
20 :py:class:`docker.errors.APIError`
21 If the server returns an error.
22 """
23 url = self._url('/system/df')
24 return self._result(self._get(url), True)
25
26 def events(self, since=None, until=None, filters=None, decode=None):
27 """
28 Get real-time events from the server. Similar to the ``docker events``
29 command.
30
31 Args:
32 since (UTC datetime or int): Get events from this point
33 until (UTC datetime or int): Get events until this point
34 filters (dict): Filter the events by event time, container or image
35 decode (bool): If set to true, stream will be decoded into dicts on
36 the fly. False by default.
37
38 Returns:
39 (generator): A blocking generator you can iterate over to retrieve
40 events as they happen.
41
42 Raises:
43 :py:class:`docker.errors.APIError`
44 If the server returns an error.
45
46 Example:
47
48 >>> for event in client.events()
49 ... print event
50 {u'from': u'image/with:tag',
51 u'id': u'container-id',
52 u'status': u'start',
53 u'time': 1423339459}
54 ...
55 """
56
57 if isinstance(since, datetime):
58 since = utils.datetime_to_timestamp(since)
59
60 if isinstance(until, datetime):
61 until = utils.datetime_to_timestamp(until)
62
63 if filters:
64 filters = utils.convert_filters(filters)
65
66 params = {
67 'since': since,
68 'until': until,
69 'filters': filters
70 }
71
72 return self._stream_helper(
73 self._get(self._url('/events'), params=params, stream=True),
74 decode=decode
75 )
76
77 def info(self):
78 """
79 Display system-wide information. Identical to the ``docker info``
80 command.
81
82 Returns:
83 (dict): The info as a dict
84
85 Raises:
86 :py:class:`docker.errors.APIError`
87 If the server returns an error.
88 """
89 return self._result(self._get(self._url("/info")), True)
90
91 def login(self, username, password=None, email=None, registry=None,
92 reauth=False, insecure_registry=False, dockercfg_path=None):
93 """
94 Authenticate with a registry. Similar to the ``docker login`` command.
95
96 Args:
97 username (str): The registry username
98 password (str): The plaintext password
99 email (str): The email for the registry account
100 registry (str): URL to the registry. E.g.
101 ``https://index.docker.io/v1/``
102 reauth (bool): Whether refresh existing authentication on the
103 Docker server.
104 dockercfg_path (str): Use a custom path for the ``.dockercfg`` file
105 (default ``$HOME/.dockercfg``)
106
107 Returns:
108 (dict): The response from the login request
109
110 Raises:
111 :py:class:`docker.errors.APIError`
112 If the server returns an error.
113 """
114 if insecure_registry:
115 warnings.warn(
116 INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),
117 DeprecationWarning
118 )
119
120 # If we don't have any auth data so far, try reloading the config file
121 # one more time in case anything showed up in there.
122 # If dockercfg_path is passed check to see if the config file exists,
123 # if so load that config.
124 if dockercfg_path and os.path.exists(dockercfg_path):
125 self._auth_configs = auth.load_config(dockercfg_path)
126 elif not self._auth_configs:
127 self._auth_configs = auth.load_config()
128
129 authcfg = auth.resolve_authconfig(self._auth_configs, registry)
130 # If we found an existing auth config for this registry and username
131 # combination, we can return it immediately unless reauth is requested.
132 if authcfg and authcfg.get('username', None) == username \
133 and not reauth:
134 return authcfg
135
136 req_data = {
137 'username': username,
138 'password': password,
139 'email': email,
140 'serveraddress': registry,
141 }
142
143 response = self._post_json(self._url('/auth'), data=req_data)
144 if response.status_code == 200:
145 self._auth_configs[registry or auth.INDEX_NAME] = req_data
146 return self._result(response, json=True)
147
148 def ping(self):
149 """
150 Checks the server is responsive. An exception will be raised if it
151 isn't responding.
152
153 Returns:
154 (bool) The response from the server.
155
156 Raises:
157 :py:class:`docker.errors.APIError`
158 If the server returns an error.
159 """
160 return self._result(self._get(self._url('/_ping'))) == 'OK'
161
162 def version(self, api_version=True):
163 """
164 Returns version information from the server. Similar to the ``docker
165 version`` command.
166
167 Returns:
168 (dict): The server version information
169
170 Raises:
171 :py:class:`docker.errors.APIError`
172 If the server returns an error.
173 """
174 url = self._url("/version", versioned_api=api_version)
175 return self._result(self._get(url), json=True)
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/api/daemon.py b/docker/api/daemon.py
--- a/docker/api/daemon.py
+++ b/docker/api/daemon.py
@@ -68,9 +68,10 @@
'until': until,
'filters': filters
}
+ url = self._url('/events')
return self._stream_helper(
- self._get(self._url('/events'), params=params, stream=True),
+ self._get(url, params=params, stream=True, timeout=None),
decode=decode
)
| {"golden_diff": "diff --git a/docker/api/daemon.py b/docker/api/daemon.py\n--- a/docker/api/daemon.py\n+++ b/docker/api/daemon.py\n@@ -68,9 +68,10 @@\n 'until': until,\n 'filters': filters\n }\n+ url = self._url('/events')\n \n return self._stream_helper(\n- self._get(self._url('/events'), params=params, stream=True),\n+ self._get(url, params=params, stream=True, timeout=None),\n decode=decode\n )\n", "issue": "Cannot wait blocking generator output from events()\nSince upgrade to `docker 2.2.0`. \r\n\r\nAPI 'events()' using same API client time out. \r\nSo I got `UnixHTTPConnectionPool(host='localhost', port=None): Read timed out.` exception message.\r\n\r\nBut in my use case, `containers()` or other client APIs are reasonable to set an default timeout.\r\nBut `events()` should have another timeout setting for users.\n", "before_files": [{"content": "import os\nimport warnings\nfrom datetime import datetime\n\nfrom .. import auth, utils\nfrom ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING\n\n\nclass DaemonApiMixin(object):\n @utils.minimum_version('1.25')\n def df(self):\n \"\"\"\n Get data usage information.\n\n Returns:\n (dict): A dictionary representing different resource categories\n and their respective data usage.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n url = self._url('/system/df')\n return self._result(self._get(url), True)\n\n def events(self, since=None, until=None, filters=None, decode=None):\n \"\"\"\n Get real-time events from the server. Similar to the ``docker events``\n command.\n\n Args:\n since (UTC datetime or int): Get events from this point\n until (UTC datetime or int): Get events until this point\n filters (dict): Filter the events by event time, container or image\n decode (bool): If set to true, stream will be decoded into dicts on\n the fly. False by default.\n\n Returns:\n (generator): A blocking generator you can iterate over to retrieve\n events as they happen.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> for event in client.events()\n ... print event\n {u'from': u'image/with:tag',\n u'id': u'container-id',\n u'status': u'start',\n u'time': 1423339459}\n ...\n \"\"\"\n\n if isinstance(since, datetime):\n since = utils.datetime_to_timestamp(since)\n\n if isinstance(until, datetime):\n until = utils.datetime_to_timestamp(until)\n\n if filters:\n filters = utils.convert_filters(filters)\n\n params = {\n 'since': since,\n 'until': until,\n 'filters': filters\n }\n\n return self._stream_helper(\n self._get(self._url('/events'), params=params, stream=True),\n decode=decode\n )\n\n def info(self):\n \"\"\"\n Display system-wide information. Identical to the ``docker info``\n command.\n\n Returns:\n (dict): The info as a dict\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self._result(self._get(self._url(\"/info\")), True)\n\n def login(self, username, password=None, email=None, registry=None,\n reauth=False, insecure_registry=False, dockercfg_path=None):\n \"\"\"\n Authenticate with a registry. Similar to the ``docker login`` command.\n\n Args:\n username (str): The registry username\n password (str): The plaintext password\n email (str): The email for the registry account\n registry (str): URL to the registry. 
E.g.\n ``https://index.docker.io/v1/``\n reauth (bool): Whether refresh existing authentication on the\n Docker server.\n dockercfg_path (str): Use a custom path for the ``.dockercfg`` file\n (default ``$HOME/.dockercfg``)\n\n Returns:\n (dict): The response from the login request\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n if insecure_registry:\n warnings.warn(\n INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),\n DeprecationWarning\n )\n\n # If we don't have any auth data so far, try reloading the config file\n # one more time in case anything showed up in there.\n # If dockercfg_path is passed check to see if the config file exists,\n # if so load that config.\n if dockercfg_path and os.path.exists(dockercfg_path):\n self._auth_configs = auth.load_config(dockercfg_path)\n elif not self._auth_configs:\n self._auth_configs = auth.load_config()\n\n authcfg = auth.resolve_authconfig(self._auth_configs, registry)\n # If we found an existing auth config for this registry and username\n # combination, we can return it immediately unless reauth is requested.\n if authcfg and authcfg.get('username', None) == username \\\n and not reauth:\n return authcfg\n\n req_data = {\n 'username': username,\n 'password': password,\n 'email': email,\n 'serveraddress': registry,\n }\n\n response = self._post_json(self._url('/auth'), data=req_data)\n if response.status_code == 200:\n self._auth_configs[registry or auth.INDEX_NAME] = req_data\n return self._result(response, json=True)\n\n def ping(self):\n \"\"\"\n Checks the server is responsive. An exception will be raised if it\n isn't responding.\n\n Returns:\n (bool) The response from the server.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self._result(self._get(self._url('/_ping'))) == 'OK'\n\n def version(self, api_version=True):\n \"\"\"\n Returns version information from the server. Similar to the ``docker\n version`` command.\n\n Returns:\n (dict): The server version information\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n url = self._url(\"/version\", versioned_api=api_version)\n return self._result(self._get(url), json=True)\n", "path": "docker/api/daemon.py"}], "after_files": [{"content": "import os\nimport warnings\nfrom datetime import datetime\n\nfrom .. import auth, utils\nfrom ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING\n\n\nclass DaemonApiMixin(object):\n @utils.minimum_version('1.25')\n def df(self):\n \"\"\"\n Get data usage information.\n\n Returns:\n (dict): A dictionary representing different resource categories\n and their respective data usage.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n url = self._url('/system/df')\n return self._result(self._get(url), True)\n\n def events(self, since=None, until=None, filters=None, decode=None):\n \"\"\"\n Get real-time events from the server. Similar to the ``docker events``\n command.\n\n Args:\n since (UTC datetime or int): Get events from this point\n until (UTC datetime or int): Get events until this point\n filters (dict): Filter the events by event time, container or image\n decode (bool): If set to true, stream will be decoded into dicts on\n the fly. 
False by default.\n\n Returns:\n (generator): A blocking generator you can iterate over to retrieve\n events as they happen.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> for event in client.events()\n ... print event\n {u'from': u'image/with:tag',\n u'id': u'container-id',\n u'status': u'start',\n u'time': 1423339459}\n ...\n \"\"\"\n\n if isinstance(since, datetime):\n since = utils.datetime_to_timestamp(since)\n\n if isinstance(until, datetime):\n until = utils.datetime_to_timestamp(until)\n\n if filters:\n filters = utils.convert_filters(filters)\n\n params = {\n 'since': since,\n 'until': until,\n 'filters': filters\n }\n url = self._url('/events')\n\n return self._stream_helper(\n self._get(url, params=params, stream=True, timeout=None),\n decode=decode\n )\n\n def info(self):\n \"\"\"\n Display system-wide information. Identical to the ``docker info``\n command.\n\n Returns:\n (dict): The info as a dict\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self._result(self._get(self._url(\"/info\")), True)\n\n def login(self, username, password=None, email=None, registry=None,\n reauth=False, insecure_registry=False, dockercfg_path=None):\n \"\"\"\n Authenticate with a registry. Similar to the ``docker login`` command.\n\n Args:\n username (str): The registry username\n password (str): The plaintext password\n email (str): The email for the registry account\n registry (str): URL to the registry. E.g.\n ``https://index.docker.io/v1/``\n reauth (bool): Whether refresh existing authentication on the\n Docker server.\n dockercfg_path (str): Use a custom path for the ``.dockercfg`` file\n (default ``$HOME/.dockercfg``)\n\n Returns:\n (dict): The response from the login request\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n if insecure_registry:\n warnings.warn(\n INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),\n DeprecationWarning\n )\n\n # If we don't have any auth data so far, try reloading the config file\n # one more time in case anything showed up in there.\n # If dockercfg_path is passed check to see if the config file exists,\n # if so load that config.\n if dockercfg_path and os.path.exists(dockercfg_path):\n self._auth_configs = auth.load_config(dockercfg_path)\n elif not self._auth_configs:\n self._auth_configs = auth.load_config()\n\n authcfg = auth.resolve_authconfig(self._auth_configs, registry)\n # If we found an existing auth config for this registry and username\n # combination, we can return it immediately unless reauth is requested.\n if authcfg and authcfg.get('username', None) == username \\\n and not reauth:\n return authcfg\n\n req_data = {\n 'username': username,\n 'password': password,\n 'email': email,\n 'serveraddress': registry,\n }\n\n response = self._post_json(self._url('/auth'), data=req_data)\n if response.status_code == 200:\n self._auth_configs[registry or auth.INDEX_NAME] = req_data\n return self._result(response, json=True)\n\n def ping(self):\n \"\"\"\n Checks the server is responsive. An exception will be raised if it\n isn't responding.\n\n Returns:\n (bool) The response from the server.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self._result(self._get(self._url('/_ping'))) == 'OK'\n\n def version(self, api_version=True):\n \"\"\"\n Returns version information from the server. 
Similar to the ``docker\n version`` command.\n\n Returns:\n (dict): The server version information\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n url = self._url(\"/version\", versioned_api=api_version)\n return self._result(self._get(url), json=True)\n", "path": "docker/api/daemon.py"}]} | 1,998 | 117 |
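The change above passes `timeout=None` only for the streaming `/events` request, so ordinary calls keep their default timeout while the blocking event generator can wait indefinitely for the next event. A rough illustration of the same idea with plain `requests` (the daemon URL `http://localhost:2375` is an assumption for a TCP-exposed Docker daemon, not taken from the record):

```python
import requests

# Non-streaming calls keep a finite timeout so they fail fast if the daemon hangs.
info = requests.get("http://localhost:2375/info", timeout=60)
print(info.json()["ServerVersion"])

# The blocking /events stream must not time out while waiting for the next event,
# so the read timeout is disabled for this request only.
with requests.get("http://localhost:2375/events", stream=True, timeout=None) as resp:
    for line in resp.iter_lines():
        if line:
            print(line.decode("utf-8"))
```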
gh_patches_debug_52919 | rasdani/github-patches | git_diff | great-expectations__great_expectations-3469 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py`
Content:
```
1 import logging
2 from functools import reduce
3
4 from great_expectations.execution_engine import (
5 PandasExecutionEngine,
6 SparkDFExecutionEngine,
7 SqlAlchemyExecutionEngine,
8 )
9 from great_expectations.expectations.metrics.import_manager import F, sa
10 from great_expectations.expectations.metrics.map_metric_provider import (
11 MulticolumnMapMetricProvider,
12 multicolumn_condition_partial,
13 )
14
15 logger = logging.getLogger(__name__)
16
17
18 class SelectColumnValuesUniqueWithinRecord(MulticolumnMapMetricProvider):
19 condition_metric_name = "select_column_values.unique.within_record"
20 condition_domain_keys = (
21 "batch_id",
22 "table",
23 "column_list",
24 "row_condition",
25 "condition_parser",
26 "ignore_row_if",
27 )
28
29 @multicolumn_condition_partial(engine=PandasExecutionEngine)
30 def _pandas(cls, column_list, **kwargs):
31 num_columns = len(column_list.columns)
32 row_wise_cond = column_list.nunique(dropna=False, axis=1) >= num_columns
33 return row_wise_cond
34
35 @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)
36 def _sqlalchemy(cls, column_list, **kwargs):
37 """
38 The present approach relies on an inefficient query condition construction implementation, whose computational
39 cost is O(num_columns^2). However, until a more efficient implementation compatible with SQLAlchemy is
40 available, this is the only feasible mechanism under the current architecture, where map metric providers must
41 return a condition. Nevertheless, SQL query length limit is 1GB (sufficient for most practical scenarios).
42 """
43 num_columns = len(column_list)
44
45 # An arbitrary "num_columns" value used for issuing an explanatory message as a warning.
46 if num_columns > 100:
47 logger.warning(
48 f"""Batch data with {num_columns} columns is detected. Computing the "{cls.condition_metric_name}" \
49 metric for wide tables using SQLAlchemy leads to long WHERE clauses for the underlying database engine to process.
50 """
51 )
52
53 conditions = sa.or_(
54 *(
55 sa.or_(
56 column_list[idx_src] == column_list[idx_dest],
57 sa.and_(
58 column_list[idx_src] == None, column_list[idx_dest] == None
59 ),
60 )
61 for idx_src in range(num_columns - 1)
62 for idx_dest in range(idx_src + 1, num_columns)
63 )
64 )
65 row_wise_cond = sa.not_(sa.or_(conditions))
66 return row_wise_cond
67
68 @multicolumn_condition_partial(engine=SparkDFExecutionEngine)
69 def _spark(cls, column_list, **kwargs):
70 column_names = column_list.columns
71 num_columns = len(column_names)
72
73 conditions = []
74 for idx_src in range(num_columns - 1):
75 for idx_dest in range(idx_src + 1, num_columns):
76 conditions.append(
77 F.col(column_names[idx_src]).eqNullSafe(
78 F.col(column_names[idx_dest])
79 )
80 )
81
82 row_wise_cond = ~reduce(lambda a, b: a | b, conditions)
83 return row_wise_cond
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py
--- a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py
+++ b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py
@@ -62,7 +62,7 @@
for idx_dest in range(idx_src + 1, num_columns)
)
)
- row_wise_cond = sa.not_(sa.or_(conditions))
+ row_wise_cond = sa.not_(conditions)
return row_wise_cond
@multicolumn_condition_partial(engine=SparkDFExecutionEngine)
| {"golden_diff": "diff --git a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py\n--- a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py\n+++ b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py\n@@ -62,7 +62,7 @@\n for idx_dest in range(idx_src + 1, num_columns)\n )\n )\n- row_wise_cond = sa.not_(sa.or_(conditions))\n+ row_wise_cond = sa.not_(conditions)\n return row_wise_cond\n \n @multicolumn_condition_partial(engine=SparkDFExecutionEngine)\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import logging\nfrom functools import reduce\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import F, sa\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n MulticolumnMapMetricProvider,\n multicolumn_condition_partial,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass SelectColumnValuesUniqueWithinRecord(MulticolumnMapMetricProvider):\n condition_metric_name = \"select_column_values.unique.within_record\"\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_list\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n\n @multicolumn_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_list, **kwargs):\n num_columns = len(column_list.columns)\n row_wise_cond = column_list.nunique(dropna=False, axis=1) >= num_columns\n return row_wise_cond\n\n @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(cls, column_list, **kwargs):\n \"\"\"\n The present approach relies on an inefficient query condition construction implementation, whose computational\n cost is O(num_columns^2). However, until a more efficient implementation compatible with SQLAlchemy is\n available, this is the only feasible mechanism under the current architecture, where map metric providers must\n return a condition. Nevertheless, SQL query length limit is 1GB (sufficient for most practical scenarios).\n \"\"\"\n num_columns = len(column_list)\n\n # An arbitrary \"num_columns\" value used for issuing an explanatory message as a warning.\n if num_columns > 100:\n logger.warning(\n f\"\"\"Batch data with {num_columns} columns is detected. 
Computing the \"{cls.condition_metric_name}\" \\\nmetric for wide tables using SQLAlchemy leads to long WHERE clauses for the underlying database engine to process.\n\"\"\"\n )\n\n conditions = sa.or_(\n *(\n sa.or_(\n column_list[idx_src] == column_list[idx_dest],\n sa.and_(\n column_list[idx_src] == None, column_list[idx_dest] == None\n ),\n )\n for idx_src in range(num_columns - 1)\n for idx_dest in range(idx_src + 1, num_columns)\n )\n )\n row_wise_cond = sa.not_(sa.or_(conditions))\n return row_wise_cond\n\n @multicolumn_condition_partial(engine=SparkDFExecutionEngine)\n def _spark(cls, column_list, **kwargs):\n column_names = column_list.columns\n num_columns = len(column_names)\n\n conditions = []\n for idx_src in range(num_columns - 1):\n for idx_dest in range(idx_src + 1, num_columns):\n conditions.append(\n F.col(column_names[idx_src]).eqNullSafe(\n F.col(column_names[idx_dest])\n )\n )\n\n row_wise_cond = ~reduce(lambda a, b: a | b, conditions)\n return row_wise_cond\n", "path": "great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py"}], "after_files": [{"content": "import logging\nfrom functools import reduce\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import F, sa\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n MulticolumnMapMetricProvider,\n multicolumn_condition_partial,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass SelectColumnValuesUniqueWithinRecord(MulticolumnMapMetricProvider):\n condition_metric_name = \"select_column_values.unique.within_record\"\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_list\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n\n @multicolumn_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_list, **kwargs):\n num_columns = len(column_list.columns)\n row_wise_cond = column_list.nunique(dropna=False, axis=1) >= num_columns\n return row_wise_cond\n\n @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(cls, column_list, **kwargs):\n \"\"\"\n The present approach relies on an inefficient query condition construction implementation, whose computational\n cost is O(num_columns^2). However, until a more efficient implementation compatible with SQLAlchemy is\n available, this is the only feasible mechanism under the current architecture, where map metric providers must\n return a condition. Nevertheless, SQL query length limit is 1GB (sufficient for most practical scenarios).\n \"\"\"\n num_columns = len(column_list)\n\n # An arbitrary \"num_columns\" value used for issuing an explanatory message as a warning.\n if num_columns > 100:\n logger.warning(\n f\"\"\"Batch data with {num_columns} columns is detected. 
Computing the \"{cls.condition_metric_name}\" \\\nmetric for wide tables using SQLAlchemy leads to long WHERE clauses for the underlying database engine to process.\n\"\"\"\n )\n\n conditions = sa.or_(\n *(\n sa.or_(\n column_list[idx_src] == column_list[idx_dest],\n sa.and_(\n column_list[idx_src] == None, column_list[idx_dest] == None\n ),\n )\n for idx_src in range(num_columns - 1)\n for idx_dest in range(idx_src + 1, num_columns)\n )\n )\n row_wise_cond = sa.not_(conditions)\n return row_wise_cond\n\n @multicolumn_condition_partial(engine=SparkDFExecutionEngine)\n def _spark(cls, column_list, **kwargs):\n column_names = column_list.columns\n num_columns = len(column_names)\n\n conditions = []\n for idx_src in range(num_columns - 1):\n for idx_dest in range(idx_src + 1, num_columns):\n conditions.append(\n F.col(column_names[idx_src]).eqNullSafe(\n F.col(column_names[idx_dest])\n )\n )\n\n row_wise_cond = ~reduce(lambda a, b: a | b, conditions)\n return row_wise_cond\n", "path": "great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py"}]} | 1,122 | 179 |
gh_patches_debug_43188 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-2357 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Axes zoom area does not resize in 0.12.4
### Short description
When a plot is first generated with a given size, say `width` and `height` in pixels, the entire axes areas on the left and bottom of the plot are zoomable. When the plot is resized so that it becomes larger (e.g. the window is grabbed along an edge or corner and expanded), only the top `height` portion of the y-axis or the left `width` portion of the x-axis retains the ability to zoom the plot using the scroll wheel. The axes areas outside that region (i.e. the lower portion of the y-axis or the right portion of the x-axis) are not zoomable. If hovering over the plot itself, not on an axis, there is no issue with zooming. Reverting to 0.12.3 fixes the issue.
I was able to reproduce this in both custom plots and many of the pyqtgraph.example scripts.
### Tested environment(s)
* PyQtGraph version: 0.12.4
* Qt Python binding: PySide2 5.15.2.1 Qt 5.15.2
* Python version: 3.7
* NumPy version: 1.21.6
* Operating system: Windows 10 Enterprise 21H2
* Installation method: pip
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/graphicsItems/GraphicsWidget.py`
Content:
```
1 from ..Qt import QtGui, QtWidgets
2 from .GraphicsItem import GraphicsItem
3
4 __all__ = ['GraphicsWidget']
5
6 class GraphicsWidget(GraphicsItem, QtWidgets.QGraphicsWidget):
7
8 _qtBaseClass = QtWidgets.QGraphicsWidget
9 def __init__(self, *args, **kargs):
10 """
11 **Bases:** :class:`GraphicsItem <pyqtgraph.GraphicsItem>`, :class:`QtWidgets.QGraphicsWidget`
12
13 Extends QGraphicsWidget with several helpful methods and workarounds for PyQt bugs.
14 Most of the extra functionality is inherited from :class:`GraphicsItem <pyqtgraph.GraphicsItem>`.
15 """
16 QtWidgets.QGraphicsWidget.__init__(self, *args, **kargs)
17 GraphicsItem.__init__(self)
18
19 # cache bouding rect and geometry
20 self._boundingRectCache = self._previousGeometry = None
21 self._painterPathCache = None
22
23 ## done by GraphicsItem init
24 #GraphicsScene.registerObject(self) ## workaround for pyqt bug in graphicsscene.items()
25
26 # Removed due to https://bugreports.qt-project.org/browse/PYSIDE-86
27 #def itemChange(self, change, value):
28 ## BEWARE: Calling QGraphicsWidget.itemChange can lead to crashing!
29 ##ret = QtWidgets.QGraphicsWidget.itemChange(self, change, value) ## segv occurs here
30 ## The default behavior is just to return the value argument, so we'll do that
31 ## without calling the original method.
32 #ret = value
33 #if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]:
34 #self._updateView()
35 #return ret
36
37 def setFixedHeight(self, h):
38 self.setMaximumHeight(h)
39 self.setMinimumHeight(h)
40
41 def setFixedWidth(self, h):
42 self.setMaximumWidth(h)
43 self.setMinimumWidth(h)
44
45 def height(self):
46 return self.geometry().height()
47
48 def width(self):
49 return self.geometry().width()
50
51 def boundingRect(self):
52 geometry = self.geometry()
53 if geometry != self._previousGeometry:
54 self._painterPathCache = None
55
56 br = self.mapRectFromParent(geometry).normalized()
57 self._boundingRectCache = br
58 self._previousGeometry = geometry
59 else:
60 br = self._boundingRectCache
61
62 return br
63
64 def shape(self): ## No idea why this is necessary, but rotated items do not receive clicks otherwise.
65 p = self._painterPathCache
66 if p is None:
67 self._painterPathCache = p = QtGui.QPainterPath()
68 p.addRect(self.boundingRect())
69
70 return p
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyqtgraph/graphicsItems/GraphicsWidget.py b/pyqtgraph/graphicsItems/GraphicsWidget.py
--- a/pyqtgraph/graphicsItems/GraphicsWidget.py
+++ b/pyqtgraph/graphicsItems/GraphicsWidget.py
@@ -3,36 +3,43 @@
__all__ = ['GraphicsWidget']
+
class GraphicsWidget(GraphicsItem, QtWidgets.QGraphicsWidget):
_qtBaseClass = QtWidgets.QGraphicsWidget
- def __init__(self, *args, **kargs):
+
+ def __init__(self, *args, **kwargs):
"""
**Bases:** :class:`GraphicsItem <pyqtgraph.GraphicsItem>`, :class:`QtWidgets.QGraphicsWidget`
Extends QGraphicsWidget with several helpful methods and workarounds for PyQt bugs.
Most of the extra functionality is inherited from :class:`GraphicsItem <pyqtgraph.GraphicsItem>`.
"""
- QtWidgets.QGraphicsWidget.__init__(self, *args, **kargs)
+ QtWidgets.QGraphicsWidget.__init__(self, *args, **kwargs)
GraphicsItem.__init__(self)
- # cache bouding rect and geometry
+ # cache bounding rect and geometry
self._boundingRectCache = self._previousGeometry = None
self._painterPathCache = None
-
- ## done by GraphicsItem init
- #GraphicsScene.registerObject(self) ## workaround for pyqt bug in graphicsscene.items()
+ self.geometryChanged.connect(self._resetCachedProperties)
+
+ # done by GraphicsItem init
+ # GraphicsScene.registerObject(self) # workaround for pyqt bug in GraphicsScene.items()
# Removed due to https://bugreports.qt-project.org/browse/PYSIDE-86
- #def itemChange(self, change, value):
- ## BEWARE: Calling QGraphicsWidget.itemChange can lead to crashing!
- ##ret = QtWidgets.QGraphicsWidget.itemChange(self, change, value) ## segv occurs here
- ## The default behavior is just to return the value argument, so we'll do that
- ## without calling the original method.
- #ret = value
- #if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]:
- #self._updateView()
- #return ret
+ # def itemChange(self, change, value):
+ # # BEWARE: Calling QGraphicsWidget.itemChange can lead to crashing!
+ # # ret = QtWidgets.QGraphicsWidget.itemChange(self, change, value) # segv occurs here
+ # # The default behavior is just to return the value argument, so we'll do that
+ # # without calling the original method.
+ # ret = value
+ # if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]:
+ # self._updateView()
+ # return ret
+
+ def _resetCachedProperties(self):
+ self._boundingRectCache = self._previousGeometry = None
+ self._painterPathCache = None
def setFixedHeight(self, h):
self.setMaximumHeight(h)
@@ -41,10 +48,10 @@
def setFixedWidth(self, h):
self.setMaximumWidth(h)
self.setMinimumWidth(h)
-
+
def height(self):
return self.geometry().height()
-
+
def width(self):
return self.geometry().width()
@@ -52,19 +59,16 @@
geometry = self.geometry()
if geometry != self._previousGeometry:
self._painterPathCache = None
-
br = self.mapRectFromParent(geometry).normalized()
self._boundingRectCache = br
self._previousGeometry = geometry
else:
br = self._boundingRectCache
-
return br
- def shape(self): ## No idea why this is necessary, but rotated items do not receive clicks otherwise.
+ def shape(self):
p = self._painterPathCache
if p is None:
self._painterPathCache = p = QtGui.QPainterPath()
p.addRect(self.boundingRect())
-
return p
| {"golden_diff": "diff --git a/pyqtgraph/graphicsItems/GraphicsWidget.py b/pyqtgraph/graphicsItems/GraphicsWidget.py\n--- a/pyqtgraph/graphicsItems/GraphicsWidget.py\n+++ b/pyqtgraph/graphicsItems/GraphicsWidget.py\n@@ -3,36 +3,43 @@\n \n __all__ = ['GraphicsWidget']\n \n+\n class GraphicsWidget(GraphicsItem, QtWidgets.QGraphicsWidget):\n \n _qtBaseClass = QtWidgets.QGraphicsWidget\n- def __init__(self, *args, **kargs):\n+\n+ def __init__(self, *args, **kwargs):\n \"\"\"\n **Bases:** :class:`GraphicsItem <pyqtgraph.GraphicsItem>`, :class:`QtWidgets.QGraphicsWidget`\n \n Extends QGraphicsWidget with several helpful methods and workarounds for PyQt bugs. \n Most of the extra functionality is inherited from :class:`GraphicsItem <pyqtgraph.GraphicsItem>`.\n \"\"\"\n- QtWidgets.QGraphicsWidget.__init__(self, *args, **kargs)\n+ QtWidgets.QGraphicsWidget.__init__(self, *args, **kwargs)\n GraphicsItem.__init__(self)\n \n- # cache bouding rect and geometry\n+ # cache bounding rect and geometry\n self._boundingRectCache = self._previousGeometry = None\n self._painterPathCache = None\n- \n- ## done by GraphicsItem init\n- #GraphicsScene.registerObject(self) ## workaround for pyqt bug in graphicsscene.items()\n+ self.geometryChanged.connect(self._resetCachedProperties)\n+\n+ # done by GraphicsItem init\n+ # GraphicsScene.registerObject(self) # workaround for pyqt bug in GraphicsScene.items()\n \n # Removed due to https://bugreports.qt-project.org/browse/PYSIDE-86\n- #def itemChange(self, change, value):\n- ## BEWARE: Calling QGraphicsWidget.itemChange can lead to crashing!\n- ##ret = QtWidgets.QGraphicsWidget.itemChange(self, change, value) ## segv occurs here\n- ## The default behavior is just to return the value argument, so we'll do that\n- ## without calling the original method.\n- #ret = value\n- #if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]:\n- #self._updateView()\n- #return ret\n+ # def itemChange(self, change, value):\n+ # # BEWARE: Calling QGraphicsWidget.itemChange can lead to crashing!\n+ # # ret = QtWidgets.QGraphicsWidget.itemChange(self, change, value) # segv occurs here\n+ # # The default behavior is just to return the value argument, so we'll do that\n+ # # without calling the original method.\n+ # ret = value\n+ # if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]:\n+ # self._updateView()\n+ # return ret\n+\n+ def _resetCachedProperties(self):\n+ self._boundingRectCache = self._previousGeometry = None\n+ self._painterPathCache = None\n \n def setFixedHeight(self, h):\n self.setMaximumHeight(h)\n@@ -41,10 +48,10 @@\n def setFixedWidth(self, h):\n self.setMaximumWidth(h)\n self.setMinimumWidth(h)\n- \n+\n def height(self):\n return self.geometry().height()\n- \n+\n def width(self):\n return self.geometry().width()\n \n@@ -52,19 +59,16 @@\n geometry = self.geometry()\n if geometry != self._previousGeometry:\n self._painterPathCache = None\n- \n br = self.mapRectFromParent(geometry).normalized()\n self._boundingRectCache = br\n self._previousGeometry = geometry\n else:\n br = self._boundingRectCache\n-\n return br\n \n- def shape(self): ## No idea why this is necessary, but rotated items do not receive clicks otherwise.\n+ def shape(self):\n p = self._painterPathCache\n if p is None:\n self._painterPathCache = p = QtGui.QPainterPath()\n p.addRect(self.boundingRect())\n-\n return p\n", "issue": "Axes zoom area does not resize in 0.12.4\n### Short description\r\nWhen a plot is first generated with a given size, say `width ` and `height` in pixels, the 
entire axes areas on the left and bottom of the plot are zoomable. When the plot size is resized such that the plot is larger (e.g. the window is grabbed along an edge or corner and expanded), only the top `height` portion of the y-axes or the left `width` portion of the x-axis retain the ability to zoom the plot using the scroll wheel. The axes area outside (i.e. the lower portion of the y-axis or the right portion of the x-axis) are not zoomable. If hovering over the plot, not on an axes, there is no issue with zooming. Reverting to 0.12.3 fixes issue.\r\n\r\nI was able to reproduce this in both custom plots and many of the pyqtgraph.example scripts.\r\n\r\n### Tested environment(s)\r\n\r\n * PyQtGraph version: 0.12.4\r\n * Qt Python binding: PySide2 5.15.2.1 Qt 5.15.2\r\n * Python version: 3.7\r\n * NumPy version: 1.21.6\r\n * Operating system: Windows 10 Enterprise 21H2\r\n * Installation method: pip\r\n\n", "before_files": [{"content": "from ..Qt import QtGui, QtWidgets\nfrom .GraphicsItem import GraphicsItem\n\n__all__ = ['GraphicsWidget']\n\nclass GraphicsWidget(GraphicsItem, QtWidgets.QGraphicsWidget):\n \n _qtBaseClass = QtWidgets.QGraphicsWidget\n def __init__(self, *args, **kargs):\n \"\"\"\n **Bases:** :class:`GraphicsItem <pyqtgraph.GraphicsItem>`, :class:`QtWidgets.QGraphicsWidget`\n \n Extends QGraphicsWidget with several helpful methods and workarounds for PyQt bugs. \n Most of the extra functionality is inherited from :class:`GraphicsItem <pyqtgraph.GraphicsItem>`.\n \"\"\"\n QtWidgets.QGraphicsWidget.__init__(self, *args, **kargs)\n GraphicsItem.__init__(self)\n\n # cache bouding rect and geometry\n self._boundingRectCache = self._previousGeometry = None\n self._painterPathCache = None\n \n ## done by GraphicsItem init\n #GraphicsScene.registerObject(self) ## workaround for pyqt bug in graphicsscene.items()\n\n # Removed due to https://bugreports.qt-project.org/browse/PYSIDE-86\n #def itemChange(self, change, value):\n ## BEWARE: Calling QGraphicsWidget.itemChange can lead to crashing!\n ##ret = QtWidgets.QGraphicsWidget.itemChange(self, change, value) ## segv occurs here\n ## The default behavior is just to return the value argument, so we'll do that\n ## without calling the original method.\n #ret = value\n #if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]:\n #self._updateView()\n #return ret\n\n def setFixedHeight(self, h):\n self.setMaximumHeight(h)\n self.setMinimumHeight(h)\n\n def setFixedWidth(self, h):\n self.setMaximumWidth(h)\n self.setMinimumWidth(h)\n \n def height(self):\n return self.geometry().height()\n \n def width(self):\n return self.geometry().width()\n\n def boundingRect(self):\n geometry = self.geometry()\n if geometry != self._previousGeometry:\n self._painterPathCache = None\n \n br = self.mapRectFromParent(geometry).normalized()\n self._boundingRectCache = br\n self._previousGeometry = geometry\n else:\n br = self._boundingRectCache\n\n return br\n\n def shape(self): ## No idea why this is necessary, but rotated items do not receive clicks otherwise.\n p = self._painterPathCache\n if p is None:\n self._painterPathCache = p = QtGui.QPainterPath()\n p.addRect(self.boundingRect())\n\n return p\n", "path": "pyqtgraph/graphicsItems/GraphicsWidget.py"}], "after_files": [{"content": "from ..Qt import QtGui, QtWidgets\nfrom .GraphicsItem import GraphicsItem\n\n__all__ = ['GraphicsWidget']\n\n\nclass GraphicsWidget(GraphicsItem, QtWidgets.QGraphicsWidget):\n \n _qtBaseClass = QtWidgets.QGraphicsWidget\n\n def __init__(self, *args, 
**kwargs):\n \"\"\"\n **Bases:** :class:`GraphicsItem <pyqtgraph.GraphicsItem>`, :class:`QtWidgets.QGraphicsWidget`\n \n Extends QGraphicsWidget with several helpful methods and workarounds for PyQt bugs. \n Most of the extra functionality is inherited from :class:`GraphicsItem <pyqtgraph.GraphicsItem>`.\n \"\"\"\n QtWidgets.QGraphicsWidget.__init__(self, *args, **kwargs)\n GraphicsItem.__init__(self)\n\n # cache bounding rect and geometry\n self._boundingRectCache = self._previousGeometry = None\n self._painterPathCache = None\n self.geometryChanged.connect(self._resetCachedProperties)\n\n # done by GraphicsItem init\n # GraphicsScene.registerObject(self) # workaround for pyqt bug in GraphicsScene.items()\n\n # Removed due to https://bugreports.qt-project.org/browse/PYSIDE-86\n # def itemChange(self, change, value):\n # # BEWARE: Calling QGraphicsWidget.itemChange can lead to crashing!\n # # ret = QtWidgets.QGraphicsWidget.itemChange(self, change, value) # segv occurs here\n # # The default behavior is just to return the value argument, so we'll do that\n # # without calling the original method.\n # ret = value\n # if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]:\n # self._updateView()\n # return ret\n\n def _resetCachedProperties(self):\n self._boundingRectCache = self._previousGeometry = None\n self._painterPathCache = None\n\n def setFixedHeight(self, h):\n self.setMaximumHeight(h)\n self.setMinimumHeight(h)\n\n def setFixedWidth(self, h):\n self.setMaximumWidth(h)\n self.setMinimumWidth(h)\n\n def height(self):\n return self.geometry().height()\n\n def width(self):\n return self.geometry().width()\n\n def boundingRect(self):\n geometry = self.geometry()\n if geometry != self._previousGeometry:\n self._painterPathCache = None\n br = self.mapRectFromParent(geometry).normalized()\n self._boundingRectCache = br\n self._previousGeometry = geometry\n else:\n br = self._boundingRectCache\n return br\n\n def shape(self):\n p = self._painterPathCache\n if p is None:\n self._painterPathCache = p = QtGui.QPainterPath()\n p.addRect(self.boundingRect())\n return p\n", "path": "pyqtgraph/graphicsItems/GraphicsWidget.py"}]} | 1,261 | 914 |
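A minimal sketch of the cache-invalidation pattern applied in the GraphicsWidget diff above, assuming a plain PyQt5 binding rather than pyqtgraph's own Qt shim (the class name `CachedGeometryWidget` is illustrative, not upstream code): cached, geometry-derived values are cleared whenever `geometryChanged` fires, so `boundingRect()` and `shape()` are recomputed after a resize instead of serving stale rectangles.

```python
from PyQt5 import QtGui, QtWidgets  # assumed binding; pyqtgraph normally routes this through its Qt shim


class CachedGeometryWidget(QtWidgets.QGraphicsWidget):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._boundingRectCache = self._previousGeometry = None
        self._painterPathCache = None
        # The fix from the diff: drop every cached value when the geometry changes.
        self.geometryChanged.connect(self._resetCachedProperties)

    def _resetCachedProperties(self):
        self._boundingRectCache = self._previousGeometry = None
        self._painterPathCache = None

    def boundingRect(self):
        geometry = self.geometry()
        if geometry != self._previousGeometry:
            # Recompute and cache only when the geometry actually changed.
            self._painterPathCache = None
            self._boundingRectCache = self.mapRectFromParent(geometry).normalized()
            self._previousGeometry = geometry
        return self._boundingRectCache

    def shape(self):
        if self._painterPathCache is None:
            path = QtGui.QPainterPath()
            path.addRect(self.boundingRect())
            self._painterPathCache = path
        return self._painterPathCache
```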
gh_patches_debug_9524 | rasdani/github-patches | git_diff | ansible-collections__community.general-458 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rhsm_repository is really slow
##### SUMMARY
Using rhsm_repository is really slow, making it annoying to use a playbook that uses it.
(copied from https://github.com/ansible/ansible/issues/69722)
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
rhsm_repository
##### ANSIBLE VERSION
```
ansible 2.9.9
config file = /root/ansible/ansible.cfg
configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python2.7/site-packages/ansible
executable location = /usr/bin/ansible
python version = 2.7.5 (default, Apr 2 2020, 13:16:51) [GCC 4.8.5 20150623 (Red Hat 4.8.5-39)]
```
##### CONFIGURATION
```
[defaults]
stdout_callback = yaml
force_handlers = True
```
##### OS / ENVIRONMENT
Ansible machine:
Target machine: RHEL 8
##### STEPS TO REPRODUCE
```yaml
- name: enable Red Hat CodeReady repo
rhsm_repository:
name: codeready-builder-for-rhel-8-x86_64-rpms
when: ansible_facts['distribution'] == "RedHat" and ansible_facts['distribution_major_version'] == "8"
```
##### EXPECTED RESULTS
The above task should complete in a short time, at least when the repository is already enabled.
##### ACTUAL RESULTS
You have time to get coffee whilst your playbook runs.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/packaging/os/rhsm_repository.py`
Content:
```
1 #!/usr/bin/python
2
3 # Copyright: (c) 2017, Giovanni Sciortino (@giovannisciortino)
4 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
5
6 from __future__ import absolute_import, division, print_function
7 __metaclass__ = type
8
9 DOCUMENTATION = '''
10 ---
11 module: rhsm_repository
12 short_description: Manage RHSM repositories using the subscription-manager command
13 description:
14 - Manage (Enable/Disable) RHSM repositories to the Red Hat Subscription
15 Management entitlement platform using the C(subscription-manager) command.
16 author: Giovanni Sciortino (@giovannisciortino)
17 notes:
18 - In order to manage RHSM repositories the system must be already registered
19 to RHSM manually or using the Ansible C(redhat_subscription) module.
20
21 requirements:
22 - subscription-manager
23 options:
24 state:
25 description:
26 - If state is equal to present or disabled, indicates the desired
27 repository state.
28 choices: [present, enabled, absent, disabled]
29 required: True
30 default: "present"
31 name:
32 description:
33 - The ID of repositories to enable.
34 - To operate on several repositories this can accept a comma separated
35 list or a YAML list.
36 required: True
37 purge:
38 description:
39 - Disable all currently enabled repositories that are not not specified in C(name).
40 Only set this to C(True) if passing in a list of repositories to the C(name) field.
41 Using this with C(loop) will most likely not have the desired result.
42 type: bool
43 default: False
44 '''
45
46 EXAMPLES = '''
47 - name: Enable a RHSM repository
48 rhsm_repository:
49 name: rhel-7-server-rpms
50
51 - name: Disable all RHSM repositories
52 rhsm_repository:
53 name: '*'
54 state: disabled
55
56 - name: Enable all repositories starting with rhel-6-server
57 rhsm_repository:
58 name: rhel-6-server*
59 state: enabled
60
61 - name: Disable all repositories except rhel-7-server-rpms
62 rhsm_repository:
63 name: rhel-7-server-rpms
64 purge: True
65 '''
66
67 RETURN = '''
68 repositories:
69 description:
70 - The list of RHSM repositories with their states.
71 - When this module is used to change the repository states, this list contains the updated states after the changes.
72 returned: success
73 type: list
74 '''
75
76 import re
77 import os
78 from fnmatch import fnmatch
79 from copy import deepcopy
80 from ansible.module_utils.basic import AnsibleModule
81
82
83 def run_subscription_manager(module, arguments):
84 # Execute subscription-manager with arguments and manage common errors
85 rhsm_bin = module.get_bin_path('subscription-manager')
86 if not rhsm_bin:
87 module.fail_json(msg='The executable file subscription-manager was not found in PATH')
88
89 lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
90 rc, out, err = module.run_command("%s %s" % (rhsm_bin, " ".join(arguments)), environ_update=lang_env)
91
92 if rc == 1 and (err == 'The password you typed is invalid.\nPlease try again.\n' or os.getuid() != 0):
93 module.fail_json(msg='The executable file subscription-manager must be run using root privileges')
94 elif rc == 0 and out == 'This system has no repositories available through subscriptions.\n':
95 module.fail_json(msg='This system has no repositories available through subscriptions')
96 elif rc == 1:
97 module.fail_json(msg='subscription-manager failed with the following error: %s' % err)
98 else:
99 return rc, out, err
100
101
102 def get_repository_list(module, list_parameter):
103 # Generate RHSM repository list and return a list of dict
104 if list_parameter == 'list_enabled':
105 rhsm_arguments = ['repos', '--list-enabled']
106 elif list_parameter == 'list_disabled':
107 rhsm_arguments = ['repos', '--list-disabled']
108 elif list_parameter == 'list':
109 rhsm_arguments = ['repos', '--list']
110 rc, out, err = run_subscription_manager(module, rhsm_arguments)
111
112 skip_lines = [
113 '+----------------------------------------------------------+',
114 ' Available Repositories in /etc/yum.repos.d/redhat.repo'
115 ]
116 repo_id_re = re.compile(r'Repo ID:\s+(.*)')
117 repo_name_re = re.compile(r'Repo Name:\s+(.*)')
118 repo_url_re = re.compile(r'Repo URL:\s+(.*)')
119 repo_enabled_re = re.compile(r'Enabled:\s+(.*)')
120
121 repo_id = ''
122 repo_name = ''
123 repo_url = ''
124 repo_enabled = ''
125
126 repo_result = []
127 for line in out.splitlines():
128 if line == '' or line in skip_lines:
129 continue
130
131 repo_id_match = repo_id_re.match(line)
132 if repo_id_match:
133 repo_id = repo_id_match.group(1)
134 continue
135
136 repo_name_match = repo_name_re.match(line)
137 if repo_name_match:
138 repo_name = repo_name_match.group(1)
139 continue
140
141 repo_url_match = repo_url_re.match(line)
142 if repo_url_match:
143 repo_url = repo_url_match.group(1)
144 continue
145
146 repo_enabled_match = repo_enabled_re.match(line)
147 if repo_enabled_match:
148 repo_enabled = repo_enabled_match.group(1)
149
150 repo = {
151 "id": repo_id,
152 "name": repo_name,
153 "url": repo_url,
154 "enabled": True if repo_enabled == '1' else False
155 }
156
157 repo_result.append(repo)
158
159 return repo_result
160
161
162 def repository_modify(module, state, name, purge=False):
163 name = set(name)
164 current_repo_list = get_repository_list(module, 'list')
165 updated_repo_list = deepcopy(current_repo_list)
166 matched_existing_repo = {}
167 for repoid in name:
168 matched_existing_repo[repoid] = []
169 for idx, repo in enumerate(current_repo_list):
170 if fnmatch(repo['id'], repoid):
171 matched_existing_repo[repoid].append(repo)
172 # Update current_repo_list to return it as result variable
173 updated_repo_list[idx]['enabled'] = True if state == 'enabled' else False
174
175 changed = False
176 results = []
177 diff_before = ""
178 diff_after = ""
179 rhsm_arguments = ['repos']
180
181 for repoid in matched_existing_repo:
182 if len(matched_existing_repo[repoid]) == 0:
183 results.append("%s is not a valid repository ID" % repoid)
184 module.fail_json(results=results, msg="%s is not a valid repository ID" % repoid)
185 for repo in matched_existing_repo[repoid]:
186 if state in ['disabled', 'absent']:
187 if repo['enabled']:
188 changed = True
189 diff_before += "Repository '%s' is enabled for this system\n" % repo['id']
190 diff_after += "Repository '%s' is disabled for this system\n" % repo['id']
191 results.append("Repository '%s' is disabled for this system" % repo['id'])
192 rhsm_arguments += ['--disable', repo['id']]
193 elif state in ['enabled', 'present']:
194 if not repo['enabled']:
195 changed = True
196 diff_before += "Repository '%s' is disabled for this system\n" % repo['id']
197 diff_after += "Repository '%s' is enabled for this system\n" % repo['id']
198 results.append("Repository '%s' is enabled for this system" % repo['id'])
199 rhsm_arguments += ['--enable', repo['id']]
200
201 # Disable all enabled repos on the system that are not in the task and not
202 # marked as disabled by the task
203 if purge:
204 enabled_repo_ids = set(repo['id'] for repo in updated_repo_list if repo['enabled'])
205 matched_repoids_set = set(matched_existing_repo.keys())
206 difference = enabled_repo_ids.difference(matched_repoids_set)
207 if len(difference) > 0:
208 for repoid in difference:
209 changed = True
210 diff_before.join("Repository '{repoid}'' is enabled for this system\n".format(repoid=repoid))
211 diff_after.join("Repository '{repoid}' is disabled for this system\n".format(repoid=repoid))
212 results.append("Repository '{repoid}' is disabled for this system".format(repoid=repoid))
213 rhsm_arguments.extend(['--disable', repoid])
214
215 diff = {'before': diff_before,
216 'after': diff_after,
217 'before_header': "RHSM repositories",
218 'after_header': "RHSM repositories"}
219
220 if not module.check_mode:
221 rc, out, err = run_subscription_manager(module, rhsm_arguments)
222 results = out.splitlines()
223 module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff)
224
225
226 def main():
227 module = AnsibleModule(
228 argument_spec=dict(
229 name=dict(type='list', required=True),
230 state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='enabled'),
231 purge=dict(type='bool', default=False),
232 ),
233 supports_check_mode=True,
234 )
235 name = module.params['name']
236 state = module.params['state']
237 purge = module.params['purge']
238
239 repository_modify(module, state, name, purge)
240
241
242 if __name__ == '__main__':
243 main()
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/modules/packaging/os/rhsm_repository.py b/plugins/modules/packaging/os/rhsm_repository.py
--- a/plugins/modules/packaging/os/rhsm_repository.py
+++ b/plugins/modules/packaging/os/rhsm_repository.py
@@ -217,7 +217,7 @@
'before_header': "RHSM repositories",
'after_header': "RHSM repositories"}
- if not module.check_mode:
+ if not module.check_mode and changed:
rc, out, err = run_subscription_manager(module, rhsm_arguments)
results = out.splitlines()
module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff)
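The change above boils down to an idempotence guard: the external `subscription-manager` call is only made when the task is not in check mode and at least one repository actually needs to be toggled. A self-contained sketch of that guard, assuming the RHSM CLI is installed; the function and variable names here are illustrative, not the module's own:

```python
import subprocess


def enable_repos(repo_ids_to_enable, check_mode=False):
    """Enable the given RHSM repository IDs, skipping the slow CLI call on no-ops."""
    changed = bool(repo_ids_to_enable)
    if changed and not check_mode:
        args = ["subscription-manager", "repos"]
        for repo_id in repo_ids_to_enable:
            args += ["--enable", repo_id]
        subprocess.run(args, check=True)  # the expensive call, now executed only when needed
    return changed
```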
| {"golden_diff": "diff --git a/plugins/modules/packaging/os/rhsm_repository.py b/plugins/modules/packaging/os/rhsm_repository.py\n--- a/plugins/modules/packaging/os/rhsm_repository.py\n+++ b/plugins/modules/packaging/os/rhsm_repository.py\n@@ -217,7 +217,7 @@\n 'before_header': \"RHSM repositories\",\n 'after_header': \"RHSM repositories\"}\n \n- if not module.check_mode:\n+ if not module.check_mode and changed:\n rc, out, err = run_subscription_manager(module, rhsm_arguments)\n results = out.splitlines()\n module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff)\n", "issue": "rhsm_repository is really slow\n##### SUMMARY\r\nUsing rhsm_repository is really slow, making it annoying to use a playback that uses it.\r\n(copied from https://github.com/ansible/ansible/issues/69722)\r\n\r\n##### ISSUE TYPE\r\n- Bug Report\r\n\r\n##### COMPONENT NAME\r\nrhsm_repository\r\n\r\n##### ANSIBLE VERSION\r\n```\r\nansible 2.9.9\r\n config file = /root/ansible/ansible.cfg\r\n configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python2.7/site-packages/ansible\r\n executable location = /usr/bin/ansible\r\n python version = 2.7.5 (default, Apr 2 2020, 13:16:51) [GCC 4.8.5 20150623 (Red Hat 4.8.5-39)]\r\n```\r\n\r\n##### CONFIGURATION\r\n```\r\n[defaults]\r\nstdout_callback = yaml\r\nforce_handlers = True\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\nAnsible machine:\r\nTarget machine: RHEL 8\r\n\r\n\r\n##### STEPS TO REPRODUCE\r\n```yaml\r\n- name: enable Red Hat CodeReady repo\r\n rhsm_repository:\r\n name: codeready-builder-for-rhel-8-x86_64-rpms\r\n when: ansible_facts['distribution'] == \"RedHat\" and ansible_facts['distribution_major_version'] == \"8\"\r\n```\r\n\r\n##### EXPECTED RESULTS\r\nThe above task should complete in a short time, at least when the repository is already enabled.\r\n\r\n\r\n##### ACTUAL RESULTS\r\nYou have time to get coffee whilst your playbook runs.\n", "before_files": [{"content": "#!/usr/bin/python\n\n# Copyright: (c) 2017, Giovanni Sciortino (@giovannisciortino)\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = '''\n---\nmodule: rhsm_repository\nshort_description: Manage RHSM repositories using the subscription-manager command\ndescription:\n - Manage (Enable/Disable) RHSM repositories to the Red Hat Subscription\n Management entitlement platform using the C(subscription-manager) command.\nauthor: Giovanni Sciortino (@giovannisciortino)\nnotes:\n - In order to manage RHSM repositories the system must be already registered\n to RHSM manually or using the Ansible C(redhat_subscription) module.\n\nrequirements:\n - subscription-manager\noptions:\n state:\n description:\n - If state is equal to present or disabled, indicates the desired\n repository state.\n choices: [present, enabled, absent, disabled]\n required: True\n default: \"present\"\n name:\n description:\n - The ID of repositories to enable.\n - To operate on several repositories this can accept a comma separated\n list or a YAML list.\n required: True\n purge:\n description:\n - Disable all currently enabled repositories that are not not specified in C(name).\n Only set this to C(True) if passing in a list of repositories to the C(name) field.\n Using this with C(loop) will most likely not have the desired result.\n type: bool\n 
default: False\n'''\n\nEXAMPLES = '''\n- name: Enable a RHSM repository\n rhsm_repository:\n name: rhel-7-server-rpms\n\n- name: Disable all RHSM repositories\n rhsm_repository:\n name: '*'\n state: disabled\n\n- name: Enable all repositories starting with rhel-6-server\n rhsm_repository:\n name: rhel-6-server*\n state: enabled\n\n- name: Disable all repositories except rhel-7-server-rpms\n rhsm_repository:\n name: rhel-7-server-rpms\n purge: True\n'''\n\nRETURN = '''\nrepositories:\n description:\n - The list of RHSM repositories with their states.\n - When this module is used to change the repository states, this list contains the updated states after the changes.\n returned: success\n type: list\n'''\n\nimport re\nimport os\nfrom fnmatch import fnmatch\nfrom copy import deepcopy\nfrom ansible.module_utils.basic import AnsibleModule\n\n\ndef run_subscription_manager(module, arguments):\n # Execute subscription-manager with arguments and manage common errors\n rhsm_bin = module.get_bin_path('subscription-manager')\n if not rhsm_bin:\n module.fail_json(msg='The executable file subscription-manager was not found in PATH')\n\n lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')\n rc, out, err = module.run_command(\"%s %s\" % (rhsm_bin, \" \".join(arguments)), environ_update=lang_env)\n\n if rc == 1 and (err == 'The password you typed is invalid.\\nPlease try again.\\n' or os.getuid() != 0):\n module.fail_json(msg='The executable file subscription-manager must be run using root privileges')\n elif rc == 0 and out == 'This system has no repositories available through subscriptions.\\n':\n module.fail_json(msg='This system has no repositories available through subscriptions')\n elif rc == 1:\n module.fail_json(msg='subscription-manager failed with the following error: %s' % err)\n else:\n return rc, out, err\n\n\ndef get_repository_list(module, list_parameter):\n # Generate RHSM repository list and return a list of dict\n if list_parameter == 'list_enabled':\n rhsm_arguments = ['repos', '--list-enabled']\n elif list_parameter == 'list_disabled':\n rhsm_arguments = ['repos', '--list-disabled']\n elif list_parameter == 'list':\n rhsm_arguments = ['repos', '--list']\n rc, out, err = run_subscription_manager(module, rhsm_arguments)\n\n skip_lines = [\n '+----------------------------------------------------------+',\n ' Available Repositories in /etc/yum.repos.d/redhat.repo'\n ]\n repo_id_re = re.compile(r'Repo ID:\\s+(.*)')\n repo_name_re = re.compile(r'Repo Name:\\s+(.*)')\n repo_url_re = re.compile(r'Repo URL:\\s+(.*)')\n repo_enabled_re = re.compile(r'Enabled:\\s+(.*)')\n\n repo_id = ''\n repo_name = ''\n repo_url = ''\n repo_enabled = ''\n\n repo_result = []\n for line in out.splitlines():\n if line == '' or line in skip_lines:\n continue\n\n repo_id_match = repo_id_re.match(line)\n if repo_id_match:\n repo_id = repo_id_match.group(1)\n continue\n\n repo_name_match = repo_name_re.match(line)\n if repo_name_match:\n repo_name = repo_name_match.group(1)\n continue\n\n repo_url_match = repo_url_re.match(line)\n if repo_url_match:\n repo_url = repo_url_match.group(1)\n continue\n\n repo_enabled_match = repo_enabled_re.match(line)\n if repo_enabled_match:\n repo_enabled = repo_enabled_match.group(1)\n\n repo = {\n \"id\": repo_id,\n \"name\": repo_name,\n \"url\": repo_url,\n \"enabled\": True if repo_enabled == '1' else False\n }\n\n repo_result.append(repo)\n\n return repo_result\n\n\ndef repository_modify(module, state, name, purge=False):\n name = set(name)\n current_repo_list = 
get_repository_list(module, 'list')\n updated_repo_list = deepcopy(current_repo_list)\n matched_existing_repo = {}\n for repoid in name:\n matched_existing_repo[repoid] = []\n for idx, repo in enumerate(current_repo_list):\n if fnmatch(repo['id'], repoid):\n matched_existing_repo[repoid].append(repo)\n # Update current_repo_list to return it as result variable\n updated_repo_list[idx]['enabled'] = True if state == 'enabled' else False\n\n changed = False\n results = []\n diff_before = \"\"\n diff_after = \"\"\n rhsm_arguments = ['repos']\n\n for repoid in matched_existing_repo:\n if len(matched_existing_repo[repoid]) == 0:\n results.append(\"%s is not a valid repository ID\" % repoid)\n module.fail_json(results=results, msg=\"%s is not a valid repository ID\" % repoid)\n for repo in matched_existing_repo[repoid]:\n if state in ['disabled', 'absent']:\n if repo['enabled']:\n changed = True\n diff_before += \"Repository '%s' is enabled for this system\\n\" % repo['id']\n diff_after += \"Repository '%s' is disabled for this system\\n\" % repo['id']\n results.append(\"Repository '%s' is disabled for this system\" % repo['id'])\n rhsm_arguments += ['--disable', repo['id']]\n elif state in ['enabled', 'present']:\n if not repo['enabled']:\n changed = True\n diff_before += \"Repository '%s' is disabled for this system\\n\" % repo['id']\n diff_after += \"Repository '%s' is enabled for this system\\n\" % repo['id']\n results.append(\"Repository '%s' is enabled for this system\" % repo['id'])\n rhsm_arguments += ['--enable', repo['id']]\n\n # Disable all enabled repos on the system that are not in the task and not\n # marked as disabled by the task\n if purge:\n enabled_repo_ids = set(repo['id'] for repo in updated_repo_list if repo['enabled'])\n matched_repoids_set = set(matched_existing_repo.keys())\n difference = enabled_repo_ids.difference(matched_repoids_set)\n if len(difference) > 0:\n for repoid in difference:\n changed = True\n diff_before.join(\"Repository '{repoid}'' is enabled for this system\\n\".format(repoid=repoid))\n diff_after.join(\"Repository '{repoid}' is disabled for this system\\n\".format(repoid=repoid))\n results.append(\"Repository '{repoid}' is disabled for this system\".format(repoid=repoid))\n rhsm_arguments.extend(['--disable', repoid])\n\n diff = {'before': diff_before,\n 'after': diff_after,\n 'before_header': \"RHSM repositories\",\n 'after_header': \"RHSM repositories\"}\n\n if not module.check_mode:\n rc, out, err = run_subscription_manager(module, rhsm_arguments)\n results = out.splitlines()\n module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff)\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(type='list', required=True),\n state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='enabled'),\n purge=dict(type='bool', default=False),\n ),\n supports_check_mode=True,\n )\n name = module.params['name']\n state = module.params['state']\n purge = module.params['purge']\n\n repository_modify(module, state, name, purge)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/packaging/os/rhsm_repository.py"}], "after_files": [{"content": "#!/usr/bin/python\n\n# Copyright: (c) 2017, Giovanni Sciortino (@giovannisciortino)\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = '''\n---\nmodule: rhsm_repository\nshort_description: 
Manage RHSM repositories using the subscription-manager command\ndescription:\n - Manage (Enable/Disable) RHSM repositories to the Red Hat Subscription\n Management entitlement platform using the C(subscription-manager) command.\nauthor: Giovanni Sciortino (@giovannisciortino)\nnotes:\n - In order to manage RHSM repositories the system must be already registered\n to RHSM manually or using the Ansible C(redhat_subscription) module.\n\nrequirements:\n - subscription-manager\noptions:\n state:\n description:\n - If state is equal to present or disabled, indicates the desired\n repository state.\n choices: [present, enabled, absent, disabled]\n required: True\n default: \"present\"\n name:\n description:\n - The ID of repositories to enable.\n - To operate on several repositories this can accept a comma separated\n list or a YAML list.\n required: True\n purge:\n description:\n - Disable all currently enabled repositories that are not not specified in C(name).\n Only set this to C(True) if passing in a list of repositories to the C(name) field.\n Using this with C(loop) will most likely not have the desired result.\n type: bool\n default: False\n'''\n\nEXAMPLES = '''\n- name: Enable a RHSM repository\n rhsm_repository:\n name: rhel-7-server-rpms\n\n- name: Disable all RHSM repositories\n rhsm_repository:\n name: '*'\n state: disabled\n\n- name: Enable all repositories starting with rhel-6-server\n rhsm_repository:\n name: rhel-6-server*\n state: enabled\n\n- name: Disable all repositories except rhel-7-server-rpms\n rhsm_repository:\n name: rhel-7-server-rpms\n purge: True\n'''\n\nRETURN = '''\nrepositories:\n description:\n - The list of RHSM repositories with their states.\n - When this module is used to change the repository states, this list contains the updated states after the changes.\n returned: success\n type: list\n'''\n\nimport re\nimport os\nfrom fnmatch import fnmatch\nfrom copy import deepcopy\nfrom ansible.module_utils.basic import AnsibleModule\n\n\ndef run_subscription_manager(module, arguments):\n # Execute subscription-manager with arguments and manage common errors\n rhsm_bin = module.get_bin_path('subscription-manager')\n if not rhsm_bin:\n module.fail_json(msg='The executable file subscription-manager was not found in PATH')\n\n lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')\n rc, out, err = module.run_command(\"%s %s\" % (rhsm_bin, \" \".join(arguments)), environ_update=lang_env)\n\n if rc == 1 and (err == 'The password you typed is invalid.\\nPlease try again.\\n' or os.getuid() != 0):\n module.fail_json(msg='The executable file subscription-manager must be run using root privileges')\n elif rc == 0 and out == 'This system has no repositories available through subscriptions.\\n':\n module.fail_json(msg='This system has no repositories available through subscriptions')\n elif rc == 1:\n module.fail_json(msg='subscription-manager failed with the following error: %s' % err)\n else:\n return rc, out, err\n\n\ndef get_repository_list(module, list_parameter):\n # Generate RHSM repository list and return a list of dict\n if list_parameter == 'list_enabled':\n rhsm_arguments = ['repos', '--list-enabled']\n elif list_parameter == 'list_disabled':\n rhsm_arguments = ['repos', '--list-disabled']\n elif list_parameter == 'list':\n rhsm_arguments = ['repos', '--list']\n rc, out, err = run_subscription_manager(module, rhsm_arguments)\n\n skip_lines = [\n '+----------------------------------------------------------+',\n ' Available Repositories in 
/etc/yum.repos.d/redhat.repo'\n ]\n repo_id_re = re.compile(r'Repo ID:\\s+(.*)')\n repo_name_re = re.compile(r'Repo Name:\\s+(.*)')\n repo_url_re = re.compile(r'Repo URL:\\s+(.*)')\n repo_enabled_re = re.compile(r'Enabled:\\s+(.*)')\n\n repo_id = ''\n repo_name = ''\n repo_url = ''\n repo_enabled = ''\n\n repo_result = []\n for line in out.splitlines():\n if line == '' or line in skip_lines:\n continue\n\n repo_id_match = repo_id_re.match(line)\n if repo_id_match:\n repo_id = repo_id_match.group(1)\n continue\n\n repo_name_match = repo_name_re.match(line)\n if repo_name_match:\n repo_name = repo_name_match.group(1)\n continue\n\n repo_url_match = repo_url_re.match(line)\n if repo_url_match:\n repo_url = repo_url_match.group(1)\n continue\n\n repo_enabled_match = repo_enabled_re.match(line)\n if repo_enabled_match:\n repo_enabled = repo_enabled_match.group(1)\n\n repo = {\n \"id\": repo_id,\n \"name\": repo_name,\n \"url\": repo_url,\n \"enabled\": True if repo_enabled == '1' else False\n }\n\n repo_result.append(repo)\n\n return repo_result\n\n\ndef repository_modify(module, state, name, purge=False):\n name = set(name)\n current_repo_list = get_repository_list(module, 'list')\n updated_repo_list = deepcopy(current_repo_list)\n matched_existing_repo = {}\n for repoid in name:\n matched_existing_repo[repoid] = []\n for idx, repo in enumerate(current_repo_list):\n if fnmatch(repo['id'], repoid):\n matched_existing_repo[repoid].append(repo)\n # Update current_repo_list to return it as result variable\n updated_repo_list[idx]['enabled'] = True if state == 'enabled' else False\n\n changed = False\n results = []\n diff_before = \"\"\n diff_after = \"\"\n rhsm_arguments = ['repos']\n\n for repoid in matched_existing_repo:\n if len(matched_existing_repo[repoid]) == 0:\n results.append(\"%s is not a valid repository ID\" % repoid)\n module.fail_json(results=results, msg=\"%s is not a valid repository ID\" % repoid)\n for repo in matched_existing_repo[repoid]:\n if state in ['disabled', 'absent']:\n if repo['enabled']:\n changed = True\n diff_before += \"Repository '%s' is enabled for this system\\n\" % repo['id']\n diff_after += \"Repository '%s' is disabled for this system\\n\" % repo['id']\n results.append(\"Repository '%s' is disabled for this system\" % repo['id'])\n rhsm_arguments += ['--disable', repo['id']]\n elif state in ['enabled', 'present']:\n if not repo['enabled']:\n changed = True\n diff_before += \"Repository '%s' is disabled for this system\\n\" % repo['id']\n diff_after += \"Repository '%s' is enabled for this system\\n\" % repo['id']\n results.append(\"Repository '%s' is enabled for this system\" % repo['id'])\n rhsm_arguments += ['--enable', repo['id']]\n\n # Disable all enabled repos on the system that are not in the task and not\n # marked as disabled by the task\n if purge:\n enabled_repo_ids = set(repo['id'] for repo in updated_repo_list if repo['enabled'])\n matched_repoids_set = set(matched_existing_repo.keys())\n difference = enabled_repo_ids.difference(matched_repoids_set)\n if len(difference) > 0:\n for repoid in difference:\n changed = True\n diff_before.join(\"Repository '{repoid}'' is enabled for this system\\n\".format(repoid=repoid))\n diff_after.join(\"Repository '{repoid}' is disabled for this system\\n\".format(repoid=repoid))\n results.append(\"Repository '{repoid}' is disabled for this system\".format(repoid=repoid))\n rhsm_arguments.extend(['--disable', repoid])\n\n diff = {'before': diff_before,\n 'after': diff_after,\n 'before_header': \"RHSM 
repositories\",\n 'after_header': \"RHSM repositories\"}\n\n if not module.check_mode and changed:\n rc, out, err = run_subscription_manager(module, rhsm_arguments)\n results = out.splitlines()\n module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff)\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(type='list', required=True),\n state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='enabled'),\n purge=dict(type='bool', default=False),\n ),\n supports_check_mode=True,\n )\n name = module.params['name']\n state = module.params['state']\n purge = module.params['purge']\n\n repository_modify(module, state, name, purge)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/packaging/os/rhsm_repository.py"}]} | 3,310 | 154 |
gh_patches_debug_19989 | rasdani/github-patches | git_diff | AnalogJ__lexicon-276 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
api change of online.net broke lexicon plugin
Hi,
Blame them! I'm working on it...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lexicon/providers/online.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import print_function
3
4 import json
5 import logging
6
7 import requests
8
9 from .base import Provider as BaseProvider
10
11 logger = logging.getLogger(__name__)
12
13
14 def ProviderParser(subparser):
15 subparser.add_argument("--auth-token", help="specify private api token")
16
17 def to_data(type, content):
18 if type == "TXT":
19 return '"{0}"'.format(content)
20 else:
21 return content
22
23 class Provider(BaseProvider):
24
25 def __init__(self, options, engine_overrides=None):
26 super(Provider, self).__init__(options, engine_overrides)
27 self.zone_name = 'Zone Automatic Lexicon '
28 self.domain_id = None
29 self.passive_zone = None
30 self.active_zone = None
31 self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.online.net/api/v1')
32
33 def authenticate(self):
34 payload = self._get('/domain/')
35 domain = self.options['domain']
36 did = None
37 for row in payload:
38 if row['name'] == domain:
39 did = row['id']
40 break
41
42 if did is None:
43 raise Exception('No domain found')
44
45 self.domain_id = did
46 self.init_zones()
47
48 def list_zones(self):
49 return self._get('/domain/{0}/version'.format(self.domain_id))
50
51 def init_zones(self):
52 # sets current zone version
53 zone_name_a = self.zone_name + 'A'
54 zone_name_b = self.zone_name + 'B'
55 active_row = None
56 passive_row = None
57 for row in self.list_zones():
58 if row['active'] == True:
59 active_row = row
60 elif row['name'] == zone_name_a or row['name'] == zone_name_b:
61 passive_row = row
62
63 if passive_row is None:
64 passive_row = self._post('/domain/{0}/version'.format(self.domain_id), {
65 'name': zone_name_b if active_row['name'] == zone_name_a else zone_name_a
66 })
67
68 self.active_zone = active_row['uuid_ref']
69 self.passive_zone = passive_row['uuid_ref']
70 self.update_passive_zone()
71
72
73 def update_passive_zone(self):
74 self._put(
75 '/domain/{0}/version/{1}/zone_from_bind'.format(
76 self.domain_id,
77 self.passive_zone
78 ),
79 self.get_bind_zone()
80 )
81
82 def get_bind_zone(self):
83 records = self.list_zone_records(self.active_zone)
84 # then convert records to bind format
85 bindStr = ''
86 for record in records:
87 bindStr = bindStr + '{0} {1} IN {2} {3}{4}\n'.format(
88 record['name'] or '@',
89 record['ttl'],
90 record['type'],
91 '{0} '.format(record['aux']) if 'aux' in record else '',
92 record['data'] or ''
93 )
94 return bindStr
95
96 def enable_zone(self):
97 zone = self.passive_zone
98 if zone is None:
99 raise Exception("Could not enable uninitialized passive_zone")
100 payload = self._patch('/domain/{0}/version/{1}/enable'.format(
101 self.domain_id,
102 zone
103 ))
104 self.passive_zone = self.active_zone
105 self.active_zone = zone
106 self.update_passive_zone()
107
108
109 # Create record. If record already exists with the same content, do nothing'
110 def create_record(self, type, name, content):
111 try:
112 record = self.find_record(type, name, content)
113 if record is not None:
114 return True
115
116 record = {
117 'name': self._fqdn_name(name),
118 'type': type,
119 'data': to_data(type, content),
120 'priority': self.options['priority'] or '',
121 'ttl': self.options['ttl'] or ''
122 }
123
124 payload = self._post(
125 '/domain/{0}/version/{1}/zone'.format(
126 self.domain_id,
127 self.passive_zone
128 ),
129 record
130 )
131 except Exception as e:
132 logger.debug(e)
133 return False
134
135 self.enable_zone()
136 logger.debug('create_record: %s', True)
137 return True
138
139 def find_zone_records(self, zone, type=None, name=None, content=None):
140 records = []
141 for record in self.list_zone_records(zone):
142 processed_record = {
143 'id': record['id'],
144 'type': record['type'],
145 'name': self._full_name(record['name']),
146 'ttl': record['ttl'],
147 'content': record['data'],
148 'priority': record['aux'] if 'aux' in record else ''
149 }
150 records.append(self._clean_TXT_record(processed_record))
151
152 if type:
153 records = [record for record in records if record['type'] == type]
154 if name:
155 fullName = self._full_name(name)
156 records = [record for record in records if record['name'] == fullName]
157 if content:
158 records = [record for record in records if record['content'] == content]
159
160 logger.debug('list_records: %s', records)
161 return records
162
163 def list_zone_records(self, zone_id):
164 return self._get('/domain/{0}/version/{1}/zone'.format(self.domain_id, zone_id))
165
166 def list_records(self, type=None, name=None, content=None):
167 return self.find_zone_records(self.passive_zone, type, name, content)
168
169 def find_record(self, type=None, name=None, content=None):
170 record = None
171 records = self.list_records(type, name, content)
172 if len(records) < 1:
173 return None
174 else:
175 return records[0]
176
177
178 # Create or update a record.
179 def update_record(self, id, type=None, name=None, content=None):
180 record = self.find_record(type, name)
181 if record is None:
182 logger.debug("cannot find record to update: %s %s %s", id, type, name)
183 return True
184 if type:
185 record['type'] = type
186 if name:
187 record['name'] = self._fqdn_name(name)
188 if content:
189 record['data'] = to_data(type, content)
190 if self.options.get('ttl'):
191 record['ttl'] = self.options.get('ttl')
192 # it is weird that 'aux' becomes 'priority' in online's api
193 if self.options['priority']:
194 record['priority'] = self.options['priority']
195
196 if id is None:
197 id = record['id']
198
199 record.pop('id')
200
201 try:
202 payload = self._patch('/domain/{0}/version/{1}/zone/{2}'.format(
203 self.domain_id,
204 self.passive_zone,
205 id
206 ), record)
207
208 except Exception as e:
209 logger.debug(e)
210 return False
211
212 self.enable_zone()
213 # If it didn't raise from the http status code, then we're good
214 logger.debug('update_record: %s', id)
215 return True
216
217 # Delete an existing record.
218 # If record does not exist, do nothing.
219 def delete_record(self, id=None, type=None, name=None, content=None):
220 records = self.list_records(type, name, content)
221 if len(records) == 0:
222 logger.debug("Cannot find records %s %s %s", type, name, content)
223 return False
224 logger.debug('delete_records: %s records found', len(records))
225 try:
226 for record in records:
227 payload = self._delete('/domain/{0}/version/{1}/zone/{2}'.format(
228 self.domain_id,
229 self.passive_zone,
230 record['id']
231 ))
232 except Exception as e:
233 logger.debug(e)
234 return False
235
236 self.enable_zone()
237 # is always True at this point, if a non 200 response is returned an error is raised.
238 logger.debug('delete_record: %s', True)
239 return True
240
241 def _patch(self, url='/', data=None, query_params=None):
242 return self._request('PATCH', url, data=data, query_params=query_params)
243
244 # Helpers
245 def _request(self, action='GET', url='/', data=None, query_params=None):
246 if query_params is None:
247 query_params = {}
248
249 headers = {
250 'Accept': 'application/json',
251 'Authorization': 'Bearer {0}'.format(self.options['auth_token'])
252 }
253 if data is not None:
254 if type(data) is str:
255 headers['Content-Type'] = 'text/plain';
256 else:
257 headers['Content-Type'] = 'application/json';
258 data = json.dumps(data)
259
260 r = requests.request(
261 action,
262 self.api_endpoint + url,
263 params=query_params,
264 data=data,
265 headers=headers
266 )
267 r.raise_for_status() # if the request fails for any reason, throw an error.
268
269 return r.text and r.json() or ''
270
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lexicon/providers/online.py b/lexicon/providers/online.py
--- a/lexicon/providers/online.py
+++ b/lexicon/providers/online.py
@@ -25,24 +25,12 @@
def __init__(self, options, engine_overrides=None):
super(Provider, self).__init__(options, engine_overrides)
self.zone_name = 'Zone Automatic Lexicon '
- self.domain_id = None
self.passive_zone = None
self.active_zone = None
+ self.domain_id = self.options['domain']
self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.online.net/api/v1')
def authenticate(self):
- payload = self._get('/domain/')
- domain = self.options['domain']
- did = None
- for row in payload:
- if row['name'] == domain:
- did = row['id']
- break
-
- if did is None:
- raise Exception('No domain found')
-
- self.domain_id = did
self.init_zones()
def list_zones(self):
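The fix above sidesteps the broken `GET /domain/` listing entirely: the configured domain name is used directly as the identifier in later API paths. A hedged, self-contained sketch of that idea (class and method names are illustrative, not the provider's API):

```python
class OnlineNetPathsSketch:
    """Illustrates using the domain name itself as the path segment, as the diff above does."""

    def __init__(self, options):
        self.options = options
        self.domain_id = options["domain"]  # e.g. "example.com", no numeric id lookup needed
        self.api_endpoint = "https://api.online.net/api/v1"

    def zone_versions_url(self):
        # Becomes .../domain/example.com/version instead of .../domain/<numeric id>/version
        return "{0}/domain/{1}/version".format(self.api_endpoint, self.domain_id)
```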
| {"golden_diff": "diff --git a/lexicon/providers/online.py b/lexicon/providers/online.py\n--- a/lexicon/providers/online.py\n+++ b/lexicon/providers/online.py\n@@ -25,24 +25,12 @@\n def __init__(self, options, engine_overrides=None):\n super(Provider, self).__init__(options, engine_overrides)\n self.zone_name = 'Zone Automatic Lexicon '\n- self.domain_id = None\n self.passive_zone = None\n self.active_zone = None\n+ self.domain_id = self.options['domain']\n self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.online.net/api/v1')\n \n def authenticate(self):\n- payload = self._get('/domain/')\n- domain = self.options['domain']\n- did = None\n- for row in payload:\n- if row['name'] == domain:\n- did = row['id']\n- break\n-\n- if did is None:\n- raise Exception('No domain found')\n-\n- self.domain_id = did\n self.init_zones()\n \n def list_zones(self):\n", "issue": "api change of online.net broke lexicon plugin\nHi,\r\n\r\nBlame them ! i'm working on it...\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport json\nimport logging\n\nimport requests\n\nfrom .base import Provider as BaseProvider\n\nlogger = logging.getLogger(__name__)\n\n\ndef ProviderParser(subparser):\n subparser.add_argument(\"--auth-token\", help=\"specify private api token\")\n\ndef to_data(type, content):\n if type == \"TXT\":\n return '\"{0}\"'.format(content)\n else:\n return content\n\nclass Provider(BaseProvider):\n\n def __init__(self, options, engine_overrides=None):\n super(Provider, self).__init__(options, engine_overrides)\n self.zone_name = 'Zone Automatic Lexicon '\n self.domain_id = None\n self.passive_zone = None\n self.active_zone = None\n self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.online.net/api/v1')\n\n def authenticate(self):\n payload = self._get('/domain/')\n domain = self.options['domain']\n did = None\n for row in payload:\n if row['name'] == domain:\n did = row['id']\n break\n\n if did is None:\n raise Exception('No domain found')\n\n self.domain_id = did\n self.init_zones()\n\n def list_zones(self):\n return self._get('/domain/{0}/version'.format(self.domain_id))\n\n def init_zones(self):\n # sets current zone version\n zone_name_a = self.zone_name + 'A'\n zone_name_b = self.zone_name + 'B'\n active_row = None\n passive_row = None\n for row in self.list_zones():\n if row['active'] == True:\n active_row = row\n elif row['name'] == zone_name_a or row['name'] == zone_name_b:\n passive_row = row\n\n if passive_row is None:\n passive_row = self._post('/domain/{0}/version'.format(self.domain_id), {\n 'name': zone_name_b if active_row['name'] == zone_name_a else zone_name_a\n })\n\n self.active_zone = active_row['uuid_ref']\n self.passive_zone = passive_row['uuid_ref']\n self.update_passive_zone()\n\n\n def update_passive_zone(self):\n self._put(\n '/domain/{0}/version/{1}/zone_from_bind'.format(\n self.domain_id,\n self.passive_zone\n ),\n self.get_bind_zone()\n )\n\n def get_bind_zone(self):\n records = self.list_zone_records(self.active_zone)\n # then convert records to bind format\n bindStr = ''\n for record in records:\n bindStr = bindStr + '{0} {1} IN {2} {3}{4}\\n'.format(\n record['name'] or '@',\n record['ttl'],\n record['type'],\n '{0} '.format(record['aux']) if 'aux' in record else '',\n record['data'] or ''\n )\n return bindStr\n\n def enable_zone(self):\n zone = self.passive_zone\n if zone is None:\n raise Exception(\"Could not enable uninitialized passive_zone\")\n 
payload = self._patch('/domain/{0}/version/{1}/enable'.format(\n self.domain_id,\n zone\n ))\n self.passive_zone = self.active_zone\n self.active_zone = zone\n self.update_passive_zone()\n\n\n # Create record. If record already exists with the same content, do nothing'\n def create_record(self, type, name, content):\n try:\n record = self.find_record(type, name, content)\n if record is not None:\n return True\n\n record = {\n 'name': self._fqdn_name(name),\n 'type': type,\n 'data': to_data(type, content),\n 'priority': self.options['priority'] or '',\n 'ttl': self.options['ttl'] or ''\n }\n\n payload = self._post(\n '/domain/{0}/version/{1}/zone'.format(\n self.domain_id,\n self.passive_zone\n ),\n record\n )\n except Exception as e:\n logger.debug(e)\n return False\n\n self.enable_zone()\n logger.debug('create_record: %s', True)\n return True\n\n def find_zone_records(self, zone, type=None, name=None, content=None):\n records = []\n for record in self.list_zone_records(zone):\n processed_record = {\n 'id': record['id'],\n 'type': record['type'],\n 'name': self._full_name(record['name']),\n 'ttl': record['ttl'],\n 'content': record['data'],\n 'priority': record['aux'] if 'aux' in record else ''\n }\n records.append(self._clean_TXT_record(processed_record))\n\n if type:\n records = [record for record in records if record['type'] == type]\n if name:\n fullName = self._full_name(name)\n records = [record for record in records if record['name'] == fullName]\n if content:\n records = [record for record in records if record['content'] == content]\n\n logger.debug('list_records: %s', records)\n return records\n\n def list_zone_records(self, zone_id):\n return self._get('/domain/{0}/version/{1}/zone'.format(self.domain_id, zone_id))\n\n def list_records(self, type=None, name=None, content=None):\n return self.find_zone_records(self.passive_zone, type, name, content)\n\n def find_record(self, type=None, name=None, content=None):\n record = None\n records = self.list_records(type, name, content)\n if len(records) < 1:\n return None\n else:\n return records[0]\n\n\n # Create or update a record.\n def update_record(self, id, type=None, name=None, content=None):\n record = self.find_record(type, name)\n if record is None:\n logger.debug(\"cannot find record to update: %s %s %s\", id, type, name)\n return True\n if type:\n record['type'] = type\n if name:\n record['name'] = self._fqdn_name(name)\n if content:\n record['data'] = to_data(type, content)\n if self.options.get('ttl'):\n record['ttl'] = self.options.get('ttl')\n # it is weird that 'aux' becomes 'priority' in online's api\n if self.options['priority']:\n record['priority'] = self.options['priority']\n\n if id is None:\n id = record['id']\n\n record.pop('id')\n\n try:\n payload = self._patch('/domain/{0}/version/{1}/zone/{2}'.format(\n self.domain_id,\n self.passive_zone,\n id\n ), record)\n\n except Exception as e:\n logger.debug(e)\n return False\n\n self.enable_zone()\n # If it didn't raise from the http status code, then we're good\n logger.debug('update_record: %s', id)\n return True\n\n # Delete an existing record.\n # If record does not exist, do nothing.\n def delete_record(self, id=None, type=None, name=None, content=None):\n records = self.list_records(type, name, content)\n if len(records) == 0:\n logger.debug(\"Cannot find records %s %s %s\", type, name, content)\n return False\n logger.debug('delete_records: %s records found', len(records))\n try:\n for record in records:\n payload = 
self._delete('/domain/{0}/version/{1}/zone/{2}'.format(\n self.domain_id,\n self.passive_zone,\n record['id']\n ))\n except Exception as e:\n logger.debug(e)\n return False\n\n self.enable_zone()\n # is always True at this point, if a non 200 response is returned an error is raised.\n logger.debug('delete_record: %s', True)\n return True\n\n def _patch(self, url='/', data=None, query_params=None):\n return self._request('PATCH', url, data=data, query_params=query_params)\n\n # Helpers\n def _request(self, action='GET', url='/', data=None, query_params=None):\n if query_params is None:\n query_params = {}\n\n headers = {\n 'Accept': 'application/json',\n 'Authorization': 'Bearer {0}'.format(self.options['auth_token'])\n }\n if data is not None:\n if type(data) is str:\n headers['Content-Type'] = 'text/plain';\n else:\n headers['Content-Type'] = 'application/json';\n data = json.dumps(data)\n\n r = requests.request(\n action,\n self.api_endpoint + url,\n params=query_params,\n data=data,\n headers=headers\n )\n r.raise_for_status() # if the request fails for any reason, throw an error.\n\n return r.text and r.json() or ''\n", "path": "lexicon/providers/online.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport json\nimport logging\n\nimport requests\n\nfrom .base import Provider as BaseProvider\n\nlogger = logging.getLogger(__name__)\n\n\ndef ProviderParser(subparser):\n subparser.add_argument(\"--auth-token\", help=\"specify private api token\")\n\ndef to_data(type, content):\n if type == \"TXT\":\n return '\"{0}\"'.format(content)\n else:\n return content\n\nclass Provider(BaseProvider):\n\n def __init__(self, options, engine_overrides=None):\n super(Provider, self).__init__(options, engine_overrides)\n self.zone_name = 'Zone Automatic Lexicon '\n self.passive_zone = None\n self.active_zone = None\n self.domain_id = self.options['domain']\n self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.online.net/api/v1')\n\n def authenticate(self):\n self.init_zones()\n\n def list_zones(self):\n return self._get('/domain/{0}/version'.format(self.domain_id))\n\n def init_zones(self):\n # sets current zone version\n zone_name_a = self.zone_name + 'A'\n zone_name_b = self.zone_name + 'B'\n active_row = None\n passive_row = None\n for row in self.list_zones():\n if row['active'] == True:\n active_row = row\n elif row['name'] == zone_name_a or row['name'] == zone_name_b:\n passive_row = row\n\n if passive_row is None:\n passive_row = self._post('/domain/{0}/version'.format(self.domain_id), {\n 'name': zone_name_b if active_row['name'] == zone_name_a else zone_name_a\n })\n\n self.active_zone = active_row['uuid_ref']\n self.passive_zone = passive_row['uuid_ref']\n self.update_passive_zone()\n\n\n def update_passive_zone(self):\n self._put(\n '/domain/{0}/version/{1}/zone_from_bind'.format(\n self.domain_id,\n self.passive_zone\n ),\n self.get_bind_zone()\n )\n\n def get_bind_zone(self):\n records = self.list_zone_records(self.active_zone)\n # then convert records to bind format\n bindStr = ''\n for record in records:\n bindStr = bindStr + '{0} {1} IN {2} {3}{4}\\n'.format(\n record['name'] or '@',\n record['ttl'],\n record['type'],\n '{0} '.format(record['aux']) if 'aux' in record else '',\n record['data'] or ''\n )\n return bindStr\n\n def enable_zone(self):\n zone = self.passive_zone\n if zone is None:\n raise Exception(\"Could not enable uninitialized passive_zone\")\n payload = 
self._patch('/domain/{0}/version/{1}/enable'.format(\n self.domain_id,\n zone\n ))\n self.passive_zone = self.active_zone\n self.active_zone = zone\n self.update_passive_zone()\n\n\n # Create record. If record already exists with the same content, do nothing'\n def create_record(self, type, name, content):\n try:\n record = self.find_record(type, name, content)\n if record is not None:\n return True\n\n record = {\n 'name': self._fqdn_name(name),\n 'type': type,\n 'data': to_data(type, content),\n 'priority': self.options['priority'] or '',\n 'ttl': self.options['ttl'] or ''\n }\n\n payload = self._post(\n '/domain/{0}/version/{1}/zone'.format(\n self.domain_id,\n self.passive_zone\n ),\n record\n )\n except Exception as e:\n logger.debug(e)\n return False\n\n self.enable_zone()\n logger.debug('create_record: %s', True)\n return True\n\n def find_zone_records(self, zone, type=None, name=None, content=None):\n records = []\n for record in self.list_zone_records(zone):\n processed_record = {\n 'id': record['id'],\n 'type': record['type'],\n 'name': self._full_name(record['name']),\n 'ttl': record['ttl'],\n 'content': record['data'],\n 'priority': record['aux'] if 'aux' in record else ''\n }\n records.append(self._clean_TXT_record(processed_record))\n\n if type:\n records = [record for record in records if record['type'] == type]\n if name:\n fullName = self._full_name(name)\n records = [record for record in records if record['name'] == fullName]\n if content:\n records = [record for record in records if record['content'] == content]\n\n logger.debug('list_records: %s', records)\n return records\n\n def list_zone_records(self, zone_id):\n return self._get('/domain/{0}/version/{1}/zone'.format(self.domain_id, zone_id))\n\n def list_records(self, type=None, name=None, content=None):\n return self.find_zone_records(self.passive_zone, type, name, content)\n\n def find_record(self, type=None, name=None, content=None):\n record = None\n records = self.list_records(type, name, content)\n if len(records) < 1:\n return None\n else:\n return records[0]\n\n\n # Create or update a record.\n def update_record(self, id, type=None, name=None, content=None):\n record = self.find_record(type, name)\n if record is None:\n logger.debug(\"cannot find record to update: %s %s %s\", id, type, name)\n return True\n if type:\n record['type'] = type\n if name:\n record['name'] = self._fqdn_name(name)\n if content:\n record['data'] = to_data(type, content)\n if self.options.get('ttl'):\n record['ttl'] = self.options.get('ttl')\n # it is weird that 'aux' becomes 'priority' in online's api\n if self.options['priority']:\n record['priority'] = self.options['priority']\n\n if id is None:\n id = record['id']\n\n record.pop('id')\n\n try:\n payload = self._patch('/domain/{0}/version/{1}/zone/{2}'.format(\n self.domain_id,\n self.passive_zone,\n id\n ), record)\n\n except Exception as e:\n logger.debug(e)\n return False\n\n self.enable_zone()\n # If it didn't raise from the http status code, then we're good\n logger.debug('update_record: %s', id)\n return True\n\n # Delete an existing record.\n # If record does not exist, do nothing.\n def delete_record(self, id=None, type=None, name=None, content=None):\n records = self.list_records(type, name, content)\n if len(records) == 0:\n logger.debug(\"Cannot find records %s %s %s\", type, name, content)\n return False\n logger.debug('delete_records: %s records found', len(records))\n try:\n for record in records:\n payload = self._delete('/domain/{0}/version/{1}/zone/{2}'.format(\n 
self.domain_id,\n self.passive_zone,\n record['id']\n ))\n except Exception as e:\n logger.debug(e)\n return False\n\n self.enable_zone()\n # is always True at this point, if a non 200 response is returned an error is raised.\n logger.debug('delete_record: %s', True)\n return True\n\n def _patch(self, url='/', data=None, query_params=None):\n return self._request('PATCH', url, data=data, query_params=query_params)\n\n # Helpers\n def _request(self, action='GET', url='/', data=None, query_params=None):\n if query_params is None:\n query_params = {}\n\n headers = {\n 'Accept': 'application/json',\n 'Authorization': 'Bearer {0}'.format(self.options['auth_token'])\n }\n if data is not None:\n if type(data) is str:\n headers['Content-Type'] = 'text/plain';\n else:\n headers['Content-Type'] = 'application/json';\n data = json.dumps(data)\n\n r = requests.request(\n action,\n self.api_endpoint + url,\n params=query_params,\n data=data,\n headers=headers\n )\n r.raise_for_status() # if the request fails for any reason, throw an error.\n\n return r.text and r.json() or ''\n", "path": "lexicon/providers/online.py"}]} | 2,944 | 250 |
gh_patches_debug_31627 | rasdani/github-patches | git_diff | ddionrails__ddionrails-798 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove StudyRedirectView
~blocked by: #126~
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddionrails/studies/views.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """ Views for ddionrails.studies app """
4
5 from django.http.request import HttpRequest
6 from django.http.response import HttpResponse
7 from django.shortcuts import get_object_or_404, render
8 from django.views.generic import DetailView
9 from django.views.generic.base import RedirectView
10
11 from ddionrails.data.models import Dataset, Variable
12 from ddionrails.instruments.models import Instrument, Question
13
14 from .models import Study
15
16
17 class StudyRedirectView(RedirectView):
18 """ RedirectView for studies.Study model """
19
20 permanent = False
21
22 def get_redirect_url(self, *args, **kwargs):
23 study = get_object_or_404(Study, id=kwargs["id"])
24 return study.get_absolute_url()
25
26
27 class StudyDetailView(DetailView):
28 """ DetailView for studies.Study model """
29
30 model = Study
31 template_name = "studies/study_detail.html"
32 slug_url_kwarg = "study_name"
33 slug_field = "name"
34
35 def get_queryset(self):
36 queryset = super(StudyDetailView, self).get_queryset()
37 return queryset.only("name", "label", "config", "description")
38
39 def get_context_data(self, **kwargs):
40 context = super().get_context_data(**kwargs)
41 context["num_datasets"] = Dataset.objects.filter(study=self.object).count()
42 context["num_variables"] = Variable.objects.filter(
43 dataset__study=self.object
44 ).count()
45 context["num_instruments"] = Instrument.objects.filter(study=self.object).count()
46 context["num_questions"] = Question.objects.filter(
47 instrument__study=self.object
48 ).count()
49
50 context["dataset_list"] = (
51 Dataset.objects.select_related(
52 "study", "conceptual_dataset", "period", "analysis_unit"
53 )
54 .filter(study=self.object)
55 .only(
56 "name",
57 "label",
58 "study__name",
59 "conceptual_dataset__name",
60 "conceptual_dataset__label",
61 "period__name",
62 "period__label",
63 "analysis_unit__name",
64 "analysis_unit__label",
65 )
66 )
67 context["instrument_list"] = (
68 Instrument.objects.select_related("study", "period", "analysis_unit")
69 .filter(study=self.object)
70 .only(
71 "name",
72 "label",
73 "study__name",
74 "period__name",
75 "period__label",
76 "analysis_unit__name",
77 "analysis_unit__label",
78 )
79 )
80 return context
81
82
83 def study_topics(request: HttpRequest, study_name: str, language: str) -> HttpResponse:
84 study = get_object_or_404(Study, name=study_name)
85 context = dict(study=study, language=language)
86 return render(request, "studies/study_topics.html", context=context)
87
```
Path: `config/urls.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """ Root URLConf for ddionrails project """
4
5 from django.conf import settings
6 from django.conf.urls.static import static
7 from django.contrib import admin
8 from django.urls import include, path, re_path
9 from django.views.generic.base import TemplateView
10
11 import ddionrails.instruments.views as instruments_views
12 import ddionrails.publications.views as publications_views
13 from config.views import HomePageView
14 from ddionrails.concepts.views import TopicRedirectView
15 from ddionrails.data.views import VariableRedirectView
16 from ddionrails.studies.views import StudyDetailView, StudyRedirectView, study_topics
17
18 # These variable names are desired by Django
19 handler400 = "config.views.bad_request" # pylint: disable=invalid-name
20 handler403 = "config.views.permission_denied" # pylint: disable=invalid-name
21 handler404 = "config.views.page_not_found" # pylint: disable=invalid-name
22 handler500 = "config.views.server_error" # pylint: disable=invalid-name
23
24 admin.site.site_header = "DDI on Rails Admin"
25 admin.site.site_title = "DDI on Rails Admin"
26 admin.site.index_title = "Welcome to DDI on Rails Admin"
27
28 urlpatterns = [
29 path("", HomePageView.as_view(), name="home"),
30 path(
31 "imprint/",
32 TemplateView.as_view(template_name="pages/imprint.html"),
33 name="imprint",
34 ),
35 path(
36 "contact/",
37 TemplateView.as_view(template_name="pages/contact.html"),
38 name="contact",
39 ),
40 path("admin/doc/", include("django.contrib.admindocs.urls")),
41 path("admin/", admin.site.urls),
42 path("concept/", include("ddionrails.concepts.urls", namespace="concepts")),
43 path("workspace/", include("ddionrails.workspace.urls", namespace="workspace")),
44 re_path(
45 (
46 r"^search/((?:all|variables|concepts|questions|publications|topics)"
47 r"\?{0,1}.*){0,1}$"
48 ),
49 TemplateView.as_view(template_name="search/search.html"),
50 name="search",
51 ),
52 path("api/", include("ddionrails.api.urls", namespace="api")),
53 path("django-rq/", include("django_rq.urls")),
54 path("user/", include("django.contrib.auth.urls")),
55 # Study by name
56 path("<slug:study_name>", StudyDetailView.as_view(), name="study_detail"),
57 # Study-specific links
58 path("<slug:study_name>/data/", include("ddionrails.data.urls", namespace="data")),
59 path(
60 "<slug:study_name>/publ/",
61 include("ddionrails.publications.urls", namespace="publ"),
62 ),
63 path(
64 "<slug:study_name>/inst/",
65 include("ddionrails.instruments.urls", namespace="inst"),
66 ),
67 path("<slug:study_name>/topics/<slug:language>", study_topics, name="study_topics"),
68 # Redirects for search interface
69 path(
70 "publication/<uuid:id>",
71 publications_views.PublicationRedirectView.as_view(),
72 name="publication_redirect",
73 ),
74 path("variable/<uuid:id>", VariableRedirectView.as_view(), name="variable_redirect"),
75 path("topic/<uuid:id>", TopicRedirectView.as_view(), name="topic_redirect"),
76 path(
77 "instrument/<uuid:id>",
78 instruments_views.InstrumentRedirectView.as_view(),
79 name="instrument_redirect",
80 ),
81 path(
82 "question/<uuid:id>",
83 instruments_views.QuestionRedirectView.as_view(),
84 name="question_redirect",
85 ),
86 path("study/<uuid:id>", StudyRedirectView.as_view(), name="study_redirect"),
87 ]
88
89 if settings.DEBUG:
90 import debug_toolbar
91
92 urlpatterns = urlpatterns + static(
93 settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
94 )
95 urlpatterns = [path(r"__debug__/", include(debug_toolbar.urls))] + urlpatterns
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/config/urls.py b/config/urls.py
--- a/config/urls.py
+++ b/config/urls.py
@@ -13,7 +13,7 @@
from config.views import HomePageView
from ddionrails.concepts.views import TopicRedirectView
from ddionrails.data.views import VariableRedirectView
-from ddionrails.studies.views import StudyDetailView, StudyRedirectView, study_topics
+from ddionrails.studies.views import StudyDetailView, study_topics
# These variable names are desired by Django
handler400 = "config.views.bad_request" # pylint: disable=invalid-name
@@ -83,7 +83,6 @@
instruments_views.QuestionRedirectView.as_view(),
name="question_redirect",
),
- path("study/<uuid:id>", StudyRedirectView.as_view(), name="study_redirect"),
]
if settings.DEBUG:
diff --git a/ddionrails/studies/views.py b/ddionrails/studies/views.py
--- a/ddionrails/studies/views.py
+++ b/ddionrails/studies/views.py
@@ -6,7 +6,6 @@
from django.http.response import HttpResponse
from django.shortcuts import get_object_or_404, render
from django.views.generic import DetailView
-from django.views.generic.base import RedirectView
from ddionrails.data.models import Dataset, Variable
from ddionrails.instruments.models import Instrument, Question
@@ -14,16 +13,6 @@
from .models import Study
-class StudyRedirectView(RedirectView):
- """ RedirectView for studies.Study model """
-
- permanent = False
-
- def get_redirect_url(self, *args, **kwargs):
- study = get_object_or_404(Study, id=kwargs["id"])
- return study.get_absolute_url()
-
-
class StudyDetailView(DetailView):
""" DetailView for studies.Study model """
| {"golden_diff": "diff --git a/config/urls.py b/config/urls.py\n--- a/config/urls.py\n+++ b/config/urls.py\n@@ -13,7 +13,7 @@\n from config.views import HomePageView\n from ddionrails.concepts.views import TopicRedirectView\n from ddionrails.data.views import VariableRedirectView\n-from ddionrails.studies.views import StudyDetailView, StudyRedirectView, study_topics\n+from ddionrails.studies.views import StudyDetailView, study_topics\n \n # These variable names are desired by Django\n handler400 = \"config.views.bad_request\" # pylint: disable=invalid-name\n@@ -83,7 +83,6 @@\n instruments_views.QuestionRedirectView.as_view(),\n name=\"question_redirect\",\n ),\n- path(\"study/<uuid:id>\", StudyRedirectView.as_view(), name=\"study_redirect\"),\n ]\n \n if settings.DEBUG:\ndiff --git a/ddionrails/studies/views.py b/ddionrails/studies/views.py\n--- a/ddionrails/studies/views.py\n+++ b/ddionrails/studies/views.py\n@@ -6,7 +6,6 @@\n from django.http.response import HttpResponse\n from django.shortcuts import get_object_or_404, render\n from django.views.generic import DetailView\n-from django.views.generic.base import RedirectView\n \n from ddionrails.data.models import Dataset, Variable\n from ddionrails.instruments.models import Instrument, Question\n@@ -14,16 +13,6 @@\n from .models import Study\n \n \n-class StudyRedirectView(RedirectView):\n- \"\"\" RedirectView for studies.Study model \"\"\"\n-\n- permanent = False\n-\n- def get_redirect_url(self, *args, **kwargs):\n- study = get_object_or_404(Study, id=kwargs[\"id\"])\n- return study.get_absolute_url()\n-\n-\n class StudyDetailView(DetailView):\n \"\"\" DetailView for studies.Study model \"\"\"\n", "issue": "Remove StudyRedirectView\n~blocked by: #126~\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\" Views for ddionrails.studies app \"\"\"\n\nfrom django.http.request import HttpRequest\nfrom django.http.response import HttpResponse\nfrom django.shortcuts import get_object_or_404, render\nfrom django.views.generic import DetailView\nfrom django.views.generic.base import RedirectView\n\nfrom ddionrails.data.models import Dataset, Variable\nfrom ddionrails.instruments.models import Instrument, Question\n\nfrom .models import Study\n\n\nclass StudyRedirectView(RedirectView):\n \"\"\" RedirectView for studies.Study model \"\"\"\n\n permanent = False\n\n def get_redirect_url(self, *args, **kwargs):\n study = get_object_or_404(Study, id=kwargs[\"id\"])\n return study.get_absolute_url()\n\n\nclass StudyDetailView(DetailView):\n \"\"\" DetailView for studies.Study model \"\"\"\n\n model = Study\n template_name = \"studies/study_detail.html\"\n slug_url_kwarg = \"study_name\"\n slug_field = \"name\"\n\n def get_queryset(self):\n queryset = super(StudyDetailView, self).get_queryset()\n return queryset.only(\"name\", \"label\", \"config\", \"description\")\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"num_datasets\"] = Dataset.objects.filter(study=self.object).count()\n context[\"num_variables\"] = Variable.objects.filter(\n dataset__study=self.object\n ).count()\n context[\"num_instruments\"] = Instrument.objects.filter(study=self.object).count()\n context[\"num_questions\"] = Question.objects.filter(\n instrument__study=self.object\n ).count()\n\n context[\"dataset_list\"] = (\n Dataset.objects.select_related(\n \"study\", \"conceptual_dataset\", \"period\", \"analysis_unit\"\n )\n .filter(study=self.object)\n .only(\n \"name\",\n \"label\",\n \"study__name\",\n 
\"conceptual_dataset__name\",\n \"conceptual_dataset__label\",\n \"period__name\",\n \"period__label\",\n \"analysis_unit__name\",\n \"analysis_unit__label\",\n )\n )\n context[\"instrument_list\"] = (\n Instrument.objects.select_related(\"study\", \"period\", \"analysis_unit\")\n .filter(study=self.object)\n .only(\n \"name\",\n \"label\",\n \"study__name\",\n \"period__name\",\n \"period__label\",\n \"analysis_unit__name\",\n \"analysis_unit__label\",\n )\n )\n return context\n\n\ndef study_topics(request: HttpRequest, study_name: str, language: str) -> HttpResponse:\n study = get_object_or_404(Study, name=study_name)\n context = dict(study=study, language=language)\n return render(request, \"studies/study_topics.html\", context=context)\n", "path": "ddionrails/studies/views.py"}, {"content": "# -*- coding: utf-8 -*-\n\n\"\"\" Root URLConf for ddionrails project \"\"\"\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path, re_path\nfrom django.views.generic.base import TemplateView\n\nimport ddionrails.instruments.views as instruments_views\nimport ddionrails.publications.views as publications_views\nfrom config.views import HomePageView\nfrom ddionrails.concepts.views import TopicRedirectView\nfrom ddionrails.data.views import VariableRedirectView\nfrom ddionrails.studies.views import StudyDetailView, StudyRedirectView, study_topics\n\n# These variable names are desired by Django\nhandler400 = \"config.views.bad_request\" # pylint: disable=invalid-name\nhandler403 = \"config.views.permission_denied\" # pylint: disable=invalid-name\nhandler404 = \"config.views.page_not_found\" # pylint: disable=invalid-name\nhandler500 = \"config.views.server_error\" # pylint: disable=invalid-name\n\nadmin.site.site_header = \"DDI on Rails Admin\"\nadmin.site.site_title = \"DDI on Rails Admin\"\nadmin.site.index_title = \"Welcome to DDI on Rails Admin\"\n\nurlpatterns = [\n path(\"\", HomePageView.as_view(), name=\"home\"),\n path(\n \"imprint/\",\n TemplateView.as_view(template_name=\"pages/imprint.html\"),\n name=\"imprint\",\n ),\n path(\n \"contact/\",\n TemplateView.as_view(template_name=\"pages/contact.html\"),\n name=\"contact\",\n ),\n path(\"admin/doc/\", include(\"django.contrib.admindocs.urls\")),\n path(\"admin/\", admin.site.urls),\n path(\"concept/\", include(\"ddionrails.concepts.urls\", namespace=\"concepts\")),\n path(\"workspace/\", include(\"ddionrails.workspace.urls\", namespace=\"workspace\")),\n re_path(\n (\n r\"^search/((?:all|variables|concepts|questions|publications|topics)\"\n r\"\\?{0,1}.*){0,1}$\"\n ),\n TemplateView.as_view(template_name=\"search/search.html\"),\n name=\"search\",\n ),\n path(\"api/\", include(\"ddionrails.api.urls\", namespace=\"api\")),\n path(\"django-rq/\", include(\"django_rq.urls\")),\n path(\"user/\", include(\"django.contrib.auth.urls\")),\n # Study by name\n path(\"<slug:study_name>\", StudyDetailView.as_view(), name=\"study_detail\"),\n # Study-specific links\n path(\"<slug:study_name>/data/\", include(\"ddionrails.data.urls\", namespace=\"data\")),\n path(\n \"<slug:study_name>/publ/\",\n include(\"ddionrails.publications.urls\", namespace=\"publ\"),\n ),\n path(\n \"<slug:study_name>/inst/\",\n include(\"ddionrails.instruments.urls\", namespace=\"inst\"),\n ),\n path(\"<slug:study_name>/topics/<slug:language>\", study_topics, name=\"study_topics\"),\n # Redirects for search interface\n path(\n \"publication/<uuid:id>\",\n 
publications_views.PublicationRedirectView.as_view(),\n name=\"publication_redirect\",\n ),\n path(\"variable/<uuid:id>\", VariableRedirectView.as_view(), name=\"variable_redirect\"),\n path(\"topic/<uuid:id>\", TopicRedirectView.as_view(), name=\"topic_redirect\"),\n path(\n \"instrument/<uuid:id>\",\n instruments_views.InstrumentRedirectView.as_view(),\n name=\"instrument_redirect\",\n ),\n path(\n \"question/<uuid:id>\",\n instruments_views.QuestionRedirectView.as_view(),\n name=\"question_redirect\",\n ),\n path(\"study/<uuid:id>\", StudyRedirectView.as_view(), name=\"study_redirect\"),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns = urlpatterns + static(\n settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n urlpatterns = [path(r\"__debug__/\", include(debug_toolbar.urls))] + urlpatterns\n", "path": "config/urls.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\" Views for ddionrails.studies app \"\"\"\n\nfrom django.http.request import HttpRequest\nfrom django.http.response import HttpResponse\nfrom django.shortcuts import get_object_or_404, render\nfrom django.views.generic import DetailView\n\nfrom ddionrails.data.models import Dataset, Variable\nfrom ddionrails.instruments.models import Instrument, Question\n\nfrom .models import Study\n\n\nclass StudyDetailView(DetailView):\n \"\"\" DetailView for studies.Study model \"\"\"\n\n model = Study\n template_name = \"studies/study_detail.html\"\n slug_url_kwarg = \"study_name\"\n slug_field = \"name\"\n\n def get_queryset(self):\n queryset = super(StudyDetailView, self).get_queryset()\n return queryset.only(\"name\", \"label\", \"config\", \"description\")\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"num_datasets\"] = Dataset.objects.filter(study=self.object).count()\n context[\"num_variables\"] = Variable.objects.filter(\n dataset__study=self.object\n ).count()\n context[\"num_instruments\"] = Instrument.objects.filter(study=self.object).count()\n context[\"num_questions\"] = Question.objects.filter(\n instrument__study=self.object\n ).count()\n\n context[\"dataset_list\"] = (\n Dataset.objects.select_related(\n \"study\", \"conceptual_dataset\", \"period\", \"analysis_unit\"\n )\n .filter(study=self.object)\n .only(\n \"name\",\n \"label\",\n \"study__name\",\n \"conceptual_dataset__name\",\n \"conceptual_dataset__label\",\n \"period__name\",\n \"period__label\",\n \"analysis_unit__name\",\n \"analysis_unit__label\",\n )\n )\n context[\"instrument_list\"] = (\n Instrument.objects.select_related(\"study\", \"period\", \"analysis_unit\")\n .filter(study=self.object)\n .only(\n \"name\",\n \"label\",\n \"study__name\",\n \"period__name\",\n \"period__label\",\n \"analysis_unit__name\",\n \"analysis_unit__label\",\n )\n )\n return context\n\n\ndef study_topics(request: HttpRequest, study_name: str, language: str) -> HttpResponse:\n study = get_object_or_404(Study, name=study_name)\n context = dict(study=study, language=language)\n return render(request, \"studies/study_topics.html\", context=context)\n", "path": "ddionrails/studies/views.py"}, {"content": "# -*- coding: utf-8 -*-\n\n\"\"\" Root URLConf for ddionrails project \"\"\"\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path, re_path\nfrom django.views.generic.base import TemplateView\n\nimport ddionrails.instruments.views as instruments_views\nimport ddionrails.publications.views as 
publications_views\nfrom config.views import HomePageView\nfrom ddionrails.concepts.views import TopicRedirectView\nfrom ddionrails.data.views import VariableRedirectView\nfrom ddionrails.studies.views import StudyDetailView, study_topics\n\n# These variable names are desired by Django\nhandler400 = \"config.views.bad_request\" # pylint: disable=invalid-name\nhandler403 = \"config.views.permission_denied\" # pylint: disable=invalid-name\nhandler404 = \"config.views.page_not_found\" # pylint: disable=invalid-name\nhandler500 = \"config.views.server_error\" # pylint: disable=invalid-name\n\nadmin.site.site_header = \"DDI on Rails Admin\"\nadmin.site.site_title = \"DDI on Rails Admin\"\nadmin.site.index_title = \"Welcome to DDI on Rails Admin\"\n\nurlpatterns = [\n path(\"\", HomePageView.as_view(), name=\"home\"),\n path(\n \"imprint/\",\n TemplateView.as_view(template_name=\"pages/imprint.html\"),\n name=\"imprint\",\n ),\n path(\n \"contact/\",\n TemplateView.as_view(template_name=\"pages/contact.html\"),\n name=\"contact\",\n ),\n path(\"admin/doc/\", include(\"django.contrib.admindocs.urls\")),\n path(\"admin/\", admin.site.urls),\n path(\"concept/\", include(\"ddionrails.concepts.urls\", namespace=\"concepts\")),\n path(\"workspace/\", include(\"ddionrails.workspace.urls\", namespace=\"workspace\")),\n re_path(\n (\n r\"^search/((?:all|variables|concepts|questions|publications|topics)\"\n r\"\\?{0,1}.*){0,1}$\"\n ),\n TemplateView.as_view(template_name=\"search/search.html\"),\n name=\"search\",\n ),\n path(\"api/\", include(\"ddionrails.api.urls\", namespace=\"api\")),\n path(\"django-rq/\", include(\"django_rq.urls\")),\n path(\"user/\", include(\"django.contrib.auth.urls\")),\n # Study by name\n path(\"<slug:study_name>\", StudyDetailView.as_view(), name=\"study_detail\"),\n # Study-specific links\n path(\"<slug:study_name>/data/\", include(\"ddionrails.data.urls\", namespace=\"data\")),\n path(\n \"<slug:study_name>/publ/\",\n include(\"ddionrails.publications.urls\", namespace=\"publ\"),\n ),\n path(\n \"<slug:study_name>/inst/\",\n include(\"ddionrails.instruments.urls\", namespace=\"inst\"),\n ),\n path(\"<slug:study_name>/topics/<slug:language>\", study_topics, name=\"study_topics\"),\n # Redirects for search interface\n path(\n \"publication/<uuid:id>\",\n publications_views.PublicationRedirectView.as_view(),\n name=\"publication_redirect\",\n ),\n path(\"variable/<uuid:id>\", VariableRedirectView.as_view(), name=\"variable_redirect\"),\n path(\"topic/<uuid:id>\", TopicRedirectView.as_view(), name=\"topic_redirect\"),\n path(\n \"instrument/<uuid:id>\",\n instruments_views.InstrumentRedirectView.as_view(),\n name=\"instrument_redirect\",\n ),\n path(\n \"question/<uuid:id>\",\n instruments_views.QuestionRedirectView.as_view(),\n name=\"question_redirect\",\n ),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns = urlpatterns + static(\n settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n urlpatterns = [path(r\"__debug__/\", include(debug_toolbar.urls))] + urlpatterns\n", "path": "config/urls.py"}]} | 2,078 | 407 |
gh_patches_debug_41159 | rasdani/github-patches | git_diff | azavea__raster-vision-328 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix raster stats bug
If you run compute_raster_stats on 4-channel imagery (yielding stats for 4 channels), and use a `channel_order` of [0, 1, 2] in your raster_transformer, and then switch to using 3-channel imagery, it leads to an error because currently the `means` do not have the `channel_order` applied to them before being subtracted from the raster. In other words, 4 channel means is subtracted from a 3 channel raster.
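For illustration, here is a minimal sketch of the resulting shape mismatch (the array shapes are made up for the example, not taken from the project):

```python
import numpy as np

# Stats computed on 4-channel imagery yield 4 means.
means = np.array([100.0, 110.0, 120.0, 130.0])[np.newaxis, np.newaxis, :]  # shape (1, 1, 4)

# A chip read from 3-channel imagery.
chip = np.zeros((256, 256, 3))

try:
    chip - means
except ValueError as err:
    # operands could not be broadcast together with shapes (256,256,3) (1,1,4)
    print(err)
```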
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/rastervision/builders/raster_transformer_builder.py`
Content:
```
1 from rastervision.core.raster_transformer import RasterTransformer
2
3
4 def build(config):
5 return RasterTransformer(config)
6
```
Path: `src/rastervision/core/raster_transformer.py`
Content:
```
1 import numpy as np
2
3 from rastervision.core.raster_stats import RasterStats
4
5
6 class RasterTransformer(object):
7 """Transforms chips according to a config."""
8
9 def __init__(self, options):
10 """Construct a new RasterTransformer.
11
12 Args:
13 options: protos.raster_transformer_pb2.RasterTransformer
14 """
15 self.options = options
16 self.raster_stats = None
17 if options.stats_uri:
18 self.raster_stats = RasterStats()
19 self.raster_stats.load(options.stats_uri)
20
21 def transform(self, chip):
22 """Transform a chip.
23
24 Selects a subset of the channels and transforms non-uint8 to
25 uint8 values using options.stats_uri
26
27 Args:
28 chip: [height, width, channels] numpy array
29
30 Returns:
31 [height, width, channels] uint8 numpy array where channels is equal
32 to len(self.options.channel_order)
33 """
34 if chip.dtype != np.uint8:
35 if self.raster_stats:
36 # Subtract mean and divide by std to get zscores.
37 means = np.array(self.raster_stats.means)
38 means = means[np.newaxis, np.newaxis, :].astype(np.float)
39 stds = np.array(self.raster_stats.stds)
40 stds = stds[np.newaxis, np.newaxis, :].astype(np.float)
41
42 # Don't transform NODATA zero values.
43 nodata = chip == 0
44
45 chip = chip - means
46 chip = chip / stds
47
48 # Make zscores that fall between -3 and 3 span 0 to 255.
49 chip += 3
50 chip /= 6
51
52 chip = np.clip(chip, 0, 1)
53 chip *= 255
54 chip = chip.astype(np.uint8)
55
56 chip[nodata] = 0
57 else:
58 raise ValueError(
59 'Need to provide stats_uri for non-uint8 rasters.')
60
61 if self.options.channel_order:
62 return chip[:, :, self.options.channel_order]
63 return chip
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/rastervision/builders/raster_transformer_builder.py b/src/rastervision/builders/raster_transformer_builder.py
--- a/src/rastervision/builders/raster_transformer_builder.py
+++ b/src/rastervision/builders/raster_transformer_builder.py
@@ -1,5 +1,12 @@
from rastervision.core.raster_transformer import RasterTransformer
+from rastervision.core.raster_stats import RasterStats
def build(config):
- return RasterTransformer(config)
+ raster_stats = None
+ if config.stats_uri:
+ raster_stats = RasterStats()
+ raster_stats.load(config.stats_uri)
+
+ return RasterTransformer(
+ channel_order=config.channel_order, raster_stats=raster_stats)
diff --git a/src/rastervision/core/raster_transformer.py b/src/rastervision/core/raster_transformer.py
--- a/src/rastervision/core/raster_transformer.py
+++ b/src/rastervision/core/raster_transformer.py
@@ -1,43 +1,50 @@
import numpy as np
-from rastervision.core.raster_stats import RasterStats
-
class RasterTransformer(object):
- """Transforms chips according to a config."""
+ """Transforms raw chips to be input to a neural network."""
- def __init__(self, options):
+ def __init__(self, channel_order=None, raster_stats=None):
"""Construct a new RasterTransformer.
Args:
- options: protos.raster_transformer_pb2.RasterTransformer
+ channel_order: numpy array of length n where n is the number of
+ channels to use and the values are channel indices
+ raster_stats: (RasterStats) used to transform chip to have
+ desired statistics
"""
- self.options = options
- self.raster_stats = None
- if options.stats_uri:
- self.raster_stats = RasterStats()
- self.raster_stats.load(options.stats_uri)
+ self.channel_order = channel_order
+ self.raster_stats = raster_stats
def transform(self, chip):
"""Transform a chip.
Selects a subset of the channels and transforms non-uint8 to
- uint8 values using options.stats_uri
+ uint8 values using raster_stats.
Args:
chip: [height, width, channels] numpy array
Returns:
[height, width, channels] uint8 numpy array where channels is equal
- to len(self.options.channel_order)
+ to len(channel_order)
"""
+ if self.channel_order is None:
+ channel_order = np.arange(chip.shape[2])
+ else:
+ channel_order = self.channel_order
+
+ chip = chip[:, :, channel_order]
+
if chip.dtype != np.uint8:
if self.raster_stats:
# Subtract mean and divide by std to get zscores.
means = np.array(self.raster_stats.means)
- means = means[np.newaxis, np.newaxis, :].astype(np.float)
+ means = means[np.newaxis, np.newaxis, channel_order].astype(
+ np.float)
stds = np.array(self.raster_stats.stds)
- stds = stds[np.newaxis, np.newaxis, :].astype(np.float)
+ stds = stds[np.newaxis, np.newaxis, channel_order].astype(
+ np.float)
# Don't transform NODATA zero values.
nodata = chip == 0
@@ -56,8 +63,6 @@
chip[nodata] = 0
else:
raise ValueError(
- 'Need to provide stats_uri for non-uint8 rasters.')
+ 'Need to provide raster_stats for non-uint8 rasters.')
- if self.options.channel_order:
- return chip[:, :, self.options.channel_order]
return chip
| {"golden_diff": "diff --git a/src/rastervision/builders/raster_transformer_builder.py b/src/rastervision/builders/raster_transformer_builder.py\n--- a/src/rastervision/builders/raster_transformer_builder.py\n+++ b/src/rastervision/builders/raster_transformer_builder.py\n@@ -1,5 +1,12 @@\n from rastervision.core.raster_transformer import RasterTransformer\n+from rastervision.core.raster_stats import RasterStats\n \n \n def build(config):\n- return RasterTransformer(config)\n+ raster_stats = None\n+ if config.stats_uri:\n+ raster_stats = RasterStats()\n+ raster_stats.load(config.stats_uri)\n+\n+ return RasterTransformer(\n+ channel_order=config.channel_order, raster_stats=raster_stats)\ndiff --git a/src/rastervision/core/raster_transformer.py b/src/rastervision/core/raster_transformer.py\n--- a/src/rastervision/core/raster_transformer.py\n+++ b/src/rastervision/core/raster_transformer.py\n@@ -1,43 +1,50 @@\n import numpy as np\n \n-from rastervision.core.raster_stats import RasterStats\n-\n \n class RasterTransformer(object):\n- \"\"\"Transforms chips according to a config.\"\"\"\n+ \"\"\"Transforms raw chips to be input to a neural network.\"\"\"\n \n- def __init__(self, options):\n+ def __init__(self, channel_order=None, raster_stats=None):\n \"\"\"Construct a new RasterTransformer.\n \n Args:\n- options: protos.raster_transformer_pb2.RasterTransformer\n+ channel_order: numpy array of length n where n is the number of\n+ channels to use and the values are channel indices\n+ raster_stats: (RasterStats) used to transform chip to have\n+ desired statistics\n \"\"\"\n- self.options = options\n- self.raster_stats = None\n- if options.stats_uri:\n- self.raster_stats = RasterStats()\n- self.raster_stats.load(options.stats_uri)\n+ self.channel_order = channel_order\n+ self.raster_stats = raster_stats\n \n def transform(self, chip):\n \"\"\"Transform a chip.\n \n Selects a subset of the channels and transforms non-uint8 to\n- uint8 values using options.stats_uri\n+ uint8 values using raster_stats.\n \n Args:\n chip: [height, width, channels] numpy array\n \n Returns:\n [height, width, channels] uint8 numpy array where channels is equal\n- to len(self.options.channel_order)\n+ to len(channel_order)\n \"\"\"\n+ if self.channel_order is None:\n+ channel_order = np.arange(chip.shape[2])\n+ else:\n+ channel_order = self.channel_order\n+\n+ chip = chip[:, :, channel_order]\n+\n if chip.dtype != np.uint8:\n if self.raster_stats:\n # Subtract mean and divide by std to get zscores.\n means = np.array(self.raster_stats.means)\n- means = means[np.newaxis, np.newaxis, :].astype(np.float)\n+ means = means[np.newaxis, np.newaxis, channel_order].astype(\n+ np.float)\n stds = np.array(self.raster_stats.stds)\n- stds = stds[np.newaxis, np.newaxis, :].astype(np.float)\n+ stds = stds[np.newaxis, np.newaxis, channel_order].astype(\n+ np.float)\n \n # Don't transform NODATA zero values.\n nodata = chip == 0\n@@ -56,8 +63,6 @@\n chip[nodata] = 0\n else:\n raise ValueError(\n- 'Need to provide stats_uri for non-uint8 rasters.')\n+ 'Need to provide raster_stats for non-uint8 rasters.')\n \n- if self.options.channel_order:\n- return chip[:, :, self.options.channel_order]\n return chip\n", "issue": "Fix raster stats bug\nIf you run compute_raster_stats on 4-channel imagery (yielding stats for 4 channels), and use a `channel_order` of [0, 1, 2] in your raster_transformer, and then switch to using 3-channel imagery, it leads to an error because currently the `means` do not have the `channel_order` applied to them before 
being subtracted from the raster. In other words, 4 channel means is subtracted from a 3 channel raster.\n", "before_files": [{"content": "from rastervision.core.raster_transformer import RasterTransformer\n\n\ndef build(config):\n return RasterTransformer(config)\n", "path": "src/rastervision/builders/raster_transformer_builder.py"}, {"content": "import numpy as np\n\nfrom rastervision.core.raster_stats import RasterStats\n\n\nclass RasterTransformer(object):\n \"\"\"Transforms chips according to a config.\"\"\"\n\n def __init__(self, options):\n \"\"\"Construct a new RasterTransformer.\n\n Args:\n options: protos.raster_transformer_pb2.RasterTransformer\n \"\"\"\n self.options = options\n self.raster_stats = None\n if options.stats_uri:\n self.raster_stats = RasterStats()\n self.raster_stats.load(options.stats_uri)\n\n def transform(self, chip):\n \"\"\"Transform a chip.\n\n Selects a subset of the channels and transforms non-uint8 to\n uint8 values using options.stats_uri\n\n Args:\n chip: [height, width, channels] numpy array\n\n Returns:\n [height, width, channels] uint8 numpy array where channels is equal\n to len(self.options.channel_order)\n \"\"\"\n if chip.dtype != np.uint8:\n if self.raster_stats:\n # Subtract mean and divide by std to get zscores.\n means = np.array(self.raster_stats.means)\n means = means[np.newaxis, np.newaxis, :].astype(np.float)\n stds = np.array(self.raster_stats.stds)\n stds = stds[np.newaxis, np.newaxis, :].astype(np.float)\n\n # Don't transform NODATA zero values.\n nodata = chip == 0\n\n chip = chip - means\n chip = chip / stds\n\n # Make zscores that fall between -3 and 3 span 0 to 255.\n chip += 3\n chip /= 6\n\n chip = np.clip(chip, 0, 1)\n chip *= 255\n chip = chip.astype(np.uint8)\n\n chip[nodata] = 0\n else:\n raise ValueError(\n 'Need to provide stats_uri for non-uint8 rasters.')\n\n if self.options.channel_order:\n return chip[:, :, self.options.channel_order]\n return chip\n", "path": "src/rastervision/core/raster_transformer.py"}], "after_files": [{"content": "from rastervision.core.raster_transformer import RasterTransformer\nfrom rastervision.core.raster_stats import RasterStats\n\n\ndef build(config):\n raster_stats = None\n if config.stats_uri:\n raster_stats = RasterStats()\n raster_stats.load(config.stats_uri)\n\n return RasterTransformer(\n channel_order=config.channel_order, raster_stats=raster_stats)\n", "path": "src/rastervision/builders/raster_transformer_builder.py"}, {"content": "import numpy as np\n\n\nclass RasterTransformer(object):\n \"\"\"Transforms raw chips to be input to a neural network.\"\"\"\n\n def __init__(self, channel_order=None, raster_stats=None):\n \"\"\"Construct a new RasterTransformer.\n\n Args:\n channel_order: numpy array of length n where n is the number of\n channels to use and the values are channel indices\n raster_stats: (RasterStats) used to transform chip to have\n desired statistics\n \"\"\"\n self.channel_order = channel_order\n self.raster_stats = raster_stats\n\n def transform(self, chip):\n \"\"\"Transform a chip.\n\n Selects a subset of the channels and transforms non-uint8 to\n uint8 values using raster_stats.\n\n Args:\n chip: [height, width, channels] numpy array\n\n Returns:\n [height, width, channels] uint8 numpy array where channels is equal\n to len(channel_order)\n \"\"\"\n if self.channel_order is None:\n channel_order = np.arange(chip.shape[2])\n else:\n channel_order = self.channel_order\n\n chip = chip[:, :, channel_order]\n\n if chip.dtype != np.uint8:\n if self.raster_stats:\n # 
Subtract mean and divide by std to get zscores.\n means = np.array(self.raster_stats.means)\n means = means[np.newaxis, np.newaxis, channel_order].astype(\n np.float)\n stds = np.array(self.raster_stats.stds)\n stds = stds[np.newaxis, np.newaxis, channel_order].astype(\n np.float)\n\n # Don't transform NODATA zero values.\n nodata = chip == 0\n\n chip = chip - means\n chip = chip / stds\n\n # Make zscores that fall between -3 and 3 span 0 to 255.\n chip += 3\n chip /= 6\n\n chip = np.clip(chip, 0, 1)\n chip *= 255\n chip = chip.astype(np.uint8)\n\n chip[nodata] = 0\n else:\n raise ValueError(\n 'Need to provide raster_stats for non-uint8 rasters.')\n\n return chip\n", "path": "src/rastervision/core/raster_transformer.py"}]} | 1,000 | 854 |
gh_patches_debug_11754 | rasdani/github-patches | git_diff | svthalia__concrexit-1826 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add search parameter to event registrations admin api
### Is your feature request related to a problem? Please describe.
I'm always frustrated when I can't search for a registration in the event admin.
### Describe the solution you'd like
A search parameter (by member.name or name) on `api/v2/admin/events/<id>/registrations/`.
### Motivation
Then we can search for registrations. A parameter is desirable for consistency in making pagination available.
### Describe alternatives you've considered
Local search.
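A minimal sketch of how such a parameter is typically exposed with Django REST Framework's built-in `SearchFilter` (the view and field names here are assumptions for illustration, not taken from the codebase):

```python
from rest_framework import filters, generics


class RegistrationListView(generics.ListAPIView):
    # queryset and serializer_class omitted; names are illustrative only.
    filter_backends = [filters.SearchFilter]
    # SearchFilter reads the ?search= query parameter and matches it against
    # these lookups, e.g. /api/v2/admin/events/<id>/registrations/?search=jane
    search_fields = ["name", "member__first_name", "member__last_name"]
```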
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/events/api/v2/admin/views.py`
Content:
```
1 from django.http import Http404
2 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
3 from rest_framework import status
4 from rest_framework.exceptions import ValidationError, PermissionDenied
5 from rest_framework.generics import get_object_or_404
6 from rest_framework.response import Response
7 from rest_framework.views import APIView
8 from rest_framework import filters as framework_filters
9
10 from events import services
11 from events.api.v2.admin import filters
12 from events.api.v2.admin.permissions import IsOrganiser
13 from events.api.v2.admin.serializers.event import (
14 EventListAdminSerializer,
15 EventAdminSerializer,
16 )
17 from events.api.v2.admin.serializers.event_registration import (
18 EventRegistrationAdminSerializer,
19 )
20 from events.exceptions import RegistrationError
21 from events.models import Event, EventRegistration
22 from thaliawebsite.api.v2.admin.views import (
23 AdminListAPIView,
24 AdminRetrieveAPIView,
25 AdminCreateAPIView,
26 AdminUpdateAPIView,
27 AdminDestroyAPIView,
28 AdminPermissionsMixin,
29 )
30 import events.api.v2.filters as normal_filters
31
32
33 class EventAdminListCreateAPIView(AdminListAPIView, AdminCreateAPIView):
34 queryset = Event.objects.prefetch_related("organiser")
35 permission_classes = [IsAuthenticatedOrTokenHasScope]
36 required_scopes = ["events:admin"]
37 filter_backends = [
38 framework_filters.OrderingFilter,
39 normal_filters.CategoryFilter,
40 normal_filters.OrganiserFilter,
41 normal_filters.EventDateFilter,
42 filters.PublishedFilter,
43 ]
44 ordering_fields = (
45 "start",
46 "end",
47 "published",
48 "registration_start",
49 "registration_end",
50 )
51
52 def get_serializer_class(self):
53 if self.request.method.lower() == "post":
54 return EventAdminSerializer
55 return EventListAdminSerializer
56
57
58 class EventAdminDetailAPIView(
59 AdminRetrieveAPIView, AdminUpdateAPIView, AdminDestroyAPIView
60 ):
61 queryset = Event.objects.all()
62 serializer_class = EventAdminSerializer
63 permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]
64 required_scopes = ["events:admin"]
65
66
67 class EventRegistrationAdminListView(AdminListAPIView, AdminCreateAPIView):
68 """Returns a list of registrations."""
69
70 serializer_class = EventRegistrationAdminSerializer
71 permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]
72 required_scopes = ["events:admin"]
73 filter_backends = (
74 framework_filters.OrderingFilter,
75 filters.EventRegistrationCancelledFilter,
76 )
77 ordering_fields = ("queue_position", "date", "date_cancelled")
78
79 def get_queryset(self):
80 event = get_object_or_404(Event, pk=self.kwargs.get("pk"))
81 if event:
82 return EventRegistration.objects.filter(event_id=event).prefetch_related(
83 "member", "member__profile"
84 )
85 return EventRegistration.objects.none()
86
87
88 class EventRegistrationAdminDetailView(
89 AdminRetrieveAPIView, AdminUpdateAPIView, AdminDestroyAPIView
90 ):
91 """Returns details of an event registration."""
92
93 serializer_class = EventRegistrationAdminSerializer
94 queryset = EventRegistration.objects.all()
95 permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]
96 required_scopes = ["events:admin"]
97 event_lookup_field = "event_id"
98
99 def get_queryset(self):
100 return super().get_queryset().filter(event=self.kwargs["event_id"])
101
102
103 class EventRegistrationAdminFieldsView(AdminPermissionsMixin, APIView):
104 """Returns details of an event registration."""
105
106 permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]
107 required_scopes = ["events:admin"]
108
109 def get_queryset(self):
110 return EventRegistration.objects.filter(event=self.kwargs["event_id"])
111
112 def get_object(self):
113 event_registration = get_object_or_404(
114 EventRegistration,
115 event=self.kwargs["event_id"],
116 pk=self.kwargs["registration_id"],
117 )
118
119 if not event_registration.event.has_fields:
120 raise Http404
121
122 return event_registration
123
124 def get(self, request, *args, **kwargs):
125 return Response(
126 data=services.registration_fields(request, registration=self.get_object()),
127 status=status.HTTP_200_OK,
128 )
129
130 def put(self, request, *args, **kwargs):
131 original = services.registration_fields(request, registration=self.get_object())
132 required_keys = set(original.keys()) - set(request.data.keys())
133 if len(required_keys) > 0:
134 raise ValidationError(
135 f"Missing keys '{', '.join(required_keys)}' in request",
136 status.HTTP_400_BAD_REQUEST,
137 )
138
139 services.update_registration(
140 registration=self.get_object(), field_values=request.data.items()
141 )
142
143 return Response(
144 data=services.registration_fields(request, registration=self.get_object()),
145 status=status.HTTP_200_OK,
146 )
147
148 def patch(self, request, *args, **kwargs):
149 services.update_registration(
150 registration=self.get_object(), field_values=request.data.items()
151 )
152
153 return Response(
154 data=services.registration_fields(request, registration=self.get_object()),
155 status=status.HTTP_200_OK,
156 )
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/events/api/v2/admin/views.py b/website/events/api/v2/admin/views.py
--- a/website/events/api/v2/admin/views.py
+++ b/website/events/api/v2/admin/views.py
@@ -72,9 +72,15 @@
required_scopes = ["events:admin"]
filter_backends = (
framework_filters.OrderingFilter,
+ framework_filters.SearchFilter,
filters.EventRegistrationCancelledFilter,
)
ordering_fields = ("queue_position", "date", "date_cancelled")
+ search_fields = (
+ "member__first_name",
+ "member__last_name",
+ "name",
+ )
def get_queryset(self):
event = get_object_or_404(Event, pk=self.kwargs.get("pk"))
| {"golden_diff": "diff --git a/website/events/api/v2/admin/views.py b/website/events/api/v2/admin/views.py\n--- a/website/events/api/v2/admin/views.py\n+++ b/website/events/api/v2/admin/views.py\n@@ -72,9 +72,15 @@\n required_scopes = [\"events:admin\"]\n filter_backends = (\n framework_filters.OrderingFilter,\n+ framework_filters.SearchFilter,\n filters.EventRegistrationCancelledFilter,\n )\n ordering_fields = (\"queue_position\", \"date\", \"date_cancelled\")\n+ search_fields = (\n+ \"member__first_name\",\n+ \"member__last_name\",\n+ \"name\",\n+ )\n \n def get_queryset(self):\n event = get_object_or_404(Event, pk=self.kwargs.get(\"pk\"))\n", "issue": "Add search parameter to event registrations admin api\n### Is your feature request related to a problem? Please describe.\r\nI'm always frustrated when I can't search for a registration in the event admin.\r\n\r\n### Describe the solution you'd like\r\nA search parameter (by member.name or name) on `api/v2/admin/events/<id>/registrations/`.\r\n\r\n### Motivation\r\nThen we can search for registrations. A parameter is desirable for consistency in making pagination available.\r\n\r\n### Describe alternatives you've considered\r\nLocal search.\r\n\n", "before_files": [{"content": "from django.http import Http404\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import status\nfrom rest_framework.exceptions import ValidationError, PermissionDenied\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework import filters as framework_filters\n\nfrom events import services\nfrom events.api.v2.admin import filters\nfrom events.api.v2.admin.permissions import IsOrganiser\nfrom events.api.v2.admin.serializers.event import (\n EventListAdminSerializer,\n EventAdminSerializer,\n)\nfrom events.api.v2.admin.serializers.event_registration import (\n EventRegistrationAdminSerializer,\n)\nfrom events.exceptions import RegistrationError\nfrom events.models import Event, EventRegistration\nfrom thaliawebsite.api.v2.admin.views import (\n AdminListAPIView,\n AdminRetrieveAPIView,\n AdminCreateAPIView,\n AdminUpdateAPIView,\n AdminDestroyAPIView,\n AdminPermissionsMixin,\n)\nimport events.api.v2.filters as normal_filters\n\n\nclass EventAdminListCreateAPIView(AdminListAPIView, AdminCreateAPIView):\n queryset = Event.objects.prefetch_related(\"organiser\")\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n filter_backends = [\n framework_filters.OrderingFilter,\n normal_filters.CategoryFilter,\n normal_filters.OrganiserFilter,\n normal_filters.EventDateFilter,\n filters.PublishedFilter,\n ]\n ordering_fields = (\n \"start\",\n \"end\",\n \"published\",\n \"registration_start\",\n \"registration_end\",\n )\n\n def get_serializer_class(self):\n if self.request.method.lower() == \"post\":\n return EventAdminSerializer\n return EventListAdminSerializer\n\n\nclass EventAdminDetailAPIView(\n AdminRetrieveAPIView, AdminUpdateAPIView, AdminDestroyAPIView\n):\n queryset = Event.objects.all()\n serializer_class = EventAdminSerializer\n permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n\n\nclass EventRegistrationAdminListView(AdminListAPIView, AdminCreateAPIView):\n \"\"\"Returns a list of registrations.\"\"\"\n\n serializer_class = EventRegistrationAdminSerializer\n permission_classes = [IsOrganiser, 
IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n filter_backends = (\n framework_filters.OrderingFilter,\n filters.EventRegistrationCancelledFilter,\n )\n ordering_fields = (\"queue_position\", \"date\", \"date_cancelled\")\n\n def get_queryset(self):\n event = get_object_or_404(Event, pk=self.kwargs.get(\"pk\"))\n if event:\n return EventRegistration.objects.filter(event_id=event).prefetch_related(\n \"member\", \"member__profile\"\n )\n return EventRegistration.objects.none()\n\n\nclass EventRegistrationAdminDetailView(\n AdminRetrieveAPIView, AdminUpdateAPIView, AdminDestroyAPIView\n):\n \"\"\"Returns details of an event registration.\"\"\"\n\n serializer_class = EventRegistrationAdminSerializer\n queryset = EventRegistration.objects.all()\n permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n event_lookup_field = \"event_id\"\n\n def get_queryset(self):\n return super().get_queryset().filter(event=self.kwargs[\"event_id\"])\n\n\nclass EventRegistrationAdminFieldsView(AdminPermissionsMixin, APIView):\n \"\"\"Returns details of an event registration.\"\"\"\n\n permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n\n def get_queryset(self):\n return EventRegistration.objects.filter(event=self.kwargs[\"event_id\"])\n\n def get_object(self):\n event_registration = get_object_or_404(\n EventRegistration,\n event=self.kwargs[\"event_id\"],\n pk=self.kwargs[\"registration_id\"],\n )\n\n if not event_registration.event.has_fields:\n raise Http404\n\n return event_registration\n\n def get(self, request, *args, **kwargs):\n return Response(\n data=services.registration_fields(request, registration=self.get_object()),\n status=status.HTTP_200_OK,\n )\n\n def put(self, request, *args, **kwargs):\n original = services.registration_fields(request, registration=self.get_object())\n required_keys = set(original.keys()) - set(request.data.keys())\n if len(required_keys) > 0:\n raise ValidationError(\n f\"Missing keys '{', '.join(required_keys)}' in request\",\n status.HTTP_400_BAD_REQUEST,\n )\n\n services.update_registration(\n registration=self.get_object(), field_values=request.data.items()\n )\n\n return Response(\n data=services.registration_fields(request, registration=self.get_object()),\n status=status.HTTP_200_OK,\n )\n\n def patch(self, request, *args, **kwargs):\n services.update_registration(\n registration=self.get_object(), field_values=request.data.items()\n )\n\n return Response(\n data=services.registration_fields(request, registration=self.get_object()),\n status=status.HTTP_200_OK,\n )\n", "path": "website/events/api/v2/admin/views.py"}], "after_files": [{"content": "from django.http import Http404\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import status\nfrom rest_framework.exceptions import ValidationError, PermissionDenied\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework import filters as framework_filters\n\nfrom events import services\nfrom events.api.v2.admin import filters\nfrom events.api.v2.admin.permissions import IsOrganiser\nfrom events.api.v2.admin.serializers.event import (\n EventListAdminSerializer,\n EventAdminSerializer,\n)\nfrom events.api.v2.admin.serializers.event_registration import (\n EventRegistrationAdminSerializer,\n)\nfrom events.exceptions import 
RegistrationError\nfrom events.models import Event, EventRegistration\nfrom thaliawebsite.api.v2.admin.views import (\n AdminListAPIView,\n AdminRetrieveAPIView,\n AdminCreateAPIView,\n AdminUpdateAPIView,\n AdminDestroyAPIView,\n AdminPermissionsMixin,\n)\nimport events.api.v2.filters as normal_filters\n\n\nclass EventAdminListCreateAPIView(AdminListAPIView, AdminCreateAPIView):\n queryset = Event.objects.prefetch_related(\"organiser\")\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n filter_backends = [\n framework_filters.OrderingFilter,\n normal_filters.CategoryFilter,\n normal_filters.OrganiserFilter,\n normal_filters.EventDateFilter,\n filters.PublishedFilter,\n ]\n ordering_fields = (\n \"start\",\n \"end\",\n \"published\",\n \"registration_start\",\n \"registration_end\",\n )\n\n def get_serializer_class(self):\n if self.request.method.lower() == \"post\":\n return EventAdminSerializer\n return EventListAdminSerializer\n\n\nclass EventAdminDetailAPIView(\n AdminRetrieveAPIView, AdminUpdateAPIView, AdminDestroyAPIView\n):\n queryset = Event.objects.all()\n serializer_class = EventAdminSerializer\n permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n\n\nclass EventRegistrationAdminListView(AdminListAPIView, AdminCreateAPIView):\n \"\"\"Returns a list of registrations.\"\"\"\n\n serializer_class = EventRegistrationAdminSerializer\n permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n filter_backends = (\n framework_filters.OrderingFilter,\n framework_filters.SearchFilter,\n filters.EventRegistrationCancelledFilter,\n )\n ordering_fields = (\"queue_position\", \"date\", \"date_cancelled\")\n search_fields = (\n \"member__first_name\",\n \"member__last_name\",\n \"name\",\n )\n\n def get_queryset(self):\n event = get_object_or_404(Event, pk=self.kwargs.get(\"pk\"))\n if event:\n return EventRegistration.objects.filter(event_id=event).prefetch_related(\n \"member\", \"member__profile\"\n )\n return EventRegistration.objects.none()\n\n\nclass EventRegistrationAdminDetailView(\n AdminRetrieveAPIView, AdminUpdateAPIView, AdminDestroyAPIView\n):\n \"\"\"Returns details of an event registration.\"\"\"\n\n serializer_class = EventRegistrationAdminSerializer\n queryset = EventRegistration.objects.all()\n permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n event_lookup_field = \"event_id\"\n\n def get_queryset(self):\n return super().get_queryset().filter(event=self.kwargs[\"event_id\"])\n\n\nclass EventRegistrationAdminFieldsView(AdminPermissionsMixin, APIView):\n \"\"\"Returns details of an event registration.\"\"\"\n\n permission_classes = [IsOrganiser, IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"events:admin\"]\n\n def get_queryset(self):\n return EventRegistration.objects.filter(event=self.kwargs[\"event_id\"])\n\n def get_object(self):\n event_registration = get_object_or_404(\n EventRegistration,\n event=self.kwargs[\"event_id\"],\n pk=self.kwargs[\"registration_id\"],\n )\n\n if not event_registration.event.has_fields:\n raise Http404\n\n return event_registration\n\n def get(self, request, *args, **kwargs):\n return Response(\n data=services.registration_fields(request, registration=self.get_object()),\n status=status.HTTP_200_OK,\n )\n\n def put(self, request, *args, **kwargs):\n original = services.registration_fields(request, registration=self.get_object())\n 
required_keys = set(original.keys()) - set(request.data.keys())\n if len(required_keys) > 0:\n raise ValidationError(\n f\"Missing keys '{', '.join(required_keys)}' in request\",\n status.HTTP_400_BAD_REQUEST,\n )\n\n services.update_registration(\n registration=self.get_object(), field_values=request.data.items()\n )\n\n return Response(\n data=services.registration_fields(request, registration=self.get_object()),\n status=status.HTTP_200_OK,\n )\n\n def patch(self, request, *args, **kwargs):\n services.update_registration(\n registration=self.get_object(), field_values=request.data.items()\n )\n\n return Response(\n data=services.registration_fields(request, registration=self.get_object()),\n status=status.HTTP_200_OK,\n )\n", "path": "website/events/api/v2/admin/views.py"}]} | 1,805 | 172 |
gh_patches_debug_30375 | rasdani/github-patches | git_diff | sublimelsp__LSP-796 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'lsp_symbol_definition' via keybinding using `ccls' as client.
While I was trying to bind the 'lsp_symbol_definition' command to my custom keys, I had to suffer from a `... type str does not support Buffer API` error.
On revisiting the relevant file ("plugin/goto.py") I found the TODO comment which mentions the future implementation of DocumentLink support.
By dumping the response payload of the 'lsp_symbol_definition' command I found out that the received payload has different keys than the ones parsed by the 'handle_response()' function.
See here:
```
[{targetUri: file:///usr/include/bash/builtins/bashgetopt.h, targetRange: {end: {line: 38, character: 30}, start: {line:
38, character: 0}}, targetSelectionRange: {end: {line: 38, character: 26}, start: {line: 38, character: 11}}}]
```
### Solution
So I added the following snippet to provide the right parsing method in case of this setup/scenario:
```python
if 'targetUri' in location:
file_path = uri_to_filename(location['targetUri'])
start = Point.from_lsp(location['targetRange']['start'])
else:
file_path = uri_to_filename(location["uri"])
start = Point.from_lsp(location['range']['start'])
```
By adding this the desired function of my keybinding worked.
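For reference, a minimal stand-alone sketch of such a normalization is below; the function name and the choice to prefer `targetSelectionRange` are illustrative assumptions and are not taken from the plugin:
```python
from typing import Any, Dict, Tuple


def location_to_position(location: Dict[str, Any]) -> Tuple[str, int, int]:
    """Return (uri, line, character) for either a Location or a LocationLink.

    A plain Location carries 'uri' and 'range'; a LocationLink (as sent by
    servers such as ccls) carries 'targetUri', 'targetRange' and
    'targetSelectionRange'.  Preferring 'targetSelectionRange' jumps to the
    symbol name itself rather than to the start of the whole declaration.
    """
    if 'targetUri' in location:
        uri = location['targetUri']
        start = location.get('targetSelectionRange', location['targetRange'])['start']
    else:
        uri = location['uri']
        start = location['range']['start']
    return uri, start['line'], start['character']


# Example with the LocationLink payload dumped above:
link = {
    'targetUri': 'file:///usr/include/bash/builtins/bashgetopt.h',
    'targetRange': {'start': {'line': 38, 'character': 0},
                    'end': {'line': 38, 'character': 30}},
    'targetSelectionRange': {'start': {'line': 38, 'character': 11},
                             'end': {'line': 38, 'character': 26}},
}
print(location_to_position(link))  # ('file:///usr/include/bash/builtins/bashgetopt.h', 38, 11)
```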
I just wanted to share this in the hope it might be useful.
Best regards !
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/goto.py`
Content:
```
1 import sublime
2
3 from .core.registry import LspTextCommand
4 from .core.protocol import Request, Point
5 from .core.documents import get_document_position, get_position, is_at_word
6 from .core.url import uri_to_filename
7 from .core.logging import debug
8 from Default.history_list import get_jump_history_for_view
9
10 try:
11 from typing import List, Dict, Optional, Any
12 assert List and Dict and Optional and Any
13 except ImportError:
14 pass
15
16
17 class LspGotoCommand(LspTextCommand):
18
19 def __init__(self, view: sublime.View) -> None:
20 super().__init__(view)
21 self.goto_kind = "definition"
22
23 def is_enabled(self, event: 'Optional[dict]' = None) -> bool:
24 if self.has_client_with_capability(self.goto_kind + "Provider"):
25 return is_at_word(self.view, event)
26 return False
27
28 def run(self, edit: sublime.Edit, event: 'Optional[dict]' = None) -> None:
29 client = self.client_with_capability(self.goto_kind + "Provider")
30 if client:
31 pos = get_position(self.view, event)
32 document_position = get_document_position(self.view, pos)
33 if document_position:
34 request_type = getattr(Request, self.goto_kind)
35 if not request_type:
36 debug("unrecognized goto kind:", self.goto_kind)
37 return
38 request = request_type(document_position)
39 client.send_request(
40 request, lambda response: self.handle_response(response, pos))
41
42 def handle_response(self, response: 'Optional[Any]', position: int) -> None:
43 window = sublime.active_window()
44 if response:
45 # Save to jump back history.
46 get_jump_history_for_view(self.view).push_selection(self.view)
47 # TODO: DocumentLink support.
48 location = response if isinstance(response, dict) else response[0]
49 file_path = uri_to_filename(location.get("uri"))
50 start = Point.from_lsp(location['range']['start'])
51 file_location = "{}:{}:{}".format(file_path, start.row + 1, start.col + 1)
52 debug("opening location", location)
53 window.open_file(file_location, sublime.ENCODED_POSITION)
54 # TODO: can add region here.
55 else:
56 sublime.status_message("Empty response from language server, "
57 "reverting to Sublime's built-in Goto Definition")
58 window.run_command("goto_definition")
59
60 def want_event(self) -> bool:
61 return True
62
63
64 class LspSymbolDefinitionCommand(LspGotoCommand):
65
66 def __init__(self, view: sublime.View) -> None:
67 super().__init__(view)
68 self.goto_kind = "definition"
69
70
71 class LspSymbolTypeDefinitionCommand(LspGotoCommand):
72
73 def __init__(self, view: sublime.View) -> None:
74 super().__init__(view)
75 self.goto_kind = "typeDefinition"
76
77
78 class LspSymbolDeclarationCommand(LspGotoCommand):
79
80 def __init__(self, view: sublime.View) -> None:
81 super().__init__(view)
82 self.goto_kind = "declaration"
83
84
85 class LspSymbolImplementationCommand(LspGotoCommand):
86
87 def __init__(self, view: sublime.View) -> None:
88 super().__init__(view)
89 self.goto_kind = "implementation"
90
```
Path: `plugin/core/sessions.py`
Content:
```
1 from .types import ClientConfig, ClientStates, Settings
2 from .protocol import Request
3 from .transports import start_tcp_transport, start_tcp_listener, TCPTransport, Transport
4 from .rpc import Client, attach_stdio_client
5 from .process import start_server
6 from .url import filename_to_uri
7 from .logging import debug
8 import os
9 from .protocol import completion_item_kinds, symbol_kinds
10 try:
11 from typing import Callable, Dict, Any, Optional
12 assert Callable and Dict and Any and Optional and Transport
13 except ImportError:
14 pass
15
16
17 def create_session(config: ClientConfig,
18 project_path: str,
19 env: dict,
20 settings: Settings,
21 on_pre_initialize: 'Optional[Callable[[Session], None]]' = None,
22 on_post_initialize: 'Optional[Callable[[Session], None]]' = None,
23 on_post_exit: 'Optional[Callable[[str], None]]' = None,
24 bootstrap_client: 'Optional[Any]' = None) -> 'Optional[Session]':
25
26 def with_client(client: Client) -> 'Session':
27 return Session(
28 config=config,
29 project_path=project_path,
30 client=client,
31 on_pre_initialize=on_pre_initialize,
32 on_post_initialize=on_post_initialize,
33 on_post_exit=on_post_exit)
34
35 session = None
36 if config.binary_args:
37 tcp_port = config.tcp_port
38 server_args = config.binary_args
39
40 if config.tcp_mode == "host":
41 socket = start_tcp_listener(tcp_port or 0)
42 tcp_port = socket.getsockname()[1]
43 server_args = list(s.replace("{port}", str(tcp_port)) for s in config.binary_args)
44
45 process = start_server(server_args, project_path, env, settings.log_stderr)
46 if process:
47 if config.tcp_mode == "host":
48 client_socket, address = socket.accept()
49 transport = TCPTransport(client_socket) # type: Transport
50 session = with_client(Client(transport, settings))
51 elif tcp_port:
52 transport = start_tcp_transport(tcp_port, config.tcp_host)
53 if transport:
54 session = with_client(Client(transport, settings))
55 else:
56 # try to terminate the process
57 try:
58 process.terminate()
59 except Exception:
60 pass
61 else:
62 session = with_client(attach_stdio_client(process, settings))
63 else:
64 if config.tcp_port:
65 transport = start_tcp_transport(config.tcp_port)
66 session = with_client(Client(transport, settings))
67 elif bootstrap_client:
68 session = with_client(bootstrap_client)
69 else:
70 debug("No way to start session")
71 return session
72
73
74 def get_initialize_params(project_path: str, config: ClientConfig) -> dict:
75 initializeParams = {
76 "processId": os.getpid(),
77 "rootUri": filename_to_uri(project_path),
78 "rootPath": project_path,
79 "capabilities": {
80 "textDocument": {
81 "synchronization": {
82 "didSave": True,
83 "willSaveWaitUntil": True
84 },
85 "hover": {
86 "contentFormat": ["markdown", "plaintext"]
87 },
88 "completion": {
89 "completionItem": {
90 "snippetSupport": True
91 },
92 "completionItemKind": {
93 "valueSet": completion_item_kinds
94 }
95 },
96 "signatureHelp": {
97 "signatureInformation": {
98 "documentationFormat": ["markdown", "plaintext"],
99 "parameterInformation": {
100 "labelOffsetSupport": True
101 }
102 }
103 },
104 "references": {},
105 "documentHighlight": {},
106 "documentSymbol": {
107 "symbolKind": {
108 "valueSet": symbol_kinds
109 }
110 },
111 "formatting": {},
112 "rangeFormatting": {},
113 "declaration": {},
114 "definition": {},
115 "typeDefinition": {},
116 "implementation": {},
117 "codeAction": {
118 "codeActionLiteralSupport": {
119 "codeActionKind": {
120 "valueSet": []
121 }
122 }
123 },
124 "rename": {},
125 "colorProvider": {},
126 "publishDiagnostics": {
127 "relatedInformation": True
128 }
129 },
130 "workspace": {
131 "applyEdit": True,
132 "didChangeConfiguration": {},
133 "executeCommand": {},
134 "symbol": {
135 "symbolKind": {
136 "valueSet": symbol_kinds
137 }
138 }
139 }
140 }
141 }
142 if config.init_options:
143 initializeParams['initializationOptions'] = config.init_options
144
145 return initializeParams
146
147
148 class Session(object):
149 def __init__(self,
150 config: ClientConfig,
151 project_path: str,
152 client: Client,
153 on_pre_initialize: 'Optional[Callable[[Session], None]]' = None,
154 on_post_initialize: 'Optional[Callable[[Session], None]]' = None,
155 on_post_exit: 'Optional[Callable[[str], None]]' = None) -> None:
156 self.config = config
157 self.state = ClientStates.STARTING
158 self._on_post_initialize = on_post_initialize
159 self._on_post_exit = on_post_exit
160 self.capabilities = dict() # type: Dict[str, Any]
161 self.client = client
162 if on_pre_initialize:
163 on_pre_initialize(self)
164 self._initialize(project_path)
165
166 def has_capability(self, capability: str) -> bool:
167 return capability in self.capabilities and self.capabilities[capability] is not False
168
169 def get_capability(self, capability: str) -> 'Optional[Any]':
170 return self.capabilities.get(capability)
171
172 def _initialize(self, project_path: str) -> None:
173 params = get_initialize_params(project_path, self.config)
174 self.client.send_request(
175 Request.initialize(params),
176 lambda result: self._handle_initialize_result(result))
177
178 def _handle_initialize_result(self, result: 'Any') -> None:
179 self.state = ClientStates.READY
180 self.capabilities = result.get('capabilities', dict())
181 if self._on_post_initialize:
182 self._on_post_initialize(self)
183
184 def end(self) -> None:
185 self.state = ClientStates.STOPPING
186 self.client.send_request(
187 Request.shutdown(),
188 lambda result: self._handle_shutdown_result(),
189 lambda error: self._handle_shutdown_result())
190
191 def _handle_shutdown_result(self) -> None:
192 self.client.exit()
193 self.client = None # type: ignore
194 self.capabilities = dict()
195 if self._on_post_exit:
196 self._on_post_exit(self.config.name)
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/core/sessions.py b/plugin/core/sessions.py
--- a/plugin/core/sessions.py
+++ b/plugin/core/sessions.py
@@ -110,10 +110,10 @@
},
"formatting": {},
"rangeFormatting": {},
- "declaration": {},
- "definition": {},
- "typeDefinition": {},
- "implementation": {},
+ "declaration": {"linkSupport": True},
+ "definition": {"linkSupport": True},
+ "typeDefinition": {"linkSupport": True},
+ "implementation": {"linkSupport": True},
"codeAction": {
"codeActionLiteralSupport": {
"codeActionKind": {
diff --git a/plugin/goto.py b/plugin/goto.py
--- a/plugin/goto.py
+++ b/plugin/goto.py
@@ -46,8 +46,13 @@
get_jump_history_for_view(self.view).push_selection(self.view)
# TODO: DocumentLink support.
location = response if isinstance(response, dict) else response[0]
- file_path = uri_to_filename(location.get("uri"))
- start = Point.from_lsp(location['range']['start'])
+ if "targetUri" in location:
+ # TODO: Do something clever with originSelectionRange and targetRange.
+ file_path = uri_to_filename(location["targetUri"])
+ start = Point.from_lsp(location["targetSelectionRange"]["start"])
+ else:
+ file_path = uri_to_filename(location["uri"])
+ start = Point.from_lsp(location["range"]["start"])
file_location = "{}:{}:{}".format(file_path, start.row + 1, start.col + 1)
debug("opening location", location)
window.open_file(file_location, sublime.ENCODED_POSITION)
| {"golden_diff": "diff --git a/plugin/core/sessions.py b/plugin/core/sessions.py\n--- a/plugin/core/sessions.py\n+++ b/plugin/core/sessions.py\n@@ -110,10 +110,10 @@\n },\n \"formatting\": {},\n \"rangeFormatting\": {},\n- \"declaration\": {},\n- \"definition\": {},\n- \"typeDefinition\": {},\n- \"implementation\": {},\n+ \"declaration\": {\"linkSupport\": True},\n+ \"definition\": {\"linkSupport\": True},\n+ \"typeDefinition\": {\"linkSupport\": True},\n+ \"implementation\": {\"linkSupport\": True},\n \"codeAction\": {\n \"codeActionLiteralSupport\": {\n \"codeActionKind\": {\ndiff --git a/plugin/goto.py b/plugin/goto.py\n--- a/plugin/goto.py\n+++ b/plugin/goto.py\n@@ -46,8 +46,13 @@\n get_jump_history_for_view(self.view).push_selection(self.view)\n # TODO: DocumentLink support.\n location = response if isinstance(response, dict) else response[0]\n- file_path = uri_to_filename(location.get(\"uri\"))\n- start = Point.from_lsp(location['range']['start'])\n+ if \"targetUri\" in location:\n+ # TODO: Do something clever with originSelectionRange and targetRange.\n+ file_path = uri_to_filename(location[\"targetUri\"])\n+ start = Point.from_lsp(location[\"targetSelectionRange\"][\"start\"])\n+ else:\n+ file_path = uri_to_filename(location[\"uri\"])\n+ start = Point.from_lsp(location[\"range\"][\"start\"])\n file_location = \"{}:{}:{}\".format(file_path, start.row + 1, start.col + 1)\n debug(\"opening location\", location)\n window.open_file(file_location, sublime.ENCODED_POSITION)\n", "issue": "'lsp_symbol_definition' via keybinding using `ccls' as client.\nWhile I was trying to bind the 'lsp_symbol_definition'-command to my custom keys, I had so suffer from a `... type str does not support Buffer API\u00b4.\r\nOn revisiting the relevant file (\"plugin/goto.py\") I found the TODO comment which mentions the future implementation of DocumentLink-support.\r\n\r\nBy dumping the response-payload of the 'lsp_symbol_definition'-command I found out that the received payload has different keys as beeing parsed by the 'handle_response()' function.\r\nSee here:\r\n```\r\n[{targetUri: file:///usr/include/bash/builtins/bashgetopt.h, targetRange: {end: {line: 38, character: 30}, start: {line:\r\n38, character: 0}}, targetSelectionRange: {end: {line: 38, character: 26}, start: {line: 38, character: 11}}}]\r\n```\r\n\r\n### Solution\r\n\r\nSo I added the following snippet to provide the right parsing method in case of this setup/scenario:\r\n\r\n```python\r\n if 'targetUri' in location:\r\n file_path = uri_to_filename(location['targetUri'])\r\n start = Point.from_lsp(location['targetRange']['start'])\r\n else:\r\n file_path = uri_to_filename(location[\"uri\"])\r\n start = Point.from_lsp(location['range']['start'])\r\n```\r\n\r\nBy adding this the desired function of my keybinding worked.\r\n\r\nI just wanted to share this in the hope it might be useful.\r\nBest regards !\r\n\n", "before_files": [{"content": "import sublime\n\nfrom .core.registry import LspTextCommand\nfrom .core.protocol import Request, Point\nfrom .core.documents import get_document_position, get_position, is_at_word\nfrom .core.url import uri_to_filename\nfrom .core.logging import debug\nfrom Default.history_list import get_jump_history_for_view\n\ntry:\n from typing import List, Dict, Optional, Any\n assert List and Dict and Optional and Any\nexcept ImportError:\n pass\n\n\nclass LspGotoCommand(LspTextCommand):\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.goto_kind = \"definition\"\n\n def 
is_enabled(self, event: 'Optional[dict]' = None) -> bool:\n if self.has_client_with_capability(self.goto_kind + \"Provider\"):\n return is_at_word(self.view, event)\n return False\n\n def run(self, edit: sublime.Edit, event: 'Optional[dict]' = None) -> None:\n client = self.client_with_capability(self.goto_kind + \"Provider\")\n if client:\n pos = get_position(self.view, event)\n document_position = get_document_position(self.view, pos)\n if document_position:\n request_type = getattr(Request, self.goto_kind)\n if not request_type:\n debug(\"unrecognized goto kind:\", self.goto_kind)\n return\n request = request_type(document_position)\n client.send_request(\n request, lambda response: self.handle_response(response, pos))\n\n def handle_response(self, response: 'Optional[Any]', position: int) -> None:\n window = sublime.active_window()\n if response:\n # Save to jump back history.\n get_jump_history_for_view(self.view).push_selection(self.view)\n # TODO: DocumentLink support.\n location = response if isinstance(response, dict) else response[0]\n file_path = uri_to_filename(location.get(\"uri\"))\n start = Point.from_lsp(location['range']['start'])\n file_location = \"{}:{}:{}\".format(file_path, start.row + 1, start.col + 1)\n debug(\"opening location\", location)\n window.open_file(file_location, sublime.ENCODED_POSITION)\n # TODO: can add region here.\n else:\n sublime.status_message(\"Empty response from language server, \"\n \"reverting to Sublime's built-in Goto Definition\")\n window.run_command(\"goto_definition\")\n\n def want_event(self) -> bool:\n return True\n\n\nclass LspSymbolDefinitionCommand(LspGotoCommand):\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.goto_kind = \"definition\"\n\n\nclass LspSymbolTypeDefinitionCommand(LspGotoCommand):\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.goto_kind = \"typeDefinition\"\n\n\nclass LspSymbolDeclarationCommand(LspGotoCommand):\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.goto_kind = \"declaration\"\n\n\nclass LspSymbolImplementationCommand(LspGotoCommand):\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.goto_kind = \"implementation\"\n", "path": "plugin/goto.py"}, {"content": "from .types import ClientConfig, ClientStates, Settings\nfrom .protocol import Request\nfrom .transports import start_tcp_transport, start_tcp_listener, TCPTransport, Transport\nfrom .rpc import Client, attach_stdio_client\nfrom .process import start_server\nfrom .url import filename_to_uri\nfrom .logging import debug\nimport os\nfrom .protocol import completion_item_kinds, symbol_kinds\ntry:\n from typing import Callable, Dict, Any, Optional\n assert Callable and Dict and Any and Optional and Transport\nexcept ImportError:\n pass\n\n\ndef create_session(config: ClientConfig,\n project_path: str,\n env: dict,\n settings: Settings,\n on_pre_initialize: 'Optional[Callable[[Session], None]]' = None,\n on_post_initialize: 'Optional[Callable[[Session], None]]' = None,\n on_post_exit: 'Optional[Callable[[str], None]]' = None,\n bootstrap_client: 'Optional[Any]' = None) -> 'Optional[Session]':\n\n def with_client(client: Client) -> 'Session':\n return Session(\n config=config,\n project_path=project_path,\n client=client,\n on_pre_initialize=on_pre_initialize,\n on_post_initialize=on_post_initialize,\n on_post_exit=on_post_exit)\n\n session = None\n if config.binary_args:\n tcp_port = config.tcp_port\n server_args = 
config.binary_args\n\n if config.tcp_mode == \"host\":\n socket = start_tcp_listener(tcp_port or 0)\n tcp_port = socket.getsockname()[1]\n server_args = list(s.replace(\"{port}\", str(tcp_port)) for s in config.binary_args)\n\n process = start_server(server_args, project_path, env, settings.log_stderr)\n if process:\n if config.tcp_mode == \"host\":\n client_socket, address = socket.accept()\n transport = TCPTransport(client_socket) # type: Transport\n session = with_client(Client(transport, settings))\n elif tcp_port:\n transport = start_tcp_transport(tcp_port, config.tcp_host)\n if transport:\n session = with_client(Client(transport, settings))\n else:\n # try to terminate the process\n try:\n process.terminate()\n except Exception:\n pass\n else:\n session = with_client(attach_stdio_client(process, settings))\n else:\n if config.tcp_port:\n transport = start_tcp_transport(config.tcp_port)\n session = with_client(Client(transport, settings))\n elif bootstrap_client:\n session = with_client(bootstrap_client)\n else:\n debug(\"No way to start session\")\n return session\n\n\ndef get_initialize_params(project_path: str, config: ClientConfig) -> dict:\n initializeParams = {\n \"processId\": os.getpid(),\n \"rootUri\": filename_to_uri(project_path),\n \"rootPath\": project_path,\n \"capabilities\": {\n \"textDocument\": {\n \"synchronization\": {\n \"didSave\": True,\n \"willSaveWaitUntil\": True\n },\n \"hover\": {\n \"contentFormat\": [\"markdown\", \"plaintext\"]\n },\n \"completion\": {\n \"completionItem\": {\n \"snippetSupport\": True\n },\n \"completionItemKind\": {\n \"valueSet\": completion_item_kinds\n }\n },\n \"signatureHelp\": {\n \"signatureInformation\": {\n \"documentationFormat\": [\"markdown\", \"plaintext\"],\n \"parameterInformation\": {\n \"labelOffsetSupport\": True\n }\n }\n },\n \"references\": {},\n \"documentHighlight\": {},\n \"documentSymbol\": {\n \"symbolKind\": {\n \"valueSet\": symbol_kinds\n }\n },\n \"formatting\": {},\n \"rangeFormatting\": {},\n \"declaration\": {},\n \"definition\": {},\n \"typeDefinition\": {},\n \"implementation\": {},\n \"codeAction\": {\n \"codeActionLiteralSupport\": {\n \"codeActionKind\": {\n \"valueSet\": []\n }\n }\n },\n \"rename\": {},\n \"colorProvider\": {},\n \"publishDiagnostics\": {\n \"relatedInformation\": True\n }\n },\n \"workspace\": {\n \"applyEdit\": True,\n \"didChangeConfiguration\": {},\n \"executeCommand\": {},\n \"symbol\": {\n \"symbolKind\": {\n \"valueSet\": symbol_kinds\n }\n }\n }\n }\n }\n if config.init_options:\n initializeParams['initializationOptions'] = config.init_options\n\n return initializeParams\n\n\nclass Session(object):\n def __init__(self,\n config: ClientConfig,\n project_path: str,\n client: Client,\n on_pre_initialize: 'Optional[Callable[[Session], None]]' = None,\n on_post_initialize: 'Optional[Callable[[Session], None]]' = None,\n on_post_exit: 'Optional[Callable[[str], None]]' = None) -> None:\n self.config = config\n self.state = ClientStates.STARTING\n self._on_post_initialize = on_post_initialize\n self._on_post_exit = on_post_exit\n self.capabilities = dict() # type: Dict[str, Any]\n self.client = client\n if on_pre_initialize:\n on_pre_initialize(self)\n self._initialize(project_path)\n\n def has_capability(self, capability: str) -> bool:\n return capability in self.capabilities and self.capabilities[capability] is not False\n\n def get_capability(self, capability: str) -> 'Optional[Any]':\n return self.capabilities.get(capability)\n\n def _initialize(self, project_path: str) -> 
None:\n params = get_initialize_params(project_path, self.config)\n self.client.send_request(\n Request.initialize(params),\n lambda result: self._handle_initialize_result(result))\n\n def _handle_initialize_result(self, result: 'Any') -> None:\n self.state = ClientStates.READY\n self.capabilities = result.get('capabilities', dict())\n if self._on_post_initialize:\n self._on_post_initialize(self)\n\n def end(self) -> None:\n self.state = ClientStates.STOPPING\n self.client.send_request(\n Request.shutdown(),\n lambda result: self._handle_shutdown_result(),\n lambda error: self._handle_shutdown_result())\n\n def _handle_shutdown_result(self) -> None:\n self.client.exit()\n self.client = None # type: ignore\n self.capabilities = dict()\n if self._on_post_exit:\n self._on_post_exit(self.config.name)\n", "path": "plugin/core/sessions.py"}], "after_files": [{"content": "import sublime\n\nfrom .core.registry import LspTextCommand\nfrom .core.protocol import Request, Point\nfrom .core.documents import get_document_position, get_position, is_at_word\nfrom .core.url import uri_to_filename\nfrom .core.logging import debug\nfrom Default.history_list import get_jump_history_for_view\n\ntry:\n from typing import List, Dict, Optional, Any\n assert List and Dict and Optional and Any\nexcept ImportError:\n pass\n\n\nclass LspGotoCommand(LspTextCommand):\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.goto_kind = \"definition\"\n\n def is_enabled(self, event: 'Optional[dict]' = None) -> bool:\n if self.has_client_with_capability(self.goto_kind + \"Provider\"):\n return is_at_word(self.view, event)\n return False\n\n def run(self, edit: sublime.Edit, event: 'Optional[dict]' = None) -> None:\n client = self.client_with_capability(self.goto_kind + \"Provider\")\n if client:\n pos = get_position(self.view, event)\n document_position = get_document_position(self.view, pos)\n if document_position:\n request_type = getattr(Request, self.goto_kind)\n if not request_type:\n debug(\"unrecognized goto kind:\", self.goto_kind)\n return\n request = request_type(document_position)\n client.send_request(\n request, lambda response: self.handle_response(response, pos))\n\n def handle_response(self, response: 'Optional[Any]', position: int) -> None:\n window = sublime.active_window()\n if response:\n # Save to jump back history.\n get_jump_history_for_view(self.view).push_selection(self.view)\n # TODO: DocumentLink support.\n location = response if isinstance(response, dict) else response[0]\n if \"targetUri\" in location:\n # TODO: Do something clever with originSelectionRange and targetRange.\n file_path = uri_to_filename(location[\"targetUri\"])\n start = Point.from_lsp(location[\"targetSelectionRange\"][\"start\"])\n else:\n file_path = uri_to_filename(location[\"uri\"])\n start = Point.from_lsp(location[\"range\"][\"start\"])\n file_location = \"{}:{}:{}\".format(file_path, start.row + 1, start.col + 1)\n debug(\"opening location\", location)\n window.open_file(file_location, sublime.ENCODED_POSITION)\n # TODO: can add region here.\n else:\n sublime.status_message(\"Empty response from language server, \"\n \"reverting to Sublime's built-in Goto Definition\")\n window.run_command(\"goto_definition\")\n\n def want_event(self) -> bool:\n return True\n\n\nclass LspSymbolDefinitionCommand(LspGotoCommand):\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.goto_kind = \"definition\"\n\n\nclass LspSymbolTypeDefinitionCommand(LspGotoCommand):\n\n def 
__init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.goto_kind = \"typeDefinition\"\n\n\nclass LspSymbolDeclarationCommand(LspGotoCommand):\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.goto_kind = \"declaration\"\n\n\nclass LspSymbolImplementationCommand(LspGotoCommand):\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.goto_kind = \"implementation\"\n", "path": "plugin/goto.py"}, {"content": "from .types import ClientConfig, ClientStates, Settings\nfrom .protocol import Request\nfrom .transports import start_tcp_transport, start_tcp_listener, TCPTransport, Transport\nfrom .rpc import Client, attach_stdio_client\nfrom .process import start_server\nfrom .url import filename_to_uri\nfrom .logging import debug\nimport os\nfrom .protocol import completion_item_kinds, symbol_kinds\ntry:\n from typing import Callable, Dict, Any, Optional\n assert Callable and Dict and Any and Optional and Transport\nexcept ImportError:\n pass\n\n\ndef create_session(config: ClientConfig,\n project_path: str,\n env: dict,\n settings: Settings,\n on_pre_initialize: 'Optional[Callable[[Session], None]]' = None,\n on_post_initialize: 'Optional[Callable[[Session], None]]' = None,\n on_post_exit: 'Optional[Callable[[str], None]]' = None,\n bootstrap_client: 'Optional[Any]' = None) -> 'Optional[Session]':\n\n def with_client(client: Client) -> 'Session':\n return Session(\n config=config,\n project_path=project_path,\n client=client,\n on_pre_initialize=on_pre_initialize,\n on_post_initialize=on_post_initialize,\n on_post_exit=on_post_exit)\n\n session = None\n if config.binary_args:\n tcp_port = config.tcp_port\n server_args = config.binary_args\n\n if config.tcp_mode == \"host\":\n socket = start_tcp_listener(tcp_port or 0)\n tcp_port = socket.getsockname()[1]\n server_args = list(s.replace(\"{port}\", str(tcp_port)) for s in config.binary_args)\n\n process = start_server(server_args, project_path, env, settings.log_stderr)\n if process:\n if config.tcp_mode == \"host\":\n client_socket, address = socket.accept()\n transport = TCPTransport(client_socket) # type: Transport\n session = with_client(Client(transport, settings))\n elif tcp_port:\n transport = start_tcp_transport(tcp_port, config.tcp_host)\n if transport:\n session = with_client(Client(transport, settings))\n else:\n # try to terminate the process\n try:\n process.terminate()\n except Exception:\n pass\n else:\n session = with_client(attach_stdio_client(process, settings))\n else:\n if config.tcp_port:\n transport = start_tcp_transport(config.tcp_port)\n session = with_client(Client(transport, settings))\n elif bootstrap_client:\n session = with_client(bootstrap_client)\n else:\n debug(\"No way to start session\")\n return session\n\n\ndef get_initialize_params(project_path: str, config: ClientConfig) -> dict:\n initializeParams = {\n \"processId\": os.getpid(),\n \"rootUri\": filename_to_uri(project_path),\n \"rootPath\": project_path,\n \"capabilities\": {\n \"textDocument\": {\n \"synchronization\": {\n \"didSave\": True,\n \"willSaveWaitUntil\": True\n },\n \"hover\": {\n \"contentFormat\": [\"markdown\", \"plaintext\"]\n },\n \"completion\": {\n \"completionItem\": {\n \"snippetSupport\": True\n },\n \"completionItemKind\": {\n \"valueSet\": completion_item_kinds\n }\n },\n \"signatureHelp\": {\n \"signatureInformation\": {\n \"documentationFormat\": [\"markdown\", \"plaintext\"],\n \"parameterInformation\": {\n \"labelOffsetSupport\": True\n }\n }\n },\n 
\"references\": {},\n \"documentHighlight\": {},\n \"documentSymbol\": {\n \"symbolKind\": {\n \"valueSet\": symbol_kinds\n }\n },\n \"formatting\": {},\n \"rangeFormatting\": {},\n \"declaration\": {\"linkSupport\": True},\n \"definition\": {\"linkSupport\": True},\n \"typeDefinition\": {\"linkSupport\": True},\n \"implementation\": {\"linkSupport\": True},\n \"codeAction\": {\n \"codeActionLiteralSupport\": {\n \"codeActionKind\": {\n \"valueSet\": []\n }\n }\n },\n \"rename\": {},\n \"colorProvider\": {},\n \"publishDiagnostics\": {\n \"relatedInformation\": True\n }\n },\n \"workspace\": {\n \"applyEdit\": True,\n \"didChangeConfiguration\": {},\n \"executeCommand\": {},\n \"symbol\": {\n \"symbolKind\": {\n \"valueSet\": symbol_kinds\n }\n }\n }\n }\n }\n if config.init_options:\n initializeParams['initializationOptions'] = config.init_options\n\n return initializeParams\n\n\nclass Session(object):\n def __init__(self,\n config: ClientConfig,\n project_path: str,\n client: Client,\n on_pre_initialize: 'Optional[Callable[[Session], None]]' = None,\n on_post_initialize: 'Optional[Callable[[Session], None]]' = None,\n on_post_exit: 'Optional[Callable[[str], None]]' = None) -> None:\n self.config = config\n self.state = ClientStates.STARTING\n self._on_post_initialize = on_post_initialize\n self._on_post_exit = on_post_exit\n self.capabilities = dict() # type: Dict[str, Any]\n self.client = client\n if on_pre_initialize:\n on_pre_initialize(self)\n self._initialize(project_path)\n\n def has_capability(self, capability: str) -> bool:\n return capability in self.capabilities and self.capabilities[capability] is not False\n\n def get_capability(self, capability: str) -> 'Optional[Any]':\n return self.capabilities.get(capability)\n\n def _initialize(self, project_path: str) -> None:\n params = get_initialize_params(project_path, self.config)\n self.client.send_request(\n Request.initialize(params),\n lambda result: self._handle_initialize_result(result))\n\n def _handle_initialize_result(self, result: 'Any') -> None:\n self.state = ClientStates.READY\n self.capabilities = result.get('capabilities', dict())\n if self._on_post_initialize:\n self._on_post_initialize(self)\n\n def end(self) -> None:\n self.state = ClientStates.STOPPING\n self.client.send_request(\n Request.shutdown(),\n lambda result: self._handle_shutdown_result(),\n lambda error: self._handle_shutdown_result())\n\n def _handle_shutdown_result(self) -> None:\n self.client.exit()\n self.client = None # type: ignore\n self.capabilities = dict()\n if self._on_post_exit:\n self._on_post_exit(self.config.name)\n", "path": "plugin/core/sessions.py"}]} | 3,342 | 384 |
gh_patches_debug_19041 | rasdani/github-patches | git_diff | spacetelescope__jwql-530 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a 'conda env export' to logging module
We could add the output of `conda env export` to the beginning of log files so that the exact environment being used to run a script is logged.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jwql/utils/logging_functions.py`
Content:
```
1 """ Logging functions for the ``jwql`` automation platform.
2
3 This module provides decorators to log the execution of modules. Log
4 files are written to the ``logs/`` directory in the ``jwql`` central
5 storage area, named by module name and timestamp, e.g.
6 ``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log``
7
8
9 Authors
10 -------
11
12 - Catherine Martlin
13 - Alex Viana (WFC3 QL Version)
14 - Matthew Bourque
15
16 Use
17 ---
18
19 To log the execution of a module, use:
20 ::
21
22 import os
23 import logging
24
25 from jwql.logging.logging_functions import configure_logging
26 from jwql.logging.logging_functions import log_info
27 from jwql.logging.logging_functions import log_fail
28
29 @log_info
30 @log_fail
31 def my_main_function():
32 pass
33
34 if __name__ == '__main__':
35
36 module = os.path.basename(__file__).replace('.py', '')
37 configure_logging(module)
38
39 my_main_function()
40
41 Dependencies
42 ------------
43
44 The user must have a configuration file named ``config.json``
45 placed in the ``utils`` directory and it must contain keys for
46 ``log_dir`` and ``admin_account``.
47
48 References
49 ----------
50 This code is adopted and updated from python routine
51 ``logging_functions.py`` written by Alex Viana, 2013 for the WFC3
52 Quicklook automation platform.
53 """
54
55 import datetime
56 import getpass
57 import importlib
58 import logging
59 import os
60 import pwd
61 import socket
62 import sys
63 import time
64 import traceback
65
66 from functools import wraps
67
68 from jwql.utils.permissions import set_permissions
69 from jwql.utils.utils import get_config, ensure_dir_exists
70
71
72 def configure_logging(module):
73 """Configure the log file with a standard logging format.
74
75 Parameters
76 ----------
77 module : str
78 The name of the module being logged.
79 production_mode : bool
80 Whether or not the output should be written to the production
81 environement.
82 path : str
83 Where to write the log if user-supplied path; default to working dir.
84
85 Returns
86 -------
87 log_file : str
88 The path to the file where the log is written to.
89 """
90
91 # Determine log file location
92 log_file = make_log_file(module)
93
94 # Make sure no other root lhandlers exist before configuring the logger
95 for handler in logging.root.handlers[:]:
96 logging.root.removeHandler(handler)
97
98 # Create the log file and set the permissions
99 logging.basicConfig(filename=log_file,
100 format='%(asctime)s %(levelname)s: %(message)s',
101 datefmt='%m/%d/%Y %H:%M:%S %p',
102 level=logging.INFO)
103 print('Log file initialized to {}'.format(log_file))
104 set_permissions(log_file)
105
106 return log_file
107
108
109 def get_log_status(log_file):
110 """Returns the end status of the given ``log_file`` (i.e.
111 ``SUCCESS`` or ``FAILURE``)
112
113 Parameters
114 ----------
115 log_file : str
116 The path to the file where the log is written to
117
118 Returns
119 -------
120 status : bool
121 The status of the execution of the script described by the log
122 file (i.e. ``SUCCESS`` or ``FAILURE``)
123 """
124
125 with open(log_file, 'r') as f:
126 data = f.readlines()
127 last_line = data[-1].strip()
128
129 if 'Completed Successfully' in last_line:
130 return 'SUCCESS'
131 else:
132 return 'FAILURE'
133
134
135 def make_log_file(module):
136 """Create the log file name based on the module name.
137
138 The name of the ``log_file`` is a combination of the name of the
139 module being logged and the current datetime.
140
141 Parameters
142 ----------
143 module : str
144 The name of the module being logged.
145 production_mode : bool
146 Whether or not the output should be written to the production
147 environment.
148 path : str
149 Where to write the log if user-supplied path; default to
150 working dir.
151
152 Returns
153 -------
154 log_file : str
155 The full path to where the log file will be written to.
156 """
157
158 # Build filename
159 timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')
160 filename = '{0}_{1}.log'.format(module, timestamp)
161
162 # Determine save location
163 user = pwd.getpwuid(os.getuid()).pw_name
164 admin_account = get_config()['admin_account']
165 log_path = get_config()['log_dir']
166
167 # For production
168 if user == admin_account and socket.gethostname()[0] == 'p':
169 log_file = os.path.join(log_path, 'prod', module, filename)
170
171 # For test
172 elif user == admin_account and socket.gethostname()[0] == 't':
173 log_file = os.path.join(log_path, 'test', module, filename)
174
175 # For dev
176 elif user == admin_account and socket.gethostname()[0] == 'd':
177 log_file = os.path.join(log_path, 'dev', module, filename)
178
179 # For local (also write to dev)
180 else:
181 log_file = os.path.join(log_path, 'dev', module, filename)
182
183 # Make sure parent directory exists
184 ensure_dir_exists(os.path.dirname(log_file))
185
186 return log_file
187
188
189 def log_info(func):
190 """Decorator to log useful system information.
191
192 This function can be used as a decorator to log user environment
193 and system information. Future packages we want to track can be
194 added or removed as necessary.
195
196 Parameters
197 ----------
198 func : func
199 The function to decorate.
200
201 Returns
202 -------
203 wrapped : func
204 The wrapped function.
205 """
206
207 @wraps(func)
208 def wrapped(*args, **kwargs):
209
210 # Log environment information
211 logging.info('User: ' + getpass.getuser())
212 logging.info('System: ' + socket.gethostname())
213 logging.info('Python Version: ' + sys.version.replace('\n', ''))
214 logging.info('Python Executable Path: ' + sys.executable)
215
216 # Read in setup.py file to build list of required modules
217 with open(get_config()['setup_file']) as f:
218 data = f.readlines()
219
220 for i, line in enumerate(data):
221 if 'REQUIRES = [' in line:
222 begin = i + 1
223 elif 'setup(' in line:
224 end = i - 2
225 required_modules = data[begin:end]
226
227 # Clean up the module list
228 module_list = [item.strip().replace("'", "").replace(",", "").split("=")[0].split(">")[0].split("<")[0] for item in required_modules]
229
230 # Log common module version information
231 for module in module_list:
232 try:
233 mod = importlib.import_module(module)
234 logging.info(module + ' Version: ' + mod.__version__)
235 logging.info(module + ' Path: ' + mod.__path__[0])
236 except (ImportError, AttributeError) as err:
237 logging.warning(err)
238
239 logging.info('')
240
241 # Call the function and time it
242 t1_cpu = time.clock()
243 t1_time = time.time()
244 func(*args, **kwargs)
245 t2_cpu = time.clock()
246 t2_time = time.time()
247
248 # Log execution time
249 hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60)
250 minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60)
251 hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60)
252 minutes_time, seconds_time = divmod(remainder_time, 60)
253 logging.info('Elapsed Real Time: {}:{}:{}'.format(int(hours_time), int(minutes_time), int(seconds_time)))
254 logging.info('Elapsed CPU Time: {}:{}:{}'.format(int(hours_cpu), int(minutes_cpu), int(seconds_cpu)))
255
256 return wrapped
257
258
259 def log_fail(func):
260 """Decorator to log crashes in the decorated code.
261
262 Parameters
263 ----------
264 func : func
265 The function to decorate.
266
267 Returns
268 -------
269 wrapped : func
270 The wrapped function.
271 """
272
273 @wraps(func)
274 def wrapped(*args, **kwargs):
275
276 try:
277
278 # Run the function
279 func(*args, **kwargs)
280 logging.info('Completed Successfully')
281
282 except Exception:
283 logging.critical(traceback.format_exc())
284 logging.critical('CRASHED')
285
286 return wrapped
287
288
289 def log_timing(func):
290 """Decorator to time a module or function within a code.
291
292 Parameters
293 ----------
294 func : func
295 The function to time.
296
297 Returns
298 -------
299 wrapped : func
300 The wrapped function. Will log the time."""
301
302 def wrapped(*args, **kwargs):
303
304 # Call the function and time it
305 t1_cpu = time.process_time()
306 t1_time = time.time()
307 func(*args, **kwargs)
308 t2_cpu = time.process_time()
309 t2_time = time.time()
310
311 # Log execution time
312 hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60)
313 minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60)
314 hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60)
315 minutes_time, seconds_time = divmod(remainder_time, 60)
316 logging.info('Elapsed Real Time of {}: {}:{}:{}'.format(func.__name__, int(hours_time), int(minutes_time), int(seconds_time)))
317 logging.info('Elapsed CPU Time of {}: {}:{}:{}'.format(func.__name__, int(hours_cpu), int(minutes_cpu), int(seconds_cpu)))
318
319 return wrapped
320
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/jwql/utils/logging_functions.py b/jwql/utils/logging_functions.py
--- a/jwql/utils/logging_functions.py
+++ b/jwql/utils/logging_functions.py
@@ -10,8 +10,9 @@
-------
- Catherine Martlin
- - Alex Viana (WFC3 QL Version)
+ - Alex Viana (wfc3ql Version)
- Matthew Bourque
+ - Jason Neal
Use
---
@@ -59,6 +60,7 @@
import os
import pwd
import socket
+import subprocess
import sys
import time
import traceback
@@ -236,7 +238,10 @@
except (ImportError, AttributeError) as err:
logging.warning(err)
- logging.info('')
+ environment = subprocess.check_output(['conda', 'env', 'export'], universal_newlines=True)
+ logging.info('Environment:')
+ for line in environment.split('\n'):
+ logging.info(line)
# Call the function and time it
t1_cpu = time.clock()
| {"golden_diff": "diff --git a/jwql/utils/logging_functions.py b/jwql/utils/logging_functions.py\n--- a/jwql/utils/logging_functions.py\n+++ b/jwql/utils/logging_functions.py\n@@ -10,8 +10,9 @@\n -------\n \n - Catherine Martlin\n- - Alex Viana (WFC3 QL Version)\n+ - Alex Viana (wfc3ql Version)\n - Matthew Bourque\n+ - Jason Neal\n \n Use\n ---\n@@ -59,6 +60,7 @@\n import os\n import pwd\n import socket\n+import subprocess\n import sys\n import time\n import traceback\n@@ -236,7 +238,10 @@\n except (ImportError, AttributeError) as err:\n logging.warning(err)\n \n- logging.info('')\n+ environment = subprocess.check_output(['conda', 'env', 'export'], universal_newlines=True)\n+ logging.info('Environment:')\n+ for line in environment.split('\\n'):\n+ logging.info(line)\n \n # Call the function and time it\n t1_cpu = time.clock()\n", "issue": "Add a 'conda env export' to logging module\nWe could add the output of `conda env export` to the beginning of log files so that the exact environment being used to run a script is logged. \n", "before_files": [{"content": "\"\"\" Logging functions for the ``jwql`` automation platform.\n\nThis module provides decorators to log the execution of modules. Log\nfiles are written to the ``logs/`` directory in the ``jwql`` central\nstorage area, named by module name and timestamp, e.g.\n``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log``\n\n\nAuthors\n-------\n\n - Catherine Martlin\n - Alex Viana (WFC3 QL Version)\n - Matthew Bourque\n\nUse\n---\n\n To log the execution of a module, use:\n ::\n\n import os\n import logging\n\n from jwql.logging.logging_functions import configure_logging\n from jwql.logging.logging_functions import log_info\n from jwql.logging.logging_functions import log_fail\n\n @log_info\n @log_fail\n def my_main_function():\n pass\n\n if __name__ == '__main__':\n\n module = os.path.basename(__file__).replace('.py', '')\n configure_logging(module)\n\n my_main_function()\n\nDependencies\n------------\n\n The user must have a configuration file named ``config.json``\n placed in the ``utils`` directory and it must contain keys for\n ``log_dir`` and ``admin_account``.\n\nReferences\n----------\n This code is adopted and updated from python routine\n ``logging_functions.py`` written by Alex Viana, 2013 for the WFC3\n Quicklook automation platform.\n\"\"\"\n\nimport datetime\nimport getpass\nimport importlib\nimport logging\nimport os\nimport pwd\nimport socket\nimport sys\nimport time\nimport traceback\n\nfrom functools import wraps\n\nfrom jwql.utils.permissions import set_permissions\nfrom jwql.utils.utils import get_config, ensure_dir_exists\n\n\ndef configure_logging(module):\n \"\"\"Configure the log file with a standard logging format.\n\n Parameters\n ----------\n module : str\n The name of the module being logged.\n production_mode : bool\n Whether or not the output should be written to the production\n environement.\n path : str\n Where to write the log if user-supplied path; default to working dir.\n\n Returns\n -------\n log_file : str\n The path to the file where the log is written to.\n \"\"\"\n\n # Determine log file location\n log_file = make_log_file(module)\n\n # Make sure no other root lhandlers exist before configuring the logger\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n\n # Create the log file and set the permissions\n logging.basicConfig(filename=log_file,\n format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S %p',\n level=logging.INFO)\n 
print('Log file initialized to {}'.format(log_file))\n set_permissions(log_file)\n\n return log_file\n\n\ndef get_log_status(log_file):\n \"\"\"Returns the end status of the given ``log_file`` (i.e.\n ``SUCCESS`` or ``FAILURE``)\n\n Parameters\n ----------\n log_file : str\n The path to the file where the log is written to\n\n Returns\n -------\n status : bool\n The status of the execution of the script described by the log\n file (i.e. ``SUCCESS`` or ``FAILURE``)\n \"\"\"\n\n with open(log_file, 'r') as f:\n data = f.readlines()\n last_line = data[-1].strip()\n\n if 'Completed Successfully' in last_line:\n return 'SUCCESS'\n else:\n return 'FAILURE'\n\n\ndef make_log_file(module):\n \"\"\"Create the log file name based on the module name.\n\n The name of the ``log_file`` is a combination of the name of the\n module being logged and the current datetime.\n\n Parameters\n ----------\n module : str\n The name of the module being logged.\n production_mode : bool\n Whether or not the output should be written to the production\n environment.\n path : str\n Where to write the log if user-supplied path; default to\n working dir.\n\n Returns\n -------\n log_file : str\n The full path to where the log file will be written to.\n \"\"\"\n\n # Build filename\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')\n filename = '{0}_{1}.log'.format(module, timestamp)\n\n # Determine save location\n user = pwd.getpwuid(os.getuid()).pw_name\n admin_account = get_config()['admin_account']\n log_path = get_config()['log_dir']\n\n # For production\n if user == admin_account and socket.gethostname()[0] == 'p':\n log_file = os.path.join(log_path, 'prod', module, filename)\n\n # For test\n elif user == admin_account and socket.gethostname()[0] == 't':\n log_file = os.path.join(log_path, 'test', module, filename)\n\n # For dev\n elif user == admin_account and socket.gethostname()[0] == 'd':\n log_file = os.path.join(log_path, 'dev', module, filename)\n\n # For local (also write to dev)\n else:\n log_file = os.path.join(log_path, 'dev', module, filename)\n\n # Make sure parent directory exists\n ensure_dir_exists(os.path.dirname(log_file))\n\n return log_file\n\n\ndef log_info(func):\n \"\"\"Decorator to log useful system information.\n\n This function can be used as a decorator to log user environment\n and system information. 
Future packages we want to track can be\n added or removed as necessary.\n\n Parameters\n ----------\n func : func\n The function to decorate.\n\n Returns\n -------\n wrapped : func\n The wrapped function.\n \"\"\"\n\n @wraps(func)\n def wrapped(*args, **kwargs):\n\n # Log environment information\n logging.info('User: ' + getpass.getuser())\n logging.info('System: ' + socket.gethostname())\n logging.info('Python Version: ' + sys.version.replace('\\n', ''))\n logging.info('Python Executable Path: ' + sys.executable)\n\n # Read in setup.py file to build list of required modules\n with open(get_config()['setup_file']) as f:\n data = f.readlines()\n\n for i, line in enumerate(data):\n if 'REQUIRES = [' in line:\n begin = i + 1\n elif 'setup(' in line:\n end = i - 2\n required_modules = data[begin:end]\n\n # Clean up the module list\n module_list = [item.strip().replace(\"'\", \"\").replace(\",\", \"\").split(\"=\")[0].split(\">\")[0].split(\"<\")[0] for item in required_modules]\n\n # Log common module version information\n for module in module_list:\n try:\n mod = importlib.import_module(module)\n logging.info(module + ' Version: ' + mod.__version__)\n logging.info(module + ' Path: ' + mod.__path__[0])\n except (ImportError, AttributeError) as err:\n logging.warning(err)\n\n logging.info('')\n\n # Call the function and time it\n t1_cpu = time.clock()\n t1_time = time.time()\n func(*args, **kwargs)\n t2_cpu = time.clock()\n t2_time = time.time()\n\n # Log execution time\n hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60)\n minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60)\n hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60)\n minutes_time, seconds_time = divmod(remainder_time, 60)\n logging.info('Elapsed Real Time: {}:{}:{}'.format(int(hours_time), int(minutes_time), int(seconds_time)))\n logging.info('Elapsed CPU Time: {}:{}:{}'.format(int(hours_cpu), int(minutes_cpu), int(seconds_cpu)))\n\n return wrapped\n\n\ndef log_fail(func):\n \"\"\"Decorator to log crashes in the decorated code.\n\n Parameters\n ----------\n func : func\n The function to decorate.\n\n Returns\n -------\n wrapped : func\n The wrapped function.\n \"\"\"\n\n @wraps(func)\n def wrapped(*args, **kwargs):\n\n try:\n\n # Run the function\n func(*args, **kwargs)\n logging.info('Completed Successfully')\n\n except Exception:\n logging.critical(traceback.format_exc())\n logging.critical('CRASHED')\n\n return wrapped\n\n\ndef log_timing(func):\n \"\"\"Decorator to time a module or function within a code.\n\n Parameters\n ----------\n func : func\n The function to time.\n\n Returns\n -------\n wrapped : func\n The wrapped function. 
Will log the time.\"\"\"\n\n def wrapped(*args, **kwargs):\n\n # Call the function and time it\n t1_cpu = time.process_time()\n t1_time = time.time()\n func(*args, **kwargs)\n t2_cpu = time.process_time()\n t2_time = time.time()\n\n # Log execution time\n hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60)\n minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60)\n hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60)\n minutes_time, seconds_time = divmod(remainder_time, 60)\n logging.info('Elapsed Real Time of {}: {}:{}:{}'.format(func.__name__, int(hours_time), int(minutes_time), int(seconds_time)))\n logging.info('Elapsed CPU Time of {}: {}:{}:{}'.format(func.__name__, int(hours_cpu), int(minutes_cpu), int(seconds_cpu)))\n\n return wrapped\n", "path": "jwql/utils/logging_functions.py"}], "after_files": [{"content": "\"\"\" Logging functions for the ``jwql`` automation platform.\n\nThis module provides decorators to log the execution of modules. Log\nfiles are written to the ``logs/`` directory in the ``jwql`` central\nstorage area, named by module name and timestamp, e.g.\n``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log``\n\n\nAuthors\n-------\n\n - Catherine Martlin\n - Alex Viana (wfc3ql Version)\n - Matthew Bourque\n - Jason Neal\n\nUse\n---\n\n To log the execution of a module, use:\n ::\n\n import os\n import logging\n\n from jwql.logging.logging_functions import configure_logging\n from jwql.logging.logging_functions import log_info\n from jwql.logging.logging_functions import log_fail\n\n @log_info\n @log_fail\n def my_main_function():\n pass\n\n if __name__ == '__main__':\n\n module = os.path.basename(__file__).replace('.py', '')\n configure_logging(module)\n\n my_main_function()\n\nDependencies\n------------\n\n The user must have a configuration file named ``config.json``\n placed in the ``utils`` directory and it must contain keys for\n ``log_dir`` and ``admin_account``.\n\nReferences\n----------\n This code is adopted and updated from python routine\n ``logging_functions.py`` written by Alex Viana, 2013 for the WFC3\n Quicklook automation platform.\n\"\"\"\n\nimport datetime\nimport getpass\nimport importlib\nimport logging\nimport os\nimport pwd\nimport socket\nimport subprocess\nimport sys\nimport time\nimport traceback\n\nfrom functools import wraps\n\nfrom jwql.utils.permissions import set_permissions\nfrom jwql.utils.utils import get_config, ensure_dir_exists\n\n\ndef configure_logging(module):\n \"\"\"Configure the log file with a standard logging format.\n\n Parameters\n ----------\n module : str\n The name of the module being logged.\n production_mode : bool\n Whether or not the output should be written to the production\n environement.\n path : str\n Where to write the log if user-supplied path; default to working dir.\n\n Returns\n -------\n log_file : str\n The path to the file where the log is written to.\n \"\"\"\n\n # Determine log file location\n log_file = make_log_file(module)\n\n # Make sure no other root lhandlers exist before configuring the logger\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n\n # Create the log file and set the permissions\n logging.basicConfig(filename=log_file,\n format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S %p',\n level=logging.INFO)\n print('Log file initialized to {}'.format(log_file))\n set_permissions(log_file)\n\n return log_file\n\n\ndef get_log_status(log_file):\n \"\"\"Returns the end status of the given ``log_file`` (i.e.\n 
``SUCCESS`` or ``FAILURE``)\n\n Parameters\n ----------\n log_file : str\n The path to the file where the log is written to\n\n Returns\n -------\n status : bool\n The status of the execution of the script described by the log\n file (i.e. ``SUCCESS`` or ``FAILURE``)\n \"\"\"\n\n with open(log_file, 'r') as f:\n data = f.readlines()\n last_line = data[-1].strip()\n\n if 'Completed Successfully' in last_line:\n return 'SUCCESS'\n else:\n return 'FAILURE'\n\n\ndef make_log_file(module):\n \"\"\"Create the log file name based on the module name.\n\n The name of the ``log_file`` is a combination of the name of the\n module being logged and the current datetime.\n\n Parameters\n ----------\n module : str\n The name of the module being logged.\n production_mode : bool\n Whether or not the output should be written to the production\n environment.\n path : str\n Where to write the log if user-supplied path; default to\n working dir.\n\n Returns\n -------\n log_file : str\n The full path to where the log file will be written to.\n \"\"\"\n\n # Build filename\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')\n filename = '{0}_{1}.log'.format(module, timestamp)\n\n # Determine save location\n user = pwd.getpwuid(os.getuid()).pw_name\n admin_account = get_config()['admin_account']\n log_path = get_config()['log_dir']\n\n # For production\n if user == admin_account and socket.gethostname()[0] == 'p':\n log_file = os.path.join(log_path, 'prod', module, filename)\n\n # For test\n elif user == admin_account and socket.gethostname()[0] == 't':\n log_file = os.path.join(log_path, 'test', module, filename)\n\n # For dev\n elif user == admin_account and socket.gethostname()[0] == 'd':\n log_file = os.path.join(log_path, 'dev', module, filename)\n\n # For local (also write to dev)\n else:\n log_file = os.path.join(log_path, 'dev', module, filename)\n\n # Make sure parent directory exists\n ensure_dir_exists(os.path.dirname(log_file))\n\n return log_file\n\n\ndef log_info(func):\n \"\"\"Decorator to log useful system information.\n\n This function can be used as a decorator to log user environment\n and system information. 
Future packages we want to track can be\n added or removed as necessary.\n\n Parameters\n ----------\n func : func\n The function to decorate.\n\n Returns\n -------\n wrapped : func\n The wrapped function.\n \"\"\"\n\n @wraps(func)\n def wrapped(*args, **kwargs):\n\n # Log environment information\n logging.info('User: ' + getpass.getuser())\n logging.info('System: ' + socket.gethostname())\n logging.info('Python Version: ' + sys.version.replace('\\n', ''))\n logging.info('Python Executable Path: ' + sys.executable)\n\n # Read in setup.py file to build list of required modules\n with open(get_config()['setup_file']) as f:\n data = f.readlines()\n\n for i, line in enumerate(data):\n if 'REQUIRES = [' in line:\n begin = i + 1\n elif 'setup(' in line:\n end = i - 2\n required_modules = data[begin:end]\n\n # Clean up the module list\n module_list = [item.strip().replace(\"'\", \"\").replace(\",\", \"\").split(\"=\")[0].split(\">\")[0].split(\"<\")[0] for item in required_modules]\n\n # Log common module version information\n for module in module_list:\n try:\n mod = importlib.import_module(module)\n logging.info(module + ' Version: ' + mod.__version__)\n logging.info(module + ' Path: ' + mod.__path__[0])\n except (ImportError, AttributeError) as err:\n logging.warning(err)\n\n environment = subprocess.check_output(['conda', 'env', 'export'], universal_newlines=True)\n logging.info('Environment:')\n for line in environment.split('\\n'):\n logging.info(line)\n\n # Call the function and time it\n t1_cpu = time.clock()\n t1_time = time.time()\n func(*args, **kwargs)\n t2_cpu = time.clock()\n t2_time = time.time()\n\n # Log execution time\n hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60)\n minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60)\n hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60)\n minutes_time, seconds_time = divmod(remainder_time, 60)\n logging.info('Elapsed Real Time: {}:{}:{}'.format(int(hours_time), int(minutes_time), int(seconds_time)))\n logging.info('Elapsed CPU Time: {}:{}:{}'.format(int(hours_cpu), int(minutes_cpu), int(seconds_cpu)))\n\n return wrapped\n\n\ndef log_fail(func):\n \"\"\"Decorator to log crashes in the decorated code.\n\n Parameters\n ----------\n func : func\n The function to decorate.\n\n Returns\n -------\n wrapped : func\n The wrapped function.\n \"\"\"\n\n @wraps(func)\n def wrapped(*args, **kwargs):\n\n try:\n\n # Run the function\n func(*args, **kwargs)\n logging.info('Completed Successfully')\n\n except Exception:\n logging.critical(traceback.format_exc())\n logging.critical('CRASHED')\n\n return wrapped\n\n\ndef log_timing(func):\n \"\"\"Decorator to time a module or function within a code.\n\n Parameters\n ----------\n func : func\n The function to time.\n\n Returns\n -------\n wrapped : func\n The wrapped function. 
Will log the time.\"\"\"\n\n def wrapped(*args, **kwargs):\n\n # Call the function and time it\n t1_cpu = time.process_time()\n t1_time = time.time()\n func(*args, **kwargs)\n t2_cpu = time.process_time()\n t2_time = time.time()\n\n # Log execution time\n hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60)\n minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60)\n hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60)\n minutes_time, seconds_time = divmod(remainder_time, 60)\n logging.info('Elapsed Real Time of {}: {}:{}:{}'.format(func.__name__, int(hours_time), int(minutes_time), int(seconds_time)))\n logging.info('Elapsed CPU Time of {}: {}:{}:{}'.format(func.__name__, int(hours_cpu), int(minutes_cpu), int(seconds_cpu)))\n\n return wrapped\n", "path": "jwql/utils/logging_functions.py"}]} | 3,316 | 235 |
gh_patches_debug_140 | rasdani/github-patches | git_diff | d2l-ai__d2l-en-2078 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[MXNet] matplotlib >=3.5 raises TypeError with ax.plot_wireframe in MXNet ndarray
With the latest version of matplotlib, multiple notebooks fail with a type error in mxnet (mxnet==1.7.0 & CUDA 10.2). Some of the affected sections include [optimization intro](https://d2l.ai/chapter_optimization/optimization-intro.html), [integral calculus](https://d2l.ai/chapter_appendix-mathematics-for-deep-learning/integral-calculus.html), [multivariable calculus](https://d2l.ai/chapter_appendix-mathematics-for-deep-learning/multivariable-calculus.html) etc.
```
TypeError: no implementation found for 'numpy.column_stack' on types that implement __array_function__: [<class 'mxnet.numpy.ndarray'>, <class 'numpy.ndarray'>]
```
Please see attached traceback and reproduction instructions below.
Steps to reproduce the issue.
1. Setup the d2l environment (using `static/build.yml`)
2. While setting up the environment, it will automatically install the latest version of matplotlib (i.e. `matplotlib==3.5.1` as of today).
Run one of the notebooks which is affected (mentioned above)
<details>
<summary>Click to expand: Error Traceback</summary>
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Input In [7], in <module>
9 # Plot function
10 ax = d2l.plt.figure().add_subplot(111, projection='3d')
---> 11 ax.plot_wireframe(x, y, z, **{'rstride': 10, 'cstride': 10})
12 ax.plot_wireframe(x, y, w, **{'rstride': 10, 'cstride': 10}, color='purple')
13 d2l.plt.xlabel('x')
File ~/miniconda3/envs/mpl_d2l/lib/python3.8/site-packages/matplotlib/_api/deprecation.py:412, in delete_parameter.<locals>.wrapper(*inner_args, **inner_kwargs)
402 deprecation_addendum = (
403 f"If any parameter follows {name!r}, they should be passed as "
404 f"keyword, not positionally.")
405 warn_deprecated(
406 since,
407 name=repr(name),
(...)
410 else deprecation_addendum,
411 **kwargs)
--> 412 return func(*inner_args, **inner_kwargs)
File ~/miniconda3/envs/mpl_d2l/lib/python3.8/site-packages/mpl_toolkits/mplot3d/axes3d.py:1908, in Axes3D.plot_wireframe(self, X, Y, Z, *args, **kwargs)
1906 linec = art3d.Line3DCollection(lines, *args, **kwargs)
1907 self.add_collection(linec)
-> 1908 self.auto_scale_xyz(X, Y, Z, had_data)
1910 return linec
File ~/miniconda3/envs/mpl_d2l/lib/python3.8/site-packages/mpl_toolkits/mplot3d/axes3d.py:658, in Axes3D.auto_scale_xyz(self, X, Y, Z, had_data)
656 self.xy_dataLim.update_from_data_y(Y, not had_data)
657 if Z is not None:
--> 658 self.zz_dataLim.update_from_data_x(Z, not had_data)
659 # Let autoscale_view figure out how to use this data.
660 self.autoscale_view()
File ~/miniconda3/envs/mpl_d2l/lib/python3.8/site-packages/matplotlib/transforms.py:922, in Bbox.update_from_data_x(self, x, ignore)
906 """
907 Update the x-bounds of the `Bbox` based on the passed in data. After
908 updating, the bounds will have positive *width*, and *x0* will be the
(...)
919 - When ``None``, use the last value passed to :meth:`ignore`.
920 """
921 x = np.ravel(x)
--> 922 self.update_from_data_xy(np.column_stack([x, np.ones(x.size)]),
923 ignore=ignore, updatey=False)
File <__array_function__ internals>:180, in column_stack(*args, **kwargs)
TypeError: no implementation found for 'numpy.column_stack' on types that implement __array_function__: [<class 'mxnet.numpy.ndarray'>, <class 'numpy.ndarray'>]
```
</details>
This is another issue validating the need for #2044.

A simple solution for now is to pin the matplotlib version to 3.4. I'll send a PR for this.
cc @astonzhang
--- END ISSUE ---
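For reference, the dispatch failure described in the issue can be reproduced without matplotlib at all; a minimal sketch, assuming mxnet >= 1.6 (with its `mxnet.numpy` interface) and a recent NumPy are installed:

```python
# Minimal sketch of the underlying failure (assumed environment: mxnet >= 1.6, numpy).
import numpy as np
from mxnet import np as mx_np  # mxnet's numpy-compatible ndarray module

x = mx_np.ones(3)   # mxnet.numpy.ndarray, which implements __array_function__
y = np.ones(3)      # plain numpy.ndarray

# matplotlib >= 3.5 calls np.column_stack on the data handed to plot_wireframe.
# Mixing the two array types makes NumPy dispatch to mxnet, which provides no
# column_stack implementation, so the call raises the TypeError quoted above.
np.column_stack([x, y])
```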
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 import d2l
3
4 requirements = [
5 'jupyter',
6 'numpy',
7 'matplotlib==3.4',
8 'requests',
9 'pandas',
10 'gym'
11 ]
12
13 setup(
14 name='d2l',
15 version=d2l.__version__,
16 python_requires='>=3.5',
17 author='D2L Developers',
18 author_email='[email protected]',
19 url='https://d2l.ai',
20 description='Dive into Deep Learning',
21 license='MIT-0',
22 packages=find_packages(),
23 zip_safe=True,
24 install_requires=requirements,
25 )
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,7 @@
requirements = [
'jupyter',
'numpy',
- 'matplotlib==3.4',
+ 'matplotlib',
'requests',
'pandas',
'gym'
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,7 +4,7 @@\n requirements = [\n 'jupyter',\n 'numpy',\n- 'matplotlib==3.4',\n+ 'matplotlib',\n 'requests',\n 'pandas',\n 'gym'\n", "issue": "[MXNet] matplotlib >=3.5 raises TypeError with ax.plot_wireframe in MXNet ndarray\nWith the latest version of matplotlib, multiple notebooks fail with a type error in mxnet (mxnet==1.7.0 & CUDA 10.2). Some of the affected sections include [optimization intro](https://d2l.ai/chapter_optimization/optimization-intro.html), [integral calculus](https://d2l.ai/chapter_appendix-mathematics-for-deep-learning/integral-calculus.html), [multivariable calculus](https://d2l.ai/chapter_appendix-mathematics-for-deep-learning/multivariable-calculus.html) etc.\r\n\r\n```\r\nTypeError: no implementation found for 'numpy.column_stack' on types that implement __array_function__: [<class 'mxnet.numpy.ndarray'>, <class 'numpy.ndarray'>]\r\n```\r\n\r\nPlease see attached traceback and reproduction instructions below.\r\n\r\nSteps to reproduce the issue.\r\n\r\n1. Setup the d2l environment (using `static/build.yml`)\r\n2. While setting up the environment, it will automatically install the latest version of matplotlib (i.e. `matplotlib==3.5.1` as of today). \r\n\r\nRun one of the notebooks which is affected (mentioned above) \r\n\r\n<details>\r\n <summary>Click to expand: Error Traceback</summary>\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\nInput In [7], in <module>\r\n 9 # Plot function\r\n 10 ax = d2l.plt.figure().add_subplot(111, projection='3d')\r\n---> 11 ax.plot_wireframe(x, y, z, **{'rstride': 10, 'cstride': 10})\r\n 12 ax.plot_wireframe(x, y, w, **{'rstride': 10, 'cstride': 10}, color='purple')\r\n 13 d2l.plt.xlabel('x')\r\n\r\nFile ~/miniconda3/envs/mpl_d2l/lib/python3.8/site-packages/matplotlib/_api/deprecation.py:412, in delete_parameter.<locals>.wrapper(*inner_args, **inner_kwargs)\r\n 402 deprecation_addendum = (\r\n 403 f\"If any parameter follows {name!r}, they should be passed as \"\r\n 404 f\"keyword, not positionally.\")\r\n 405 warn_deprecated(\r\n 406 since,\r\n 407 name=repr(name),\r\n (...)\r\n 410 else deprecation_addendum,\r\n 411 **kwargs)\r\n--> 412 return func(*inner_args, **inner_kwargs)\r\n\r\nFile ~/miniconda3/envs/mpl_d2l/lib/python3.8/site-packages/mpl_toolkits/mplot3d/axes3d.py:1908, in Axes3D.plot_wireframe(self, X, Y, Z, *args, **kwargs)\r\n 1906 linec = art3d.Line3DCollection(lines, *args, **kwargs)\r\n 1907 self.add_collection(linec)\r\n-> 1908 self.auto_scale_xyz(X, Y, Z, had_data)\r\n 1910 return linec\r\n\r\nFile ~/miniconda3/envs/mpl_d2l/lib/python3.8/site-packages/mpl_toolkits/mplot3d/axes3d.py:658, in Axes3D.auto_scale_xyz(self, X, Y, Z, had_data)\r\n 656 self.xy_dataLim.update_from_data_y(Y, not had_data)\r\n 657 if Z is not None:\r\n--> 658 self.zz_dataLim.update_from_data_x(Z, not had_data)\r\n 659 # Let autoscale_view figure out how to use this data.\r\n 660 self.autoscale_view()\r\n\r\nFile ~/miniconda3/envs/mpl_d2l/lib/python3.8/site-packages/matplotlib/transforms.py:922, in Bbox.update_from_data_x(self, x, ignore)\r\n 906 \"\"\"\r\n 907 Update the x-bounds of the `Bbox` based on the passed in data. 
After\r\n 908 updating, the bounds will have positive *width*, and *x0* will be the\r\n (...)\r\n 919 - When ``None``, use the last value passed to :meth:`ignore`.\r\n 920 \"\"\"\r\n 921 x = np.ravel(x)\r\n--> 922 self.update_from_data_xy(np.column_stack([x, np.ones(x.size)]),\r\n 923 ignore=ignore, updatey=False)\r\n\r\nFile <__array_function__ internals>:180, in column_stack(*args, **kwargs)\r\n\r\nTypeError: no implementation found for 'numpy.column_stack' on types that implement __array_function__: [<class 'mxnet.numpy.ndarray'>, <class 'numpy.ndarray'>]\r\n```\r\n\r\n</details>\r\n\r\nThis is another issue validating the need of #2044.\r\n\r\nA simple solution for now is to pin the matplotlib version to 1.4. I'll send a PR for this.\r\n\r\ncc @astonzhang \n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport d2l\n\nrequirements = [\n 'jupyter',\n 'numpy',\n 'matplotlib==3.4',\n 'requests',\n 'pandas',\n 'gym'\n]\n\nsetup(\n name='d2l',\n version=d2l.__version__,\n python_requires='>=3.5',\n author='D2L Developers',\n author_email='[email protected]',\n url='https://d2l.ai',\n description='Dive into Deep Learning',\n license='MIT-0',\n packages=find_packages(),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nimport d2l\n\nrequirements = [\n 'jupyter',\n 'numpy',\n 'matplotlib',\n 'requests',\n 'pandas',\n 'gym'\n]\n\nsetup(\n name='d2l',\n version=d2l.__version__,\n python_requires='>=3.5',\n author='D2L Developers',\n author_email='[email protected]',\n url='https://d2l.ai',\n description='Dive into Deep Learning',\n license='MIT-0',\n packages=find_packages(),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py"}]} | 1,558 | 70 |
gh_patches_debug_23097 | rasdani/github-patches | git_diff | CTFd__CTFd-428 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Configuration can be auto-filled potentially causing issues
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/admin/__init__.py`
Content:
```
1 import hashlib
2 import json
3 import os
4 import datetime
5
6 from flask import current_app as app, render_template, request, redirect, jsonify, url_for, Blueprint, \
7 abort, render_template_string, send_file
8 from passlib.hash import bcrypt_sha256
9 from sqlalchemy.sql import not_
10 from sqlalchemy.exc import IntegrityError
11
12 from CTFd.utils import admins_only, is_admin, cache, export_ctf, import_ctf
13 from CTFd.models import db, Teams, Solves, Awards, Challenges, WrongKeys, Keys, Tags, Files, Tracking, Pages, Config, DatabaseError
14 from CTFd.plugins.keys import get_key_class, KEY_CLASSES
15
16 from CTFd.admin.statistics import admin_statistics
17 from CTFd.admin.challenges import admin_challenges
18 from CTFd.admin.scoreboard import admin_scoreboard
19 from CTFd.admin.pages import admin_pages
20 from CTFd.admin.keys import admin_keys
21 from CTFd.admin.teams import admin_teams
22
23 from CTFd import utils
24
25
26 admin = Blueprint('admin', __name__)
27
28
29 @admin.route('/admin', methods=['GET'])
30 def admin_view():
31 if is_admin():
32 return redirect(url_for('admin_statistics.admin_graphs'))
33
34 return redirect(url_for('auth.login'))
35
36
37 @admin.route('/admin/plugins/<plugin>', methods=['GET', 'POST'])
38 @admins_only
39 def admin_plugin_config(plugin):
40 if request.method == 'GET':
41 plugins_path = os.path.join(app.root_path, 'plugins')
42
43 config_html_plugins = [name for name in os.listdir(plugins_path)
44 if os.path.isfile(os.path.join(plugins_path, name, 'config.html'))]
45
46 if plugin in config_html_plugins:
47 config = open(os.path.join(app.root_path, 'plugins', plugin, 'config.html')).read()
48 return render_template_string(config)
49 abort(404)
50 elif request.method == 'POST':
51 for k, v in request.form.items():
52 if k == "nonce":
53 continue
54 utils.set_config(k, v)
55 with app.app_context():
56 cache.clear()
57 return '1'
58
59
60 @admin.route('/admin/import', methods=['GET', 'POST'])
61 @admins_only
62 def admin_import_ctf():
63 backup = request.files['backup']
64 segments = request.form.get('segments')
65 errors = []
66 try:
67 if segments:
68 import_ctf(backup, segments=segments.split(','))
69 else:
70 import_ctf(backup)
71 except Exception as e:
72 print(e)
73 errors.append(type(e).__name__)
74
75 if errors:
76 return errors[0], 500
77 else:
78 return redirect(url_for('admin.admin_config'))
79
80
81 @admin.route('/admin/export', methods=['GET', 'POST'])
82 @admins_only
83 def admin_export_ctf():
84 segments = request.args.get('segments')
85 if segments:
86 backup = export_ctf(segments.split(','))
87 else:
88 backup = export_ctf()
89 ctf_name = utils.ctf_name()
90 day = datetime.datetime.now().strftime("%Y-%m-%d")
91 full_name = "{}.{}.zip".format(ctf_name, day)
92 return send_file(backup, as_attachment=True, attachment_filename=full_name)
93
94
95 @admin.route('/admin/config', methods=['GET', 'POST'])
96 @admins_only
97 def admin_config():
98 if request.method == "POST":
99 start = None
100 end = None
101 freeze = None
102 if request.form.get('start'):
103 start = int(request.form['start'])
104 if request.form.get('end'):
105 end = int(request.form['end'])
106 if request.form.get('freeze'):
107 freeze = int(request.form['freeze'])
108
109 try:
110 view_challenges_unregistered = bool(request.form.get('view_challenges_unregistered', None))
111 view_scoreboard_if_authed = bool(request.form.get('view_scoreboard_if_authed', None))
112 hide_scores = bool(request.form.get('hide_scores', None))
113 prevent_registration = bool(request.form.get('prevent_registration', None))
114 prevent_name_change = bool(request.form.get('prevent_name_change', None))
115 view_after_ctf = bool(request.form.get('view_after_ctf', None))
116 verify_emails = bool(request.form.get('verify_emails', None))
117 mail_tls = bool(request.form.get('mail_tls', None))
118 mail_ssl = bool(request.form.get('mail_ssl', None))
119 mail_useauth = bool(request.form.get('mail_useauth', None))
120 except (ValueError, TypeError):
121 view_challenges_unregistered = None
122 view_scoreboard_if_authed = None
123 hide_scores = None
124 prevent_registration = None
125 prevent_name_change = None
126 view_after_ctf = None
127 verify_emails = None
128 mail_tls = None
129 mail_ssl = None
130 mail_useauth = None
131 finally:
132 view_challenges_unregistered = utils.set_config('view_challenges_unregistered', view_challenges_unregistered)
133 view_scoreboard_if_authed = utils.set_config('view_scoreboard_if_authed', view_scoreboard_if_authed)
134 hide_scores = utils.set_config('hide_scores', hide_scores)
135 prevent_registration = utils.set_config('prevent_registration', prevent_registration)
136 prevent_name_change = utils.set_config('prevent_name_change', prevent_name_change)
137 view_after_ctf = utils.set_config('view_after_ctf', view_after_ctf)
138 verify_emails = utils.set_config('verify_emails', verify_emails)
139 mail_tls = utils.set_config('mail_tls', mail_tls)
140 mail_ssl = utils.set_config('mail_ssl', mail_ssl)
141 mail_useauth = utils.set_config('mail_useauth', mail_useauth)
142
143 mail_server = utils.set_config("mail_server", request.form.get('mail_server', None))
144 mail_port = utils.set_config("mail_port", request.form.get('mail_port', None))
145
146 mail_username = utils.set_config("mail_username", request.form.get('mail_username', None))
147 mail_password = utils.set_config("mail_password", request.form.get('mail_password', None))
148
149 ctf_name = utils.set_config("ctf_name", request.form.get('ctf_name', None))
150 ctf_theme = utils.set_config("ctf_theme", request.form.get('ctf_theme', None))
151
152 mailfrom_addr = utils.set_config("mailfrom_addr", request.form.get('mailfrom_addr', None))
153 mg_base_url = utils.set_config("mg_base_url", request.form.get('mg_base_url', None))
154 mg_api_key = utils.set_config("mg_api_key", request.form.get('mg_api_key', None))
155
156 db_freeze = utils.set_config("freeze", freeze)
157
158 db_start = Config.query.filter_by(key='start').first()
159 db_start.value = start
160
161 db_end = Config.query.filter_by(key='end').first()
162 db_end.value = end
163
164 db.session.add(db_start)
165 db.session.add(db_end)
166
167 db.session.commit()
168 db.session.close()
169 with app.app_context():
170 cache.clear()
171 return redirect(url_for('admin.admin_config'))
172
173 with app.app_context():
174 cache.clear()
175 ctf_name = utils.get_config('ctf_name')
176 ctf_theme = utils.get_config('ctf_theme')
177 hide_scores = utils.get_config('hide_scores')
178
179 mail_server = utils.get_config('mail_server')
180 mail_port = utils.get_config('mail_port')
181 mail_username = utils.get_config('mail_username')
182 mail_password = utils.get_config('mail_password')
183
184 mailfrom_addr = utils.get_config('mailfrom_addr')
185 mg_api_key = utils.get_config('mg_api_key')
186 mg_base_url = utils.get_config('mg_base_url')
187
188 view_after_ctf = utils.get_config('view_after_ctf')
189 start = utils.get_config('start')
190 end = utils.get_config('end')
191 freeze = utils.get_config('freeze')
192
193 mail_tls = utils.get_config('mail_tls')
194 mail_ssl = utils.get_config('mail_ssl')
195 mail_useauth = utils.get_config('mail_useauth')
196
197 view_challenges_unregistered = utils.get_config('view_challenges_unregistered')
198 view_scoreboard_if_authed = utils.get_config('view_scoreboard_if_authed')
199 prevent_registration = utils.get_config('prevent_registration')
200 prevent_name_change = utils.get_config('prevent_name_change')
201 verify_emails = utils.get_config('verify_emails')
202
203 db.session.commit()
204 db.session.close()
205
206 themes = utils.get_themes()
207 themes.remove(ctf_theme)
208
209 return render_template('admin/config.html',
210 ctf_name=ctf_name,
211 ctf_theme_config=ctf_theme,
212 start=start,
213 end=end,
214 freeze=freeze,
215 hide_scores=hide_scores,
216 mail_server=mail_server,
217 mail_port=mail_port,
218 mail_useauth=mail_useauth,
219 mail_username=mail_username,
220 mail_password=mail_password,
221 mail_tls=mail_tls,
222 mail_ssl=mail_ssl,
223 view_challenges_unregistered=view_challenges_unregistered,
224 view_scoreboard_if_authed=view_scoreboard_if_authed,
225 prevent_registration=prevent_registration,
226 mailfrom_addr=mailfrom_addr,
227 mg_base_url=mg_base_url,
228 mg_api_key=mg_api_key,
229 prevent_name_change=prevent_name_change,
230 verify_emails=verify_emails,
231 view_after_ctf=view_after_ctf,
232 themes=themes)
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/admin/__init__.py b/CTFd/admin/__init__.py
--- a/CTFd/admin/__init__.py
+++ b/CTFd/admin/__init__.py
@@ -143,8 +143,15 @@
mail_server = utils.set_config("mail_server", request.form.get('mail_server', None))
mail_port = utils.set_config("mail_port", request.form.get('mail_port', None))
- mail_username = utils.set_config("mail_username", request.form.get('mail_username', None))
- mail_password = utils.set_config("mail_password", request.form.get('mail_password', None))
+ if request.form.get('mail_useauth', None) and (request.form.get('mail_u', None) or request.form.get('mail_p', None)):
+ if len(request.form.get('mail_u')) > 0:
+ mail_username = utils.set_config("mail_username", request.form.get('mail_u', None))
+ if len(request.form.get('mail_p')) > 0:
+ mail_password = utils.set_config("mail_password", request.form.get('mail_p', None))
+
+ elif request.form.get('mail_useauth', None) is None:
+ utils.set_config("mail_username", None)
+ utils.set_config("mail_password", None)
ctf_name = utils.set_config("ctf_name", request.form.get('ctf_name', None))
ctf_theme = utils.set_config("ctf_theme", request.form.get('ctf_theme', None))
| {"golden_diff": "diff --git a/CTFd/admin/__init__.py b/CTFd/admin/__init__.py\n--- a/CTFd/admin/__init__.py\n+++ b/CTFd/admin/__init__.py\n@@ -143,8 +143,15 @@\n mail_server = utils.set_config(\"mail_server\", request.form.get('mail_server', None))\n mail_port = utils.set_config(\"mail_port\", request.form.get('mail_port', None))\n \n- mail_username = utils.set_config(\"mail_username\", request.form.get('mail_username', None))\n- mail_password = utils.set_config(\"mail_password\", request.form.get('mail_password', None))\n+ if request.form.get('mail_useauth', None) and (request.form.get('mail_u', None) or request.form.get('mail_p', None)):\n+ if len(request.form.get('mail_u')) > 0:\n+ mail_username = utils.set_config(\"mail_username\", request.form.get('mail_u', None))\n+ if len(request.form.get('mail_p')) > 0:\n+ mail_password = utils.set_config(\"mail_password\", request.form.get('mail_p', None))\n+\n+ elif request.form.get('mail_useauth', None) is None:\n+ utils.set_config(\"mail_username\", None)\n+ utils.set_config(\"mail_password\", None)\n \n ctf_name = utils.set_config(\"ctf_name\", request.form.get('ctf_name', None))\n ctf_theme = utils.set_config(\"ctf_theme\", request.form.get('ctf_theme', None))\n", "issue": "Configuration can be auto-filled potentially causing issues\n\n", "before_files": [{"content": "import hashlib\nimport json\nimport os\nimport datetime\n\nfrom flask import current_app as app, render_template, request, redirect, jsonify, url_for, Blueprint, \\\n abort, render_template_string, send_file\nfrom passlib.hash import bcrypt_sha256\nfrom sqlalchemy.sql import not_\nfrom sqlalchemy.exc import IntegrityError\n\nfrom CTFd.utils import admins_only, is_admin, cache, export_ctf, import_ctf\nfrom CTFd.models import db, Teams, Solves, Awards, Challenges, WrongKeys, Keys, Tags, Files, Tracking, Pages, Config, DatabaseError\nfrom CTFd.plugins.keys import get_key_class, KEY_CLASSES\n\nfrom CTFd.admin.statistics import admin_statistics\nfrom CTFd.admin.challenges import admin_challenges\nfrom CTFd.admin.scoreboard import admin_scoreboard\nfrom CTFd.admin.pages import admin_pages\nfrom CTFd.admin.keys import admin_keys\nfrom CTFd.admin.teams import admin_teams\n\nfrom CTFd import utils\n\n\nadmin = Blueprint('admin', __name__)\n\n\[email protected]('/admin', methods=['GET'])\ndef admin_view():\n if is_admin():\n return redirect(url_for('admin_statistics.admin_graphs'))\n\n return redirect(url_for('auth.login'))\n\n\[email protected]('/admin/plugins/<plugin>', methods=['GET', 'POST'])\n@admins_only\ndef admin_plugin_config(plugin):\n if request.method == 'GET':\n plugins_path = os.path.join(app.root_path, 'plugins')\n\n config_html_plugins = [name for name in os.listdir(plugins_path)\n if os.path.isfile(os.path.join(plugins_path, name, 'config.html'))]\n\n if plugin in config_html_plugins:\n config = open(os.path.join(app.root_path, 'plugins', plugin, 'config.html')).read()\n return render_template_string(config)\n abort(404)\n elif request.method == 'POST':\n for k, v in request.form.items():\n if k == \"nonce\":\n continue\n utils.set_config(k, v)\n with app.app_context():\n cache.clear()\n return '1'\n\n\[email protected]('/admin/import', methods=['GET', 'POST'])\n@admins_only\ndef admin_import_ctf():\n backup = request.files['backup']\n segments = request.form.get('segments')\n errors = []\n try:\n if segments:\n import_ctf(backup, segments=segments.split(','))\n else:\n import_ctf(backup)\n except Exception as e:\n print(e)\n errors.append(type(e).__name__)\n\n if 
errors:\n return errors[0], 500\n else:\n return redirect(url_for('admin.admin_config'))\n\n\[email protected]('/admin/export', methods=['GET', 'POST'])\n@admins_only\ndef admin_export_ctf():\n segments = request.args.get('segments')\n if segments:\n backup = export_ctf(segments.split(','))\n else:\n backup = export_ctf()\n ctf_name = utils.ctf_name()\n day = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n full_name = \"{}.{}.zip\".format(ctf_name, day)\n return send_file(backup, as_attachment=True, attachment_filename=full_name)\n\n\[email protected]('/admin/config', methods=['GET', 'POST'])\n@admins_only\ndef admin_config():\n if request.method == \"POST\":\n start = None\n end = None\n freeze = None\n if request.form.get('start'):\n start = int(request.form['start'])\n if request.form.get('end'):\n end = int(request.form['end'])\n if request.form.get('freeze'):\n freeze = int(request.form['freeze'])\n\n try:\n view_challenges_unregistered = bool(request.form.get('view_challenges_unregistered', None))\n view_scoreboard_if_authed = bool(request.form.get('view_scoreboard_if_authed', None))\n hide_scores = bool(request.form.get('hide_scores', None))\n prevent_registration = bool(request.form.get('prevent_registration', None))\n prevent_name_change = bool(request.form.get('prevent_name_change', None))\n view_after_ctf = bool(request.form.get('view_after_ctf', None))\n verify_emails = bool(request.form.get('verify_emails', None))\n mail_tls = bool(request.form.get('mail_tls', None))\n mail_ssl = bool(request.form.get('mail_ssl', None))\n mail_useauth = bool(request.form.get('mail_useauth', None))\n except (ValueError, TypeError):\n view_challenges_unregistered = None\n view_scoreboard_if_authed = None\n hide_scores = None\n prevent_registration = None\n prevent_name_change = None\n view_after_ctf = None\n verify_emails = None\n mail_tls = None\n mail_ssl = None\n mail_useauth = None\n finally:\n view_challenges_unregistered = utils.set_config('view_challenges_unregistered', view_challenges_unregistered)\n view_scoreboard_if_authed = utils.set_config('view_scoreboard_if_authed', view_scoreboard_if_authed)\n hide_scores = utils.set_config('hide_scores', hide_scores)\n prevent_registration = utils.set_config('prevent_registration', prevent_registration)\n prevent_name_change = utils.set_config('prevent_name_change', prevent_name_change)\n view_after_ctf = utils.set_config('view_after_ctf', view_after_ctf)\n verify_emails = utils.set_config('verify_emails', verify_emails)\n mail_tls = utils.set_config('mail_tls', mail_tls)\n mail_ssl = utils.set_config('mail_ssl', mail_ssl)\n mail_useauth = utils.set_config('mail_useauth', mail_useauth)\n\n mail_server = utils.set_config(\"mail_server\", request.form.get('mail_server', None))\n mail_port = utils.set_config(\"mail_port\", request.form.get('mail_port', None))\n\n mail_username = utils.set_config(\"mail_username\", request.form.get('mail_username', None))\n mail_password = utils.set_config(\"mail_password\", request.form.get('mail_password', None))\n\n ctf_name = utils.set_config(\"ctf_name\", request.form.get('ctf_name', None))\n ctf_theme = utils.set_config(\"ctf_theme\", request.form.get('ctf_theme', None))\n\n mailfrom_addr = utils.set_config(\"mailfrom_addr\", request.form.get('mailfrom_addr', None))\n mg_base_url = utils.set_config(\"mg_base_url\", request.form.get('mg_base_url', None))\n mg_api_key = utils.set_config(\"mg_api_key\", request.form.get('mg_api_key', None))\n\n db_freeze = utils.set_config(\"freeze\", freeze)\n\n db_start = 
Config.query.filter_by(key='start').first()\n db_start.value = start\n\n db_end = Config.query.filter_by(key='end').first()\n db_end.value = end\n\n db.session.add(db_start)\n db.session.add(db_end)\n\n db.session.commit()\n db.session.close()\n with app.app_context():\n cache.clear()\n return redirect(url_for('admin.admin_config'))\n\n with app.app_context():\n cache.clear()\n ctf_name = utils.get_config('ctf_name')\n ctf_theme = utils.get_config('ctf_theme')\n hide_scores = utils.get_config('hide_scores')\n\n mail_server = utils.get_config('mail_server')\n mail_port = utils.get_config('mail_port')\n mail_username = utils.get_config('mail_username')\n mail_password = utils.get_config('mail_password')\n\n mailfrom_addr = utils.get_config('mailfrom_addr')\n mg_api_key = utils.get_config('mg_api_key')\n mg_base_url = utils.get_config('mg_base_url')\n\n view_after_ctf = utils.get_config('view_after_ctf')\n start = utils.get_config('start')\n end = utils.get_config('end')\n freeze = utils.get_config('freeze')\n\n mail_tls = utils.get_config('mail_tls')\n mail_ssl = utils.get_config('mail_ssl')\n mail_useauth = utils.get_config('mail_useauth')\n\n view_challenges_unregistered = utils.get_config('view_challenges_unregistered')\n view_scoreboard_if_authed = utils.get_config('view_scoreboard_if_authed')\n prevent_registration = utils.get_config('prevent_registration')\n prevent_name_change = utils.get_config('prevent_name_change')\n verify_emails = utils.get_config('verify_emails')\n\n db.session.commit()\n db.session.close()\n\n themes = utils.get_themes()\n themes.remove(ctf_theme)\n\n return render_template('admin/config.html',\n ctf_name=ctf_name,\n ctf_theme_config=ctf_theme,\n start=start,\n end=end,\n freeze=freeze,\n hide_scores=hide_scores,\n mail_server=mail_server,\n mail_port=mail_port,\n mail_useauth=mail_useauth,\n mail_username=mail_username,\n mail_password=mail_password,\n mail_tls=mail_tls,\n mail_ssl=mail_ssl,\n view_challenges_unregistered=view_challenges_unregistered,\n view_scoreboard_if_authed=view_scoreboard_if_authed,\n prevent_registration=prevent_registration,\n mailfrom_addr=mailfrom_addr,\n mg_base_url=mg_base_url,\n mg_api_key=mg_api_key,\n prevent_name_change=prevent_name_change,\n verify_emails=verify_emails,\n view_after_ctf=view_after_ctf,\n themes=themes)\n", "path": "CTFd/admin/__init__.py"}], "after_files": [{"content": "import hashlib\nimport json\nimport os\nimport datetime\n\nfrom flask import current_app as app, render_template, request, redirect, jsonify, url_for, Blueprint, \\\n abort, render_template_string, send_file\nfrom passlib.hash import bcrypt_sha256\nfrom sqlalchemy.sql import not_\nfrom sqlalchemy.exc import IntegrityError\n\nfrom CTFd.utils import admins_only, is_admin, cache, export_ctf, import_ctf\nfrom CTFd.models import db, Teams, Solves, Awards, Challenges, WrongKeys, Keys, Tags, Files, Tracking, Pages, Config, DatabaseError\nfrom CTFd.plugins.keys import get_key_class, KEY_CLASSES\n\nfrom CTFd.admin.statistics import admin_statistics\nfrom CTFd.admin.challenges import admin_challenges\nfrom CTFd.admin.scoreboard import admin_scoreboard\nfrom CTFd.admin.pages import admin_pages\nfrom CTFd.admin.keys import admin_keys\nfrom CTFd.admin.teams import admin_teams\n\nfrom CTFd import utils\n\n\nadmin = Blueprint('admin', __name__)\n\n\[email protected]('/admin', methods=['GET'])\ndef admin_view():\n if is_admin():\n return redirect(url_for('admin_statistics.admin_graphs'))\n\n return redirect(url_for('auth.login'))\n\n\[email 
protected]('/admin/plugins/<plugin>', methods=['GET', 'POST'])\n@admins_only\ndef admin_plugin_config(plugin):\n if request.method == 'GET':\n plugins_path = os.path.join(app.root_path, 'plugins')\n\n config_html_plugins = [name for name in os.listdir(plugins_path)\n if os.path.isfile(os.path.join(plugins_path, name, 'config.html'))]\n\n if plugin in config_html_plugins:\n config = open(os.path.join(app.root_path, 'plugins', plugin, 'config.html')).read()\n return render_template_string(config)\n abort(404)\n elif request.method == 'POST':\n for k, v in request.form.items():\n if k == \"nonce\":\n continue\n utils.set_config(k, v)\n with app.app_context():\n cache.clear()\n return '1'\n\n\[email protected]('/admin/import', methods=['GET', 'POST'])\n@admins_only\ndef admin_import_ctf():\n backup = request.files['backup']\n segments = request.form.get('segments')\n errors = []\n try:\n if segments:\n import_ctf(backup, segments=segments.split(','))\n else:\n import_ctf(backup)\n except Exception as e:\n print(e)\n errors.append(type(e).__name__)\n\n if errors:\n return errors[0], 500\n else:\n return redirect(url_for('admin.admin_config'))\n\n\[email protected]('/admin/export', methods=['GET', 'POST'])\n@admins_only\ndef admin_export_ctf():\n segments = request.args.get('segments')\n if segments:\n backup = export_ctf(segments.split(','))\n else:\n backup = export_ctf()\n ctf_name = utils.ctf_name()\n day = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n full_name = \"{}.{}.zip\".format(ctf_name, day)\n return send_file(backup, as_attachment=True, attachment_filename=full_name)\n\n\[email protected]('/admin/config', methods=['GET', 'POST'])\n@admins_only\ndef admin_config():\n if request.method == \"POST\":\n start = None\n end = None\n freeze = None\n if request.form.get('start'):\n start = int(request.form['start'])\n if request.form.get('end'):\n end = int(request.form['end'])\n if request.form.get('freeze'):\n freeze = int(request.form['freeze'])\n\n try:\n view_challenges_unregistered = bool(request.form.get('view_challenges_unregistered', None))\n view_scoreboard_if_authed = bool(request.form.get('view_scoreboard_if_authed', None))\n hide_scores = bool(request.form.get('hide_scores', None))\n prevent_registration = bool(request.form.get('prevent_registration', None))\n prevent_name_change = bool(request.form.get('prevent_name_change', None))\n view_after_ctf = bool(request.form.get('view_after_ctf', None))\n verify_emails = bool(request.form.get('verify_emails', None))\n mail_tls = bool(request.form.get('mail_tls', None))\n mail_ssl = bool(request.form.get('mail_ssl', None))\n mail_useauth = bool(request.form.get('mail_useauth', None))\n except (ValueError, TypeError):\n view_challenges_unregistered = None\n view_scoreboard_if_authed = None\n hide_scores = None\n prevent_registration = None\n prevent_name_change = None\n view_after_ctf = None\n verify_emails = None\n mail_tls = None\n mail_ssl = None\n mail_useauth = None\n finally:\n view_challenges_unregistered = utils.set_config('view_challenges_unregistered', view_challenges_unregistered)\n view_scoreboard_if_authed = utils.set_config('view_scoreboard_if_authed', view_scoreboard_if_authed)\n hide_scores = utils.set_config('hide_scores', hide_scores)\n prevent_registration = utils.set_config('prevent_registration', prevent_registration)\n prevent_name_change = utils.set_config('prevent_name_change', prevent_name_change)\n view_after_ctf = utils.set_config('view_after_ctf', view_after_ctf)\n verify_emails = 
utils.set_config('verify_emails', verify_emails)\n mail_tls = utils.set_config('mail_tls', mail_tls)\n mail_ssl = utils.set_config('mail_ssl', mail_ssl)\n mail_useauth = utils.set_config('mail_useauth', mail_useauth)\n\n mail_server = utils.set_config(\"mail_server\", request.form.get('mail_server', None))\n mail_port = utils.set_config(\"mail_port\", request.form.get('mail_port', None))\n\n if request.form.get('mail_useauth', None) and (request.form.get('mail_u', None) or request.form.get('mail_p', None)):\n if len(request.form.get('mail_u')) > 0:\n mail_username = utils.set_config(\"mail_username\", request.form.get('mail_u', None))\n if len(request.form.get('mail_p')) > 0:\n mail_password = utils.set_config(\"mail_password\", request.form.get('mail_p', None))\n\n elif request.form.get('mail_useauth', None) is None:\n utils.set_config(\"mail_username\", None)\n utils.set_config(\"mail_password\", None)\n\n ctf_name = utils.set_config(\"ctf_name\", request.form.get('ctf_name', None))\n ctf_theme = utils.set_config(\"ctf_theme\", request.form.get('ctf_theme', None))\n\n mailfrom_addr = utils.set_config(\"mailfrom_addr\", request.form.get('mailfrom_addr', None))\n mg_base_url = utils.set_config(\"mg_base_url\", request.form.get('mg_base_url', None))\n mg_api_key = utils.set_config(\"mg_api_key\", request.form.get('mg_api_key', None))\n\n db_freeze = utils.set_config(\"freeze\", freeze)\n\n db_start = Config.query.filter_by(key='start').first()\n db_start.value = start\n\n db_end = Config.query.filter_by(key='end').first()\n db_end.value = end\n\n db.session.add(db_start)\n db.session.add(db_end)\n\n db.session.commit()\n db.session.close()\n with app.app_context():\n cache.clear()\n return redirect(url_for('admin.admin_config'))\n\n with app.app_context():\n cache.clear()\n ctf_name = utils.get_config('ctf_name')\n ctf_theme = utils.get_config('ctf_theme')\n hide_scores = utils.get_config('hide_scores')\n\n mail_server = utils.get_config('mail_server')\n mail_port = utils.get_config('mail_port')\n mail_username = utils.get_config('mail_username')\n mail_password = utils.get_config('mail_password')\n\n mailfrom_addr = utils.get_config('mailfrom_addr')\n mg_api_key = utils.get_config('mg_api_key')\n mg_base_url = utils.get_config('mg_base_url')\n\n view_after_ctf = utils.get_config('view_after_ctf')\n start = utils.get_config('start')\n end = utils.get_config('end')\n freeze = utils.get_config('freeze')\n\n mail_tls = utils.get_config('mail_tls')\n mail_ssl = utils.get_config('mail_ssl')\n mail_useauth = utils.get_config('mail_useauth')\n\n view_challenges_unregistered = utils.get_config('view_challenges_unregistered')\n view_scoreboard_if_authed = utils.get_config('view_scoreboard_if_authed')\n prevent_registration = utils.get_config('prevent_registration')\n prevent_name_change = utils.get_config('prevent_name_change')\n verify_emails = utils.get_config('verify_emails')\n\n db.session.commit()\n db.session.close()\n\n themes = utils.get_themes()\n themes.remove(ctf_theme)\n\n return render_template('admin/config.html',\n ctf_name=ctf_name,\n ctf_theme_config=ctf_theme,\n start=start,\n end=end,\n freeze=freeze,\n hide_scores=hide_scores,\n mail_server=mail_server,\n mail_port=mail_port,\n mail_useauth=mail_useauth,\n mail_username=mail_username,\n mail_password=mail_password,\n mail_tls=mail_tls,\n mail_ssl=mail_ssl,\n view_challenges_unregistered=view_challenges_unregistered,\n view_scoreboard_if_authed=view_scoreboard_if_authed,\n prevent_registration=prevent_registration,\n 
mailfrom_addr=mailfrom_addr,\n mg_base_url=mg_base_url,\n mg_api_key=mg_api_key,\n prevent_name_change=prevent_name_change,\n verify_emails=verify_emails,\n view_after_ctf=view_after_ctf,\n themes=themes)\n", "path": "CTFd/admin/__init__.py"}]} | 2,879 | 334 |
gh_patches_debug_31133 | rasdani/github-patches | git_diff | Mailu__Mailu-2130 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fix KeyError WEBMAIL_ADDRESS
## What type of PR?
bugfix: WEBMAIL_ADDRESS is not initialized in admin/mailu/configuration.py, leading to a lot of errors in the log.
## What does this PR do?
Initialize 'WEBMAIL_ADDRESS' to None in the admin configuration
### Related issue(s)
- closes #2125
## Prerequisites
None
--- END ISSUE ---
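For reference, the reported error is an ordinary missing-key lookup on Flask's dict-like `app.config`; a minimal sketch, using a plain dict as a stand-in, so the key names and values here are illustrative only:

```python
# Minimal sketch of the failure mode; a plain dict stands in for flask's app.config.
config = {"AUTH_RATELIMIT_IP": "60/hour"}  # WEBMAIL_ADDRESS was never initialized

try:
    # Same kind of lookup that rate_limit_ip() performs on app.config.
    if "10.2.3.4" != config["WEBMAIL_ADDRESS"]:
        pass
except KeyError as exc:
    print(f"KeyError: {exc}")  # repeated on every call, hence the noisy log

# The PR description proposes giving the key a default value:
config.setdefault("WEBMAIL_ADDRESS", None)
assert config["WEBMAIL_ADDRESS"] is None  # the lookup now succeeds
```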
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/admin/mailu/limiter.py`
Content:
```
1 from mailu import utils
2 from flask import current_app as app
3 import base64
4 import limits
5 import limits.storage
6 import limits.strategies
7
8 import hmac
9 import secrets
10
11 class LimitWrapper(object):
12 """ Wraps a limit by providing the storage, item and identifiers
13 """
14
15 def __init__(self, limiter, limit, *identifiers):
16 self.limiter = limiter
17 self.limit = limit
18 self.base_identifiers = identifiers
19
20 def test(self, *args):
21 return self.limiter.test(self.limit, *(self.base_identifiers + args))
22
23 def hit(self, *args):
24 return self.limiter.hit(self.limit, *(self.base_identifiers + args))
25
26 def get_window_stats(self, *args):
27 return self.limiter.get_window_stats(self.limit, *(self.base_identifiers + args))
28
29
30 class LimitWraperFactory(object):
31 """ Global limiter, to be used as a factory
32 """
33
34 def init_app(self, app):
35 self.storage = limits.storage.storage_from_string(app.config["RATELIMIT_STORAGE_URL"])
36 self.limiter = limits.strategies.MovingWindowRateLimiter(self.storage)
37
38 def get_limiter(self, limit, *args):
39 return LimitWrapper(self.limiter, limits.parse(limit), *args)
40
41 def is_subject_to_rate_limits(self, ip):
42 return False if utils.is_exempt_from_ratelimits(ip) else not (self.storage.get(f'exempt-{ip}') > 0)
43
44 def exempt_ip_from_ratelimits(self, ip):
45 self.storage.incr(f'exempt-{ip}', app.config["AUTH_RATELIMIT_EXEMPTION_LENGTH"], True)
46
47 def should_rate_limit_ip(self, ip):
48 limiter = self.get_limiter(app.config["AUTH_RATELIMIT_IP"], 'auth-ip')
49 client_network = utils.extract_network_from_ip(ip)
50 is_rate_limited = self.is_subject_to_rate_limits(ip) and not limiter.test(client_network)
51 if is_rate_limited:
52 app.logger.warn(f'Authentication attempt from {ip} has been rate-limited.')
53 return is_rate_limited
54
55 def rate_limit_ip(self, ip):
56 if ip != app.config['WEBMAIL_ADDRESS']:
57 limiter = self.get_limiter(app.config["AUTH_RATELIMIT_IP"], 'auth-ip')
58 client_network = utils.extract_network_from_ip(ip)
59 if self.is_subject_to_rate_limits(ip):
60 limiter.hit(client_network)
61
62 def should_rate_limit_user(self, username, ip, device_cookie=None, device_cookie_name=None):
63 limiter = self.get_limiter(app.config["AUTH_RATELIMIT_USER"], 'auth-user')
64 is_rate_limited = self.is_subject_to_rate_limits(ip) and not limiter.test(device_cookie if device_cookie_name == username else username)
65 if is_rate_limited:
66 app.logger.warn(f'Authentication attempt from {ip} for {username} has been rate-limited.')
67 return is_rate_limited
68
69 def rate_limit_user(self, username, ip, device_cookie=None, device_cookie_name=None):
70 limiter = self.get_limiter(app.config["AUTH_RATELIMIT_USER"], 'auth-user')
71 if self.is_subject_to_rate_limits(ip):
72 limiter.hit(device_cookie if device_cookie_name == username else username)
73
74 """ Device cookies as described on:
75 https://owasp.org/www-community/Slow_Down_Online_Guessing_Attacks_with_Device_Cookies
76 """
77 def parse_device_cookie(self, cookie):
78 try:
79 login, nonce, _ = cookie.split('$')
80 if hmac.compare_digest(cookie, self.device_cookie(login, nonce)):
81 return nonce, login
82 except:
83 pass
84 return None, None
85
86 """ Device cookies don't require strong crypto:
87 72bits of nonce, 96bits of signature is more than enough
88 and these values avoid padding in most cases
89 """
90 def device_cookie(self, username, nonce=None):
91 if not nonce:
92 nonce = secrets.token_urlsafe(9)
93 sig = str(base64.urlsafe_b64encode(hmac.new(app.device_cookie_key, bytearray(f'device_cookie|{username}|{nonce}', 'utf-8'), 'sha256').digest()[20:]), 'utf-8')
94 return f'{username}${nonce}${sig}'
95
```
Path: `core/admin/mailu/internal/views/auth.py`
Content:
```
1 from mailu import models, utils
2 from mailu.internal import internal, nginx
3 from flask import current_app as app
4
5 import flask
6 import flask_login
7 import base64
8
9 @internal.route("/auth/email")
10 def nginx_authentication():
11 """ Main authentication endpoint for Nginx email server
12 """
13 client_ip = flask.request.headers["Client-Ip"]
14 headers = flask.request.headers
15 if headers["Auth-Port"] == '25' and headers['Auth-Method'] == 'plain':
16 response = flask.Response()
17 response.headers['Auth-Status'] = 'AUTH not supported'
18 response.headers['Auth-Error-Code'] = '502 5.5.1'
19 utils.limiter.rate_limit_ip(client_ip)
20 return response
21 if utils.limiter.should_rate_limit_ip(client_ip):
22 status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit')
23 response = flask.Response()
24 response.headers['Auth-Status'] = status
25 response.headers['Auth-Error-Code'] = code
26 if int(flask.request.headers['Auth-Login-Attempt']) < 10:
27 response.headers['Auth-Wait'] = '3'
28 return response
29 headers = nginx.handle_authentication(flask.request.headers)
30 response = flask.Response()
31 for key, value in headers.items():
32 response.headers[key] = str(value)
33 is_valid_user = False
34 if response.headers.get("Auth-User-Exists"):
35 username = response.headers["Auth-User"]
36 if utils.limiter.should_rate_limit_user(username, client_ip):
37 # FIXME could be done before handle_authentication()
38 status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit')
39 response = flask.Response()
40 response.headers['Auth-Status'] = status
41 response.headers['Auth-Error-Code'] = code
42 if int(flask.request.headers['Auth-Login-Attempt']) < 10:
43 response.headers['Auth-Wait'] = '3'
44 return response
45 is_valid_user = True
46 if headers.get("Auth-Status") == "OK":
47 utils.limiter.exempt_ip_from_ratelimits(client_ip)
48 elif is_valid_user:
49 utils.limiter.rate_limit_user(username, client_ip)
50 else:
51 utils.limiter.rate_limit_ip(client_ip)
52 return response
53
54 @internal.route("/auth/admin")
55 def admin_authentication():
56 """ Fails if the user is not an authenticated admin.
57 """
58 if (not flask_login.current_user.is_anonymous
59 and flask_login.current_user.global_admin
60 and flask_login.current_user.enabled):
61 return ""
62 return flask.abort(403)
63
64 @internal.route("/auth/user")
65 def user_authentication():
66 """ Fails if the user is not authenticated.
67 """
68 if (not flask_login.current_user.is_anonymous
69 and flask_login.current_user.enabled):
70 response = flask.Response()
71 email = flask_login.current_user.get_id()
72 response.headers["X-User"] = models.IdnaEmail.process_bind_param(flask_login, email, "")
73 response.headers["X-User-Token"] = utils.gen_temp_token(email, flask.session)
74 return response
75 return flask.abort(403)
76
77
78 @internal.route("/auth/basic")
79 def basic_authentication():
80 """ Tries to authenticate using the Authorization header.
81 """
82 client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)
83 if utils.limiter.should_rate_limit_ip(client_ip):
84 response = flask.Response(status=401)
85 response.headers["WWW-Authenticate"] = 'Basic realm="Authentication rate limit from one source exceeded"'
86 response.headers['Retry-After'] = '60'
87 return response
88 authorization = flask.request.headers.get("Authorization")
89 if authorization and authorization.startswith("Basic "):
90 encoded = authorization.replace("Basic ", "")
91 user_email, password = base64.b64decode(encoded).split(b":", 1)
92 user_email = user_email.decode("utf8")
93 if utils.limiter.should_rate_limit_user(user_email, client_ip):
94 response = flask.Response(status=401)
95 response.headers["WWW-Authenticate"] = 'Basic realm="Authentication rate limit for this username exceeded"'
96 response.headers['Retry-After'] = '60'
97 return response
98 user = models.User.query.get(user_email)
99 if user and nginx.check_credentials(user, password.decode('utf-8'), client_ip, "web"):
100 response = flask.Response()
101 response.headers["X-User"] = models.IdnaEmail.process_bind_param(flask_login, user.email, "")
102 utils.limiter.exempt_ip_from_ratelimits(client_ip)
103 return response
104 utils.limiter.rate_limit_user(user_email, client_ip) if user else utils.limiter.rate_limit_ip(client_ip)
105 response = flask.Response(status=401)
106 response.headers["WWW-Authenticate"] = 'Basic realm="Login Required"'
107 return response
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py
--- a/core/admin/mailu/internal/views/auth.py
+++ b/core/admin/mailu/internal/views/auth.py
@@ -31,6 +31,7 @@
for key, value in headers.items():
response.headers[key] = str(value)
is_valid_user = False
+ is_from_webmail = headers['Auth-Port'] in ['10143', '10025']
if response.headers.get("Auth-User-Exists"):
username = response.headers["Auth-User"]
if utils.limiter.should_rate_limit_user(username, client_ip):
@@ -47,7 +48,7 @@
utils.limiter.exempt_ip_from_ratelimits(client_ip)
elif is_valid_user:
utils.limiter.rate_limit_user(username, client_ip)
- else:
+ elif not is_from_webmail:
utils.limiter.rate_limit_ip(client_ip)
return response
diff --git a/core/admin/mailu/limiter.py b/core/admin/mailu/limiter.py
--- a/core/admin/mailu/limiter.py
+++ b/core/admin/mailu/limiter.py
@@ -53,11 +53,10 @@
return is_rate_limited
def rate_limit_ip(self, ip):
- if ip != app.config['WEBMAIL_ADDRESS']:
- limiter = self.get_limiter(app.config["AUTH_RATELIMIT_IP"], 'auth-ip')
- client_network = utils.extract_network_from_ip(ip)
- if self.is_subject_to_rate_limits(ip):
- limiter.hit(client_network)
+ limiter = self.get_limiter(app.config["AUTH_RATELIMIT_IP"], 'auth-ip')
+ client_network = utils.extract_network_from_ip(ip)
+ if self.is_subject_to_rate_limits(ip):
+ limiter.hit(client_network)
def should_rate_limit_user(self, username, ip, device_cookie=None, device_cookie_name=None):
limiter = self.get_limiter(app.config["AUTH_RATELIMIT_USER"], 'auth-user')
| {"golden_diff": "diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py\n--- a/core/admin/mailu/internal/views/auth.py\n+++ b/core/admin/mailu/internal/views/auth.py\n@@ -31,6 +31,7 @@\n for key, value in headers.items():\n response.headers[key] = str(value)\n is_valid_user = False\n+ is_from_webmail = headers['Auth-Port'] in ['10143', '10025']\n if response.headers.get(\"Auth-User-Exists\"):\n username = response.headers[\"Auth-User\"]\n if utils.limiter.should_rate_limit_user(username, client_ip):\n@@ -47,7 +48,7 @@\n utils.limiter.exempt_ip_from_ratelimits(client_ip)\n elif is_valid_user:\n utils.limiter.rate_limit_user(username, client_ip)\n- else:\n+ elif not is_from_webmail:\n utils.limiter.rate_limit_ip(client_ip)\n return response\n \ndiff --git a/core/admin/mailu/limiter.py b/core/admin/mailu/limiter.py\n--- a/core/admin/mailu/limiter.py\n+++ b/core/admin/mailu/limiter.py\n@@ -53,11 +53,10 @@\n return is_rate_limited\n \n def rate_limit_ip(self, ip):\n- if ip != app.config['WEBMAIL_ADDRESS']:\n- limiter = self.get_limiter(app.config[\"AUTH_RATELIMIT_IP\"], 'auth-ip')\n- client_network = utils.extract_network_from_ip(ip)\n- if self.is_subject_to_rate_limits(ip):\n- limiter.hit(client_network)\n+ limiter = self.get_limiter(app.config[\"AUTH_RATELIMIT_IP\"], 'auth-ip')\n+ client_network = utils.extract_network_from_ip(ip)\n+ if self.is_subject_to_rate_limits(ip):\n+ limiter.hit(client_network)\n \n def should_rate_limit_user(self, username, ip, device_cookie=None, device_cookie_name=None):\n limiter = self.get_limiter(app.config[\"AUTH_RATELIMIT_USER\"], 'auth-user')\n", "issue": "fix keyError WEBMAIL_ADDRESS\n## What type of PR?\r\nbugfix WEBMAIL_ADDRESS not initialized in admin/mailu/configuration.py, leading to lot of errors in log.\r\n\r\n## What does this PR do?\r\nInitialize 'WEBMAIL_ADDRESS' to None in the admin configuration\r\n\r\n### Related issue(s)\r\n- closes #2125\r\n\r\n## Prerequisites\r\nNone\n", "before_files": [{"content": "from mailu import utils\nfrom flask import current_app as app\nimport base64\nimport limits\nimport limits.storage\nimport limits.strategies\n\nimport hmac\nimport secrets\n\nclass LimitWrapper(object):\n \"\"\" Wraps a limit by providing the storage, item and identifiers\n \"\"\"\n\n def __init__(self, limiter, limit, *identifiers):\n self.limiter = limiter\n self.limit = limit\n self.base_identifiers = identifiers\n\n def test(self, *args):\n return self.limiter.test(self.limit, *(self.base_identifiers + args))\n\n def hit(self, *args):\n return self.limiter.hit(self.limit, *(self.base_identifiers + args))\n\n def get_window_stats(self, *args):\n return self.limiter.get_window_stats(self.limit, *(self.base_identifiers + args))\n\n\nclass LimitWraperFactory(object):\n \"\"\" Global limiter, to be used as a factory\n \"\"\"\n\n def init_app(self, app):\n self.storage = limits.storage.storage_from_string(app.config[\"RATELIMIT_STORAGE_URL\"])\n self.limiter = limits.strategies.MovingWindowRateLimiter(self.storage)\n\n def get_limiter(self, limit, *args):\n return LimitWrapper(self.limiter, limits.parse(limit), *args)\n\n def is_subject_to_rate_limits(self, ip):\n return False if utils.is_exempt_from_ratelimits(ip) else not (self.storage.get(f'exempt-{ip}') > 0)\n\n def exempt_ip_from_ratelimits(self, ip):\n self.storage.incr(f'exempt-{ip}', app.config[\"AUTH_RATELIMIT_EXEMPTION_LENGTH\"], True)\n\n def should_rate_limit_ip(self, ip):\n limiter = self.get_limiter(app.config[\"AUTH_RATELIMIT_IP\"], 
'auth-ip')\n client_network = utils.extract_network_from_ip(ip)\n is_rate_limited = self.is_subject_to_rate_limits(ip) and not limiter.test(client_network)\n if is_rate_limited:\n app.logger.warn(f'Authentication attempt from {ip} has been rate-limited.')\n return is_rate_limited\n\n def rate_limit_ip(self, ip):\n if ip != app.config['WEBMAIL_ADDRESS']:\n limiter = self.get_limiter(app.config[\"AUTH_RATELIMIT_IP\"], 'auth-ip')\n client_network = utils.extract_network_from_ip(ip)\n if self.is_subject_to_rate_limits(ip):\n limiter.hit(client_network)\n\n def should_rate_limit_user(self, username, ip, device_cookie=None, device_cookie_name=None):\n limiter = self.get_limiter(app.config[\"AUTH_RATELIMIT_USER\"], 'auth-user')\n is_rate_limited = self.is_subject_to_rate_limits(ip) and not limiter.test(device_cookie if device_cookie_name == username else username)\n if is_rate_limited:\n app.logger.warn(f'Authentication attempt from {ip} for {username} has been rate-limited.')\n return is_rate_limited\n\n def rate_limit_user(self, username, ip, device_cookie=None, device_cookie_name=None):\n limiter = self.get_limiter(app.config[\"AUTH_RATELIMIT_USER\"], 'auth-user')\n if self.is_subject_to_rate_limits(ip):\n limiter.hit(device_cookie if device_cookie_name == username else username)\n\n \"\"\" Device cookies as described on:\n https://owasp.org/www-community/Slow_Down_Online_Guessing_Attacks_with_Device_Cookies\n \"\"\"\n def parse_device_cookie(self, cookie):\n try:\n login, nonce, _ = cookie.split('$')\n if hmac.compare_digest(cookie, self.device_cookie(login, nonce)):\n return nonce, login\n except:\n pass\n return None, None\n\n \"\"\" Device cookies don't require strong crypto:\n 72bits of nonce, 96bits of signature is more than enough\n and these values avoid padding in most cases\n \"\"\"\n def device_cookie(self, username, nonce=None):\n if not nonce:\n nonce = secrets.token_urlsafe(9)\n sig = str(base64.urlsafe_b64encode(hmac.new(app.device_cookie_key, bytearray(f'device_cookie|{username}|{nonce}', 'utf-8'), 'sha256').digest()[20:]), 'utf-8')\n return f'{username}${nonce}${sig}'\n", "path": "core/admin/mailu/limiter.py"}, {"content": "from mailu import models, utils\nfrom mailu.internal import internal, nginx\nfrom flask import current_app as app\n\nimport flask\nimport flask_login\nimport base64\n\[email protected](\"/auth/email\")\ndef nginx_authentication():\n \"\"\" Main authentication endpoint for Nginx email server\n \"\"\"\n client_ip = flask.request.headers[\"Client-Ip\"]\n headers = flask.request.headers\n if headers[\"Auth-Port\"] == '25' and headers['Auth-Method'] == 'plain':\n response = flask.Response()\n response.headers['Auth-Status'] = 'AUTH not supported'\n response.headers['Auth-Error-Code'] = '502 5.5.1'\n utils.limiter.rate_limit_ip(client_ip)\n return response\n if utils.limiter.should_rate_limit_ip(client_ip):\n status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit')\n response = flask.Response()\n response.headers['Auth-Status'] = status\n response.headers['Auth-Error-Code'] = code\n if int(flask.request.headers['Auth-Login-Attempt']) < 10:\n response.headers['Auth-Wait'] = '3'\n return response\n headers = nginx.handle_authentication(flask.request.headers)\n response = flask.Response()\n for key, value in headers.items():\n response.headers[key] = str(value)\n is_valid_user = False\n if response.headers.get(\"Auth-User-Exists\"):\n username = response.headers[\"Auth-User\"]\n if utils.limiter.should_rate_limit_user(username, 
client_ip):\n # FIXME could be done before handle_authentication()\n status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit')\n response = flask.Response()\n response.headers['Auth-Status'] = status\n response.headers['Auth-Error-Code'] = code\n if int(flask.request.headers['Auth-Login-Attempt']) < 10:\n response.headers['Auth-Wait'] = '3'\n return response\n is_valid_user = True\n if headers.get(\"Auth-Status\") == \"OK\":\n utils.limiter.exempt_ip_from_ratelimits(client_ip)\n elif is_valid_user:\n utils.limiter.rate_limit_user(username, client_ip)\n else:\n utils.limiter.rate_limit_ip(client_ip)\n return response\n\[email protected](\"/auth/admin\")\ndef admin_authentication():\n \"\"\" Fails if the user is not an authenticated admin.\n \"\"\"\n if (not flask_login.current_user.is_anonymous\n and flask_login.current_user.global_admin\n and flask_login.current_user.enabled):\n return \"\"\n return flask.abort(403)\n\[email protected](\"/auth/user\")\ndef user_authentication():\n \"\"\" Fails if the user is not authenticated.\n \"\"\"\n if (not flask_login.current_user.is_anonymous\n and flask_login.current_user.enabled):\n response = flask.Response()\n email = flask_login.current_user.get_id()\n response.headers[\"X-User\"] = models.IdnaEmail.process_bind_param(flask_login, email, \"\")\n response.headers[\"X-User-Token\"] = utils.gen_temp_token(email, flask.session)\n return response\n return flask.abort(403)\n\n\[email protected](\"/auth/basic\")\ndef basic_authentication():\n \"\"\" Tries to authenticate using the Authorization header.\n \"\"\"\n client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)\n if utils.limiter.should_rate_limit_ip(client_ip):\n response = flask.Response(status=401)\n response.headers[\"WWW-Authenticate\"] = 'Basic realm=\"Authentication rate limit from one source exceeded\"'\n response.headers['Retry-After'] = '60'\n return response\n authorization = flask.request.headers.get(\"Authorization\")\n if authorization and authorization.startswith(\"Basic \"):\n encoded = authorization.replace(\"Basic \", \"\")\n user_email, password = base64.b64decode(encoded).split(b\":\", 1)\n user_email = user_email.decode(\"utf8\")\n if utils.limiter.should_rate_limit_user(user_email, client_ip):\n response = flask.Response(status=401)\n response.headers[\"WWW-Authenticate\"] = 'Basic realm=\"Authentication rate limit for this username exceeded\"'\n response.headers['Retry-After'] = '60'\n return response\n user = models.User.query.get(user_email)\n if user and nginx.check_credentials(user, password.decode('utf-8'), client_ip, \"web\"):\n response = flask.Response()\n response.headers[\"X-User\"] = models.IdnaEmail.process_bind_param(flask_login, user.email, \"\")\n utils.limiter.exempt_ip_from_ratelimits(client_ip)\n return response\n utils.limiter.rate_limit_user(user_email, client_ip) if user else utils.limiter.rate_limit_ip(client_ip)\n response = flask.Response(status=401)\n response.headers[\"WWW-Authenticate\"] = 'Basic realm=\"Login Required\"'\n return response\n", "path": "core/admin/mailu/internal/views/auth.py"}], "after_files": [{"content": "from mailu import utils\nfrom flask import current_app as app\nimport base64\nimport limits\nimport limits.storage\nimport limits.strategies\n\nimport hmac\nimport secrets\n\nclass LimitWrapper(object):\n \"\"\" Wraps a limit by providing the storage, item and identifiers\n \"\"\"\n\n def __init__(self, limiter, limit, *identifiers):\n self.limiter = limiter\n self.limit = 
limit\n self.base_identifiers = identifiers\n\n def test(self, *args):\n return self.limiter.test(self.limit, *(self.base_identifiers + args))\n\n def hit(self, *args):\n return self.limiter.hit(self.limit, *(self.base_identifiers + args))\n\n def get_window_stats(self, *args):\n return self.limiter.get_window_stats(self.limit, *(self.base_identifiers + args))\n\n\nclass LimitWraperFactory(object):\n \"\"\" Global limiter, to be used as a factory\n \"\"\"\n\n def init_app(self, app):\n self.storage = limits.storage.storage_from_string(app.config[\"RATELIMIT_STORAGE_URL\"])\n self.limiter = limits.strategies.MovingWindowRateLimiter(self.storage)\n\n def get_limiter(self, limit, *args):\n return LimitWrapper(self.limiter, limits.parse(limit), *args)\n\n def is_subject_to_rate_limits(self, ip):\n return False if utils.is_exempt_from_ratelimits(ip) else not (self.storage.get(f'exempt-{ip}') > 0)\n\n def exempt_ip_from_ratelimits(self, ip):\n self.storage.incr(f'exempt-{ip}', app.config[\"AUTH_RATELIMIT_EXEMPTION_LENGTH\"], True)\n\n def should_rate_limit_ip(self, ip):\n limiter = self.get_limiter(app.config[\"AUTH_RATELIMIT_IP\"], 'auth-ip')\n client_network = utils.extract_network_from_ip(ip)\n is_rate_limited = self.is_subject_to_rate_limits(ip) and not limiter.test(client_network)\n if is_rate_limited:\n app.logger.warn(f'Authentication attempt from {ip} has been rate-limited.')\n return is_rate_limited\n\n def rate_limit_ip(self, ip):\n limiter = self.get_limiter(app.config[\"AUTH_RATELIMIT_IP\"], 'auth-ip')\n client_network = utils.extract_network_from_ip(ip)\n if self.is_subject_to_rate_limits(ip):\n limiter.hit(client_network)\n\n def should_rate_limit_user(self, username, ip, device_cookie=None, device_cookie_name=None):\n limiter = self.get_limiter(app.config[\"AUTH_RATELIMIT_USER\"], 'auth-user')\n is_rate_limited = self.is_subject_to_rate_limits(ip) and not limiter.test(device_cookie if device_cookie_name == username else username)\n if is_rate_limited:\n app.logger.warn(f'Authentication attempt from {ip} for {username} has been rate-limited.')\n return is_rate_limited\n\n def rate_limit_user(self, username, ip, device_cookie=None, device_cookie_name=None):\n limiter = self.get_limiter(app.config[\"AUTH_RATELIMIT_USER\"], 'auth-user')\n if self.is_subject_to_rate_limits(ip):\n limiter.hit(device_cookie if device_cookie_name == username else username)\n\n \"\"\" Device cookies as described on:\n https://owasp.org/www-community/Slow_Down_Online_Guessing_Attacks_with_Device_Cookies\n \"\"\"\n def parse_device_cookie(self, cookie):\n try:\n login, nonce, _ = cookie.split('$')\n if hmac.compare_digest(cookie, self.device_cookie(login, nonce)):\n return nonce, login\n except:\n pass\n return None, None\n\n \"\"\" Device cookies don't require strong crypto:\n 72bits of nonce, 96bits of signature is more than enough\n and these values avoid padding in most cases\n \"\"\"\n def device_cookie(self, username, nonce=None):\n if not nonce:\n nonce = secrets.token_urlsafe(9)\n sig = str(base64.urlsafe_b64encode(hmac.new(app.device_cookie_key, bytearray(f'device_cookie|{username}|{nonce}', 'utf-8'), 'sha256').digest()[20:]), 'utf-8')\n return f'{username}${nonce}${sig}'\n", "path": "core/admin/mailu/limiter.py"}, {"content": "from mailu import models, utils\nfrom mailu.internal import internal, nginx\nfrom flask import current_app as app\n\nimport flask\nimport flask_login\nimport base64\n\[email protected](\"/auth/email\")\ndef nginx_authentication():\n \"\"\" Main authentication endpoint for 
Nginx email server\n \"\"\"\n client_ip = flask.request.headers[\"Client-Ip\"]\n headers = flask.request.headers\n if headers[\"Auth-Port\"] == '25' and headers['Auth-Method'] == 'plain':\n response = flask.Response()\n response.headers['Auth-Status'] = 'AUTH not supported'\n response.headers['Auth-Error-Code'] = '502 5.5.1'\n utils.limiter.rate_limit_ip(client_ip)\n return response\n if utils.limiter.should_rate_limit_ip(client_ip):\n status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit')\n response = flask.Response()\n response.headers['Auth-Status'] = status\n response.headers['Auth-Error-Code'] = code\n if int(flask.request.headers['Auth-Login-Attempt']) < 10:\n response.headers['Auth-Wait'] = '3'\n return response\n headers = nginx.handle_authentication(flask.request.headers)\n response = flask.Response()\n for key, value in headers.items():\n response.headers[key] = str(value)\n is_valid_user = False\n is_from_webmail = headers['Auth-Port'] in ['10143', '10025']\n if response.headers.get(\"Auth-User-Exists\"):\n username = response.headers[\"Auth-User\"]\n if utils.limiter.should_rate_limit_user(username, client_ip):\n # FIXME could be done before handle_authentication()\n status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit')\n response = flask.Response()\n response.headers['Auth-Status'] = status\n response.headers['Auth-Error-Code'] = code\n if int(flask.request.headers['Auth-Login-Attempt']) < 10:\n response.headers['Auth-Wait'] = '3'\n return response\n is_valid_user = True\n if headers.get(\"Auth-Status\") == \"OK\":\n utils.limiter.exempt_ip_from_ratelimits(client_ip)\n elif is_valid_user:\n utils.limiter.rate_limit_user(username, client_ip)\n elif not is_from_webmail:\n utils.limiter.rate_limit_ip(client_ip)\n return response\n\[email protected](\"/auth/admin\")\ndef admin_authentication():\n \"\"\" Fails if the user is not an authenticated admin.\n \"\"\"\n if (not flask_login.current_user.is_anonymous\n and flask_login.current_user.global_admin\n and flask_login.current_user.enabled):\n return \"\"\n return flask.abort(403)\n\[email protected](\"/auth/user\")\ndef user_authentication():\n \"\"\" Fails if the user is not authenticated.\n \"\"\"\n if (not flask_login.current_user.is_anonymous\n and flask_login.current_user.enabled):\n response = flask.Response()\n email = flask_login.current_user.get_id()\n response.headers[\"X-User\"] = models.IdnaEmail.process_bind_param(flask_login, email, \"\")\n response.headers[\"X-User-Token\"] = utils.gen_temp_token(email, flask.session)\n return response\n return flask.abort(403)\n\n\[email protected](\"/auth/basic\")\ndef basic_authentication():\n \"\"\" Tries to authenticate using the Authorization header.\n \"\"\"\n client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)\n if utils.limiter.should_rate_limit_ip(client_ip):\n response = flask.Response(status=401)\n response.headers[\"WWW-Authenticate\"] = 'Basic realm=\"Authentication rate limit from one source exceeded\"'\n response.headers['Retry-After'] = '60'\n return response\n authorization = flask.request.headers.get(\"Authorization\")\n if authorization and authorization.startswith(\"Basic \"):\n encoded = authorization.replace(\"Basic \", \"\")\n user_email, password = base64.b64decode(encoded).split(b\":\", 1)\n user_email = user_email.decode(\"utf8\")\n if utils.limiter.should_rate_limit_user(user_email, client_ip):\n response = flask.Response(status=401)\n 
response.headers[\"WWW-Authenticate\"] = 'Basic realm=\"Authentication rate limit for this username exceeded\"'\n response.headers['Retry-After'] = '60'\n return response\n user = models.User.query.get(user_email)\n if user and nginx.check_credentials(user, password.decode('utf-8'), client_ip, \"web\"):\n response = flask.Response()\n response.headers[\"X-User\"] = models.IdnaEmail.process_bind_param(flask_login, user.email, \"\")\n utils.limiter.exempt_ip_from_ratelimits(client_ip)\n return response\n utils.limiter.rate_limit_user(user_email, client_ip) if user else utils.limiter.rate_limit_ip(client_ip)\n response = flask.Response(status=401)\n response.headers[\"WWW-Authenticate\"] = 'Basic realm=\"Login Required\"'\n return response\n", "path": "core/admin/mailu/internal/views/auth.py"}]} | 2,728 | 448 |
gh_patches_debug_929 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-5586 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SEO and signatures: <a rel="nofollow" />
In signatures, it would be worth checking whether we can easily add a `rel="nofollow"` attribute to preserve our SEO. https://github.com/zestedesavoir/zmarkdown/blob/1dded309a2670689a4a3353f9e38b80624c6df1a/packages/zmarkdown/server/handlers.js#L139
> limit links in signatures to nofollow or internal links.
sharing a link is fine (:evil), but if A-312 replies 4 times on the same page, they pass link juice 4 times to their Twitter account, 4 times to coding game, … this has several negative effects
Source: https://zestedesavoir.com/forums/sujet/12099/seo-et-spam/?page=1#p199005
--- END ISSUE ---
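
What the reporter is asking for amounts to post-processing the rendered signature HTML so outbound anchors carry `rel="nofollow"`. A minimal sketch of that idea (the function name and the plain string replacement are assumptions for illustration, not the project's actual code):

```python
def add_nofollow(rendered_html):
    # Naive illustration: tag every anchor in the already-rendered signature HTML.
    # A real implementation would also need to handle anchors that already carry
    # a rel attribute and could choose to leave internal links untouched.
    return rendered_html.replace('<a href=', '<a rel="nofollow" href=')


print(add_nofollow('<p><a href="https://example.com">my site</a></p>'))
# <p><a rel="nofollow" href="https://example.com">my site</a></p>
```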
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/utils/templatetags/emarkdown.py`
Content:
```
1 import re
2 import json
3 import logging
4 from requests import post, HTTPError
5
6 from django import template
7 from django.conf import settings
8 from django.template.defaultfilters import stringfilter
9 from django.utils.safestring import mark_safe
10 from django.utils.translation import ugettext_lazy as _
11
12 logger = logging.getLogger(__name__)
13 register = template.Library()
14 """
15 Markdown related filters.
16 """
17
18 # Constants
19 MAX_ATTEMPTS = 3
20 MD_PARSING_ERROR = _('Une erreur est survenue dans la génération de texte Markdown. Veuillez rapporter le bug.')
21
22 FORMAT_ENDPOINTS = {
23 'html': '/html',
24 'texfile': '/latex-document',
25 'epub': '/epub',
26 'tex': '/latex',
27 }
28
29
30 def _render_markdown_once(md_input, *, output_format='html', **kwargs):
31 """
32 Returns None on error (error details are logged). No retry mechanism.
33 """
34 def log_args():
35 logger.error('md_input: {!r}'.format(md_input))
36 logger.error('kwargs: {!r}'.format(kwargs))
37
38 inline = kwargs.get('inline', False) is True
39
40 if settings.ZDS_APP['zmd']['disable_pings'] is True:
41 kwargs['disable_ping'] = True
42
43 endpoint = FORMAT_ENDPOINTS[output_format]
44
45 try:
46 timeout = 10
47 if output_format.startswith('tex'):
48 # latex may be really long to generate but it is also restrained by server configuration
49 timeout = 120
50 response = post('{}{}'.format(settings.ZDS_APP['zmd']['server'], endpoint), json={
51 'opts': kwargs,
52 'md': str(md_input),
53 }, timeout=timeout)
54 except HTTPError:
55 logger.exception('An HTTP error happened, markdown rendering failed')
56 log_args()
57 return '', {}, []
58
59 if response.status_code == 413:
60 return '', {}, [{'message': str(_('Texte trop volumineux.'))}]
61
62 if response.status_code != 200:
63 logger.error('The markdown server replied with status {} (expected 200)'.format(response.status_code))
64 log_args()
65 return '', {}, []
66
67 try:
68 content, metadata, messages = response.json()
69 logger.debug('Result %s, %s, %s', content, metadata, messages)
70 if messages:
71 logger.error('Markdown errors %s', json.dumps(messages))
72 content = content.strip()
73 if inline:
74 content = content.replace('</p>\n', '\n\n').replace('\n<p>', '\n')
75 return mark_safe(content), metadata, messages
76 except: # noqa
77 logger.exception('Unexpected exception raised')
78 log_args()
79 return '', {}, []
80
81
82 def render_markdown(md_input, *, on_error=None, **kwargs):
83 """Render a markdown string.
84
85 Returns a tuple ``(rendered_content, metadata)``, where
86 ``rendered_content`` is a string and ``metadata`` is a dict.
87
88 Handles errors gracefully by returning an user-friendly HTML
89 string which explains that the Markdown rendering has failed
90 (without any technical details).
91
92 """
93 content, metadata, messages = _render_markdown_once(md_input, **kwargs)
94 if messages and on_error:
95 on_error([m['message'] for m in messages])
96 if content is not None:
97 # Success!
98 return content, metadata, messages
99
100 # Oops, something went wrong
101
102 attempts = kwargs.get('attempts', 0)
103 inline = kwargs.get('inline', False) is True
104
105 if attempts < MAX_ATTEMPTS:
106 if not kwargs:
107 kwargs = dict()
108 return render_markdown(md_input, **dict(kwargs, attempts=attempts + 1))
109
110 logger.error('Max attempt count reached, giving up')
111 logger.error('md_input: {!r}'.format(md_input))
112 logger.error('kwargs: {!r}'.format(kwargs))
113
114 # FIXME: This cannot work with LaTeX.
115 if inline:
116 return mark_safe('<p>{}</p>'.format(json.dumps(messages))), metadata, []
117 else:
118 return mark_safe('<div class="error ico-after"><p>{}</p></div>'.format(json.dumps(messages))), metadata, []
119
120
121 @register.filter(name='epub_markdown', needs_autoescape=False)
122 def epub_markdown(md_input, image_directory):
123 return emarkdown(md_input, output_format='epub', images_download_dir=image_directory.absolute,
124 local_url_to_local_path=[settings.MEDIA_URL + 'galleries/[0-9]+', image_directory.relative])
125
126
127 @register.filter(needs_autoescape=False)
128 @stringfilter
129 def emarkdown(md_input, use_jsfiddle='', **kwargs):
130 """
131 :param str md_input: Markdown string.
132 :return: HTML string.
133 :rtype: str
134 """
135 disable_jsfiddle = (use_jsfiddle != 'js')
136
137 content, metadata, messages = render_markdown(
138 md_input,
139 on_error=lambda m: logger.error('Markdown errors %s', str(m)),
140 **dict(kwargs, disable_jsfiddle=disable_jsfiddle))
141
142 return content or ''
143
144
145 @register.filter(needs_autoescape=False)
146 @stringfilter
147 def emarkdown_preview(md_input, use_jsfiddle='', **kwargs):
148 """
149 Filter markdown string and render it to html.
150
151 :param str md_input: Markdown string.
152 :return: HTML string.
153 :rtype: str
154 """
155 disable_jsfiddle = (use_jsfiddle != 'js')
156
157 content, metadata, messages = render_markdown(
158 md_input,
159 **dict(kwargs, disable_jsfiddle=disable_jsfiddle))
160
161 if messages:
162 content = _('</div><div class="preview-error"><strong>Erreur du serveur Markdown:</strong>\n{}'
163 .format('<br>- '.join([m['message'] for m in messages])))
164 content = mark_safe(content)
165
166 return content
167
168
169 @register.filter(needs_autoescape=False)
170 @stringfilter
171 def emarkdown_inline(text):
172 """
173 Parses inline elements only and renders HTML. Mainly for member signatures.
174 Although they are inline elements, pings are disabled.
175
176 :param str text: Markdown string.
177 :return: HTML string.
178 :rtype: str
179 """
180 rendered = emarkdown(text, inline=True)
181 return rendered
182
183
184 def sub_hd(match, count):
185 """Replace header shifted."""
186 subt = match.group(1)
187 lvl = match.group('level')
188 header = match.group('header')
189 end = match.group(4)
190
191 new_content = subt + '#' * count + lvl + header + end
192
193 return new_content
194
195
196 def shift_heading(text, count):
197 """
198 Shift header in markdown document.
199
200 :param str text: Text to filter.
201 :param int count:
202 :return: Filtered text.
203 :rtype: str
204 """
205 text_by_code = re.split('(```|~~~)', text)
206 starting_code = None
207 for i, element in enumerate(text_by_code):
208 if element in ['```', '~~~'] and not starting_code:
209 starting_code = element
210 elif element == starting_code:
211 starting_code = None
212 elif starting_code is None:
213 text_by_code[i] = re.sub(r'(^|\n)(?P<level>#{1,4})(?P<header>.*?)#*(\n|$)',
214 lambda t: sub_hd(t, count), text_by_code[i])
215
216 return ''.join(text_by_code)
217
218
219 @register.filter('shift_heading_1')
220 def shift_heading_1(text):
221 return shift_heading(text, 1)
222
223
224 @register.filter('shift_heading_2')
225 def shift_heading_2(text):
226 return shift_heading(text, 2)
227
228
229 @register.filter('shift_heading_3')
230 def shift_heading_3(text):
231 return shift_heading(text, 3)
232
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/utils/templatetags/emarkdown.py b/zds/utils/templatetags/emarkdown.py
--- a/zds/utils/templatetags/emarkdown.py
+++ b/zds/utils/templatetags/emarkdown.py
@@ -178,7 +178,7 @@
:rtype: str
"""
rendered = emarkdown(text, inline=True)
- return rendered
+ return mark_safe(rendered.replace('<a href=', '<a rel="nofollow" href='))
def sub_hd(match, count):
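
As a quick, hypothetical before/after for a signature such as `[my site](https://example.com)` (the HTML shown assumes the zmarkdown server rendered the Markdown as usual):

```python
# Hypothetical rendered signature before and after the patched emarkdown_inline.
rendered_before = '<p><a href="https://example.com">my site</a></p>'
rendered_after = rendered_before.replace('<a href=', '<a rel="nofollow" href=')
print(rendered_after)
# <p><a rel="nofollow" href="https://example.com">my site</a></p>
```

The re-wrap in `mark_safe` matters here: `str.replace()` on a `SafeText` returns a plain `str`, which Django templates would otherwise auto-escape.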
| {"golden_diff": "diff --git a/zds/utils/templatetags/emarkdown.py b/zds/utils/templatetags/emarkdown.py\n--- a/zds/utils/templatetags/emarkdown.py\n+++ b/zds/utils/templatetags/emarkdown.py\n@@ -178,7 +178,7 @@\n :rtype: str\n \"\"\"\n rendered = emarkdown(text, inline=True)\n- return rendered\n+ return mark_safe(rendered.replace('<a href=', '<a rel=\"nofollow\" href='))\n \n \n def sub_hd(match, count):\n", "issue": "SEO et signature : <a rel=\"nofollow\" />\nDans la signature il faudrait voir si on peut facilement ajouter un attribut `rel=\"nofollow\"` pour pr\u00e9server notre SEO. https://github.com/zestedesavoir/zmarkdown/blob/1dded309a2670689a4a3353f9e38b80624c6df1a/packages/zmarkdown/server/handlers.js#L139\r\n\r\n> limitez les liens en signatures \u00e0 des no follow or lien interne.\r\nc\u2019est pas mal (:evil) de partager un lien, mais si A-312 r\u00e9pond 4 fois dans la m\u00eame page, il renvoie 4 fois du jus sur son compte twitter, 4 coding game, \u2026 ca a plusieurs effet n\u00e9gatifs\r\nSource: https://zestedesavoir.com/forums/sujet/12099/seo-et-spam/?page=1#p199005\r\n\r\n\n", "before_files": [{"content": "import re\nimport json\nimport logging\nfrom requests import post, HTTPError\n\nfrom django import template\nfrom django.conf import settings\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\n\nlogger = logging.getLogger(__name__)\nregister = template.Library()\n\"\"\"\nMarkdown related filters.\n\"\"\"\n\n# Constants\nMAX_ATTEMPTS = 3\nMD_PARSING_ERROR = _('Une erreur est survenue dans la g\u00e9n\u00e9ration de texte Markdown. Veuillez rapporter le bug.')\n\nFORMAT_ENDPOINTS = {\n 'html': '/html',\n 'texfile': '/latex-document',\n 'epub': '/epub',\n 'tex': '/latex',\n}\n\n\ndef _render_markdown_once(md_input, *, output_format='html', **kwargs):\n \"\"\"\n Returns None on error (error details are logged). 
No retry mechanism.\n \"\"\"\n def log_args():\n logger.error('md_input: {!r}'.format(md_input))\n logger.error('kwargs: {!r}'.format(kwargs))\n\n inline = kwargs.get('inline', False) is True\n\n if settings.ZDS_APP['zmd']['disable_pings'] is True:\n kwargs['disable_ping'] = True\n\n endpoint = FORMAT_ENDPOINTS[output_format]\n\n try:\n timeout = 10\n if output_format.startswith('tex'):\n # latex may be really long to generate but it is also restrained by server configuration\n timeout = 120\n response = post('{}{}'.format(settings.ZDS_APP['zmd']['server'], endpoint), json={\n 'opts': kwargs,\n 'md': str(md_input),\n }, timeout=timeout)\n except HTTPError:\n logger.exception('An HTTP error happened, markdown rendering failed')\n log_args()\n return '', {}, []\n\n if response.status_code == 413:\n return '', {}, [{'message': str(_('Texte trop volumineux.'))}]\n\n if response.status_code != 200:\n logger.error('The markdown server replied with status {} (expected 200)'.format(response.status_code))\n log_args()\n return '', {}, []\n\n try:\n content, metadata, messages = response.json()\n logger.debug('Result %s, %s, %s', content, metadata, messages)\n if messages:\n logger.error('Markdown errors %s', json.dumps(messages))\n content = content.strip()\n if inline:\n content = content.replace('</p>\\n', '\\n\\n').replace('\\n<p>', '\\n')\n return mark_safe(content), metadata, messages\n except: # noqa\n logger.exception('Unexpected exception raised')\n log_args()\n return '', {}, []\n\n\ndef render_markdown(md_input, *, on_error=None, **kwargs):\n \"\"\"Render a markdown string.\n\n Returns a tuple ``(rendered_content, metadata)``, where\n ``rendered_content`` is a string and ``metadata`` is a dict.\n\n Handles errors gracefully by returning an user-friendly HTML\n string which explains that the Markdown rendering has failed\n (without any technical details).\n\n \"\"\"\n content, metadata, messages = _render_markdown_once(md_input, **kwargs)\n if messages and on_error:\n on_error([m['message'] for m in messages])\n if content is not None:\n # Success!\n return content, metadata, messages\n\n # Oops, something went wrong\n\n attempts = kwargs.get('attempts', 0)\n inline = kwargs.get('inline', False) is True\n\n if attempts < MAX_ATTEMPTS:\n if not kwargs:\n kwargs = dict()\n return render_markdown(md_input, **dict(kwargs, attempts=attempts + 1))\n\n logger.error('Max attempt count reached, giving up')\n logger.error('md_input: {!r}'.format(md_input))\n logger.error('kwargs: {!r}'.format(kwargs))\n\n # FIXME: This cannot work with LaTeX.\n if inline:\n return mark_safe('<p>{}</p>'.format(json.dumps(messages))), metadata, []\n else:\n return mark_safe('<div class=\"error ico-after\"><p>{}</p></div>'.format(json.dumps(messages))), metadata, []\n\n\[email protected](name='epub_markdown', needs_autoescape=False)\ndef epub_markdown(md_input, image_directory):\n return emarkdown(md_input, output_format='epub', images_download_dir=image_directory.absolute,\n local_url_to_local_path=[settings.MEDIA_URL + 'galleries/[0-9]+', image_directory.relative])\n\n\[email protected](needs_autoescape=False)\n@stringfilter\ndef emarkdown(md_input, use_jsfiddle='', **kwargs):\n \"\"\"\n :param str md_input: Markdown string.\n :return: HTML string.\n :rtype: str\n \"\"\"\n disable_jsfiddle = (use_jsfiddle != 'js')\n\n content, metadata, messages = render_markdown(\n md_input,\n on_error=lambda m: logger.error('Markdown errors %s', str(m)),\n **dict(kwargs, disable_jsfiddle=disable_jsfiddle))\n\n return content or 
''\n\n\[email protected](needs_autoescape=False)\n@stringfilter\ndef emarkdown_preview(md_input, use_jsfiddle='', **kwargs):\n \"\"\"\n Filter markdown string and render it to html.\n\n :param str md_input: Markdown string.\n :return: HTML string.\n :rtype: str\n \"\"\"\n disable_jsfiddle = (use_jsfiddle != 'js')\n\n content, metadata, messages = render_markdown(\n md_input,\n **dict(kwargs, disable_jsfiddle=disable_jsfiddle))\n\n if messages:\n content = _('</div><div class=\"preview-error\"><strong>Erreur du serveur Markdown:</strong>\\n{}'\n .format('<br>- '.join([m['message'] for m in messages])))\n content = mark_safe(content)\n\n return content\n\n\[email protected](needs_autoescape=False)\n@stringfilter\ndef emarkdown_inline(text):\n \"\"\"\n Parses inline elements only and renders HTML. Mainly for member signatures.\n Although they are inline elements, pings are disabled.\n\n :param str text: Markdown string.\n :return: HTML string.\n :rtype: str\n \"\"\"\n rendered = emarkdown(text, inline=True)\n return rendered\n\n\ndef sub_hd(match, count):\n \"\"\"Replace header shifted.\"\"\"\n subt = match.group(1)\n lvl = match.group('level')\n header = match.group('header')\n end = match.group(4)\n\n new_content = subt + '#' * count + lvl + header + end\n\n return new_content\n\n\ndef shift_heading(text, count):\n \"\"\"\n Shift header in markdown document.\n\n :param str text: Text to filter.\n :param int count:\n :return: Filtered text.\n :rtype: str\n \"\"\"\n text_by_code = re.split('(```|~~~)', text)\n starting_code = None\n for i, element in enumerate(text_by_code):\n if element in ['```', '~~~'] and not starting_code:\n starting_code = element\n elif element == starting_code:\n starting_code = None\n elif starting_code is None:\n text_by_code[i] = re.sub(r'(^|\\n)(?P<level>#{1,4})(?P<header>.*?)#*(\\n|$)',\n lambda t: sub_hd(t, count), text_by_code[i])\n\n return ''.join(text_by_code)\n\n\[email protected]('shift_heading_1')\ndef shift_heading_1(text):\n return shift_heading(text, 1)\n\n\[email protected]('shift_heading_2')\ndef shift_heading_2(text):\n return shift_heading(text, 2)\n\n\[email protected]('shift_heading_3')\ndef shift_heading_3(text):\n return shift_heading(text, 3)\n", "path": "zds/utils/templatetags/emarkdown.py"}], "after_files": [{"content": "import re\nimport json\nimport logging\nfrom requests import post, HTTPError\n\nfrom django import template\nfrom django.conf import settings\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\n\nlogger = logging.getLogger(__name__)\nregister = template.Library()\n\"\"\"\nMarkdown related filters.\n\"\"\"\n\n# Constants\nMAX_ATTEMPTS = 3\nMD_PARSING_ERROR = _('Une erreur est survenue dans la g\u00e9n\u00e9ration de texte Markdown. Veuillez rapporter le bug.')\n\nFORMAT_ENDPOINTS = {\n 'html': '/html',\n 'texfile': '/latex-document',\n 'epub': '/epub',\n 'tex': '/latex',\n}\n\n\ndef _render_markdown_once(md_input, *, output_format='html', **kwargs):\n \"\"\"\n Returns None on error (error details are logged). 
No retry mechanism.\n \"\"\"\n def log_args():\n logger.error('md_input: {!r}'.format(md_input))\n logger.error('kwargs: {!r}'.format(kwargs))\n\n inline = kwargs.get('inline', False) is True\n\n if settings.ZDS_APP['zmd']['disable_pings'] is True:\n kwargs['disable_ping'] = True\n\n endpoint = FORMAT_ENDPOINTS[output_format]\n\n try:\n timeout = 10\n if output_format.startswith('tex'):\n # latex may be really long to generate but it is also restrained by server configuration\n timeout = 120\n response = post('{}{}'.format(settings.ZDS_APP['zmd']['server'], endpoint), json={\n 'opts': kwargs,\n 'md': str(md_input),\n }, timeout=timeout)\n except HTTPError:\n logger.exception('An HTTP error happened, markdown rendering failed')\n log_args()\n return '', {}, []\n\n if response.status_code == 413:\n return '', {}, [{'message': str(_('Texte trop volumineux.'))}]\n\n if response.status_code != 200:\n logger.error('The markdown server replied with status {} (expected 200)'.format(response.status_code))\n log_args()\n return '', {}, []\n\n try:\n content, metadata, messages = response.json()\n logger.debug('Result %s, %s, %s', content, metadata, messages)\n if messages:\n logger.error('Markdown errors %s', json.dumps(messages))\n content = content.strip()\n if inline:\n content = content.replace('</p>\\n', '\\n\\n').replace('\\n<p>', '\\n')\n return mark_safe(content), metadata, messages\n except: # noqa\n logger.exception('Unexpected exception raised')\n log_args()\n return '', {}, []\n\n\ndef render_markdown(md_input, *, on_error=None, **kwargs):\n \"\"\"Render a markdown string.\n\n Returns a tuple ``(rendered_content, metadata)``, where\n ``rendered_content`` is a string and ``metadata`` is a dict.\n\n Handles errors gracefully by returning an user-friendly HTML\n string which explains that the Markdown rendering has failed\n (without any technical details).\n\n \"\"\"\n content, metadata, messages = _render_markdown_once(md_input, **kwargs)\n if messages and on_error:\n on_error([m['message'] for m in messages])\n if content is not None:\n # Success!\n return content, metadata, messages\n\n # Oops, something went wrong\n\n attempts = kwargs.get('attempts', 0)\n inline = kwargs.get('inline', False) is True\n\n if attempts < MAX_ATTEMPTS:\n if not kwargs:\n kwargs = dict()\n return render_markdown(md_input, **dict(kwargs, attempts=attempts + 1))\n\n logger.error('Max attempt count reached, giving up')\n logger.error('md_input: {!r}'.format(md_input))\n logger.error('kwargs: {!r}'.format(kwargs))\n\n # FIXME: This cannot work with LaTeX.\n if inline:\n return mark_safe('<p>{}</p>'.format(json.dumps(messages))), metadata, []\n else:\n return mark_safe('<div class=\"error ico-after\"><p>{}</p></div>'.format(json.dumps(messages))), metadata, []\n\n\[email protected](name='epub_markdown', needs_autoescape=False)\ndef epub_markdown(md_input, image_directory):\n return emarkdown(md_input, output_format='epub', images_download_dir=image_directory.absolute,\n local_url_to_local_path=[settings.MEDIA_URL + 'galleries/[0-9]+', image_directory.relative])\n\n\[email protected](needs_autoescape=False)\n@stringfilter\ndef emarkdown(md_input, use_jsfiddle='', **kwargs):\n \"\"\"\n :param str md_input: Markdown string.\n :return: HTML string.\n :rtype: str\n \"\"\"\n disable_jsfiddle = (use_jsfiddle != 'js')\n\n content, metadata, messages = render_markdown(\n md_input,\n on_error=lambda m: logger.error('Markdown errors %s', str(m)),\n **dict(kwargs, disable_jsfiddle=disable_jsfiddle))\n\n return content or 
''\n\n\[email protected](needs_autoescape=False)\n@stringfilter\ndef emarkdown_preview(md_input, use_jsfiddle='', **kwargs):\n \"\"\"\n Filter markdown string and render it to html.\n\n :param str md_input: Markdown string.\n :return: HTML string.\n :rtype: str\n \"\"\"\n disable_jsfiddle = (use_jsfiddle != 'js')\n\n content, metadata, messages = render_markdown(\n md_input,\n **dict(kwargs, disable_jsfiddle=disable_jsfiddle))\n\n if messages:\n content = _('</div><div class=\"preview-error\"><strong>Erreur du serveur Markdown:</strong>\\n{}'\n .format('<br>- '.join([m['message'] for m in messages])))\n content = mark_safe(content)\n\n return content\n\n\[email protected](needs_autoescape=False)\n@stringfilter\ndef emarkdown_inline(text):\n \"\"\"\n Parses inline elements only and renders HTML. Mainly for member signatures.\n Although they are inline elements, pings are disabled.\n\n :param str text: Markdown string.\n :return: HTML string.\n :rtype: str\n \"\"\"\n rendered = emarkdown(text, inline=True)\n return mark_safe(rendered.replace('<a href=', '<a rel=\"nofollow\" href='))\n\n\ndef sub_hd(match, count):\n \"\"\"Replace header shifted.\"\"\"\n subt = match.group(1)\n lvl = match.group('level')\n header = match.group('header')\n end = match.group(4)\n\n new_content = subt + '#' * count + lvl + header + end\n\n return new_content\n\n\ndef shift_heading(text, count):\n \"\"\"\n Shift header in markdown document.\n\n :param str text: Text to filter.\n :param int count:\n :return: Filtered text.\n :rtype: str\n \"\"\"\n text_by_code = re.split('(```|~~~)', text)\n starting_code = None\n for i, element in enumerate(text_by_code):\n if element in ['```', '~~~'] and not starting_code:\n starting_code = element\n elif element == starting_code:\n starting_code = None\n elif starting_code is None:\n text_by_code[i] = re.sub(r'(^|\\n)(?P<level>#{1,4})(?P<header>.*?)#*(\\n|$)',\n lambda t: sub_hd(t, count), text_by_code[i])\n\n return ''.join(text_by_code)\n\n\[email protected]('shift_heading_1')\ndef shift_heading_1(text):\n return shift_heading(text, 1)\n\n\[email protected]('shift_heading_2')\ndef shift_heading_2(text):\n return shift_heading(text, 2)\n\n\[email protected]('shift_heading_3')\ndef shift_heading_3(text):\n return shift_heading(text, 3)\n", "path": "zds/utils/templatetags/emarkdown.py"}]} | 2,779 | 127 |
gh_patches_debug_11902 | rasdani/github-patches | git_diff | ansible-collections__community.general-2204 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't create github repo
### Summary
When I try to create a GitHub repo with the community.general 2.4.0 github_repo module, I get an assertion error related to the port.
### Issue Type
Bug Report
### Component Name
github_repo
### Ansible Version
ansible 2.10.7
community.general 2.4.0
PyGithub 1.54.1
### Configuration
_No response_
### OS / Environment
MacOS Mojave 10.14.6
### Steps to Reproduce
This is the task I've used
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
- name: Create a Github repository
github_repo:
access_token: "{{ lookup('env', 'GITHUB_ACESS_TOKEN') }}"
organization: xxx
name: xxx
description: "xxx"
private: yes
```
### Expected Results
It should create a new GitHub repo without error.
### Actual Results
```console (paste below)
The full traceback is:
File "/var/folders/kx/5h602b9s6fq3r5h7lxpxy8sr0000gn/T/ansible_github_repo_payload_8ng_7p6h/ansible_github_repo_payload.zip/ansible_collections/community/general/plugins/modules/github_repo.py", line 233, in main
File "/var/folders/kx/5h602b9s6fq3r5h7lxpxy8sr0000gn/T/ansible_github_repo_payload_8ng_7p6h/ansible_github_repo_payload.zip/ansible_collections/community/general/plugins/modules/github_repo.py", line 197, in run_module
File "/var/folders/kx/5h602b9s6fq3r5h7lxpxy8sr0000gn/T/ansible_github_repo_payload_8ng_7p6h/ansible_github_repo_payload.zip/ansible_collections/community/general/plugins/modules/github_repo.py", line 144, in create_repo
File "/Users/kakarukeys/.local/share/virtualenvs/infra_setup-AVEq_0GX/lib/python3.9/site-packages/github/Organization.py", line 578, in create_repo
headers, data = self._requester.requestJsonAndCheck(
File "/Users/kakarukeys/.local/share/virtualenvs/infra_setup-AVEq_0GX/lib/python3.9/site-packages/github/Requester.py", line 316, in requestJsonAndCheck
*self.requestJson(
File "/Users/kakarukeys/.local/share/virtualenvs/infra_setup-AVEq_0GX/lib/python3.9/site-packages/github/Requester.py", line 408, in requestJson
return self.__requestEncode(cnx, verb, url, parameters, headers, input, encode)
File "/Users/kakarukeys/.local/share/virtualenvs/infra_setup-AVEq_0GX/lib/python3.9/site-packages/github/Requester.py", line 475, in __requestEncode
url = self.__makeAbsoluteUrl(url)
File "/Users/kakarukeys/.local/share/virtualenvs/infra_setup-AVEq_0GX/lib/python3.9/site-packages/github/Requester.py", line 557, in __makeAbsoluteUrl
assert o.port == self.__port
fatal: [localhost]: FAILED! => {
"changed": false,
"invocation": {
"module_args": {
"access_token": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
"description": "xxx",
"name": "xxx",
"organization": "xxx",
"password": null,
"private": true,
"state": "present",
"username": null
}
},
"msg": "Unexpected error. AssertionError()"
}
```
### Code of Conduct
I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
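
The traceback points at PyGithub's `Requester`, which records the port parsed from `base_url` and later asserts that every absolute URL returned by the API uses the same port. A minimal reproduction sketch (requires PyGithub ~1.54 and a valid token; organization and repository names are placeholders):

```python
from github import Github

# base_url with an explicit :443 makes Requester store port 443, while the
# absolute URLs GitHub returns (https://api.github.com/...) carry no explicit
# port, so the `assert o.port == self.__port` check in __makeAbsoluteUrl fails.
gh = Github(base_url="https://api.github.com:443", login_or_token="<access-token>")
org = gh.get_organization("my-org")
org.create_repo(name="myrepo", private=True, description="demo")  # raises AssertionError
```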
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/source_control/github/github_repo.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright: (c) 2021, Álvaro Torres Cogollo
5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
6
7 from __future__ import absolute_import, division, print_function
8 __metaclass__ = type
9
10 DOCUMENTATION = '''
11 ---
12 module: github_repo
13 short_description: Manage your repositories on Github
14 version_added: 2.2.0
15 description:
16 - Manages Github repositories using PyGithub library.
17 - Authentication can be done with I(access_token) or with I(username) and I(password).
18 options:
19 username:
20 description:
21 - Username used for authentication.
22 - This is only needed when not using I(access_token).
23 type: str
24 required: false
25 password:
26 description:
27 - Password used for authentication.
28 - This is only needed when not using I(access_token).
29 type: str
30 required: false
31 access_token:
32 description:
33 - Token parameter for authentication.
34 - This is only needed when not using I(username) and I(password).
35 type: str
36 required: false
37 name:
38 description:
39 - Repository name.
40 type: str
41 required: true
42 description:
43 description:
44 - Description for the repository.
45 - This is only used when I(state) is C(present).
46 type: str
47 default: ''
48 required: false
49 private:
50 description:
51 - Whether the new repository should be private or not.
52 - This is only used when I(state) is C(present).
53 type: bool
54 default: no
55 required: false
56 state:
57 description:
58 - Whether the repository should exist or not.
59 type: str
60 default: present
61 choices: [ absent, present ]
62 required: false
63 organization:
64 description:
65 - Organization for the repository.
66 - When I(state) is C(present), the repository will be created in the current user profile.
67 type: str
68 required: false
69 requirements:
70 - PyGithub>=1.54
71 notes:
72 - For Python 3, PyGithub>=1.54 should be used.
73 - "For Python 3.5, PyGithub==1.54 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-54-november-30-2020)."
74 - "For Python 2.7, PyGithub==1.45 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-45-december-29-2019)."
75 - Supports C(check_mode).
76 author:
77 - Álvaro Torres Cogollo (@atorrescogollo)
78 '''
79
80 EXAMPLES = '''
81 - name: Create a Github repository
82 community.general.github_repo:
83 access_token: mytoken
84 organization: MyOrganization
85 name: myrepo
86 description: "Just for fun"
87 private: yes
88 state: present
89 register: result
90
91 - name: Delete the repository
92 community.general.github_repo:
93 username: octocat
94 password: password
95 organization: MyOrganization
96 name: myrepo
97 state: absent
98 register: result
99 '''
100
101 RETURN = '''
102 repo:
103 description: Repository information as JSON. See U(https://docs.github.com/en/rest/reference/repos#get-a-repository).
104 returned: success and I(state) is C(present)
105 type: dict
106 '''
107
108 import traceback
109 from ansible.module_utils.basic import AnsibleModule, missing_required_lib
110 import sys
111
112 GITHUB_IMP_ERR = None
113 try:
114 from github import Github, GithubException
115 from github.GithubException import UnknownObjectException
116 HAS_GITHUB_PACKAGE = True
117 except Exception:
118 GITHUB_IMP_ERR = traceback.format_exc()
119 HAS_GITHUB_PACKAGE = False
120
121
122 def authenticate(username=None, password=None, access_token=None):
123 if access_token:
124 return Github(base_url="https://api.github.com:443", login_or_token=access_token)
125 else:
126 return Github(base_url="https://api.github.com:443", login_or_token=username, password=password)
127
128
129 def create_repo(gh, name, organization=None, private=False, description='', check_mode=False):
130 result = dict(
131 changed=False,
132 repo=dict())
133 if organization:
134 target = gh.get_organization(organization)
135 else:
136 target = gh.get_user()
137
138 repo = None
139 try:
140 repo = target.get_repo(name=name)
141 result['repo'] = repo.raw_data
142 except UnknownObjectException:
143 if not check_mode:
144 repo = target.create_repo(
145 name=name, private=private, description=description)
146 result['repo'] = repo.raw_data
147
148 result['changed'] = True
149
150 changes = {}
151 if repo is None or repo.raw_data['private'] != private:
152 changes['private'] = private
153 if repo is None or repo.raw_data['description'] != description:
154 changes['description'] = description
155
156 if changes:
157 if not check_mode:
158 repo.edit(**changes)
159
160 result['repo'].update({
161 'private': repo._private.value if not check_mode else private,
162 'description': repo._description.value if not check_mode else description,
163 })
164 result['changed'] = True
165
166 return result
167
168
169 def delete_repo(gh, name, organization=None, check_mode=False):
170 result = dict(changed=False)
171 if organization:
172 target = gh.get_organization(organization)
173 else:
174 target = gh.get_user()
175 try:
176 repo = target.get_repo(name=name)
177 if not check_mode:
178 repo.delete()
179 result['changed'] = True
180 except UnknownObjectException:
181 pass
182
183 return result
184
185
186 def run_module(params, check_mode=False):
187 gh = authenticate(
188 username=params['username'], password=params['password'], access_token=params['access_token'])
189 if params['state'] == "absent":
190 return delete_repo(
191 gh=gh,
192 name=params['name'],
193 organization=params['organization'],
194 check_mode=check_mode
195 )
196 else:
197 return create_repo(
198 gh=gh,
199 name=params['name'],
200 organization=params['organization'],
201 private=params['private'],
202 description=params['description'],
203 check_mode=check_mode
204 )
205
206
207 def main():
208 module_args = dict(
209 username=dict(type='str', required=False, default=None),
210 password=dict(type='str', required=False, default=None, no_log=True),
211 access_token=dict(type='str', required=False,
212 default=None, no_log=True),
213 name=dict(type='str', required=True),
214 state=dict(type='str', required=False, default="present",
215 choices=["present", "absent"]),
216 organization=dict(type='str', required=False, default=None),
217 private=dict(type='bool', required=False, default=False),
218 description=dict(type='str', required=False, default=''),
219 )
220 module = AnsibleModule(
221 argument_spec=module_args,
222 supports_check_mode=True,
223 required_together=[('username', 'password')],
224 required_one_of=[('username', 'access_token')],
225 mutually_exclusive=[('username', 'access_token')]
226 )
227
228 if not HAS_GITHUB_PACKAGE:
229 module.fail_json(msg=missing_required_lib(
230 "PyGithub"), exception=GITHUB_IMP_ERR)
231
232 try:
233 result = run_module(module.params, module.check_mode)
234 module.exit_json(**result)
235 except GithubException as e:
236 module.fail_json(msg="Github error. {0}".format(repr(e)))
237 except Exception as e:
238 module.fail_json(msg="Unexpected error. {0}".format(repr(e)))
239
240
241 if __name__ == '__main__':
242 main()
243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/modules/source_control/github/github_repo.py b/plugins/modules/source_control/github/github_repo.py
--- a/plugins/modules/source_control/github/github_repo.py
+++ b/plugins/modules/source_control/github/github_repo.py
@@ -121,9 +121,9 @@
def authenticate(username=None, password=None, access_token=None):
if access_token:
- return Github(base_url="https://api.github.com:443", login_or_token=access_token)
+ return Github(base_url="https://api.github.com", login_or_token=access_token)
else:
- return Github(base_url="https://api.github.com:443", login_or_token=username, password=password)
+ return Github(base_url="https://api.github.com", login_or_token=username, password=password)
def create_repo(gh, name, organization=None, private=False, description='', check_mode=False):
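
The fix simply drops the explicit `:443`, so the port PyGithub parses from `base_url` matches the port (none) carried by the absolute URLs the API returns. A hedged usage sketch of the corrected call (token is a placeholder):

```python
from github import Github

# Port 443 is already implied by the https scheme; omitting it keeps
# Requester's internal port check consistent with GitHub's follow-up URLs.
gh = Github(base_url="https://api.github.com", login_or_token="<access-token>")
```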
| {"golden_diff": "diff --git a/plugins/modules/source_control/github/github_repo.py b/plugins/modules/source_control/github/github_repo.py\n--- a/plugins/modules/source_control/github/github_repo.py\n+++ b/plugins/modules/source_control/github/github_repo.py\n@@ -121,9 +121,9 @@\n \n def authenticate(username=None, password=None, access_token=None):\n if access_token:\n- return Github(base_url=\"https://api.github.com:443\", login_or_token=access_token)\n+ return Github(base_url=\"https://api.github.com\", login_or_token=access_token)\n else:\n- return Github(base_url=\"https://api.github.com:443\", login_or_token=username, password=password)\n+ return Github(base_url=\"https://api.github.com\", login_or_token=username, password=password)\n \n \n def create_repo(gh, name, organization=None, private=False, description='', check_mode=False):\n", "issue": "Can't create github repo\n### Summary\r\n\r\nWhen I try to create a github repo with community general 2.4.0 github_repo module, I got an assertion error related to port.\r\n\r\n### Issue Type\r\n\r\nBug Report\r\n\r\n### Component Name\r\n\r\ngithub_repo\r\n\r\n### Ansible Version\r\n\r\nansible 2.10.7\r\ncommunity.general 2.4.0\r\nPyGithub 1.54.1\r\n\r\n### Configuration\r\n\r\n_No response_\r\n\r\n### OS / Environment\r\n\r\nMacOS Mojave 10.14.6\r\n\r\n### Steps to Reproduce\r\n\r\nThis is the task I've used\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml (paste below)\r\n- name: Create a Github repository\r\n github_repo:\r\n access_token: \"{{ lookup('env', 'GITHUB_ACESS_TOKEN') }}\"\r\n organization: xxx\r\n name: xxx\r\n description: \"xxx\"\r\n private: yes\r\n```\r\n\r\n\r\n### Expected Results\r\n\r\nit should create a new Github Repo without error\r\n\r\n### Actual Results\r\n\r\n```console (paste below)\r\nThe full traceback is:\r\n File \"/var/folders/kx/5h602b9s6fq3r5h7lxpxy8sr0000gn/T/ansible_github_repo_payload_8ng_7p6h/ansible_github_repo_payload.zip/ansible_collections/community/general/plugins/modules/github_repo.py\", line 233, in main\r\n File \"/var/folders/kx/5h602b9s6fq3r5h7lxpxy8sr0000gn/T/ansible_github_repo_payload_8ng_7p6h/ansible_github_repo_payload.zip/ansible_collections/community/general/plugins/modules/github_repo.py\", line 197, in run_module\r\n File \"/var/folders/kx/5h602b9s6fq3r5h7lxpxy8sr0000gn/T/ansible_github_repo_payload_8ng_7p6h/ansible_github_repo_payload.zip/ansible_collections/community/general/plugins/modules/github_repo.py\", line 144, in create_repo\r\n File \"/Users/kakarukeys/.local/share/virtualenvs/infra_setup-AVEq_0GX/lib/python3.9/site-packages/github/Organization.py\", line 578, in create_repo\r\n headers, data = self._requester.requestJsonAndCheck(\r\n File \"/Users/kakarukeys/.local/share/virtualenvs/infra_setup-AVEq_0GX/lib/python3.9/site-packages/github/Requester.py\", line 316, in requestJsonAndCheck\r\n *self.requestJson(\r\n File \"/Users/kakarukeys/.local/share/virtualenvs/infra_setup-AVEq_0GX/lib/python3.9/site-packages/github/Requester.py\", line 408, in requestJson\r\n return self.__requestEncode(cnx, verb, url, parameters, headers, input, encode)\r\n File \"/Users/kakarukeys/.local/share/virtualenvs/infra_setup-AVEq_0GX/lib/python3.9/site-packages/github/Requester.py\", line 475, in __requestEncode\r\n url = self.__makeAbsoluteUrl(url)\r\n File \"/Users/kakarukeys/.local/share/virtualenvs/infra_setup-AVEq_0GX/lib/python3.9/site-packages/github/Requester.py\", line 557, in __makeAbsoluteUrl\r\n assert o.port == self.__port\r\nfatal: 
[localhost]: FAILED! => {\r\n \"changed\": false,\r\n \"invocation\": {\r\n \"module_args\": {\r\n \"access_token\": \"VALUE_SPECIFIED_IN_NO_LOG_PARAMETER\",\r\n \"description\": \"xxx\",\r\n \"name\": \"xxx\",\r\n \"organization\": \"xxx\",\r\n \"password\": null,\r\n \"private\": true,\r\n \"state\": \"present\",\r\n \"username\": null\r\n }\r\n },\r\n \"msg\": \"Unexpected error. AssertionError()\"\r\n}\r\n```\r\n\r\n\r\n### Code of Conduct\r\n\r\nI agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2021, \u00c1lvaro Torres Cogollo\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = '''\n---\nmodule: github_repo\nshort_description: Manage your repositories on Github\nversion_added: 2.2.0\ndescription:\n- Manages Github repositories using PyGithub library.\n- Authentication can be done with I(access_token) or with I(username) and I(password).\noptions:\n username:\n description:\n - Username used for authentication.\n - This is only needed when not using I(access_token).\n type: str\n required: false\n password:\n description:\n - Password used for authentication.\n - This is only needed when not using I(access_token).\n type: str\n required: false\n access_token:\n description:\n - Token parameter for authentication.\n - This is only needed when not using I(username) and I(password).\n type: str\n required: false\n name:\n description:\n - Repository name.\n type: str\n required: true\n description:\n description:\n - Description for the repository.\n - This is only used when I(state) is C(present).\n type: str\n default: ''\n required: false\n private:\n description:\n - Whether the new repository should be private or not.\n - This is only used when I(state) is C(present).\n type: bool\n default: no\n required: false\n state:\n description:\n - Whether the repository should exist or not.\n type: str\n default: present\n choices: [ absent, present ]\n required: false\n organization:\n description:\n - Organization for the repository.\n - When I(state) is C(present), the repository will be created in the current user profile.\n type: str\n required: false\nrequirements:\n- PyGithub>=1.54\nnotes:\n- For Python 3, PyGithub>=1.54 should be used.\n- \"For Python 3.5, PyGithub==1.54 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-54-november-30-2020).\"\n- \"For Python 2.7, PyGithub==1.45 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-45-december-29-2019).\"\n- Supports C(check_mode).\nauthor:\n- \u00c1lvaro Torres Cogollo (@atorrescogollo)\n'''\n\nEXAMPLES = '''\n- name: Create a Github repository\n community.general.github_repo:\n access_token: mytoken\n organization: MyOrganization\n name: myrepo\n description: \"Just for fun\"\n private: yes\n state: present\n register: result\n\n- name: Delete the repository\n community.general.github_repo:\n username: octocat\n password: password\n organization: MyOrganization\n name: myrepo\n state: absent\n register: result\n'''\n\nRETURN = '''\nrepo:\n description: Repository information as JSON. 
See U(https://docs.github.com/en/rest/reference/repos#get-a-repository).\n returned: success and I(state) is C(present)\n type: dict\n'''\n\nimport traceback\nfrom ansible.module_utils.basic import AnsibleModule, missing_required_lib\nimport sys\n\nGITHUB_IMP_ERR = None\ntry:\n from github import Github, GithubException\n from github.GithubException import UnknownObjectException\n HAS_GITHUB_PACKAGE = True\nexcept Exception:\n GITHUB_IMP_ERR = traceback.format_exc()\n HAS_GITHUB_PACKAGE = False\n\n\ndef authenticate(username=None, password=None, access_token=None):\n if access_token:\n return Github(base_url=\"https://api.github.com:443\", login_or_token=access_token)\n else:\n return Github(base_url=\"https://api.github.com:443\", login_or_token=username, password=password)\n\n\ndef create_repo(gh, name, organization=None, private=False, description='', check_mode=False):\n result = dict(\n changed=False,\n repo=dict())\n if organization:\n target = gh.get_organization(organization)\n else:\n target = gh.get_user()\n\n repo = None\n try:\n repo = target.get_repo(name=name)\n result['repo'] = repo.raw_data\n except UnknownObjectException:\n if not check_mode:\n repo = target.create_repo(\n name=name, private=private, description=description)\n result['repo'] = repo.raw_data\n\n result['changed'] = True\n\n changes = {}\n if repo is None or repo.raw_data['private'] != private:\n changes['private'] = private\n if repo is None or repo.raw_data['description'] != description:\n changes['description'] = description\n\n if changes:\n if not check_mode:\n repo.edit(**changes)\n\n result['repo'].update({\n 'private': repo._private.value if not check_mode else private,\n 'description': repo._description.value if not check_mode else description,\n })\n result['changed'] = True\n\n return result\n\n\ndef delete_repo(gh, name, organization=None, check_mode=False):\n result = dict(changed=False)\n if organization:\n target = gh.get_organization(organization)\n else:\n target = gh.get_user()\n try:\n repo = target.get_repo(name=name)\n if not check_mode:\n repo.delete()\n result['changed'] = True\n except UnknownObjectException:\n pass\n\n return result\n\n\ndef run_module(params, check_mode=False):\n gh = authenticate(\n username=params['username'], password=params['password'], access_token=params['access_token'])\n if params['state'] == \"absent\":\n return delete_repo(\n gh=gh,\n name=params['name'],\n organization=params['organization'],\n check_mode=check_mode\n )\n else:\n return create_repo(\n gh=gh,\n name=params['name'],\n organization=params['organization'],\n private=params['private'],\n description=params['description'],\n check_mode=check_mode\n )\n\n\ndef main():\n module_args = dict(\n username=dict(type='str', required=False, default=None),\n password=dict(type='str', required=False, default=None, no_log=True),\n access_token=dict(type='str', required=False,\n default=None, no_log=True),\n name=dict(type='str', required=True),\n state=dict(type='str', required=False, default=\"present\",\n choices=[\"present\", \"absent\"]),\n organization=dict(type='str', required=False, default=None),\n private=dict(type='bool', required=False, default=False),\n description=dict(type='str', required=False, default=''),\n )\n module = AnsibleModule(\n argument_spec=module_args,\n supports_check_mode=True,\n required_together=[('username', 'password')],\n required_one_of=[('username', 'access_token')],\n mutually_exclusive=[('username', 'access_token')]\n )\n\n if not HAS_GITHUB_PACKAGE:\n 
module.fail_json(msg=missing_required_lib(\n \"PyGithub\"), exception=GITHUB_IMP_ERR)\n\n try:\n result = run_module(module.params, module.check_mode)\n module.exit_json(**result)\n except GithubException as e:\n module.fail_json(msg=\"Github error. {0}\".format(repr(e)))\n except Exception as e:\n module.fail_json(msg=\"Unexpected error. {0}\".format(repr(e)))\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/source_control/github/github_repo.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2021, \u00c1lvaro Torres Cogollo\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = '''\n---\nmodule: github_repo\nshort_description: Manage your repositories on Github\nversion_added: 2.2.0\ndescription:\n- Manages Github repositories using PyGithub library.\n- Authentication can be done with I(access_token) or with I(username) and I(password).\noptions:\n username:\n description:\n - Username used for authentication.\n - This is only needed when not using I(access_token).\n type: str\n required: false\n password:\n description:\n - Password used for authentication.\n - This is only needed when not using I(access_token).\n type: str\n required: false\n access_token:\n description:\n - Token parameter for authentication.\n - This is only needed when not using I(username) and I(password).\n type: str\n required: false\n name:\n description:\n - Repository name.\n type: str\n required: true\n description:\n description:\n - Description for the repository.\n - This is only used when I(state) is C(present).\n type: str\n default: ''\n required: false\n private:\n description:\n - Whether the new repository should be private or not.\n - This is only used when I(state) is C(present).\n type: bool\n default: no\n required: false\n state:\n description:\n - Whether the repository should exist or not.\n type: str\n default: present\n choices: [ absent, present ]\n required: false\n organization:\n description:\n - Organization for the repository.\n - When I(state) is C(present), the repository will be created in the current user profile.\n type: str\n required: false\nrequirements:\n- PyGithub>=1.54\nnotes:\n- For Python 3, PyGithub>=1.54 should be used.\n- \"For Python 3.5, PyGithub==1.54 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-54-november-30-2020).\"\n- \"For Python 2.7, PyGithub==1.45 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-45-december-29-2019).\"\n- Supports C(check_mode).\nauthor:\n- \u00c1lvaro Torres Cogollo (@atorrescogollo)\n'''\n\nEXAMPLES = '''\n- name: Create a Github repository\n community.general.github_repo:\n access_token: mytoken\n organization: MyOrganization\n name: myrepo\n description: \"Just for fun\"\n private: yes\n state: present\n register: result\n\n- name: Delete the repository\n community.general.github_repo:\n username: octocat\n password: password\n organization: MyOrganization\n name: myrepo\n state: absent\n register: result\n'''\n\nRETURN = '''\nrepo:\n description: Repository information as JSON. 
See U(https://docs.github.com/en/rest/reference/repos#get-a-repository).\n returned: success and I(state) is C(present)\n type: dict\n'''\n\nimport traceback\nfrom ansible.module_utils.basic import AnsibleModule, missing_required_lib\nimport sys\n\nGITHUB_IMP_ERR = None\ntry:\n from github import Github, GithubException\n from github.GithubException import UnknownObjectException\n HAS_GITHUB_PACKAGE = True\nexcept Exception:\n GITHUB_IMP_ERR = traceback.format_exc()\n HAS_GITHUB_PACKAGE = False\n\n\ndef authenticate(username=None, password=None, access_token=None):\n if access_token:\n return Github(base_url=\"https://api.github.com\", login_or_token=access_token)\n else:\n return Github(base_url=\"https://api.github.com\", login_or_token=username, password=password)\n\n\ndef create_repo(gh, name, organization=None, private=False, description='', check_mode=False):\n result = dict(\n changed=False,\n repo=dict())\n if organization:\n target = gh.get_organization(organization)\n else:\n target = gh.get_user()\n\n repo = None\n try:\n repo = target.get_repo(name=name)\n result['repo'] = repo.raw_data\n except UnknownObjectException:\n if not check_mode:\n repo = target.create_repo(\n name=name, private=private, description=description)\n result['repo'] = repo.raw_data\n\n result['changed'] = True\n\n changes = {}\n if repo is None or repo.raw_data['private'] != private:\n changes['private'] = private\n if repo is None or repo.raw_data['description'] != description:\n changes['description'] = description\n\n if changes:\n if not check_mode:\n repo.edit(**changes)\n\n result['repo'].update({\n 'private': repo._private.value if not check_mode else private,\n 'description': repo._description.value if not check_mode else description,\n })\n result['changed'] = True\n\n return result\n\n\ndef delete_repo(gh, name, organization=None, check_mode=False):\n result = dict(changed=False)\n if organization:\n target = gh.get_organization(organization)\n else:\n target = gh.get_user()\n try:\n repo = target.get_repo(name=name)\n if not check_mode:\n repo.delete()\n result['changed'] = True\n except UnknownObjectException:\n pass\n\n return result\n\n\ndef run_module(params, check_mode=False):\n gh = authenticate(\n username=params['username'], password=params['password'], access_token=params['access_token'])\n if params['state'] == \"absent\":\n return delete_repo(\n gh=gh,\n name=params['name'],\n organization=params['organization'],\n check_mode=check_mode\n )\n else:\n return create_repo(\n gh=gh,\n name=params['name'],\n organization=params['organization'],\n private=params['private'],\n description=params['description'],\n check_mode=check_mode\n )\n\n\ndef main():\n module_args = dict(\n username=dict(type='str', required=False, default=None),\n password=dict(type='str', required=False, default=None, no_log=True),\n access_token=dict(type='str', required=False,\n default=None, no_log=True),\n name=dict(type='str', required=True),\n state=dict(type='str', required=False, default=\"present\",\n choices=[\"present\", \"absent\"]),\n organization=dict(type='str', required=False, default=None),\n private=dict(type='bool', required=False, default=False),\n description=dict(type='str', required=False, default=''),\n )\n module = AnsibleModule(\n argument_spec=module_args,\n supports_check_mode=True,\n required_together=[('username', 'password')],\n required_one_of=[('username', 'access_token')],\n mutually_exclusive=[('username', 'access_token')]\n )\n\n if not HAS_GITHUB_PACKAGE:\n 
module.fail_json(msg=missing_required_lib(\n \"PyGithub\"), exception=GITHUB_IMP_ERR)\n\n try:\n result = run_module(module.params, module.check_mode)\n module.exit_json(**result)\n except GithubException as e:\n module.fail_json(msg=\"Github error. {0}\".format(repr(e)))\n except Exception as e:\n module.fail_json(msg=\"Unexpected error. {0}\".format(repr(e)))\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/source_control/github/github_repo.py"}]} | 3,466 | 190 |
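The patch in the record above drops the explicit `:443` port from the PyGithub base URL, which is what trips the `assert o.port == self.__port` check inside PyGithub's Requester. A minimal sketch of authenticating against the corrected base URL (assuming PyGithub >= 1.54; the token, organization, and repository names are placeholders):

```python
from github import Github

# "https://api.github.com" already implies port 443, so no explicit port is needed.
gh = Github(base_url="https://api.github.com", login_or_token="<access-token>")
org = gh.get_organization("my-org")                   # placeholder organization
repo = org.create_repo(name="myrepo", private=True)   # placeholder repository
print(repo.full_name)
```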
gh_patches_debug_4192 | rasdani/github-patches | git_diff | OpenMined__PySyft-2276 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dim does not work with multipointer tensors
**Describe the bug**
Calling `.dim()` on a MultiPointerTensor returns a MultiPointerTensor whose children are all ints. The return signature should be a plain `int`.
**To Reproduce**
create a multipointer tensor and call .dim()
**Expected behavior**
the value returned should be an int
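
A minimal repro sketch (assuming two VirtualWorkers; the hook/worker setup follows the PySyft 0.1.x API and may need adjusting for other versions):

```python
import torch
import syft as sy

hook = sy.TorchHook(torch)
bob = sy.VirtualWorker(hook, id="bob")
alice = sy.VirtualWorker(hook, id="alice")

# Sending to several workers wraps the data in a MultiPointerTensor.
x = torch.tensor([1.0, 2.0, 3.0]).send(bob, alice)
print(x.dim())  # expected: 1 (a plain int); observed: a MultiPointerTensor whose children are ints
```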
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `syft/frameworks/torch/tensors/interpreters/multi_pointer.py`
Content:
```
1 import torch
2 from typing import List
3 from typing import Union
4
5 import syft as sy
6 from syft.frameworks.torch.tensors.interpreters.abstract import AbstractTensor
7 from syft.frameworks.torch.tensors.interpreters import AdditiveSharingTensor
8 from syft.workers import BaseWorker
9 from syft.frameworks.torch.overload_torch import overloaded
10
11 from syft.workers import AbstractWorker
12
13
14 class MultiPointerTensor(AbstractTensor):
15 ""
16
17 def __init__(
18 self,
19 location: BaseWorker = None,
20 id_at_location: Union[str, int] = None,
21 register: bool = False,
22 owner: BaseWorker = None,
23 id: Union[str, int] = None,
24 garbage_collect_data: bool = True,
25 point_to_attr: str = None,
26 tags: List[str] = None,
27 description: str = None,
28 children: List[AbstractTensor] = [],
29 ):
30
31 super().__init__(tags, description)
32
33 self.location = location
34 self.id_at_location = id_at_location
35 self.owner = owner
36 self.id = id
37 self.garbage_collect_data = garbage_collect_data
38 self.point_to_attr = point_to_attr
39
40 self.child = {}
41 for c in children:
42 assert c.shape == children[0].shape
43 self.child[c.location.id] = c
44
45 def __str__(self):
46 type_name = type(self).__name__
47 out = f"[" f"{type_name}]"
48 for v in self.child.values():
49 out += "\n\t-> " + str(v)
50 return out
51
52 def __eq__(self, other):
53 return torch.eq(self, other)
54
55 def __add__(self, other):
56 """
57 Adding a MultiPointer (MPT) and an AdditiveShared Tensor (AST) should return an
58 AdditiveShared Tensor, so if we have this configuration, we permute self and
59 other to use the fact that other.__add__(...) return an object of type other
60
61 Else, we just redirect to .add which works well
62 """
63 if isinstance(other, AdditiveSharingTensor):
64 return other.__add__(self)
65 else:
66 return self.add(other)
67
68 def __mul__(self, other):
69 """
70 See __add__ for details but, MPT * AST should return AST
71 """
72 if isinstance(other, AdditiveSharingTensor):
73 return other.__mul__(self)
74 else:
75 return self.mul(other)
76
77 @property
78 def shape(self) -> torch.Size:
79 """This method returns the shape of the data being pointed to.
80 This shape information SHOULD be cached on self._shape, but
81 occasionally this information may not be present. If this is the
82 case, then it requests the shape information from the remote object
83 directly (which is inefficient and should be avoided)."""
84
85 return list(self.child.values())[0].shape
86
87 def get(self, sum_results: bool = False) -> torch.Tensor:
88
89 results = list()
90 for v in self.child.values():
91 results.append(v.get())
92
93 if sum_results:
94 return sum(results)
95
96 return results
97
98 def virtual_get(self, sum_results: bool = False):
99 """Get the value of the tensor without calling get - Only for VirtualWorkers"""
100
101 results = list()
102 for v in self.child.values():
103 value = v.location._objects[v.id_at_location]
104 results.append(value)
105
106 if sum_results:
107 return sum(results)
108
109 return results
110
111 @staticmethod
112 def dispatch(args, worker):
113 """
114 utility function for handle_func_command which help to select
115 shares (seen as elements of dict) in an argument set. It could
116 perhaps be put elsewhere
117
118 Args:
119 args: arguments to give to a functions
120 worker: owner of the shares to select
121
122 Return:
123 args where the MultiPointerTensor are replaced by
124 the appropriate share
125 """
126 return map(lambda x: x[worker] if isinstance(x, dict) else x, args)
127
128 @classmethod
129 def handle_func_command(cls, command):
130 """
131 Receive an instruction for a function to be applied on a Syft Tensor,
132 Replace in the args all the LogTensors with
133 their child attribute, forward the command instruction to the
134 handle_function_command of the type of the child attributes, get the
135 response and replace a Syft Tensor on top of all tensors found in
136 the response.
137
138 Args:
139 command: instruction of a function command: (command name,
140 <no self>, arguments[, kwargs])
141
142 Returns:
143 the response of the function command
144 """
145
146 cmd, _, args, kwargs = command
147
148 tensor = args[0]
149
150 # Check that the function has not been overwritten
151 try:
152 # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
153 cmd = cls.rgetattr(cls, cmd)
154 return cmd(*args, **kwargs)
155 except AttributeError:
156 pass
157
158 # TODO: I can't manage the import issue, can you?
159 # Replace all LoggingTensor with their child attribute
160 new_args, new_kwargs, new_type = sy.frameworks.torch.hook_args.hook_function_args(
161 cmd, args, kwargs
162 )
163
164 results = {}
165 for worker, share in new_args[0].items():
166 new_type = type(share)
167 new_args_worker = tuple(MultiPointerTensor.dispatch(new_args, worker))
168
169 # build the new command
170 new_command = (cmd, None, new_args_worker, new_kwargs)
171
172 # Send it to the appropriate class and get the response
173 results[worker] = new_type.handle_func_command(new_command)
174
175 # Put back MultiPointerTensor on the tensors found in the response
176 response = sy.frameworks.torch.hook_args.hook_response(
177 cmd, results, wrap_type=cls, wrap_args=tensor.get_class_attributes()
178 )
179
180 return response
181
182 def set_garbage_collect_data(self, value):
183 shares = self.child
184 for _, share in shares.items():
185 share.child.garbage_collect_data = value
186
187 @staticmethod
188 def simplify(tensor: "MultiPointerTensor") -> tuple:
189 """
190 This function takes the attributes of a MultiPointerTensor and saves them in a tuple
191 Args:
192 tensor (MultiPointerTensor): a MultiPointerTensor
193 Returns:
194 tuple: a tuple holding the unique attributes of the additive shared tensor
195 Examples:
196 data = simplify(tensor)
197 """
198
199 chain = None
200 if hasattr(tensor, "child"):
201 chain = sy.serde.simplify(tensor.child)
202 return (tensor.id, chain)
203
204 @staticmethod
205 def detail(worker: AbstractWorker, tensor_tuple: tuple) -> "MultiPointerTensor":
206 """
207 This function reconstructs a MultiPointerTensor given it's attributes in form of a tuple.
208 Args:
209 worker: the worker doing the deserialization
210 tensor_tuple: a tuple holding the attributes of the MultiPointerTensor
211 Returns:
212 MultiPointerTensor: a MultiPointerTensor
213 Examples:
214 multi_pointer_tensor = detail(data)
215 """
216
217 tensor_id, chain = tensor_tuple
218
219 tensor = sy.MultiPointerTensor(owner=worker, id=tensor_id)
220
221 if chain is not None:
222 chain = sy.serde._detail(worker, chain)
223 tensor.child = chain
224
225 return tensor
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/syft/frameworks/torch/tensors/interpreters/multi_pointer.py b/syft/frameworks/torch/tensors/interpreters/multi_pointer.py
--- a/syft/frameworks/torch/tensors/interpreters/multi_pointer.py
+++ b/syft/frameworks/torch/tensors/interpreters/multi_pointer.py
@@ -84,6 +84,12 @@
return list(self.child.values())[0].shape
+ def dim(self) -> int:
+ """This method fixes the error that the result of dim was a list of ints
+ stored inside a multipointer tensor"""
+
+ return len(self.shape)
+
def get(self, sum_results: bool = False) -> torch.Tensor:
results = list()
| {"golden_diff": "diff --git a/syft/frameworks/torch/tensors/interpreters/multi_pointer.py b/syft/frameworks/torch/tensors/interpreters/multi_pointer.py\n--- a/syft/frameworks/torch/tensors/interpreters/multi_pointer.py\n+++ b/syft/frameworks/torch/tensors/interpreters/multi_pointer.py\n@@ -84,6 +84,12 @@\n \n return list(self.child.values())[0].shape\n \n+ def dim(self) -> int:\n+ \"\"\"This method fixes the error that the result of dim was a list of ints\n+ stored inside a multipointer tensor\"\"\"\n+\n+ return len(self.shape)\n+\n def get(self, sum_results: bool = False) -> torch.Tensor:\n \n results = list()\n", "issue": "Dim does not work with multipointer tensors\n**Describe the bug**\r\nCalling dim on a multipointer tensor returns a multipointer tensor where the values of the children are all ints. The return signature should be an int\r\n\r\n**To Reproduce**\r\ncreate a multipointer tensor and call .dim()\r\n\r\n**Expected behavior**\r\nthe value returned should be an int\r\n\n", "before_files": [{"content": "import torch\nfrom typing import List\nfrom typing import Union\n\nimport syft as sy\nfrom syft.frameworks.torch.tensors.interpreters.abstract import AbstractTensor\nfrom syft.frameworks.torch.tensors.interpreters import AdditiveSharingTensor\nfrom syft.workers import BaseWorker\nfrom syft.frameworks.torch.overload_torch import overloaded\n\nfrom syft.workers import AbstractWorker\n\n\nclass MultiPointerTensor(AbstractTensor):\n \"\"\n\n def __init__(\n self,\n location: BaseWorker = None,\n id_at_location: Union[str, int] = None,\n register: bool = False,\n owner: BaseWorker = None,\n id: Union[str, int] = None,\n garbage_collect_data: bool = True,\n point_to_attr: str = None,\n tags: List[str] = None,\n description: str = None,\n children: List[AbstractTensor] = [],\n ):\n\n super().__init__(tags, description)\n\n self.location = location\n self.id_at_location = id_at_location\n self.owner = owner\n self.id = id\n self.garbage_collect_data = garbage_collect_data\n self.point_to_attr = point_to_attr\n\n self.child = {}\n for c in children:\n assert c.shape == children[0].shape\n self.child[c.location.id] = c\n\n def __str__(self):\n type_name = type(self).__name__\n out = f\"[\" f\"{type_name}]\"\n for v in self.child.values():\n out += \"\\n\\t-> \" + str(v)\n return out\n\n def __eq__(self, other):\n return torch.eq(self, other)\n\n def __add__(self, other):\n \"\"\"\n Adding a MultiPointer (MPT) and an AdditiveShared Tensor (AST) should return an\n AdditiveShared Tensor, so if we have this configuration, we permute self and\n other to use the fact that other.__add__(...) return an object of type other\n\n Else, we just redirect to .add which works well\n \"\"\"\n if isinstance(other, AdditiveSharingTensor):\n return other.__add__(self)\n else:\n return self.add(other)\n\n def __mul__(self, other):\n \"\"\"\n See __add__ for details but, MPT * AST should return AST\n \"\"\"\n if isinstance(other, AdditiveSharingTensor):\n return other.__mul__(self)\n else:\n return self.mul(other)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"This method returns the shape of the data being pointed to.\n This shape information SHOULD be cached on self._shape, but\n occasionally this information may not be present. 
If this is the\n case, then it requests the shape information from the remote object\n directly (which is inefficient and should be avoided).\"\"\"\n\n return list(self.child.values())[0].shape\n\n def get(self, sum_results: bool = False) -> torch.Tensor:\n\n results = list()\n for v in self.child.values():\n results.append(v.get())\n\n if sum_results:\n return sum(results)\n\n return results\n\n def virtual_get(self, sum_results: bool = False):\n \"\"\"Get the value of the tensor without calling get - Only for VirtualWorkers\"\"\"\n\n results = list()\n for v in self.child.values():\n value = v.location._objects[v.id_at_location]\n results.append(value)\n\n if sum_results:\n return sum(results)\n\n return results\n\n @staticmethod\n def dispatch(args, worker):\n \"\"\"\n utility function for handle_func_command which help to select\n shares (seen as elements of dict) in an argument set. It could\n perhaps be put elsewhere\n\n Args:\n args: arguments to give to a functions\n worker: owner of the shares to select\n\n Return:\n args where the MultiPointerTensor are replaced by\n the appropriate share\n \"\"\"\n return map(lambda x: x[worker] if isinstance(x, dict) else x, args)\n\n @classmethod\n def handle_func_command(cls, command):\n \"\"\"\n Receive an instruction for a function to be applied on a Syft Tensor,\n Replace in the args all the LogTensors with\n their child attribute, forward the command instruction to the\n handle_function_command of the type of the child attributes, get the\n response and replace a Syft Tensor on top of all tensors found in\n the response.\n\n Args:\n command: instruction of a function command: (command name,\n <no self>, arguments[, kwargs])\n\n Returns:\n the response of the function command\n \"\"\"\n\n cmd, _, args, kwargs = command\n\n tensor = args[0]\n\n # Check that the function has not been overwritten\n try:\n # Try to get recursively the attributes in cmd = \"<attr1>.<attr2>.<attr3>...\"\n cmd = cls.rgetattr(cls, cmd)\n return cmd(*args, **kwargs)\n except AttributeError:\n pass\n\n # TODO: I can't manage the import issue, can you?\n # Replace all LoggingTensor with their child attribute\n new_args, new_kwargs, new_type = sy.frameworks.torch.hook_args.hook_function_args(\n cmd, args, kwargs\n )\n\n results = {}\n for worker, share in new_args[0].items():\n new_type = type(share)\n new_args_worker = tuple(MultiPointerTensor.dispatch(new_args, worker))\n\n # build the new command\n new_command = (cmd, None, new_args_worker, new_kwargs)\n\n # Send it to the appropriate class and get the response\n results[worker] = new_type.handle_func_command(new_command)\n\n # Put back MultiPointerTensor on the tensors found in the response\n response = sy.frameworks.torch.hook_args.hook_response(\n cmd, results, wrap_type=cls, wrap_args=tensor.get_class_attributes()\n )\n\n return response\n\n def set_garbage_collect_data(self, value):\n shares = self.child\n for _, share in shares.items():\n share.child.garbage_collect_data = value\n\n @staticmethod\n def simplify(tensor: \"MultiPointerTensor\") -> tuple:\n \"\"\"\n This function takes the attributes of a MultiPointerTensor and saves them in a tuple\n Args:\n tensor (MultiPointerTensor): a MultiPointerTensor\n Returns:\n tuple: a tuple holding the unique attributes of the additive shared tensor\n Examples:\n data = simplify(tensor)\n \"\"\"\n\n chain = None\n if hasattr(tensor, \"child\"):\n chain = sy.serde.simplify(tensor.child)\n return (tensor.id, chain)\n\n @staticmethod\n def detail(worker: AbstractWorker, 
tensor_tuple: tuple) -> \"MultiPointerTensor\":\n \"\"\"\n This function reconstructs a MultiPointerTensor given it's attributes in form of a tuple.\n Args:\n worker: the worker doing the deserialization\n tensor_tuple: a tuple holding the attributes of the MultiPointerTensor\n Returns:\n MultiPointerTensor: a MultiPointerTensor\n Examples:\n multi_pointer_tensor = detail(data)\n \"\"\"\n\n tensor_id, chain = tensor_tuple\n\n tensor = sy.MultiPointerTensor(owner=worker, id=tensor_id)\n\n if chain is not None:\n chain = sy.serde._detail(worker, chain)\n tensor.child = chain\n\n return tensor\n", "path": "syft/frameworks/torch/tensors/interpreters/multi_pointer.py"}], "after_files": [{"content": "import torch\nfrom typing import List\nfrom typing import Union\n\nimport syft as sy\nfrom syft.frameworks.torch.tensors.interpreters.abstract import AbstractTensor\nfrom syft.frameworks.torch.tensors.interpreters import AdditiveSharingTensor\nfrom syft.workers import BaseWorker\nfrom syft.frameworks.torch.overload_torch import overloaded\n\nfrom syft.workers import AbstractWorker\n\n\nclass MultiPointerTensor(AbstractTensor):\n \"\"\n\n def __init__(\n self,\n location: BaseWorker = None,\n id_at_location: Union[str, int] = None,\n register: bool = False,\n owner: BaseWorker = None,\n id: Union[str, int] = None,\n garbage_collect_data: bool = True,\n point_to_attr: str = None,\n tags: List[str] = None,\n description: str = None,\n children: List[AbstractTensor] = [],\n ):\n\n super().__init__(tags, description)\n\n self.location = location\n self.id_at_location = id_at_location\n self.owner = owner\n self.id = id\n self.garbage_collect_data = garbage_collect_data\n self.point_to_attr = point_to_attr\n\n self.child = {}\n for c in children:\n assert c.shape == children[0].shape\n self.child[c.location.id] = c\n\n def __str__(self):\n type_name = type(self).__name__\n out = f\"[\" f\"{type_name}]\"\n for v in self.child.values():\n out += \"\\n\\t-> \" + str(v)\n return out\n\n def __eq__(self, other):\n return torch.eq(self, other)\n\n def __add__(self, other):\n \"\"\"\n Adding a MultiPointer (MPT) and an AdditiveShared Tensor (AST) should return an\n AdditiveShared Tensor, so if we have this configuration, we permute self and\n other to use the fact that other.__add__(...) return an object of type other\n\n Else, we just redirect to .add which works well\n \"\"\"\n if isinstance(other, AdditiveSharingTensor):\n return other.__add__(self)\n else:\n return self.add(other)\n\n def __mul__(self, other):\n \"\"\"\n See __add__ for details but, MPT * AST should return AST\n \"\"\"\n if isinstance(other, AdditiveSharingTensor):\n return other.__mul__(self)\n else:\n return self.mul(other)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"This method returns the shape of the data being pointed to.\n This shape information SHOULD be cached on self._shape, but\n occasionally this information may not be present. 
If this is the\n case, then it requests the shape information from the remote object\n directly (which is inefficient and should be avoided).\"\"\"\n\n return list(self.child.values())[0].shape\n\n def dim(self) -> int:\n \"\"\"This method fixes the error that the result of dim was a list of ints\n stored inside a multipointer tensor\"\"\"\n\n return len(self.shape)\n\n def get(self, sum_results: bool = False) -> torch.Tensor:\n\n results = list()\n for v in self.child.values():\n results.append(v.get())\n\n if sum_results:\n return sum(results)\n\n return results\n\n def virtual_get(self, sum_results: bool = False):\n \"\"\"Get the value of the tensor without calling get - Only for VirtualWorkers\"\"\"\n\n results = list()\n for v in self.child.values():\n value = v.location._objects[v.id_at_location]\n results.append(value)\n\n if sum_results:\n return sum(results)\n\n return results\n\n @staticmethod\n def dispatch(args, worker):\n \"\"\"\n utility function for handle_func_command which help to select\n shares (seen as elements of dict) in an argument set. It could\n perhaps be put elsewhere\n\n Args:\n args: arguments to give to a functions\n worker: owner of the shares to select\n\n Return:\n args where the MultiPointerTensor are replaced by\n the appropriate share\n \"\"\"\n return map(lambda x: x[worker] if isinstance(x, dict) else x, args)\n\n @classmethod\n def handle_func_command(cls, command):\n \"\"\"\n Receive an instruction for a function to be applied on a Syft Tensor,\n Replace in the args all the LogTensors with\n their child attribute, forward the command instruction to the\n handle_function_command of the type of the child attributes, get the\n response and replace a Syft Tensor on top of all tensors found in\n the response.\n\n Args:\n command: instruction of a function command: (command name,\n <no self>, arguments[, kwargs])\n\n Returns:\n the response of the function command\n \"\"\"\n\n cmd, _, args, kwargs = command\n\n tensor = args[0]\n\n # Check that the function has not been overwritten\n try:\n # Try to get recursively the attributes in cmd = \"<attr1>.<attr2>.<attr3>...\"\n cmd = cls.rgetattr(cls, cmd)\n return cmd(*args, **kwargs)\n except AttributeError:\n pass\n\n # TODO: I can't manage the import issue, can you?\n # Replace all LoggingTensor with their child attribute\n new_args, new_kwargs, new_type = sy.frameworks.torch.hook_args.hook_function_args(\n cmd, args, kwargs\n )\n\n results = {}\n for worker, share in new_args[0].items():\n new_type = type(share)\n new_args_worker = tuple(MultiPointerTensor.dispatch(new_args, worker))\n\n # build the new command\n new_command = (cmd, None, new_args_worker, new_kwargs)\n\n # Send it to the appropriate class and get the response\n results[worker] = new_type.handle_func_command(new_command)\n\n # Put back MultiPointerTensor on the tensors found in the response\n response = sy.frameworks.torch.hook_args.hook_response(\n cmd, results, wrap_type=cls, wrap_args=tensor.get_class_attributes()\n )\n\n return response\n\n def set_garbage_collect_data(self, value):\n shares = self.child\n for _, share in shares.items():\n share.child.garbage_collect_data = value\n\n @staticmethod\n def simplify(tensor: \"MultiPointerTensor\") -> tuple:\n \"\"\"\n This function takes the attributes of a MultiPointerTensor and saves them in a tuple\n Args:\n tensor (MultiPointerTensor): a MultiPointerTensor\n Returns:\n tuple: a tuple holding the unique attributes of the additive shared tensor\n Examples:\n data = simplify(tensor)\n 
\"\"\"\n\n chain = None\n if hasattr(tensor, \"child\"):\n chain = sy.serde.simplify(tensor.child)\n return (tensor.id, chain)\n\n @staticmethod\n def detail(worker: AbstractWorker, tensor_tuple: tuple) -> \"MultiPointerTensor\":\n \"\"\"\n This function reconstructs a MultiPointerTensor given it's attributes in form of a tuple.\n Args:\n worker: the worker doing the deserialization\n tensor_tuple: a tuple holding the attributes of the MultiPointerTensor\n Returns:\n MultiPointerTensor: a MultiPointerTensor\n Examples:\n multi_pointer_tensor = detail(data)\n \"\"\"\n\n tensor_id, chain = tensor_tuple\n\n tensor = sy.MultiPointerTensor(owner=worker, id=tensor_id)\n\n if chain is not None:\n chain = sy.serde._detail(worker, chain)\n tensor.child = chain\n\n return tensor\n", "path": "syft/frameworks/torch/tensors/interpreters/multi_pointer.py"}]} | 2,529 | 170 |
gh_patches_debug_37794 | rasdani/github-patches | git_diff | urllib3__urllib3-3311 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add http_version property to BaseHTTPResponse
Now that HTTP/2 is coming, we should add a property to HTTPResponse that exposes the HTTP version in use. For the normal HTTPResponse this will be either HTTP/1.1 or HTTP/1.0; check `_http_vsn_str` for that value.
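
A rough sketch of what such a property could look like (hypothetical names and mapping, following CPython's convention of encoding the version as major * 10 + minor; the real API is up to the maintainers):

```python
# Hypothetical sketch, not the shipped urllib3 API.
_VERSION_NAMES = {9: "HTTP/0.9", 10: "HTTP/1.0", 11: "HTTP/1.1", 20: "HTTP/2"}

class BaseHTTPResponse:
    version: int  # 10, 11, or 20 -- mirrors http.client's integer version field

    @property
    def http_version(self) -> str:
        # Fall back to a best-effort placeholder for versions not in the table.
        return _VERSION_NAMES.get(self.version, "HTTP/?")
```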
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/urllib3/http2.py`
Content:
```
1 from __future__ import annotations
2
3 import contextlib
4 import threading
5 import typing
6
7 import h2.config # type: ignore[import]
8 import h2.connection # type: ignore[import]
9 import h2.events # type: ignore[import]
10
11 import urllib3.connection
12 import urllib3.util.ssl_
13
14 from ._collections import HTTPHeaderDict
15 from .connection import HTTPSConnection
16 from .connectionpool import HTTPSConnectionPool
17
18 orig_HTTPSConnection = HTTPSConnection
19
20
21 class HTTP2Connection(HTTPSConnection):
22 def __init__(
23 self, host: str, port: int | None = None, **kwargs: typing.Any
24 ) -> None:
25 self._h2_lock = threading.RLock()
26 self._h2_conn = h2.connection.H2Connection(
27 config=h2.config.H2Configuration(client_side=True)
28 )
29 self._h2_stream: int | None = None
30 self._h2_headers: list[tuple[bytes, bytes]] = []
31
32 if "proxy" in kwargs or "proxy_config" in kwargs: # Defensive:
33 raise NotImplementedError("Proxies aren't supported with HTTP/2")
34
35 super().__init__(host, port, **kwargs)
36
37 @contextlib.contextmanager
38 def _lock_h2_conn(self) -> typing.Generator[h2.connection.H2Connection, None, None]:
39 with self._h2_lock:
40 yield self._h2_conn
41
42 def connect(self) -> None:
43 super().connect()
44
45 with self._lock_h2_conn() as h2_conn:
46 h2_conn.initiate_connection()
47 self.sock.sendall(h2_conn.data_to_send())
48
49 def putrequest(
50 self,
51 method: str,
52 url: str,
53 skip_host: bool = False,
54 skip_accept_encoding: bool = False,
55 ) -> None:
56 with self._lock_h2_conn() as h2_conn:
57 self._h2_stream = h2_conn.get_next_available_stream_id()
58
59 if ":" in self.host:
60 authority = f"[{self.host}]:{self.port or 443}"
61 else:
62 authority = f"{self.host}:{self.port or 443}"
63
64 self._h2_headers.extend(
65 (
66 (b":scheme", b"https"),
67 (b":method", method.encode()),
68 (b":authority", authority.encode()),
69 (b":path", url.encode()),
70 )
71 )
72
73 def putheader(self, header: str, *values: str) -> None:
74 for value in values:
75 self._h2_headers.append(
76 (header.encode("utf-8").lower(), value.encode("utf-8"))
77 )
78
79 def endheaders(self) -> None: # type: ignore[override]
80 with self._lock_h2_conn() as h2_conn:
81 h2_conn.send_headers(
82 stream_id=self._h2_stream,
83 headers=self._h2_headers,
84 end_stream=True,
85 )
86 if data_to_send := h2_conn.data_to_send():
87 self.sock.sendall(data_to_send)
88
89 def send(self, data: bytes) -> None: # type: ignore[override] # Defensive:
90 if not data:
91 return
92 raise NotImplementedError("Sending data isn't supported yet")
93
94 def getresponse( # type: ignore[override]
95 self,
96 ) -> HTTP2Response:
97 status = None
98 data = bytearray()
99 with self._lock_h2_conn() as h2_conn:
100 end_stream = False
101 while not end_stream:
102 # TODO: Arbitrary read value.
103 if received_data := self.sock.recv(65535):
104 events = h2_conn.receive_data(received_data)
105 for event in events:
106 if isinstance(
107 event, h2.events.InformationalResponseReceived
108 ): # Defensive:
109 continue # TODO: Does the stdlib do anything with these responses?
110
111 elif isinstance(event, h2.events.ResponseReceived):
112 headers = HTTPHeaderDict()
113 for header, value in event.headers:
114 if header == b":status":
115 status = int(value.decode())
116 else:
117 headers.add(
118 header.decode("ascii"), value.decode("ascii")
119 )
120
121 elif isinstance(event, h2.events.DataReceived):
122 data += event.data
123 h2_conn.acknowledge_received_data(
124 event.flow_controlled_length, event.stream_id
125 )
126
127 elif isinstance(event, h2.events.StreamEnded):
128 end_stream = True
129
130 if data_to_send := h2_conn.data_to_send():
131 self.sock.sendall(data_to_send)
132
133 # We always close to not have to handle connection management.
134 self.close()
135
136 assert status is not None
137 return HTTP2Response(status=status, headers=headers, data=bytes(data))
138
139 def close(self) -> None:
140 with self._lock_h2_conn() as h2_conn:
141 try:
142 self._h2_conn.close_connection()
143 if data := h2_conn.data_to_send():
144 self.sock.sendall(data)
145 except Exception:
146 pass
147
148 # Reset all our HTTP/2 connection state.
149 self._h2_conn = h2.connection.H2Connection(
150 config=h2.config.H2Configuration(client_side=True)
151 )
152 self._h2_stream = None
153 self._h2_headers = []
154
155 super().close()
156
157
158 class HTTP2Response:
159 # TODO: This is a woefully incomplete response object, but works for non-streaming.
160 def __init__(self, status: int, headers: HTTPHeaderDict, data: bytes) -> None:
161 self.status = status
162 self.headers = headers
163 self.data = data
164 self.length_remaining = 0
165
166 def get_redirect_location(self) -> None:
167 return None
168
169
170 def inject_into_urllib3() -> None:
171 HTTPSConnectionPool.ConnectionCls = HTTP2Connection # type: ignore[assignment]
172 urllib3.connection.HTTPSConnection = HTTP2Connection # type: ignore[misc]
173
174 # TODO: Offer 'http/1.1' as well, but for testing purposes this is handy.
175 urllib3.util.ssl_.ALPN_PROTOCOLS = ["h2"]
176
177
178 def extract_from_urllib3() -> None:
179 HTTPSConnectionPool.ConnectionCls = orig_HTTPSConnection
180 urllib3.connection.HTTPSConnection = orig_HTTPSConnection # type: ignore[misc]
181
182 urllib3.util.ssl_.ALPN_PROTOCOLS = ["http/1.1"]
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/urllib3/http2.py b/src/urllib3/http2.py
--- a/src/urllib3/http2.py
+++ b/src/urllib3/http2.py
@@ -10,6 +10,7 @@
import urllib3.connection
import urllib3.util.ssl_
+from urllib3.response import BaseHTTPResponse
from ._collections import HTTPHeaderDict
from .connection import HTTPSConnection
@@ -54,6 +55,7 @@
skip_accept_encoding: bool = False,
) -> None:
with self._lock_h2_conn() as h2_conn:
+ self._request_url = url
self._h2_stream = h2_conn.get_next_available_stream_id()
if ":" in self.host:
@@ -134,7 +136,12 @@
self.close()
assert status is not None
- return HTTP2Response(status=status, headers=headers, data=bytes(data))
+ return HTTP2Response(
+ status=status,
+ headers=headers,
+ request_url=self._request_url,
+ data=bytes(data),
+ )
def close(self) -> None:
with self._lock_h2_conn() as h2_conn:
@@ -155,20 +162,39 @@
super().close()
-class HTTP2Response:
+class HTTP2Response(BaseHTTPResponse):
# TODO: This is a woefully incomplete response object, but works for non-streaming.
- def __init__(self, status: int, headers: HTTPHeaderDict, data: bytes) -> None:
- self.status = status
- self.headers = headers
- self.data = data
+ def __init__(
+ self,
+ status: int,
+ headers: HTTPHeaderDict,
+ request_url: str,
+ data: bytes,
+ decode_content: bool = False, # TODO: support decoding
+ ) -> None:
+ super().__init__(
+ status=status,
+ headers=headers,
+ # Following CPython, we map HTTP versions to major * 10 + minor integers
+ version=20,
+ # No reason phrase in HTTP/2
+ reason=None,
+ decode_content=decode_content,
+ request_url=request_url,
+ )
+ self._data = data
self.length_remaining = 0
+ @property
+ def data(self) -> bytes:
+ return self._data
+
def get_redirect_location(self) -> None:
return None
def inject_into_urllib3() -> None:
- HTTPSConnectionPool.ConnectionCls = HTTP2Connection # type: ignore[assignment]
+ HTTPSConnectionPool.ConnectionCls = HTTP2Connection
urllib3.connection.HTTPSConnection = HTTP2Connection # type: ignore[misc]
# TODO: Offer 'http/1.1' as well, but for testing purposes this is handy.
| {"golden_diff": "diff --git a/src/urllib3/http2.py b/src/urllib3/http2.py\n--- a/src/urllib3/http2.py\n+++ b/src/urllib3/http2.py\n@@ -10,6 +10,7 @@\n \n import urllib3.connection\n import urllib3.util.ssl_\n+from urllib3.response import BaseHTTPResponse\n \n from ._collections import HTTPHeaderDict\n from .connection import HTTPSConnection\n@@ -54,6 +55,7 @@\n skip_accept_encoding: bool = False,\n ) -> None:\n with self._lock_h2_conn() as h2_conn:\n+ self._request_url = url\n self._h2_stream = h2_conn.get_next_available_stream_id()\n \n if \":\" in self.host:\n@@ -134,7 +136,12 @@\n self.close()\n \n assert status is not None\n- return HTTP2Response(status=status, headers=headers, data=bytes(data))\n+ return HTTP2Response(\n+ status=status,\n+ headers=headers,\n+ request_url=self._request_url,\n+ data=bytes(data),\n+ )\n \n def close(self) -> None:\n with self._lock_h2_conn() as h2_conn:\n@@ -155,20 +162,39 @@\n super().close()\n \n \n-class HTTP2Response:\n+class HTTP2Response(BaseHTTPResponse):\n # TODO: This is a woefully incomplete response object, but works for non-streaming.\n- def __init__(self, status: int, headers: HTTPHeaderDict, data: bytes) -> None:\n- self.status = status\n- self.headers = headers\n- self.data = data\n+ def __init__(\n+ self,\n+ status: int,\n+ headers: HTTPHeaderDict,\n+ request_url: str,\n+ data: bytes,\n+ decode_content: bool = False, # TODO: support decoding\n+ ) -> None:\n+ super().__init__(\n+ status=status,\n+ headers=headers,\n+ # Following CPython, we map HTTP versions to major * 10 + minor integers\n+ version=20,\n+ # No reason phrase in HTTP/2\n+ reason=None,\n+ decode_content=decode_content,\n+ request_url=request_url,\n+ )\n+ self._data = data\n self.length_remaining = 0\n \n+ @property\n+ def data(self) -> bytes:\n+ return self._data\n+\n def get_redirect_location(self) -> None:\n return None\n \n \n def inject_into_urllib3() -> None:\n- HTTPSConnectionPool.ConnectionCls = HTTP2Connection # type: ignore[assignment]\n+ HTTPSConnectionPool.ConnectionCls = HTTP2Connection\n urllib3.connection.HTTPSConnection = HTTP2Connection # type: ignore[misc]\n \n # TODO: Offer 'http/1.1' as well, but for testing purposes this is handy.\n", "issue": "Add http_version property to BaseHTTPResponse\nNow that HTTP/2 is coming we should add a value to HTTPResponse to provide this value. The normal HTTPResponse will either be HTTP/1.1 or HTTP/1.0. 
Check `_http_vsn_str` for that value.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport contextlib\nimport threading\nimport typing\n\nimport h2.config # type: ignore[import]\nimport h2.connection # type: ignore[import]\nimport h2.events # type: ignore[import]\n\nimport urllib3.connection\nimport urllib3.util.ssl_\n\nfrom ._collections import HTTPHeaderDict\nfrom .connection import HTTPSConnection\nfrom .connectionpool import HTTPSConnectionPool\n\norig_HTTPSConnection = HTTPSConnection\n\n\nclass HTTP2Connection(HTTPSConnection):\n def __init__(\n self, host: str, port: int | None = None, **kwargs: typing.Any\n ) -> None:\n self._h2_lock = threading.RLock()\n self._h2_conn = h2.connection.H2Connection(\n config=h2.config.H2Configuration(client_side=True)\n )\n self._h2_stream: int | None = None\n self._h2_headers: list[tuple[bytes, bytes]] = []\n\n if \"proxy\" in kwargs or \"proxy_config\" in kwargs: # Defensive:\n raise NotImplementedError(\"Proxies aren't supported with HTTP/2\")\n\n super().__init__(host, port, **kwargs)\n\n @contextlib.contextmanager\n def _lock_h2_conn(self) -> typing.Generator[h2.connection.H2Connection, None, None]:\n with self._h2_lock:\n yield self._h2_conn\n\n def connect(self) -> None:\n super().connect()\n\n with self._lock_h2_conn() as h2_conn:\n h2_conn.initiate_connection()\n self.sock.sendall(h2_conn.data_to_send())\n\n def putrequest(\n self,\n method: str,\n url: str,\n skip_host: bool = False,\n skip_accept_encoding: bool = False,\n ) -> None:\n with self._lock_h2_conn() as h2_conn:\n self._h2_stream = h2_conn.get_next_available_stream_id()\n\n if \":\" in self.host:\n authority = f\"[{self.host}]:{self.port or 443}\"\n else:\n authority = f\"{self.host}:{self.port or 443}\"\n\n self._h2_headers.extend(\n (\n (b\":scheme\", b\"https\"),\n (b\":method\", method.encode()),\n (b\":authority\", authority.encode()),\n (b\":path\", url.encode()),\n )\n )\n\n def putheader(self, header: str, *values: str) -> None:\n for value in values:\n self._h2_headers.append(\n (header.encode(\"utf-8\").lower(), value.encode(\"utf-8\"))\n )\n\n def endheaders(self) -> None: # type: ignore[override]\n with self._lock_h2_conn() as h2_conn:\n h2_conn.send_headers(\n stream_id=self._h2_stream,\n headers=self._h2_headers,\n end_stream=True,\n )\n if data_to_send := h2_conn.data_to_send():\n self.sock.sendall(data_to_send)\n\n def send(self, data: bytes) -> None: # type: ignore[override] # Defensive:\n if not data:\n return\n raise NotImplementedError(\"Sending data isn't supported yet\")\n\n def getresponse( # type: ignore[override]\n self,\n ) -> HTTP2Response:\n status = None\n data = bytearray()\n with self._lock_h2_conn() as h2_conn:\n end_stream = False\n while not end_stream:\n # TODO: Arbitrary read value.\n if received_data := self.sock.recv(65535):\n events = h2_conn.receive_data(received_data)\n for event in events:\n if isinstance(\n event, h2.events.InformationalResponseReceived\n ): # Defensive:\n continue # TODO: Does the stdlib do anything with these responses?\n\n elif isinstance(event, h2.events.ResponseReceived):\n headers = HTTPHeaderDict()\n for header, value in event.headers:\n if header == b\":status\":\n status = int(value.decode())\n else:\n headers.add(\n header.decode(\"ascii\"), value.decode(\"ascii\")\n )\n\n elif isinstance(event, h2.events.DataReceived):\n data += event.data\n h2_conn.acknowledge_received_data(\n event.flow_controlled_length, event.stream_id\n )\n\n elif isinstance(event, h2.events.StreamEnded):\n 
end_stream = True\n\n if data_to_send := h2_conn.data_to_send():\n self.sock.sendall(data_to_send)\n\n # We always close to not have to handle connection management.\n self.close()\n\n assert status is not None\n return HTTP2Response(status=status, headers=headers, data=bytes(data))\n\n def close(self) -> None:\n with self._lock_h2_conn() as h2_conn:\n try:\n self._h2_conn.close_connection()\n if data := h2_conn.data_to_send():\n self.sock.sendall(data)\n except Exception:\n pass\n\n # Reset all our HTTP/2 connection state.\n self._h2_conn = h2.connection.H2Connection(\n config=h2.config.H2Configuration(client_side=True)\n )\n self._h2_stream = None\n self._h2_headers = []\n\n super().close()\n\n\nclass HTTP2Response:\n # TODO: This is a woefully incomplete response object, but works for non-streaming.\n def __init__(self, status: int, headers: HTTPHeaderDict, data: bytes) -> None:\n self.status = status\n self.headers = headers\n self.data = data\n self.length_remaining = 0\n\n def get_redirect_location(self) -> None:\n return None\n\n\ndef inject_into_urllib3() -> None:\n HTTPSConnectionPool.ConnectionCls = HTTP2Connection # type: ignore[assignment]\n urllib3.connection.HTTPSConnection = HTTP2Connection # type: ignore[misc]\n\n # TODO: Offer 'http/1.1' as well, but for testing purposes this is handy.\n urllib3.util.ssl_.ALPN_PROTOCOLS = [\"h2\"]\n\n\ndef extract_from_urllib3() -> None:\n HTTPSConnectionPool.ConnectionCls = orig_HTTPSConnection\n urllib3.connection.HTTPSConnection = orig_HTTPSConnection # type: ignore[misc]\n\n urllib3.util.ssl_.ALPN_PROTOCOLS = [\"http/1.1\"]\n", "path": "src/urllib3/http2.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport contextlib\nimport threading\nimport typing\n\nimport h2.config # type: ignore[import]\nimport h2.connection # type: ignore[import]\nimport h2.events # type: ignore[import]\n\nimport urllib3.connection\nimport urllib3.util.ssl_\nfrom urllib3.response import BaseHTTPResponse\n\nfrom ._collections import HTTPHeaderDict\nfrom .connection import HTTPSConnection\nfrom .connectionpool import HTTPSConnectionPool\n\norig_HTTPSConnection = HTTPSConnection\n\n\nclass HTTP2Connection(HTTPSConnection):\n def __init__(\n self, host: str, port: int | None = None, **kwargs: typing.Any\n ) -> None:\n self._h2_lock = threading.RLock()\n self._h2_conn = h2.connection.H2Connection(\n config=h2.config.H2Configuration(client_side=True)\n )\n self._h2_stream: int | None = None\n self._h2_headers: list[tuple[bytes, bytes]] = []\n\n if \"proxy\" in kwargs or \"proxy_config\" in kwargs: # Defensive:\n raise NotImplementedError(\"Proxies aren't supported with HTTP/2\")\n\n super().__init__(host, port, **kwargs)\n\n @contextlib.contextmanager\n def _lock_h2_conn(self) -> typing.Generator[h2.connection.H2Connection, None, None]:\n with self._h2_lock:\n yield self._h2_conn\n\n def connect(self) -> None:\n super().connect()\n\n with self._lock_h2_conn() as h2_conn:\n h2_conn.initiate_connection()\n self.sock.sendall(h2_conn.data_to_send())\n\n def putrequest(\n self,\n method: str,\n url: str,\n skip_host: bool = False,\n skip_accept_encoding: bool = False,\n ) -> None:\n with self._lock_h2_conn() as h2_conn:\n self._request_url = url\n self._h2_stream = h2_conn.get_next_available_stream_id()\n\n if \":\" in self.host:\n authority = f\"[{self.host}]:{self.port or 443}\"\n else:\n authority = f\"{self.host}:{self.port or 443}\"\n\n self._h2_headers.extend(\n (\n (b\":scheme\", b\"https\"),\n (b\":method\", method.encode()),\n 
(b\":authority\", authority.encode()),\n (b\":path\", url.encode()),\n )\n )\n\n def putheader(self, header: str, *values: str) -> None:\n for value in values:\n self._h2_headers.append(\n (header.encode(\"utf-8\").lower(), value.encode(\"utf-8\"))\n )\n\n def endheaders(self) -> None: # type: ignore[override]\n with self._lock_h2_conn() as h2_conn:\n h2_conn.send_headers(\n stream_id=self._h2_stream,\n headers=self._h2_headers,\n end_stream=True,\n )\n if data_to_send := h2_conn.data_to_send():\n self.sock.sendall(data_to_send)\n\n def send(self, data: bytes) -> None: # type: ignore[override] # Defensive:\n if not data:\n return\n raise NotImplementedError(\"Sending data isn't supported yet\")\n\n def getresponse( # type: ignore[override]\n self,\n ) -> HTTP2Response:\n status = None\n data = bytearray()\n with self._lock_h2_conn() as h2_conn:\n end_stream = False\n while not end_stream:\n # TODO: Arbitrary read value.\n if received_data := self.sock.recv(65535):\n events = h2_conn.receive_data(received_data)\n for event in events:\n if isinstance(\n event, h2.events.InformationalResponseReceived\n ): # Defensive:\n continue # TODO: Does the stdlib do anything with these responses?\n\n elif isinstance(event, h2.events.ResponseReceived):\n headers = HTTPHeaderDict()\n for header, value in event.headers:\n if header == b\":status\":\n status = int(value.decode())\n else:\n headers.add(\n header.decode(\"ascii\"), value.decode(\"ascii\")\n )\n\n elif isinstance(event, h2.events.DataReceived):\n data += event.data\n h2_conn.acknowledge_received_data(\n event.flow_controlled_length, event.stream_id\n )\n\n elif isinstance(event, h2.events.StreamEnded):\n end_stream = True\n\n if data_to_send := h2_conn.data_to_send():\n self.sock.sendall(data_to_send)\n\n # We always close to not have to handle connection management.\n self.close()\n\n assert status is not None\n return HTTP2Response(\n status=status,\n headers=headers,\n request_url=self._request_url,\n data=bytes(data),\n )\n\n def close(self) -> None:\n with self._lock_h2_conn() as h2_conn:\n try:\n self._h2_conn.close_connection()\n if data := h2_conn.data_to_send():\n self.sock.sendall(data)\n except Exception:\n pass\n\n # Reset all our HTTP/2 connection state.\n self._h2_conn = h2.connection.H2Connection(\n config=h2.config.H2Configuration(client_side=True)\n )\n self._h2_stream = None\n self._h2_headers = []\n\n super().close()\n\n\nclass HTTP2Response(BaseHTTPResponse):\n # TODO: This is a woefully incomplete response object, but works for non-streaming.\n def __init__(\n self,\n status: int,\n headers: HTTPHeaderDict,\n request_url: str,\n data: bytes,\n decode_content: bool = False, # TODO: support decoding\n ) -> None:\n super().__init__(\n status=status,\n headers=headers,\n # Following CPython, we map HTTP versions to major * 10 + minor integers\n version=20,\n # No reason phrase in HTTP/2\n reason=None,\n decode_content=decode_content,\n request_url=request_url,\n )\n self._data = data\n self.length_remaining = 0\n\n @property\n def data(self) -> bytes:\n return self._data\n\n def get_redirect_location(self) -> None:\n return None\n\n\ndef inject_into_urllib3() -> None:\n HTTPSConnectionPool.ConnectionCls = HTTP2Connection\n urllib3.connection.HTTPSConnection = HTTP2Connection # type: ignore[misc]\n\n # TODO: Offer 'http/1.1' as well, but for testing purposes this is handy.\n urllib3.util.ssl_.ALPN_PROTOCOLS = [\"h2\"]\n\n\ndef extract_from_urllib3() -> None:\n HTTPSConnectionPool.ConnectionCls = orig_HTTPSConnection\n 
urllib3.connection.HTTPSConnection = orig_HTTPSConnection # type: ignore[misc]\n\n urllib3.util.ssl_.ALPN_PROTOCOLS = [\"http/1.1\"]\n", "path": "src/urllib3/http2.py"}]} | 2,158 | 645 |
gh_patches_debug_43259 | rasdani/github-patches | git_diff | doccano__doccano-1783 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Exporting data yields nothing if I choose only approved documents.
If I choose only approved documents, the exported zip file contains nothing.
If I don't choose that option, the zip file contains "all.json".
But I have already checked (approved) those text documents.
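
A quick way to check from the Django shell whether any examples are actually recorded as approved (model and field names are taken from the repository code shown below; `project` stands for the project being exported):

```python
from examples.models import Example

examples = Example.objects.filter(project=project)
print(examples.count())                                        # total examples
print(examples.exclude(annotations_approved_by=None).count())  # 0 here reproduces the empty export
```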
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/data_export/pipeline/repositories.py`
Content:
```
1 import abc
2 import itertools
3 from collections import defaultdict
4 from typing import Any, Dict, Iterator, List, Tuple
5
6 from .data import Record
7 from examples.models import Example
8 from projects.models import Project
9
10 SpanType = Tuple[int, int, str]
11
12
13 class BaseRepository:
14 def __init__(self, project: Project):
15 self.project = project
16
17 def list(self, export_approved=False) -> Iterator[Record]:
18 raise NotImplementedError()
19
20
21 class FileRepository(BaseRepository):
22 def list(self, export_approved=False) -> Iterator[Record]:
23 examples = self.project.examples.all()
24 if export_approved:
25 examples = examples.exclude(annotations_approved_by=None)
26
27 for example in examples:
28 label_per_user = self.label_per_user(example)
29 if self.project.collaborative_annotation:
30 label_per_user = self.reduce_user(label_per_user)
31 for user, label in label_per_user.items():
32 yield Record(
33 data_id=example.id,
34 data=example.upload_name,
35 label=label,
36 user=user,
37 metadata=example.meta,
38 )
39 # todo:
40 # If there is no label, export the doc with `unknown` user.
41 # This is a quick solution.
42 # In the future, the doc without label will be exported
43 # with the user who approved the doc.
44 # This means I will allow each user to be able to approve the doc.
45 if len(label_per_user) == 0:
46 yield Record(data_id=example.id, data=example.upload_name, label=[], user="unknown", metadata={})
47
48 def label_per_user(self, example) -> Dict:
49 label_per_user = defaultdict(list)
50 for a in example.categories.all():
51 label_per_user[a.user.username].append(a.label.text)
52 return label_per_user
53
54 def reduce_user(self, label_per_user: Dict[str, Any]):
55 value = list(itertools.chain(*label_per_user.values()))
56 return {"all": value}
57
58
59 class Speech2TextRepository(FileRepository):
60 def label_per_user(self, example) -> Dict:
61 label_per_user = defaultdict(list)
62 for a in example.texts.all():
63 label_per_user[a.user.username].append(a.text)
64 return label_per_user
65
66
67 class TextRepository(BaseRepository):
68 @property
69 def docs(self):
70 return Example.objects.filter(project=self.project)
71
72 def list(self, export_approved=False):
73 docs = self.docs
74 if export_approved:
75 docs = docs.exclude(annotations_approved_by=None)
76
77 for doc in docs:
78 label_per_user = self.label_per_user(doc)
79 if self.project.collaborative_annotation:
80 label_per_user = self.reduce_user(label_per_user)
81 for user, label in label_per_user.items():
82 yield Record(data_id=doc.id, data=doc.text, label=label, user=user, metadata=doc.meta)
83 # todo:
84 # If there is no label, export the doc with `unknown` user.
85 # This is a quick solution.
86 # In the future, the doc without label will be exported
87 # with the user who approved the doc.
88 # This means I will allow each user to be able to approve the doc.
89 if len(label_per_user) == 0:
90 yield Record(data_id=doc.id, data=doc.text, label=[], user="unknown", metadata={})
91
92 @abc.abstractmethod
93 def label_per_user(self, doc) -> Dict:
94 raise NotImplementedError()
95
96 def reduce_user(self, label_per_user: Dict[str, Any]):
97 value = list(itertools.chain(*label_per_user.values()))
98 return {"all": value}
99
100
101 class TextClassificationRepository(TextRepository):
102 @property
103 def docs(self):
104 return Example.objects.filter(project=self.project).prefetch_related("categories__user", "categories__label")
105
106 def label_per_user(self, doc) -> Dict:
107 label_per_user = defaultdict(list)
108 for a in doc.categories.all():
109 label_per_user[a.user.username].append(a.label.text)
110 return label_per_user
111
112
113 class SequenceLabelingRepository(TextRepository):
114 @property
115 def docs(self):
116 return Example.objects.filter(project=self.project).prefetch_related("spans__user", "spans__label")
117
118 def label_per_user(self, doc) -> Dict:
119 label_per_user = defaultdict(list)
120 for a in doc.spans.all():
121 label = (a.start_offset, a.end_offset, a.label.text)
122 label_per_user[a.user.username].append(label)
123 return label_per_user
124
125
126 class RelationExtractionRepository(TextRepository):
127 @property
128 def docs(self):
129 return Example.objects.filter(project=self.project).prefetch_related(
130 "spans__user", "spans__label", "relations__user", "relations__type"
131 )
132
133 def label_per_user(self, doc) -> Dict:
134 relation_per_user: Dict = defaultdict(list)
135 span_per_user: Dict = defaultdict(list)
136 label_per_user: Dict = defaultdict(dict)
137 for relation in doc.relations.all():
138 relation_per_user[relation.user.username].append(
139 {
140 "id": relation.id,
141 "from_id": relation.from_id.id,
142 "to_id": relation.to_id.id,
143 "type": relation.type.text,
144 }
145 )
146 for span in doc.spans.all():
147 span_per_user[span.user.username].append(
148 {
149 "id": span.id,
150 "start_offset": span.start_offset,
151 "end_offset": span.end_offset,
152 "label": span.label.text,
153 }
154 )
155 for user, relations in relation_per_user.items():
156 label_per_user[user]["relations"] = relations
157 for user, span in span_per_user.items():
158 label_per_user[user]["entities"] = span
159 return label_per_user
160
161 def reduce_user(self, label_per_user: Dict[str, Any]):
162 entities = []
163 relations = []
164 for user, label in label_per_user.items():
165 entities.extend(label.get("entities", []))
166 relations.extend(label.get("relations", []))
167 return {"all": {"entities": entities, "relations": relations}}
168
169
170 class Seq2seqRepository(TextRepository):
171 @property
172 def docs(self):
173 return Example.objects.filter(project=self.project).prefetch_related("texts__user")
174
175 def label_per_user(self, doc) -> Dict:
176 label_per_user = defaultdict(list)
177 for a in doc.texts.all():
178 label_per_user[a.user.username].append(a.text)
179 return label_per_user
180
181
182 class IntentDetectionSlotFillingRepository(TextRepository):
183 @property
184 def docs(self):
185 return Example.objects.filter(project=self.project).prefetch_related(
186 "categories__user", "categories__label", "spans__user", "spans__label"
187 )
188
189 def label_per_user(self, doc) -> Dict:
190 category_per_user: Dict[str, List[str]] = defaultdict(list)
191 span_per_user: Dict[str, List[SpanType]] = defaultdict(list)
192 label_per_user: Dict[str, Dict[str, List]] = defaultdict(dict)
193 for a in doc.categories.all():
194 category_per_user[a.user.username].append(a.label.text)
195 for a in doc.spans.all():
196 span_per_user[a.user.username].append((a.start_offset, a.end_offset, a.label.text))
197 for user, cats in category_per_user.items():
198 label_per_user[user]["cats"] = cats
199 for user, span in span_per_user.items():
200 label_per_user[user]["entities"] = span
201 for label in label_per_user.values():
202 label.setdefault("cats", [])
203 label.setdefault("entities", [])
204 return label_per_user
205
206 def reduce_user(self, label_per_user: Dict[str, Any]):
207 cats = []
208 entities = []
209 for user, label in label_per_user.items():
210 cats.extend(label.get("cats", []))
211 entities.extend(label.get("entities", []))
212 return {"all": {"entities": entities, "cats": cats}}
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/data_export/pipeline/repositories.py b/backend/data_export/pipeline/repositories.py
--- a/backend/data_export/pipeline/repositories.py
+++ b/backend/data_export/pipeline/repositories.py
@@ -17,12 +17,15 @@
def list(self, export_approved=False) -> Iterator[Record]:
raise NotImplementedError()
+ def create_unlabeled_record(self, example: Example) -> Record:
+ raise NotImplementedError()
+
class FileRepository(BaseRepository):
def list(self, export_approved=False) -> Iterator[Record]:
examples = self.project.examples.all()
if export_approved:
- examples = examples.exclude(annotations_approved_by=None)
+ examples = examples.exclude(states=None)
for example in examples:
label_per_user = self.label_per_user(example)
@@ -43,7 +46,10 @@
# with the user who approved the doc.
# This means I will allow each user to be able to approve the doc.
if len(label_per_user) == 0:
- yield Record(data_id=example.id, data=example.upload_name, label=[], user="unknown", metadata={})
+ yield self.create_unlabeled_record(example)
+
+ def create_unlabeled_record(self, example: Example) -> Record:
+ return Record(data_id=example.id, data=example.upload_name, label=[], user="unknown", metadata=example.meta)
def label_per_user(self, example) -> Dict:
label_per_user = defaultdict(list)
@@ -72,7 +78,7 @@
def list(self, export_approved=False):
docs = self.docs
if export_approved:
- docs = docs.exclude(annotations_approved_by=None)
+ docs = docs.exclude(states=None)
for doc in docs:
label_per_user = self.label_per_user(doc)
@@ -87,7 +93,10 @@
# with the user who approved the doc.
# This means I will allow each user to be able to approve the doc.
if len(label_per_user) == 0:
- yield Record(data_id=doc.id, data=doc.text, label=[], user="unknown", metadata={})
+ yield self.create_unlabeled_record(doc)
+
+ def create_unlabeled_record(self, example: Example) -> Record:
+ return Record(data_id=example.id, data=example.text, label=[], user="unknown", metadata=example.meta)
@abc.abstractmethod
def label_per_user(self, doc) -> Dict:
@@ -130,6 +139,15 @@
"spans__user", "spans__label", "relations__user", "relations__type"
)
+ def create_unlabeled_record(self, example: Example) -> Record:
+ return Record(
+ data_id=example.id,
+ data=example.text,
+ label={"entities": [], "relations": []},
+ user="unknown",
+ metadata=example.meta,
+ )
+
def label_per_user(self, doc) -> Dict:
relation_per_user: Dict = defaultdict(list)
span_per_user: Dict = defaultdict(list)
@@ -186,6 +204,15 @@
"categories__user", "categories__label", "spans__user", "spans__label"
)
+ def create_unlabeled_record(self, example: Example) -> Record:
+ return Record(
+ data_id=example.id,
+ data=example.text,
+ label={"entities": [], "cats": []},
+ user="unknown",
+ metadata=example.meta,
+ )
+
def label_per_user(self, doc) -> Dict:
category_per_user: Dict[str, List[str]] = defaultdict(list)
span_per_user: Dict[str, List[SpanType]] = defaultdict(list)
| {"golden_diff": "diff --git a/backend/data_export/pipeline/repositories.py b/backend/data_export/pipeline/repositories.py\n--- a/backend/data_export/pipeline/repositories.py\n+++ b/backend/data_export/pipeline/repositories.py\n@@ -17,12 +17,15 @@\n def list(self, export_approved=False) -> Iterator[Record]:\n raise NotImplementedError()\n \n+ def create_unlabeled_record(self, example: Example) -> Record:\n+ raise NotImplementedError()\n+\n \n class FileRepository(BaseRepository):\n def list(self, export_approved=False) -> Iterator[Record]:\n examples = self.project.examples.all()\n if export_approved:\n- examples = examples.exclude(annotations_approved_by=None)\n+ examples = examples.exclude(states=None)\n \n for example in examples:\n label_per_user = self.label_per_user(example)\n@@ -43,7 +46,10 @@\n # with the user who approved the doc.\n # This means I will allow each user to be able to approve the doc.\n if len(label_per_user) == 0:\n- yield Record(data_id=example.id, data=example.upload_name, label=[], user=\"unknown\", metadata={})\n+ yield self.create_unlabeled_record(example)\n+\n+ def create_unlabeled_record(self, example: Example) -> Record:\n+ return Record(data_id=example.id, data=example.upload_name, label=[], user=\"unknown\", metadata=example.meta)\n \n def label_per_user(self, example) -> Dict:\n label_per_user = defaultdict(list)\n@@ -72,7 +78,7 @@\n def list(self, export_approved=False):\n docs = self.docs\n if export_approved:\n- docs = docs.exclude(annotations_approved_by=None)\n+ docs = docs.exclude(states=None)\n \n for doc in docs:\n label_per_user = self.label_per_user(doc)\n@@ -87,7 +93,10 @@\n # with the user who approved the doc.\n # This means I will allow each user to be able to approve the doc.\n if len(label_per_user) == 0:\n- yield Record(data_id=doc.id, data=doc.text, label=[], user=\"unknown\", metadata={})\n+ yield self.create_unlabeled_record(doc)\n+\n+ def create_unlabeled_record(self, example: Example) -> Record:\n+ return Record(data_id=example.id, data=example.text, label=[], user=\"unknown\", metadata=example.meta)\n \n @abc.abstractmethod\n def label_per_user(self, doc) -> Dict:\n@@ -130,6 +139,15 @@\n \"spans__user\", \"spans__label\", \"relations__user\", \"relations__type\"\n )\n \n+ def create_unlabeled_record(self, example: Example) -> Record:\n+ return Record(\n+ data_id=example.id,\n+ data=example.text,\n+ label={\"entities\": [], \"relations\": []},\n+ user=\"unknown\",\n+ metadata=example.meta,\n+ )\n+\n def label_per_user(self, doc) -> Dict:\n relation_per_user: Dict = defaultdict(list)\n span_per_user: Dict = defaultdict(list)\n@@ -186,6 +204,15 @@\n \"categories__user\", \"categories__label\", \"spans__user\", \"spans__label\"\n )\n \n+ def create_unlabeled_record(self, example: Example) -> Record:\n+ return Record(\n+ data_id=example.id,\n+ data=example.text,\n+ label={\"entities\": [], \"cats\": []},\n+ user=\"unknown\",\n+ metadata=example.meta,\n+ )\n+\n def label_per_user(self, doc) -> Dict:\n category_per_user: Dict[str, List[str]] = defaultdict(list)\n span_per_user: Dict[str, List[SpanType]] = defaultdict(list)\n", "issue": "Export data will get nothing, if I choose only approved documents.\nif I choose only approved documents, the zip file contains nothing\r\n\r\nif I don't choose it, the zip file contains \"all.json\".\r\n\r\nBut those txt I have checked.\n", "before_files": [{"content": "import abc\nimport itertools\nfrom collections import defaultdict\nfrom typing import Any, Dict, Iterator, List, Tuple\n\nfrom .data 
import Record\nfrom examples.models import Example\nfrom projects.models import Project\n\nSpanType = Tuple[int, int, str]\n\n\nclass BaseRepository:\n def __init__(self, project: Project):\n self.project = project\n\n def list(self, export_approved=False) -> Iterator[Record]:\n raise NotImplementedError()\n\n\nclass FileRepository(BaseRepository):\n def list(self, export_approved=False) -> Iterator[Record]:\n examples = self.project.examples.all()\n if export_approved:\n examples = examples.exclude(annotations_approved_by=None)\n\n for example in examples:\n label_per_user = self.label_per_user(example)\n if self.project.collaborative_annotation:\n label_per_user = self.reduce_user(label_per_user)\n for user, label in label_per_user.items():\n yield Record(\n data_id=example.id,\n data=example.upload_name,\n label=label,\n user=user,\n metadata=example.meta,\n )\n # todo:\n # If there is no label, export the doc with `unknown` user.\n # This is a quick solution.\n # In the future, the doc without label will be exported\n # with the user who approved the doc.\n # This means I will allow each user to be able to approve the doc.\n if len(label_per_user) == 0:\n yield Record(data_id=example.id, data=example.upload_name, label=[], user=\"unknown\", metadata={})\n\n def label_per_user(self, example) -> Dict:\n label_per_user = defaultdict(list)\n for a in example.categories.all():\n label_per_user[a.user.username].append(a.label.text)\n return label_per_user\n\n def reduce_user(self, label_per_user: Dict[str, Any]):\n value = list(itertools.chain(*label_per_user.values()))\n return {\"all\": value}\n\n\nclass Speech2TextRepository(FileRepository):\n def label_per_user(self, example) -> Dict:\n label_per_user = defaultdict(list)\n for a in example.texts.all():\n label_per_user[a.user.username].append(a.text)\n return label_per_user\n\n\nclass TextRepository(BaseRepository):\n @property\n def docs(self):\n return Example.objects.filter(project=self.project)\n\n def list(self, export_approved=False):\n docs = self.docs\n if export_approved:\n docs = docs.exclude(annotations_approved_by=None)\n\n for doc in docs:\n label_per_user = self.label_per_user(doc)\n if self.project.collaborative_annotation:\n label_per_user = self.reduce_user(label_per_user)\n for user, label in label_per_user.items():\n yield Record(data_id=doc.id, data=doc.text, label=label, user=user, metadata=doc.meta)\n # todo:\n # If there is no label, export the doc with `unknown` user.\n # This is a quick solution.\n # In the future, the doc without label will be exported\n # with the user who approved the doc.\n # This means I will allow each user to be able to approve the doc.\n if len(label_per_user) == 0:\n yield Record(data_id=doc.id, data=doc.text, label=[], user=\"unknown\", metadata={})\n\n @abc.abstractmethod\n def label_per_user(self, doc) -> Dict:\n raise NotImplementedError()\n\n def reduce_user(self, label_per_user: Dict[str, Any]):\n value = list(itertools.chain(*label_per_user.values()))\n return {\"all\": value}\n\n\nclass TextClassificationRepository(TextRepository):\n @property\n def docs(self):\n return Example.objects.filter(project=self.project).prefetch_related(\"categories__user\", \"categories__label\")\n\n def label_per_user(self, doc) -> Dict:\n label_per_user = defaultdict(list)\n for a in doc.categories.all():\n label_per_user[a.user.username].append(a.label.text)\n return label_per_user\n\n\nclass SequenceLabelingRepository(TextRepository):\n @property\n def docs(self):\n return 
Example.objects.filter(project=self.project).prefetch_related(\"spans__user\", \"spans__label\")\n\n def label_per_user(self, doc) -> Dict:\n label_per_user = defaultdict(list)\n for a in doc.spans.all():\n label = (a.start_offset, a.end_offset, a.label.text)\n label_per_user[a.user.username].append(label)\n return label_per_user\n\n\nclass RelationExtractionRepository(TextRepository):\n @property\n def docs(self):\n return Example.objects.filter(project=self.project).prefetch_related(\n \"spans__user\", \"spans__label\", \"relations__user\", \"relations__type\"\n )\n\n def label_per_user(self, doc) -> Dict:\n relation_per_user: Dict = defaultdict(list)\n span_per_user: Dict = defaultdict(list)\n label_per_user: Dict = defaultdict(dict)\n for relation in doc.relations.all():\n relation_per_user[relation.user.username].append(\n {\n \"id\": relation.id,\n \"from_id\": relation.from_id.id,\n \"to_id\": relation.to_id.id,\n \"type\": relation.type.text,\n }\n )\n for span in doc.spans.all():\n span_per_user[span.user.username].append(\n {\n \"id\": span.id,\n \"start_offset\": span.start_offset,\n \"end_offset\": span.end_offset,\n \"label\": span.label.text,\n }\n )\n for user, relations in relation_per_user.items():\n label_per_user[user][\"relations\"] = relations\n for user, span in span_per_user.items():\n label_per_user[user][\"entities\"] = span\n return label_per_user\n\n def reduce_user(self, label_per_user: Dict[str, Any]):\n entities = []\n relations = []\n for user, label in label_per_user.items():\n entities.extend(label.get(\"entities\", []))\n relations.extend(label.get(\"relations\", []))\n return {\"all\": {\"entities\": entities, \"relations\": relations}}\n\n\nclass Seq2seqRepository(TextRepository):\n @property\n def docs(self):\n return Example.objects.filter(project=self.project).prefetch_related(\"texts__user\")\n\n def label_per_user(self, doc) -> Dict:\n label_per_user = defaultdict(list)\n for a in doc.texts.all():\n label_per_user[a.user.username].append(a.text)\n return label_per_user\n\n\nclass IntentDetectionSlotFillingRepository(TextRepository):\n @property\n def docs(self):\n return Example.objects.filter(project=self.project).prefetch_related(\n \"categories__user\", \"categories__label\", \"spans__user\", \"spans__label\"\n )\n\n def label_per_user(self, doc) -> Dict:\n category_per_user: Dict[str, List[str]] = defaultdict(list)\n span_per_user: Dict[str, List[SpanType]] = defaultdict(list)\n label_per_user: Dict[str, Dict[str, List]] = defaultdict(dict)\n for a in doc.categories.all():\n category_per_user[a.user.username].append(a.label.text)\n for a in doc.spans.all():\n span_per_user[a.user.username].append((a.start_offset, a.end_offset, a.label.text))\n for user, cats in category_per_user.items():\n label_per_user[user][\"cats\"] = cats\n for user, span in span_per_user.items():\n label_per_user[user][\"entities\"] = span\n for label in label_per_user.values():\n label.setdefault(\"cats\", [])\n label.setdefault(\"entities\", [])\n return label_per_user\n\n def reduce_user(self, label_per_user: Dict[str, Any]):\n cats = []\n entities = []\n for user, label in label_per_user.items():\n cats.extend(label.get(\"cats\", []))\n entities.extend(label.get(\"entities\", []))\n return {\"all\": {\"entities\": entities, \"cats\": cats}}\n", "path": "backend/data_export/pipeline/repositories.py"}], "after_files": [{"content": "import abc\nimport itertools\nfrom collections import defaultdict\nfrom typing import Any, Dict, Iterator, List, Tuple\n\nfrom .data import 
Record\nfrom examples.models import Example\nfrom projects.models import Project\n\nSpanType = Tuple[int, int, str]\n\n\nclass BaseRepository:\n def __init__(self, project: Project):\n self.project = project\n\n def list(self, export_approved=False) -> Iterator[Record]:\n raise NotImplementedError()\n\n def create_unlabeled_record(self, example: Example) -> Record:\n raise NotImplementedError()\n\n\nclass FileRepository(BaseRepository):\n def list(self, export_approved=False) -> Iterator[Record]:\n examples = self.project.examples.all()\n if export_approved:\n examples = examples.exclude(states=None)\n\n for example in examples:\n label_per_user = self.label_per_user(example)\n if self.project.collaborative_annotation:\n label_per_user = self.reduce_user(label_per_user)\n for user, label in label_per_user.items():\n yield Record(\n data_id=example.id,\n data=example.upload_name,\n label=label,\n user=user,\n metadata=example.meta,\n )\n # todo:\n # If there is no label, export the doc with `unknown` user.\n # This is a quick solution.\n # In the future, the doc without label will be exported\n # with the user who approved the doc.\n # This means I will allow each user to be able to approve the doc.\n if len(label_per_user) == 0:\n yield self.create_unlabeled_record(example)\n\n def create_unlabeled_record(self, example: Example) -> Record:\n return Record(data_id=example.id, data=example.upload_name, label=[], user=\"unknown\", metadata=example.meta)\n\n def label_per_user(self, example) -> Dict:\n label_per_user = defaultdict(list)\n for a in example.categories.all():\n label_per_user[a.user.username].append(a.label.text)\n return label_per_user\n\n def reduce_user(self, label_per_user: Dict[str, Any]):\n value = list(itertools.chain(*label_per_user.values()))\n return {\"all\": value}\n\n\nclass Speech2TextRepository(FileRepository):\n def label_per_user(self, example) -> Dict:\n label_per_user = defaultdict(list)\n for a in example.texts.all():\n label_per_user[a.user.username].append(a.text)\n return label_per_user\n\n\nclass TextRepository(BaseRepository):\n @property\n def docs(self):\n return Example.objects.filter(project=self.project)\n\n def list(self, export_approved=False):\n docs = self.docs\n if export_approved:\n docs = docs.exclude(states=None)\n\n for doc in docs:\n label_per_user = self.label_per_user(doc)\n if self.project.collaborative_annotation:\n label_per_user = self.reduce_user(label_per_user)\n for user, label in label_per_user.items():\n yield Record(data_id=doc.id, data=doc.text, label=label, user=user, metadata=doc.meta)\n # todo:\n # If there is no label, export the doc with `unknown` user.\n # This is a quick solution.\n # In the future, the doc without label will be exported\n # with the user who approved the doc.\n # This means I will allow each user to be able to approve the doc.\n if len(label_per_user) == 0:\n yield self.create_unlabeled_record(doc)\n\n def create_unlabeled_record(self, example: Example) -> Record:\n return Record(data_id=example.id, data=example.text, label=[], user=\"unknown\", metadata=example.meta)\n\n @abc.abstractmethod\n def label_per_user(self, doc) -> Dict:\n raise NotImplementedError()\n\n def reduce_user(self, label_per_user: Dict[str, Any]):\n value = list(itertools.chain(*label_per_user.values()))\n return {\"all\": value}\n\n\nclass TextClassificationRepository(TextRepository):\n @property\n def docs(self):\n return Example.objects.filter(project=self.project).prefetch_related(\"categories__user\", 
\"categories__label\")\n\n def label_per_user(self, doc) -> Dict:\n label_per_user = defaultdict(list)\n for a in doc.categories.all():\n label_per_user[a.user.username].append(a.label.text)\n return label_per_user\n\n\nclass SequenceLabelingRepository(TextRepository):\n @property\n def docs(self):\n return Example.objects.filter(project=self.project).prefetch_related(\"spans__user\", \"spans__label\")\n\n def label_per_user(self, doc) -> Dict:\n label_per_user = defaultdict(list)\n for a in doc.spans.all():\n label = (a.start_offset, a.end_offset, a.label.text)\n label_per_user[a.user.username].append(label)\n return label_per_user\n\n\nclass RelationExtractionRepository(TextRepository):\n @property\n def docs(self):\n return Example.objects.filter(project=self.project).prefetch_related(\n \"spans__user\", \"spans__label\", \"relations__user\", \"relations__type\"\n )\n\n def create_unlabeled_record(self, example: Example) -> Record:\n return Record(\n data_id=example.id,\n data=example.text,\n label={\"entities\": [], \"relations\": []},\n user=\"unknown\",\n metadata=example.meta,\n )\n\n def label_per_user(self, doc) -> Dict:\n relation_per_user: Dict = defaultdict(list)\n span_per_user: Dict = defaultdict(list)\n label_per_user: Dict = defaultdict(dict)\n for relation in doc.relations.all():\n relation_per_user[relation.user.username].append(\n {\n \"id\": relation.id,\n \"from_id\": relation.from_id.id,\n \"to_id\": relation.to_id.id,\n \"type\": relation.type.text,\n }\n )\n for span in doc.spans.all():\n span_per_user[span.user.username].append(\n {\n \"id\": span.id,\n \"start_offset\": span.start_offset,\n \"end_offset\": span.end_offset,\n \"label\": span.label.text,\n }\n )\n for user, relations in relation_per_user.items():\n label_per_user[user][\"relations\"] = relations\n for user, span in span_per_user.items():\n label_per_user[user][\"entities\"] = span\n return label_per_user\n\n def reduce_user(self, label_per_user: Dict[str, Any]):\n entities = []\n relations = []\n for user, label in label_per_user.items():\n entities.extend(label.get(\"entities\", []))\n relations.extend(label.get(\"relations\", []))\n return {\"all\": {\"entities\": entities, \"relations\": relations}}\n\n\nclass Seq2seqRepository(TextRepository):\n @property\n def docs(self):\n return Example.objects.filter(project=self.project).prefetch_related(\"texts__user\")\n\n def label_per_user(self, doc) -> Dict:\n label_per_user = defaultdict(list)\n for a in doc.texts.all():\n label_per_user[a.user.username].append(a.text)\n return label_per_user\n\n\nclass IntentDetectionSlotFillingRepository(TextRepository):\n @property\n def docs(self):\n return Example.objects.filter(project=self.project).prefetch_related(\n \"categories__user\", \"categories__label\", \"spans__user\", \"spans__label\"\n )\n\n def create_unlabeled_record(self, example: Example) -> Record:\n return Record(\n data_id=example.id,\n data=example.text,\n label={\"entities\": [], \"cats\": []},\n user=\"unknown\",\n metadata=example.meta,\n )\n\n def label_per_user(self, doc) -> Dict:\n category_per_user: Dict[str, List[str]] = defaultdict(list)\n span_per_user: Dict[str, List[SpanType]] = defaultdict(list)\n label_per_user: Dict[str, Dict[str, List]] = defaultdict(dict)\n for a in doc.categories.all():\n category_per_user[a.user.username].append(a.label.text)\n for a in doc.spans.all():\n span_per_user[a.user.username].append((a.start_offset, a.end_offset, a.label.text))\n for user, cats in category_per_user.items():\n 
label_per_user[user][\"cats\"] = cats\n for user, span in span_per_user.items():\n label_per_user[user][\"entities\"] = span\n for label in label_per_user.values():\n label.setdefault(\"cats\", [])\n label.setdefault(\"entities\", [])\n return label_per_user\n\n def reduce_user(self, label_per_user: Dict[str, Any]):\n cats = []\n entities = []\n for user, label in label_per_user.items():\n cats.extend(label.get(\"cats\", []))\n entities.extend(label.get(\"entities\", []))\n return {\"all\": {\"entities\": entities, \"cats\": cats}}\n", "path": "backend/data_export/pipeline/repositories.py"}]} | 2,544 | 836 |
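The heart of the doccano patch above is a one-line queryset change repeated in both repository classes: approval is no longer detected through the `annotations_approved_by` field (which evidently no longer gets populated, so the old `exclude(...)` filtered every example out) but through the presence of related example states. A minimal sketch of the new filter, reusing the names that appear in the patch:

```python
# Hedged sketch of the queryset change; Example and the `states` related name are
# taken from the patch itself, but the helper function below is illustrative only.
from examples.models import Example

def approved_examples(project):
    # Keep only examples that have at least one confirmation/approval state record.
    return Example.objects.filter(project=project).exclude(states=None)
```

The rest of the patch makes the "approved but unlabeled" case export cleanly by delegating to a per-repository `create_unlabeled_record()` that also preserves the example's metadata instead of dropping it.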
gh_patches_debug_34829 | rasdani/github-patches | git_diff | liqd__adhocracy4-168 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
improve accessibility of image upload
* add an alt attribute with the filename
* if there is no image uploaded, the image tag should not be there
* the label's `for` attribute doesn't reference the file input's id.
The first part causes the HTML to be invalid, which is part of the BITV Test "4.1.1a Valides HTML".
The third part is part of the BITV Test "3.3.2a Formularfelder richtig beschriftet".
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `adhocracy4/images/widgets.py`
Content:
```
1 from os.path import basename
2
3 from django.contrib.staticfiles.storage import staticfiles_storage
4 from django.forms import widgets
5 from django.template import loader
6 from django.utils.html import conditional_escape
7 from django.utils.translation import ugettext
8
9
10 class ImageInputWidget(widgets.ClearableFileInput):
11
12 """
13 A project-specific improved version of the clearable file upload.
14
15 Allows to upload and delete uploaded files. It doesn't passing attributes
16 using the positional `attrs` argument and hard codes css files.
17 """
18 class Media:
19 js = (staticfiles_storage.url('a4images/imageUploader.js'),)
20
21 def render(self, name, value, attrs=None):
22
23 has_image_set = self.is_initial(value)
24 is_required = self.is_required
25
26 file_placeholder = ugettext('Select a picture from your local folder.')
27 file_input = super().render(name, None, {
28 'id': name,
29 'class': 'form-control form-control-file'
30 })
31
32 if has_image_set:
33 file_name = basename(value.name)
34 file_url = conditional_escape(value.url)
35 else:
36 file_name = ""
37 file_url = ""
38
39 text_input = widgets.TextInput().render('__noname__', file_name, {
40 'class': 'form-control form-control-file-dummy',
41 'placeholder': file_placeholder,
42 'tabindex': '-1'
43 })
44
45 checkbox_id = self.clear_checkbox_id(name)
46 checkbox_name = self.clear_checkbox_name(name)
47 checkbox_input = widgets.CheckboxInput().render(checkbox_name, False, {
48 'id': checkbox_id,
49 'class': 'clear-image',
50 'data-upload-clear': name,
51 })
52
53 context = {
54 'name': name,
55 'has_image_set': has_image_set,
56 'is_required': is_required,
57 'file_url': file_url,
58 'file_input': file_input,
59 'file_id': name + '-file',
60 'text_input': text_input,
61 'checkbox_input': checkbox_input,
62 'checkbox_id': checkbox_id
63 }
64
65 return loader.render_to_string(
66 'a4images/image_upload_widget.html',
67 context
68 )
69
70 def value_from_datadict(self, data, files, name):
71 """
72 Modify value_from_datadict, so that delete takes precedence over
73 upload.
74 """
75 file_value = super(widgets.ClearableFileInput, self)\
76 .value_from_datadict(data, files, name)
77 checkbox_value = widgets.CheckboxInput()\
78 .value_from_datadict(data, files, self.clear_checkbox_name(name))
79 if not self.is_required and checkbox_value:
80 return False
81 return file_value
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/adhocracy4/images/widgets.py b/adhocracy4/images/widgets.py
--- a/adhocracy4/images/widgets.py
+++ b/adhocracy4/images/widgets.py
@@ -19,13 +19,13 @@
js = (staticfiles_storage.url('a4images/imageUploader.js'),)
def render(self, name, value, attrs=None):
-
+ html_id = attrs and attrs.get('id', name) or name
has_image_set = self.is_initial(value)
is_required = self.is_required
file_placeholder = ugettext('Select a picture from your local folder.')
file_input = super().render(name, None, {
- 'id': name,
+ 'id': html_id,
'class': 'form-control form-control-file'
})
@@ -39,7 +39,8 @@
text_input = widgets.TextInput().render('__noname__', file_name, {
'class': 'form-control form-control-file-dummy',
'placeholder': file_placeholder,
- 'tabindex': '-1'
+ 'tabindex': '-1',
+ 'id': 'text-{}'.format(html_id)
})
checkbox_id = self.clear_checkbox_id(name)
@@ -47,16 +48,16 @@
checkbox_input = widgets.CheckboxInput().render(checkbox_name, False, {
'id': checkbox_id,
'class': 'clear-image',
- 'data-upload-clear': name,
+ 'data-upload-clear': html_id,
})
context = {
- 'name': name,
+ 'id': html_id,
'has_image_set': has_image_set,
'is_required': is_required,
'file_url': file_url,
'file_input': file_input,
- 'file_id': name + '-file',
+ 'file_id': html_id + '-file',
'text_input': text_input,
'checkbox_input': checkbox_input,
'checkbox_id': checkbox_id
| {"golden_diff": "diff --git a/adhocracy4/images/widgets.py b/adhocracy4/images/widgets.py\n--- a/adhocracy4/images/widgets.py\n+++ b/adhocracy4/images/widgets.py\n@@ -19,13 +19,13 @@\n js = (staticfiles_storage.url('a4images/imageUploader.js'),)\n \n def render(self, name, value, attrs=None):\n-\n+ html_id = attrs and attrs.get('id', name) or name\n has_image_set = self.is_initial(value)\n is_required = self.is_required\n \n file_placeholder = ugettext('Select a picture from your local folder.')\n file_input = super().render(name, None, {\n- 'id': name,\n+ 'id': html_id,\n 'class': 'form-control form-control-file'\n })\n \n@@ -39,7 +39,8 @@\n text_input = widgets.TextInput().render('__noname__', file_name, {\n 'class': 'form-control form-control-file-dummy',\n 'placeholder': file_placeholder,\n- 'tabindex': '-1'\n+ 'tabindex': '-1',\n+ 'id': 'text-{}'.format(html_id)\n })\n \n checkbox_id = self.clear_checkbox_id(name)\n@@ -47,16 +48,16 @@\n checkbox_input = widgets.CheckboxInput().render(checkbox_name, False, {\n 'id': checkbox_id,\n 'class': 'clear-image',\n- 'data-upload-clear': name,\n+ 'data-upload-clear': html_id,\n })\n \n context = {\n- 'name': name,\n+ 'id': html_id,\n 'has_image_set': has_image_set,\n 'is_required': is_required,\n 'file_url': file_url,\n 'file_input': file_input,\n- 'file_id': name + '-file',\n+ 'file_id': html_id + '-file',\n 'text_input': text_input,\n 'checkbox_input': checkbox_input,\n 'checkbox_id': checkbox_id\n", "issue": "improve accessibility of image upload\n* add alt attribute with the filename\r\n* if there is no image uploaded the image tag should not be there\r\n* the label's `for` attribute doesn't reference the file input's id.\r\n\r\nThe first part causes the HTML to be invalid, which is part of the BITV Test \"4.1.1a Valides HTML\".\r\nThe third part is part of the BITV Test \"3.3.2a Formularfelder richtig beschriftet\".\n", "before_files": [{"content": "from os.path import basename\n\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.forms import widgets\nfrom django.template import loader\nfrom django.utils.html import conditional_escape\nfrom django.utils.translation import ugettext\n\n\nclass ImageInputWidget(widgets.ClearableFileInput):\n\n \"\"\"\n A project-specific improved version of the clearable file upload.\n\n Allows to upload and delete uploaded files. 
It doesn't passing attributes\n using the positional `attrs` argument and hard codes css files.\n \"\"\"\n class Media:\n js = (staticfiles_storage.url('a4images/imageUploader.js'),)\n\n def render(self, name, value, attrs=None):\n\n has_image_set = self.is_initial(value)\n is_required = self.is_required\n\n file_placeholder = ugettext('Select a picture from your local folder.')\n file_input = super().render(name, None, {\n 'id': name,\n 'class': 'form-control form-control-file'\n })\n\n if has_image_set:\n file_name = basename(value.name)\n file_url = conditional_escape(value.url)\n else:\n file_name = \"\"\n file_url = \"\"\n\n text_input = widgets.TextInput().render('__noname__', file_name, {\n 'class': 'form-control form-control-file-dummy',\n 'placeholder': file_placeholder,\n 'tabindex': '-1'\n })\n\n checkbox_id = self.clear_checkbox_id(name)\n checkbox_name = self.clear_checkbox_name(name)\n checkbox_input = widgets.CheckboxInput().render(checkbox_name, False, {\n 'id': checkbox_id,\n 'class': 'clear-image',\n 'data-upload-clear': name,\n })\n\n context = {\n 'name': name,\n 'has_image_set': has_image_set,\n 'is_required': is_required,\n 'file_url': file_url,\n 'file_input': file_input,\n 'file_id': name + '-file',\n 'text_input': text_input,\n 'checkbox_input': checkbox_input,\n 'checkbox_id': checkbox_id\n }\n\n return loader.render_to_string(\n 'a4images/image_upload_widget.html',\n context\n )\n\n def value_from_datadict(self, data, files, name):\n \"\"\"\n Modify value_from_datadict, so that delete takes precedence over\n upload.\n \"\"\"\n file_value = super(widgets.ClearableFileInput, self)\\\n .value_from_datadict(data, files, name)\n checkbox_value = widgets.CheckboxInput()\\\n .value_from_datadict(data, files, self.clear_checkbox_name(name))\n if not self.is_required and checkbox_value:\n return False\n return file_value\n", "path": "adhocracy4/images/widgets.py"}], "after_files": [{"content": "from os.path import basename\n\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.forms import widgets\nfrom django.template import loader\nfrom django.utils.html import conditional_escape\nfrom django.utils.translation import ugettext\n\n\nclass ImageInputWidget(widgets.ClearableFileInput):\n\n \"\"\"\n A project-specific improved version of the clearable file upload.\n\n Allows to upload and delete uploaded files. 
It doesn't passing attributes\n using the positional `attrs` argument and hard codes css files.\n \"\"\"\n class Media:\n js = (staticfiles_storage.url('a4images/imageUploader.js'),)\n\n def render(self, name, value, attrs=None):\n html_id = attrs and attrs.get('id', name) or name\n has_image_set = self.is_initial(value)\n is_required = self.is_required\n\n file_placeholder = ugettext('Select a picture from your local folder.')\n file_input = super().render(name, None, {\n 'id': html_id,\n 'class': 'form-control form-control-file'\n })\n\n if has_image_set:\n file_name = basename(value.name)\n file_url = conditional_escape(value.url)\n else:\n file_name = \"\"\n file_url = \"\"\n\n text_input = widgets.TextInput().render('__noname__', file_name, {\n 'class': 'form-control form-control-file-dummy',\n 'placeholder': file_placeholder,\n 'tabindex': '-1',\n 'id': 'text-{}'.format(html_id)\n })\n\n checkbox_id = self.clear_checkbox_id(name)\n checkbox_name = self.clear_checkbox_name(name)\n checkbox_input = widgets.CheckboxInput().render(checkbox_name, False, {\n 'id': checkbox_id,\n 'class': 'clear-image',\n 'data-upload-clear': html_id,\n })\n\n context = {\n 'id': html_id,\n 'has_image_set': has_image_set,\n 'is_required': is_required,\n 'file_url': file_url,\n 'file_input': file_input,\n 'file_id': html_id + '-file',\n 'text_input': text_input,\n 'checkbox_input': checkbox_input,\n 'checkbox_id': checkbox_id\n }\n\n return loader.render_to_string(\n 'a4images/image_upload_widget.html',\n context\n )\n\n def value_from_datadict(self, data, files, name):\n \"\"\"\n Modify value_from_datadict, so that delete takes precedence over\n upload.\n \"\"\"\n file_value = super(widgets.ClearableFileInput, self)\\\n .value_from_datadict(data, files, name)\n checkbox_value = widgets.CheckboxInput()\\\n .value_from_datadict(data, files, self.clear_checkbox_name(name))\n if not self.is_required and checkbox_value:\n return False\n return file_value\n", "path": "adhocracy4/images/widgets.py"}]} | 1,087 | 436 |
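The accessibility fix above works by letting the form-supplied `attrs['id']` drive every id the widget renders, so the `<label for=...>` emitted by the template can point at the real file input. A rough usage sketch — the form and field names are illustrative, not from the adhocracy4 code base — could look like this:

```python
# Hedged sketch: renders the patched widget through an ordinary Django form so the
# file input picks up the auto-generated id (e.g. "id_image") passed in via attrs.
from django import forms
from adhocracy4.images.widgets import ImageInputWidget

class ProjectImageForm(forms.Form):
    image = forms.ImageField(required=False, widget=ImageInputWidget())

form = ProjectImageForm()
print(form["image"])   # markup now uses id="id_image" and file_id="id_image-file",
                       # so a label's for attribute can reference the actual input
```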
gh_patches_debug_15533 | rasdani/github-patches | git_diff | voxel51__fiftyone-1660 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Support Fortran ordered masks in the App
Currently, Fortran-ordered masks are rendered flipped.
```py
import fiftyone as fo
import fiftyone.zoo as foz
import numpy as np
dataset = foz.load_zoo_dataset("quickstart", max_samples=1).select_fields().clone()
sample = dataset.first()
contiguous = np.asarray([[True, False], [True, False]])
sample["contiguous"] = fo.Segmentation(mask=contiguous)
sample["fortran"] = fo.Segmentation(mask=np.asfortranarray(contiguous))
sample.save()
session = fo.Session(dataset)
```
<img width="1792" alt="flipped" src="https://user-images.githubusercontent.com/19821840/159953546-5eef71bc-d111-4667-a271-6c4e34e1b7da.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fiftyone/server/json_util.py`
Content:
```
1 """
2 FiftyOne server json utilies.
3
4 | Copyright 2017-2022, Voxel51, Inc.
5 | `voxel51.com <https://voxel51.com/>`_
6 |
7 """
8 from bson import ObjectId, json_util
9 from collections import OrderedDict
10 from datetime import date, datetime
11 from json import JSONEncoder
12 import math
13
14 from fiftyone.core.sample import Sample, SampleView
15 from fiftyone.core.stages import ViewStage
16 import fiftyone.core.utils as fou
17
18
19 _MASK_CLASSES = {"Detection", "Heatmap", "Segmentation"}
20
21
22 def _handle_bytes(o):
23 for k, v in o.items():
24 if isinstance(v, bytes):
25 o[k] = str(fou.deserialize_numpy_array(v).shape)
26 elif isinstance(v, dict):
27 o[k] = _handle_bytes(v)
28
29 return o
30
31
32 def _handle_numpy_array(raw, _cls=None):
33 if _cls not in _MASK_CLASSES:
34 return str(fou.deserialize_numpy_array(raw).shape)
35
36 return fou.serialize_numpy_array(
37 fou.deserialize_numpy_array(raw), ascii=True
38 )
39
40
41 def _handle_date(dt):
42 return {
43 "_cls": "DateTime",
44 "datetime": fou.datetime_to_timestamp(dt),
45 }
46
47
48 def _is_invalid_number(value):
49 if not isinstance(value, float):
50 return False
51
52 return math.isnan(value) or math.isinf(value)
53
54
55 def convert(d):
56 if isinstance(d, (dict, OrderedDict)):
57 for k, v in d.items():
58 if isinstance(v, bytes):
59 d[k] = _handle_numpy_array(v, d.get("_cls", None))
60 elif isinstance(v, (date, datetime)):
61 d[k] = _handle_date(v)
62 elif isinstance(v, ObjectId):
63 d[k] = str(v)
64 elif isinstance(v, (dict, OrderedDict, list)):
65 convert(v)
66 elif _is_invalid_number(v):
67 d[k] = str(v)
68
69 if isinstance(d, list):
70 for idx, i in enumerate(d):
71 if isinstance(i, tuple):
72 d[idx] = list(i)
73 i = d[idx]
74
75 if isinstance(i, bytes):
76 d[idx] = _handle_numpy_array(i)
77 elif isinstance(i, (date, datetime)):
78 d[idx] = _handle_date(i)
79 elif isinstance(i, ObjectId):
80 d[idx] = str(i)
81 elif isinstance(i, (dict, OrderedDict, list)):
82 convert(i)
83 elif _is_invalid_number(i):
84 d[idx] = str(i)
85
86
87 class FiftyOneJSONEncoder(JSONEncoder):
88 """JSON encoder for the FiftyOne server.
89
90 Any classes with non-standard serialization methods should
91 be accounted for in the `default()` method.
92 """
93
94 def default(self, o): # pylint: disable=E0202
95 """Returns the serialized representation of the objects
96
97 Args:
98 o: the object
99
100 Returns:
101 str
102 """
103 if isinstance(o, (Sample, SampleView)):
104 return _handle_bytes(o.to_mongo_dict(include_id=True))
105 if issubclass(type(o), ViewStage):
106 return o._serialize()
107 if isinstance(o, ObjectId):
108 return str(o)
109 if isinstance(o, float):
110 return json_util.dumps(o)
111 return super().default(o)
112
113 @staticmethod
114 def dumps(*args, **kwargs):
115 """Defined for overriding the default SocketIO `json` interface"""
116 kwargs["cls"] = FiftyOneJSONEncoder
117 return json_util.dumps(
118 json_util.loads(
119 json_util.dumps(*args, **kwargs), parse_constant=lambda c: c
120 ),
121 **kwargs
122 )
123
124 @staticmethod
125 def loads(*args, **kwargs):
126 """Defined for overriding the default SocketIO `json` interface"""
127 return json_util.loads(*args, **kwargs)
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fiftyone/server/json_util.py b/fiftyone/server/json_util.py
--- a/fiftyone/server/json_util.py
+++ b/fiftyone/server/json_util.py
@@ -10,6 +10,7 @@
from datetime import date, datetime
from json import JSONEncoder
import math
+import numpy as np
from fiftyone.core.sample import Sample, SampleView
from fiftyone.core.stages import ViewStage
@@ -33,9 +34,12 @@
if _cls not in _MASK_CLASSES:
return str(fou.deserialize_numpy_array(raw).shape)
- return fou.serialize_numpy_array(
- fou.deserialize_numpy_array(raw), ascii=True
- )
+ array = fou.deserialize_numpy_array(raw)
+
+ if np.isfortran(array):
+ array = np.ascontiguousarray(array)
+
+ return fou.serialize_numpy_array(array, ascii=True)
def _handle_date(dt):
| {"golden_diff": "diff --git a/fiftyone/server/json_util.py b/fiftyone/server/json_util.py\n--- a/fiftyone/server/json_util.py\n+++ b/fiftyone/server/json_util.py\n@@ -10,6 +10,7 @@\n from datetime import date, datetime\n from json import JSONEncoder\n import math\n+import numpy as np\n \n from fiftyone.core.sample import Sample, SampleView\n from fiftyone.core.stages import ViewStage\n@@ -33,9 +34,12 @@\n if _cls not in _MASK_CLASSES:\n return str(fou.deserialize_numpy_array(raw).shape)\n \n- return fou.serialize_numpy_array(\n- fou.deserialize_numpy_array(raw), ascii=True\n- )\n+ array = fou.deserialize_numpy_array(raw)\n+\n+ if np.isfortran(array):\n+ array = np.ascontiguousarray(array)\n+\n+ return fou.serialize_numpy_array(array, ascii=True)\n \n \n def _handle_date(dt):\n", "issue": "[BUG] Support Fortran ordered masks in the App\nCurrently fortran ordered masks are flipped.\r\n\r\n```py\r\nimport fiftyone as fo\r\nimport fiftyone.zoo as foz\r\nimport numpy as np\r\n\r\ndataset = foz.load_zoo_dataset(\"quickstart\", max_samples=1).select_fields().clone()\r\nsample = dataset.first()\r\n\r\ncontiguous = np.asarray([[True, False], [True, False]])\r\nsample[\"contiguous\"] = fo.Segmentation(mask=contiguous)\r\nsample[\"fortran\"] = fo.Segmentation(mask=np.asfortranarray(contiguous))\r\nsample.save()\r\n\r\nsession = fo.Session(dataset)\r\n```\r\n<img width=\"1792\" alt=\"flipped\" src=\"https://user-images.githubusercontent.com/19821840/159953546-5eef71bc-d111-4667-a271-6c4e34e1b7da.png\">\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nFiftyOne server json utilies.\n\n| Copyright 2017-2022, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nfrom bson import ObjectId, json_util\nfrom collections import OrderedDict\nfrom datetime import date, datetime\nfrom json import JSONEncoder\nimport math\n\nfrom fiftyone.core.sample import Sample, SampleView\nfrom fiftyone.core.stages import ViewStage\nimport fiftyone.core.utils as fou\n\n\n_MASK_CLASSES = {\"Detection\", \"Heatmap\", \"Segmentation\"}\n\n\ndef _handle_bytes(o):\n for k, v in o.items():\n if isinstance(v, bytes):\n o[k] = str(fou.deserialize_numpy_array(v).shape)\n elif isinstance(v, dict):\n o[k] = _handle_bytes(v)\n\n return o\n\n\ndef _handle_numpy_array(raw, _cls=None):\n if _cls not in _MASK_CLASSES:\n return str(fou.deserialize_numpy_array(raw).shape)\n\n return fou.serialize_numpy_array(\n fou.deserialize_numpy_array(raw), ascii=True\n )\n\n\ndef _handle_date(dt):\n return {\n \"_cls\": \"DateTime\",\n \"datetime\": fou.datetime_to_timestamp(dt),\n }\n\n\ndef _is_invalid_number(value):\n if not isinstance(value, float):\n return False\n\n return math.isnan(value) or math.isinf(value)\n\n\ndef convert(d):\n if isinstance(d, (dict, OrderedDict)):\n for k, v in d.items():\n if isinstance(v, bytes):\n d[k] = _handle_numpy_array(v, d.get(\"_cls\", None))\n elif isinstance(v, (date, datetime)):\n d[k] = _handle_date(v)\n elif isinstance(v, ObjectId):\n d[k] = str(v)\n elif isinstance(v, (dict, OrderedDict, list)):\n convert(v)\n elif _is_invalid_number(v):\n d[k] = str(v)\n\n if isinstance(d, list):\n for idx, i in enumerate(d):\n if isinstance(i, tuple):\n d[idx] = list(i)\n i = d[idx]\n\n if isinstance(i, bytes):\n d[idx] = _handle_numpy_array(i)\n elif isinstance(i, (date, datetime)):\n d[idx] = _handle_date(i)\n elif isinstance(i, ObjectId):\n d[idx] = str(i)\n elif isinstance(i, (dict, OrderedDict, list)):\n convert(i)\n elif _is_invalid_number(i):\n d[idx] = str(i)\n\n\nclass 
FiftyOneJSONEncoder(JSONEncoder):\n \"\"\"JSON encoder for the FiftyOne server.\n\n Any classes with non-standard serialization methods should\n be accounted for in the `default()` method.\n \"\"\"\n\n def default(self, o): # pylint: disable=E0202\n \"\"\"Returns the serialized representation of the objects\n\n Args:\n o: the object\n\n Returns:\n str\n \"\"\"\n if isinstance(o, (Sample, SampleView)):\n return _handle_bytes(o.to_mongo_dict(include_id=True))\n if issubclass(type(o), ViewStage):\n return o._serialize()\n if isinstance(o, ObjectId):\n return str(o)\n if isinstance(o, float):\n return json_util.dumps(o)\n return super().default(o)\n\n @staticmethod\n def dumps(*args, **kwargs):\n \"\"\"Defined for overriding the default SocketIO `json` interface\"\"\"\n kwargs[\"cls\"] = FiftyOneJSONEncoder\n return json_util.dumps(\n json_util.loads(\n json_util.dumps(*args, **kwargs), parse_constant=lambda c: c\n ),\n **kwargs\n )\n\n @staticmethod\n def loads(*args, **kwargs):\n \"\"\"Defined for overriding the default SocketIO `json` interface\"\"\"\n return json_util.loads(*args, **kwargs)\n", "path": "fiftyone/server/json_util.py"}], "after_files": [{"content": "\"\"\"\nFiftyOne server json utilies.\n\n| Copyright 2017-2022, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nfrom bson import ObjectId, json_util\nfrom collections import OrderedDict\nfrom datetime import date, datetime\nfrom json import JSONEncoder\nimport math\nimport numpy as np\n\nfrom fiftyone.core.sample import Sample, SampleView\nfrom fiftyone.core.stages import ViewStage\nimport fiftyone.core.utils as fou\n\n\n_MASK_CLASSES = {\"Detection\", \"Heatmap\", \"Segmentation\"}\n\n\ndef _handle_bytes(o):\n for k, v in o.items():\n if isinstance(v, bytes):\n o[k] = str(fou.deserialize_numpy_array(v).shape)\n elif isinstance(v, dict):\n o[k] = _handle_bytes(v)\n\n return o\n\n\ndef _handle_numpy_array(raw, _cls=None):\n if _cls not in _MASK_CLASSES:\n return str(fou.deserialize_numpy_array(raw).shape)\n\n array = fou.deserialize_numpy_array(raw)\n\n if np.isfortran(array):\n array = np.ascontiguousarray(array)\n\n return fou.serialize_numpy_array(array, ascii=True)\n\n\ndef _handle_date(dt):\n return {\n \"_cls\": \"DateTime\",\n \"datetime\": fou.datetime_to_timestamp(dt),\n }\n\n\ndef _is_invalid_number(value):\n if not isinstance(value, float):\n return False\n\n return math.isnan(value) or math.isinf(value)\n\n\ndef convert(d):\n if isinstance(d, (dict, OrderedDict)):\n for k, v in d.items():\n if isinstance(v, bytes):\n d[k] = _handle_numpy_array(v, d.get(\"_cls\", None))\n elif isinstance(v, (date, datetime)):\n d[k] = _handle_date(v)\n elif isinstance(v, ObjectId):\n d[k] = str(v)\n elif isinstance(v, (dict, OrderedDict, list)):\n convert(v)\n elif _is_invalid_number(v):\n d[k] = str(v)\n\n if isinstance(d, list):\n for idx, i in enumerate(d):\n if isinstance(i, tuple):\n d[idx] = list(i)\n i = d[idx]\n\n if isinstance(i, bytes):\n d[idx] = _handle_numpy_array(i)\n elif isinstance(i, (date, datetime)):\n d[idx] = _handle_date(i)\n elif isinstance(i, ObjectId):\n d[idx] = str(i)\n elif isinstance(i, (dict, OrderedDict, list)):\n convert(i)\n elif _is_invalid_number(i):\n d[idx] = str(i)\n\n\nclass FiftyOneJSONEncoder(JSONEncoder):\n \"\"\"JSON encoder for the FiftyOne server.\n\n Any classes with non-standard serialization methods should\n be accounted for in the `default()` method.\n \"\"\"\n\n def default(self, o): # pylint: disable=E0202\n \"\"\"Returns the serialized representation of the 
objects\n\n Args:\n o: the object\n\n Returns:\n str\n \"\"\"\n if isinstance(o, (Sample, SampleView)):\n return _handle_bytes(o.to_mongo_dict(include_id=True))\n if issubclass(type(o), ViewStage):\n return o._serialize()\n if isinstance(o, ObjectId):\n return str(o)\n if isinstance(o, float):\n return json_util.dumps(o)\n return super().default(o)\n\n @staticmethod\n def dumps(*args, **kwargs):\n \"\"\"Defined for overriding the default SocketIO `json` interface\"\"\"\n kwargs[\"cls\"] = FiftyOneJSONEncoder\n return json_util.dumps(\n json_util.loads(\n json_util.dumps(*args, **kwargs), parse_constant=lambda c: c\n ),\n **kwargs\n )\n\n @staticmethod\n def loads(*args, **kwargs):\n \"\"\"Defined for overriding the default SocketIO `json` interface\"\"\"\n return json_util.loads(*args, **kwargs)\n", "path": "fiftyone/server/json_util.py"}]} | 1,570 | 204 |
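The FiftyOne fix above normalizes Fortran-ordered mask arrays to C order before serializing them for the App, presumably because the serialized byte order otherwise disagrees with the layout the frontend assumes when decoding the mask. The two NumPy calls involved behave like this:

```python
# Minimal NumPy sketch of the normalization added in _handle_numpy_array().
import numpy as np

mask = np.asfortranarray([[True, False], [True, False]])
print(np.isfortran(mask))            # True: column-major (Fortran) memory layout
fixed = np.ascontiguousarray(mask)
print(np.isfortran(fixed))           # False: copied into row-major (C) layout
print(np.array_equal(mask, fixed))   # True: same values, only the memory layout changed
```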
gh_patches_debug_1764 | rasdani/github-patches | git_diff | apple__coremltools-298 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Why is six pinned to 1.10.0?
Is there any reason for [six to be pinned to version 1.10.0](https://github.com/apple/coremltools/blob/master/setup.py#L44)? This sometimes causes transitive dependency issues.
/cc @mats-claassen
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import os
4 from setuptools import setup
5
6 README = os.path.join(os.getcwd(), "README.rst")
7
8
9 with open(README) as f:
10 long_description = f.read()
11
12 setup(name='coremltools',
13 version='2.0',
14 description='Community Tools for CoreML',
15 long_description=long_description,
16 author='Apple Inc.',
17 author_email='[email protected]',
18 url='',
19 packages=[
20 'coremltools',
21 'coremltools._deps',
22 'coremltools.converters',
23 'coremltools.converters.caffe',
24 'coremltools.converters.sklearn',
25 'coremltools.converters.xgboost',
26 'coremltools.converters.libsvm',
27 'coremltools.converters.keras',
28 'coremltools.graph_visualization',
29 'coremltools.models',
30 'coremltools.models.neural_network',
31 'coremltools.proto',
32 'coremltools._scripts'
33 ],
34 package_data={'': ['LICENSE.txt', 'README.rst', 'libcaffeconverter.so', 'libcoremlpython.so'],
35 'coremltools': ['graph_visualization/__init__.py',
36 'graph_visualization/app.js',
37 'graph_visualization/index.html',
38 'graph_visualization/style.css',
39 'graph_visualization/assets/*',
40 'graph_visualization/icons/*']
41 },
42 install_requires=[
43 'numpy >= 1.10.0',
44 'protobuf >= 3.1.0',
45 'six==1.10.0'
46 ],
47 entry_points = {
48 'console_scripts': ['coremlconverter = coremltools:_main']
49 },
50 classifiers=[
51 'Development Status :: 4 - Beta',
52 'Intended Audience :: End Users/Desktop',
53 'Intended Audience :: Developers',
54 'Operating System :: MacOS :: MacOS X',
55 'Programming Language :: Python :: 2.7',
56 'Programming Language :: Python :: 3.5',
57 'Programming Language :: Python :: 3.6',
58 'Topic :: Scientific/Engineering',
59 'Topic :: Software Development'
60 ],
61 license='BSD'
62 )
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -42,7 +42,7 @@
install_requires=[
'numpy >= 1.10.0',
'protobuf >= 3.1.0',
- 'six==1.10.0'
+ 'six>=1.10.0'
],
entry_points = {
'console_scripts': ['coremlconverter = coremltools:_main']
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -42,7 +42,7 @@\n install_requires=[\n 'numpy >= 1.10.0',\n 'protobuf >= 3.1.0',\n- 'six==1.10.0'\n+ 'six>=1.10.0'\n ],\n entry_points = {\n 'console_scripts': ['coremlconverter = coremltools:_main']\n", "issue": "Why is six pinned to 1.10.0?\nIs there any reason for [six to be pinned to version 1.10.0](https://github.com/apple/coremltools/blob/master/setup.py#L44). This gives transitive dependency issues sometimes.\r\n\r\n/cc @mats-claassen\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup\n\nREADME = os.path.join(os.getcwd(), \"README.rst\")\n\n\nwith open(README) as f:\n long_description = f.read()\n\nsetup(name='coremltools',\n version='2.0',\n description='Community Tools for CoreML',\n long_description=long_description,\n author='Apple Inc.',\n author_email='[email protected]',\n url='',\n packages=[\n 'coremltools',\n 'coremltools._deps',\n 'coremltools.converters',\n 'coremltools.converters.caffe',\n 'coremltools.converters.sklearn',\n 'coremltools.converters.xgboost',\n 'coremltools.converters.libsvm',\n 'coremltools.converters.keras',\n 'coremltools.graph_visualization',\n 'coremltools.models',\n 'coremltools.models.neural_network',\n 'coremltools.proto',\n 'coremltools._scripts'\n ],\n package_data={'': ['LICENSE.txt', 'README.rst', 'libcaffeconverter.so', 'libcoremlpython.so'],\n 'coremltools': ['graph_visualization/__init__.py',\n 'graph_visualization/app.js',\n 'graph_visualization/index.html',\n 'graph_visualization/style.css',\n 'graph_visualization/assets/*',\n 'graph_visualization/icons/*']\n },\n install_requires=[\n 'numpy >= 1.10.0',\n 'protobuf >= 3.1.0',\n 'six==1.10.0'\n ],\n entry_points = {\n 'console_scripts': ['coremlconverter = coremltools:_main']\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Developers',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Software Development'\n ],\n license='BSD'\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup\n\nREADME = os.path.join(os.getcwd(), \"README.rst\")\n\n\nwith open(README) as f:\n long_description = f.read()\n\nsetup(name='coremltools',\n version='2.0',\n description='Community Tools for CoreML',\n long_description=long_description,\n author='Apple Inc.',\n author_email='[email protected]',\n url='',\n packages=[\n 'coremltools',\n 'coremltools._deps',\n 'coremltools.converters',\n 'coremltools.converters.caffe',\n 'coremltools.converters.sklearn',\n 'coremltools.converters.xgboost',\n 'coremltools.converters.libsvm',\n 'coremltools.converters.keras',\n 'coremltools.graph_visualization',\n 'coremltools.models',\n 'coremltools.models.neural_network',\n 'coremltools.proto',\n 'coremltools._scripts'\n ],\n package_data={'': ['LICENSE.txt', 'README.rst', 'libcaffeconverter.so', 'libcoremlpython.so'],\n 'coremltools': ['graph_visualization/__init__.py',\n 'graph_visualization/app.js',\n 'graph_visualization/index.html',\n 'graph_visualization/style.css',\n 'graph_visualization/assets/*',\n 'graph_visualization/icons/*']\n },\n install_requires=[\n 'numpy >= 1.10.0',\n 'protobuf >= 3.1.0',\n 'six>=1.10.0'\n ],\n entry_points = {\n 'console_scripts': ['coremlconverter = 
coremltools:_main']\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Developers',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Software Development'\n ],\n license='BSD'\n)\n", "path": "setup.py"}]} | 902 | 106 |
gh_patches_debug_36300 | rasdani/github-patches | git_diff | fidals__shopelectro-767 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use SiteDriver class instead of seleniumrequests.Remote
It will bring the ability to use `shopelectro.selenium` classes in tests.
--- END ISSUE ---
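To make the motivation concrete, below is a rough sketch of what a selenium test could look like once it drives the page objects through the project's own `SiteDriver` wrapper instead of a raw `seleniumrequests.Remote`. The `SiteDriver` constructor argument and the test body are assumptions for illustration, not the project's actual API:

```python
# Hypothetical test sketch: reuse shopelectro.selenium page objects instead of
# constructing a seleniumrequests.Remote instance inside the test itself.
from shopelectro.selenium import SiteDriver
from shopelectro.selenium.pages.order import OrderPage
from shopelectro.selenium.pages.success import SuccessPage


def test_order_success():
    driver = SiteDriver(site_url='http://localhost:8000')  # assumed signature
    order_page = OrderPage(driver)
    order_page.load()
    order_page.fill_contacts()
    order_page.make_order()
    assert SuccessPage(driver).is_success()
```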
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/selenium/pages/order.py`
Content:
```
1 from shopelectro.models import PaymentOptions
2 from shopelectro.selenium.elements import Input, Button
3 from shopelectro.selenium.pages import Page
4
5 from selenium.webdriver.common.by import By
6
7 from pages.models import CustomPage
8
9 # @todo #682:120m Implement and reuse shopelectro.selenium.OrderPage for selenium tests.
10
11
12 class OrderPage(Page):
13
14 def __init__(self, driver):
15 super().__init__(driver)
16 self.submit_button = Button(self.driver, (By.ID, 'submit-order'))
17
18 @property
19 def path(self):
20 return CustomPage.objects.get(slug='order').url
21
22 def fill_contacts(
23 self, name='Name', city='Санкт-Петербург', phone='2222222222', email='[email protected]',
24 ):
25 contacts = {
26 'id_name': name,
27 'id_city': city,
28 'id_phone': phone,
29 'id_email': email,
30 }
31
32 for id_, value in contacts.items():
33 Input(self.driver, (By.ID, id_)).send_keys(value)
34
35 def make_order(self):
36 self.submit_button.click()
37
38 def select_payment_type(self, payment_option: PaymentOptions):
39 if payment_option not in PaymentOptions:
40 raise ValueError(
41 'An invalid payment type provided.'
42 f'It should be one of: {PaymentOptions}'
43 )
44
45 item = Button(
46 self.driver,
47 (By.CSS, f'input[name="payment_type"][value="{payment_option.name}"]'),
48 )
49 item.click()
50
```
Path: `shopelectro/selenium/pages/page.py`
Content:
```
1 from shopelectro.selenium import SiteDriver
2
3 from selenium.webdriver.common.by import By
4 from selenium.webdriver.support import expected_conditions as EC
5
6
7 class Page:
8 """
9 Represent a typical Shopelectro's page.
10
11 Contains cross-page elements: header, footer, ...
12 """
13
14 def __init__(self, driver: SiteDriver):
15 if not isinstance(driver, SiteDriver):
16 raise TypeError('Driver must be an instance of shopelectro.selenium.SiteDriver')
17 self.driver = driver
18 self.path: str
19
20 def load(self):
21 if not self.path:
22 raise ValueError(f'Set a page path to {self.__class__.__name__}')
23 self.driver.get(self.path)
24 self.driver.wait.until(EC.visibility_of_element_located(
25 (By.TAG_NAME, 'body')
26 ))
27
```
Path: `shopelectro/selenium/pages/success.py`
Content:
```
1 from shopelectro.selenium.pages import Page
2
3 from pages.models import CustomPage
4
5
6 class SuccessPage(Page):
7
8 @property
9 def path(self):
10 CustomPage.objects.get(slug='order-success').url
11
12 def is_success(self):
13 return 'Заказ принят' in self.driver.find_element_by_tag_name('h1').text
14
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shopelectro/selenium/pages/order.py b/shopelectro/selenium/pages/order.py
--- a/shopelectro/selenium/pages/order.py
+++ b/shopelectro/selenium/pages/order.py
@@ -3,6 +3,7 @@
from shopelectro.selenium.pages import Page
from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
from pages.models import CustomPage
@@ -34,6 +35,7 @@
def make_order(self):
self.submit_button.click()
+ self.driver.wait.until(EC.url_changes(self.path))
def select_payment_type(self, payment_option: PaymentOptions):
if payment_option not in PaymentOptions:
diff --git a/shopelectro/selenium/pages/page.py b/shopelectro/selenium/pages/page.py
--- a/shopelectro/selenium/pages/page.py
+++ b/shopelectro/selenium/pages/page.py
@@ -1,3 +1,5 @@
+from functools import wraps
+
from shopelectro.selenium import SiteDriver
from selenium.webdriver.common.by import By
@@ -17,10 +19,17 @@
self.driver = driver
self.path: str
+ def wait_loaded(self):
+ def loaded(driver):
+ is_sync = EC.url_contains(self.path)
+ is_rendered = EC.visibility_of_element_located(
+ (By.TAG_NAME, 'body')
+ )
+ return is_sync(driver) and is_rendered(driver)
+ self.driver.wait.until(loaded)
+
def load(self):
if not self.path:
raise ValueError(f'Set a page path to {self.__class__.__name__}')
self.driver.get(self.path)
- self.driver.wait.until(EC.visibility_of_element_located(
- (By.TAG_NAME, 'body')
- ))
+ self.wait_loaded()
diff --git a/shopelectro/selenium/pages/success.py b/shopelectro/selenium/pages/success.py
--- a/shopelectro/selenium/pages/success.py
+++ b/shopelectro/selenium/pages/success.py
@@ -1,3 +1,6 @@
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+
from shopelectro.selenium.pages import Page
from pages.models import CustomPage
@@ -7,7 +10,10 @@
@property
def path(self):
- CustomPage.objects.get(slug='order-success').url
+ return CustomPage.objects.get(slug='order-success').url
def is_success(self):
- return 'Заказ принят' in self.driver.find_element_by_tag_name('h1').text
+ h1 = self.driver.wait.until(
+ EC.visibility_of_element_located((By.TAG_NAME, 'h1'))
+ ).text
+ return 'Заказ принят' in h1
| {"golden_diff": "diff --git a/shopelectro/selenium/pages/order.py b/shopelectro/selenium/pages/order.py\n--- a/shopelectro/selenium/pages/order.py\n+++ b/shopelectro/selenium/pages/order.py\n@@ -3,6 +3,7 @@\n from shopelectro.selenium.pages import Page\n \n from selenium.webdriver.common.by import By\n+from selenium.webdriver.support import expected_conditions as EC\n \n from pages.models import CustomPage\n \n@@ -34,6 +35,7 @@\n \n def make_order(self):\n self.submit_button.click()\n+ self.driver.wait.until(EC.url_changes(self.path))\n \n def select_payment_type(self, payment_option: PaymentOptions):\n if payment_option not in PaymentOptions:\ndiff --git a/shopelectro/selenium/pages/page.py b/shopelectro/selenium/pages/page.py\n--- a/shopelectro/selenium/pages/page.py\n+++ b/shopelectro/selenium/pages/page.py\n@@ -1,3 +1,5 @@\n+from functools import wraps\n+\n from shopelectro.selenium import SiteDriver\n \n from selenium.webdriver.common.by import By\n@@ -17,10 +19,17 @@\n self.driver = driver\n self.path: str\n \n+ def wait_loaded(self):\n+ def loaded(driver):\n+ is_sync = EC.url_contains(self.path)\n+ is_rendered = EC.visibility_of_element_located(\n+ (By.TAG_NAME, 'body')\n+ )\n+ return is_sync(driver) and is_rendered(driver)\n+ self.driver.wait.until(loaded)\n+\n def load(self):\n if not self.path:\n raise ValueError(f'Set a page path to {self.__class__.__name__}')\n self.driver.get(self.path)\n- self.driver.wait.until(EC.visibility_of_element_located(\n- (By.TAG_NAME, 'body')\n- ))\n+ self.wait_loaded()\ndiff --git a/shopelectro/selenium/pages/success.py b/shopelectro/selenium/pages/success.py\n--- a/shopelectro/selenium/pages/success.py\n+++ b/shopelectro/selenium/pages/success.py\n@@ -1,3 +1,6 @@\n+from selenium.webdriver.common.by import By\n+from selenium.webdriver.support import expected_conditions as EC\n+\n from shopelectro.selenium.pages import Page\n \n from pages.models import CustomPage\n@@ -7,7 +10,10 @@\n \n @property\n def path(self):\n- CustomPage.objects.get(slug='order-success').url\n+ return CustomPage.objects.get(slug='order-success').url\n \n def is_success(self):\n- return '\u0417\u0430\u043a\u0430\u0437 \u043f\u0440\u0438\u043d\u044f\u0442' in self.driver.find_element_by_tag_name('h1').text\n+ h1 = self.driver.wait.until(\n+ EC.visibility_of_element_located((By.TAG_NAME, 'h1'))\n+ ).text\n+ return '\u0417\u0430\u043a\u0430\u0437 \u043f\u0440\u0438\u043d\u044f\u0442' in h1\n", "issue": "Use SiteDriver class instead of seleniumrequests.Remote\nIt will bring ability to use `shopelectro.selenium` classes in tests. 
\n", "before_files": [{"content": "from shopelectro.models import PaymentOptions\nfrom shopelectro.selenium.elements import Input, Button\nfrom shopelectro.selenium.pages import Page\n\nfrom selenium.webdriver.common.by import By\n\nfrom pages.models import CustomPage\n\n# @todo #682:120m Implement and reuse shopelectro.selenium.OrderPage for selenium tests.\n\n\nclass OrderPage(Page):\n\n def __init__(self, driver):\n super().__init__(driver)\n self.submit_button = Button(self.driver, (By.ID, 'submit-order'))\n\n @property\n def path(self):\n return CustomPage.objects.get(slug='order').url\n\n def fill_contacts(\n self, name='Name', city='\u0421\u0430\u043d\u043a\u0442-\u041f\u0435\u0442\u0435\u0440\u0431\u0443\u0440\u0433', phone='2222222222', email='[email protected]',\n ):\n contacts = {\n 'id_name': name,\n 'id_city': city,\n 'id_phone': phone,\n 'id_email': email,\n }\n\n for id_, value in contacts.items():\n Input(self.driver, (By.ID, id_)).send_keys(value)\n\n def make_order(self):\n self.submit_button.click()\n\n def select_payment_type(self, payment_option: PaymentOptions):\n if payment_option not in PaymentOptions:\n raise ValueError(\n 'An invalid payment type provided.'\n f'It should be one of: {PaymentOptions}'\n )\n\n item = Button(\n self.driver,\n (By.CSS, f'input[name=\"payment_type\"][value=\"{payment_option.name}\"]'),\n )\n item.click()\n", "path": "shopelectro/selenium/pages/order.py"}, {"content": "from shopelectro.selenium import SiteDriver\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass Page:\n \"\"\"\n Represent a typical Shopelectro's page.\n\n Contains cross-page elements: header, footer, ...\n \"\"\"\n\n def __init__(self, driver: SiteDriver):\n if not isinstance(driver, SiteDriver):\n raise TypeError('Driver must be an instance of shopelectro.selenium.SiteDriver')\n self.driver = driver\n self.path: str\n\n def load(self):\n if not self.path:\n raise ValueError(f'Set a page path to {self.__class__.__name__}')\n self.driver.get(self.path)\n self.driver.wait.until(EC.visibility_of_element_located(\n (By.TAG_NAME, 'body')\n ))\n", "path": "shopelectro/selenium/pages/page.py"}, {"content": "from shopelectro.selenium.pages import Page\n\nfrom pages.models import CustomPage\n\n\nclass SuccessPage(Page):\n\n @property\n def path(self):\n CustomPage.objects.get(slug='order-success').url\n\n def is_success(self):\n return '\u0417\u0430\u043a\u0430\u0437 \u043f\u0440\u0438\u043d\u044f\u0442' in self.driver.find_element_by_tag_name('h1').text\n", "path": "shopelectro/selenium/pages/success.py"}], "after_files": [{"content": "from shopelectro.models import PaymentOptions\nfrom shopelectro.selenium.elements import Input, Button\nfrom shopelectro.selenium.pages import Page\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom pages.models import CustomPage\n\n# @todo #682:120m Implement and reuse shopelectro.selenium.OrderPage for selenium tests.\n\n\nclass OrderPage(Page):\n\n def __init__(self, driver):\n super().__init__(driver)\n self.submit_button = Button(self.driver, (By.ID, 'submit-order'))\n\n @property\n def path(self):\n return CustomPage.objects.get(slug='order').url\n\n def fill_contacts(\n self, name='Name', city='\u0421\u0430\u043d\u043a\u0442-\u041f\u0435\u0442\u0435\u0440\u0431\u0443\u0440\u0433', phone='2222222222', email='[email protected]',\n ):\n contacts = {\n 'id_name': name,\n 'id_city': city,\n 'id_phone': 
phone,\n 'id_email': email,\n }\n\n for id_, value in contacts.items():\n Input(self.driver, (By.ID, id_)).send_keys(value)\n\n def make_order(self):\n self.submit_button.click()\n self.driver.wait.until(EC.url_changes(self.path))\n\n def select_payment_type(self, payment_option: PaymentOptions):\n if payment_option not in PaymentOptions:\n raise ValueError(\n 'An invalid payment type provided.'\n f'It should be one of: {PaymentOptions}'\n )\n\n item = Button(\n self.driver,\n (By.CSS, f'input[name=\"payment_type\"][value=\"{payment_option.name}\"]'),\n )\n item.click()\n", "path": "shopelectro/selenium/pages/order.py"}, {"content": "from functools import wraps\n\nfrom shopelectro.selenium import SiteDriver\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass Page:\n \"\"\"\n Represent a typical Shopelectro's page.\n\n Contains cross-page elements: header, footer, ...\n \"\"\"\n\n def __init__(self, driver: SiteDriver):\n if not isinstance(driver, SiteDriver):\n raise TypeError('Driver must be an instance of shopelectro.selenium.SiteDriver')\n self.driver = driver\n self.path: str\n\n def wait_loaded(self):\n def loaded(driver):\n is_sync = EC.url_contains(self.path)\n is_rendered = EC.visibility_of_element_located(\n (By.TAG_NAME, 'body')\n )\n return is_sync(driver) and is_rendered(driver)\n self.driver.wait.until(loaded)\n\n def load(self):\n if not self.path:\n raise ValueError(f'Set a page path to {self.__class__.__name__}')\n self.driver.get(self.path)\n self.wait_loaded()\n", "path": "shopelectro/selenium/pages/page.py"}, {"content": "from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom shopelectro.selenium.pages import Page\n\nfrom pages.models import CustomPage\n\n\nclass SuccessPage(Page):\n\n @property\n def path(self):\n return CustomPage.objects.get(slug='order-success').url\n\n def is_success(self):\n h1 = self.driver.wait.until(\n EC.visibility_of_element_located((By.TAG_NAME, 'h1'))\n ).text\n return '\u0417\u0430\u043a\u0430\u0437 \u043f\u0440\u0438\u043d\u044f\u0442' in h1\n", "path": "shopelectro/selenium/pages/success.py"}]} | 1,077 | 629 |
gh_patches_debug_564 | rasdani/github-patches | git_diff | mabel-dev__opteryx-1695 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
✨ Memory Pool Optimizations
### Thanks for stopping by to let us know something could be better!
**Is your feature request related to a problem? Please describe.** _A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]_
**Describe the solution you'd like** _A clear and concise description of what you want to happen._
**Describe alternatives you've considered** _A clear and concise description of any alternative solutions or features you've considered._
**Additional context** _Add any other context or screenshots about the feature request here._
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opteryx/__version__.py`
Content:
```
1 __build__ = 527
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Store the version here so:
17 1) we don't load dependencies by storing it in __init__.py
18 2) we can import it in setup.py for the same reason
19 """
20 from enum import Enum # isort: skip
21
22
23 class VersionStatus(Enum):
24 ALPHA = "alpha"
25 BETA = "beta"
26 RELEASE = "release"
27
28
29 _major = 0
30 _minor = 16
31 _revision = 0
32 _status = VersionStatus.ALPHA
33
34 __author__ = "@joocer"
35 __version__ = f"{_major}.{_minor}.{_revision}" + (
36 f"-{_status.value}.{__build__}" if _status != VersionStatus.RELEASE else ""
37 )
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opteryx/__version__.py b/opteryx/__version__.py
--- a/opteryx/__version__.py
+++ b/opteryx/__version__.py
@@ -1,4 +1,4 @@
-__build__ = 527
+__build__ = 532
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
| {"golden_diff": "diff --git a/opteryx/__version__.py b/opteryx/__version__.py\n--- a/opteryx/__version__.py\n+++ b/opteryx/__version__.py\n@@ -1,4 +1,4 @@\n-__build__ = 527\n+__build__ = 532\n \n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n", "issue": "\u2728 Memory Pool Optimizations\n### Thanks for stopping by to let us know something could be better!\r\n\r\n**Is your feature request related to a problem? Please describe.** _A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]_\r\n\r\n**Describe the solution you'd like** _A clear and concise description of what you want to happen._\r\n\r\n**Describe alternatives you've considered** _A clear and concise description of any alternative solutions or features you've considered._\r\n\r\n**Additional context** _Add any other context or screenshots about the feature request here._\r\n\n", "before_files": [{"content": "__build__ = 527\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 16\n_revision = 0\n_status = VersionStatus.ALPHA\n\n__author__ = \"@joocer\"\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}], "after_files": [{"content": "__build__ = 532\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 16\n_revision = 0\n_status = VersionStatus.ALPHA\n\n__author__ = \"@joocer\"\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}]} | 734 | 101 |
gh_patches_debug_308 | rasdani/github-patches | git_diff | zulip__zulip-13077 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upgrade pip from 19.1.1 and pip-tools from 3.8.0
Follow-up issue from #13067. pip-tools 3.9.0 or 4.0.0 fails to resolve dependencies from Git URLs (jazzband/pip-tools#851):
`pip._internal.exceptions.DistributionNotFound: No matching distribution found for zulip==0.6.1_git (from -r requirements/common.in (line 135))`
while pip 19.2 breaks pip-tools 3.8.0 (jazzband/pip-tools#853):
`TypeError: __init__() got an unexpected keyword argument 'find_links'`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `version.py`
Content:
```
1 import os
2
3 ZULIP_VERSION = "2.0.4+git"
4 # Add information on number of commits and commit hash to version, if available
5 zulip_git_version_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'zulip-git-version')
6 if os.path.exists(zulip_git_version_file):
7 with open(zulip_git_version_file) as f:
8 version = f.read().strip()
9 if version:
10 ZULIP_VERSION = version
11
12 LATEST_MAJOR_VERSION = "2.0"
13 LATEST_RELEASE_VERSION = "2.0.4"
14 LATEST_RELEASE_ANNOUNCEMENT = "https://blog.zulip.org/2019/03/01/zulip-2-0-released/"
15
16 # Bump the minor PROVISION_VERSION to indicate that folks should provision
17 # only when going from an old version of the code to a newer version. Bump
18 # the major version to indicate that folks should provision in both
19 # directions.
20
21 # Typically,
22 # * adding a dependency only requires a minor version bump;
23 # * removing a dependency requires a major version bump;
24 # * upgrading a dependency requires a major version bump, unless the
25 # upgraded dependency is backwards compatible with all of our
26 # historical commits sharing the same major version, in which case a
27 # minor version bump suffices.
28
29 PROVISION_VERSION = '49.2'
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -26,4 +26,4 @@
# historical commits sharing the same major version, in which case a
# minor version bump suffices.
-PROVISION_VERSION = '49.2'
+PROVISION_VERSION = '49.3'
| {"golden_diff": "diff --git a/version.py b/version.py\n--- a/version.py\n+++ b/version.py\n@@ -26,4 +26,4 @@\n # historical commits sharing the same major version, in which case a\n # minor version bump suffices.\n \n-PROVISION_VERSION = '49.2'\n+PROVISION_VERSION = '49.3'\n", "issue": "Upgrade pip from 19.1.1 and pip-tools from 3.8.0\nFollowup issue from #13067. pip-tools 3.9.0 or 4.0.0 fails to resolve dependencies from Git URLs (jazzband/pip-tools#851):\r\n\r\n`pip._internal.exceptions.DistributionNotFound: No matching distribution found for zulip==0.6.1_git (from -r requirements/common.in (line 135))`\r\n\r\nwhile pip 19.2 breaks pip-tools 3.8.0 (jazzband/pip-tools#853):\r\n\r\n`TypeError: __init__() got an unexpected keyword argument 'find_links'`\n", "before_files": [{"content": "import os\n\nZULIP_VERSION = \"2.0.4+git\"\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'zulip-git-version')\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n version = f.read().strip()\n if version:\n ZULIP_VERSION = version\n\nLATEST_MAJOR_VERSION = \"2.0\"\nLATEST_RELEASE_VERSION = \"2.0.4\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.org/2019/03/01/zulip-2-0-released/\"\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = '49.2'\n", "path": "version.py"}], "after_files": [{"content": "import os\n\nZULIP_VERSION = \"2.0.4+git\"\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'zulip-git-version')\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n version = f.read().strip()\n if version:\n ZULIP_VERSION = version\n\nLATEST_MAJOR_VERSION = \"2.0\"\nLATEST_RELEASE_VERSION = \"2.0.4\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.org/2019/03/01/zulip-2-0-released/\"\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = '49.3'\n", "path": "version.py"}]} | 760 | 78 |
gh_patches_debug_4166 | rasdani/github-patches | git_diff | ray-project__ray-9517 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tune] Population-based training: broken when using keep_checkpoint_num
When using **population-based** training, TUNE stops after some time, throwing the following error:
`There are paused trials, but no more pending trials with sufficient resources.`
This is caused by not finding the latest checkpoint:
```
Failure # 1 (occurred at 2020-06-19_11-26-36)
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 294, in start_trial
self._start_trial(trial, checkpoint, train=train)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 235, in _start_trial
self.restore(trial, checkpoint)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 673, in restore
data_dict = TrainableUtil.pickle_checkpoint(value)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 62, in pickle_checkpoint
checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 87, in find_checkpoint_dir
raise FileNotFoundError("Path does not exist", checkpoint_path)
FileNotFoundError: [Errno Path does not exist] /content/TRASH_TUNE_PBT_oversampling_mimic_densenet121/TUNE_Model_0_2020-06-19_11-24-215xncry9c/checkpoint_6/
```
The error appears to be somewhat random, since it only shows up after quite a few iterations.
The error can be reproduced in this [colab notebook](https://colab.research.google.com/drive/1-o896bEUm7DTvS24Do0btlqbSHre49MH?usp=sharing). **It is not a Colab-related issue, since the same problem arises on our own server.**
@richardliaw Is this related to #8772 ?
--- END ISSUE ---
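For reference, the combination that exercises this code path is a PBT scheduler plus checkpoint pruning in `tune.run`. The following is only a minimal sketch (not the linked notebook); `MyTrainable` is a placeholder, and method and argument names can differ slightly between Ray versions:

```python
import os
import random

from ray import tune
from ray.tune.schedulers import PopulationBasedTraining


class MyTrainable(tune.Trainable):
    def step(self):
        # Pretend the mutated "lr" influences a validation loss.
        return {"val_loss": random.random() * self.config["lr"]}

    def save_checkpoint(self, checkpoint_dir):
        path = os.path.join(checkpoint_dir, "state.txt")
        with open(path, "w") as f:
            f.write("state")
        return path

    def load_checkpoint(self, checkpoint_path):
        pass


pbt = PopulationBasedTraining(
    time_attr="training_iteration",
    metric="val_loss",
    mode="min",
    perturbation_interval=2,
    hyperparam_mutations={"lr": lambda: random.uniform(1e-4, 1e-1)},
)

tune.run(
    MyTrainable,
    config={"lr": 1e-2},
    scheduler=pbt,
    num_samples=4,
    stop={"training_iteration": 50},
    checkpoint_freq=1,
    keep_checkpoints_num=2,               # the checkpoint pruning that exposes the bug
    checkpoint_score_attr="min-val_loss",
)
```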
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/tune/checkpoint_manager.py`
Content:
```
1 # coding: utf-8
2 import heapq
3 import logging
4
5 from ray.tune.result import TRAINING_ITERATION
6
7 logger = logging.getLogger(__name__)
8
9
10 class Checkpoint:
11 """Describes a checkpoint of trial state.
12
13 Checkpoint may be saved in different storage.
14
15 Attributes:
16 storage (str): Storage type.
17 value (str): If storage==MEMORY, it is a Python object.
18 If storage==PERSISTENT, it is a path to persistent storage,
19 or a future that will be resolved to such a path.
20 """
21
22 MEMORY = "memory"
23 PERSISTENT = "persistent"
24
25 def __init__(self, storage, value, result=None):
26 self.storage = storage
27 self.value = value
28 self.result = result or {}
29
30 @staticmethod
31 def from_object(value=None):
32 """Creates a checkpoint from a Python object."""
33 return Checkpoint(Checkpoint.MEMORY, value)
34
35 @property
36 def is_ready(self):
37 """Returns whether the checkpoint is ready to be used for restoration.
38
39 A PERSISTENT checkpoint is considered ready once its value is resolved
40 to an actual path. MEMORY checkpoints are always considered ready since
41 they are transient.
42 """
43 if self.storage == Checkpoint.PERSISTENT:
44 return isinstance(self.value, str)
45 return self.storage == Checkpoint.MEMORY
46
47
48 class QueueItem:
49 def __init__(self, priority, value):
50 self.priority = priority
51 self.value = value
52
53 def __lt__(self, other):
54 return self.priority < other.priority
55
56
57 class CheckpointManager:
58 """Manages checkpoints on the driver for a trial."""
59
60 def __init__(self, keep_checkpoints_num, checkpoint_score_attr, delete_fn):
61 """Initializes a new CheckpointManager.
62
63 `newest_persistent_checkpoint` and `newest_memory_checkpoint` are
64 initialized to Checkpoint objects with values of None.
65
66 Args:
67 keep_checkpoints_num (int): Keep at least this many checkpoints.
68 checkpoint_score_attr (str): Attribute to use to determine which
69 checkpoints to keep.
70 delete_fn (function): Function that deletes checkpoints. Must be
71 idempotent.
72 """
73 self.keep_checkpoints_num = keep_checkpoints_num or float("inf")
74 assert self.keep_checkpoints_num > 0, (
75 "keep_checkpoints_num must be greater than 0.")
76 self._checkpoint_score_desc = checkpoint_score_attr.startswith("min-")
77 if self._checkpoint_score_desc:
78 self._checkpoint_score_attr = checkpoint_score_attr[4:]
79 else:
80 self._checkpoint_score_attr = checkpoint_score_attr
81
82 self.delete = delete_fn
83 self.newest_persistent_checkpoint = Checkpoint(Checkpoint.PERSISTENT,
84 None)
85 self.newest_memory_checkpoint = Checkpoint(Checkpoint.MEMORY, None)
86 self._best_checkpoints = []
87 self._membership = set()
88
89 @property
90 def newest_checkpoint(self):
91 """Returns the newest checkpoint (based on training iteration)."""
92 newest_checkpoint = max(
93 [self.newest_persistent_checkpoint, self.newest_memory_checkpoint],
94 key=lambda c: c.result.get(TRAINING_ITERATION, -1))
95 return newest_checkpoint
96
97 def on_checkpoint(self, checkpoint):
98 """Starts tracking checkpoint metadata on checkpoint.
99
100 Sets the newest checkpoint. For PERSISTENT checkpoints: Deletes
101 previous checkpoint as long as it isn't one of the best ones. Also
102 deletes the worst checkpoint if at capacity.
103
104 Args:
105 checkpoint (Checkpoint): Trial state checkpoint.
106 """
107 if checkpoint.storage == Checkpoint.MEMORY:
108 self.newest_memory_checkpoint = checkpoint
109 return
110
111 old_checkpoint = self.newest_persistent_checkpoint
112 self.newest_persistent_checkpoint = checkpoint
113
114 # Remove the old checkpoint if it isn't one of the best ones.
115 if old_checkpoint.value and old_checkpoint not in self._membership:
116 self.delete(old_checkpoint)
117
118 try:
119 queue_item = QueueItem(self._priority(checkpoint), checkpoint)
120 except KeyError:
121 logger.error("Result dict has no key: {}. "
122 "checkpoint_score_attr must be set to a key in the "
123 "result dict.".format(self._checkpoint_score_attr))
124 return
125
126 if len(self._best_checkpoints) < self.keep_checkpoints_num:
127 heapq.heappush(self._best_checkpoints, queue_item)
128 self._membership.add(checkpoint)
129 elif queue_item.priority >= self._best_checkpoints[0].priority:
130 worst = heapq.heappushpop(self._best_checkpoints, queue_item).value
131 self._membership.add(checkpoint)
132 if worst in self._membership:
133 self._membership.remove(worst)
134 # Don't delete the newest checkpoint. It will be deleted on the
135 # next on_checkpoint() call since it isn't in self._membership.
136 if worst != checkpoint:
137 self.delete(worst)
138
139 def best_checkpoints(self):
140 """Returns best PERSISTENT checkpoints, sorted by score."""
141 checkpoints = sorted(self._best_checkpoints, key=lambda c: c.priority)
142 return [queue_item.value for queue_item in checkpoints]
143
144 def _priority(self, checkpoint):
145 priority = checkpoint.result[self._checkpoint_score_attr]
146 return -priority if self._checkpoint_score_desc else priority
147
148 def __getstate__(self):
149 state = self.__dict__.copy()
150 # Avoid serializing lambda since it may capture cyclical dependencies.
151 state.pop("delete")
152 return state
153
154 def __setstate__(self, state):
155 self.__dict__.update(state)
156 self.delete = None
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/ray/tune/checkpoint_manager.py b/python/ray/tune/checkpoint_manager.py
--- a/python/ray/tune/checkpoint_manager.py
+++ b/python/ray/tune/checkpoint_manager.py
@@ -109,6 +109,10 @@
return
old_checkpoint = self.newest_persistent_checkpoint
+
+ if old_checkpoint.value == checkpoint.value:
+ return
+
self.newest_persistent_checkpoint = checkpoint
# Remove the old checkpoint if it isn't one of the best ones.
| {"golden_diff": "diff --git a/python/ray/tune/checkpoint_manager.py b/python/ray/tune/checkpoint_manager.py\n--- a/python/ray/tune/checkpoint_manager.py\n+++ b/python/ray/tune/checkpoint_manager.py\n@@ -109,6 +109,10 @@\n return\n \n old_checkpoint = self.newest_persistent_checkpoint\n+\n+ if old_checkpoint.value == checkpoint.value:\n+ return\n+\n self.newest_persistent_checkpoint = checkpoint\n \n # Remove the old checkpoint if it isn't one of the best ones.\n", "issue": "[tune] Population-based training: broken when using keep_checkpoint_num\nWhen using **population-based** training TUNE stops after some times throwing the following error:\r\n\r\n`There are paused trials, but no more pending trials with sufficient resources.`\r\n\r\nThis is caused by not finding the latest checkpoint:\r\n\r\n```\r\nFailure # 1 (occurred at 2020-06-19_11-26-36)\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py\", line 294, in start_trial\r\n self._start_trial(trial, checkpoint, train=train)\r\n File \"/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py\", line 235, in _start_trial\r\n self.restore(trial, checkpoint)\r\n File \"/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py\", line 673, in restore\r\n data_dict = TrainableUtil.pickle_checkpoint(value)\r\n File \"/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py\", line 62, in pickle_checkpoint\r\n checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path)\r\n File \"/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py\", line 87, in find_checkpoint_dir\r\n raise FileNotFoundError(\"Path does not exist\", checkpoint_path)\r\nFileNotFoundError: [Errno Path does not exist] /content/TRASH_TUNE_PBT_oversampling_mimic_densenet121/TUNE_Model_0_2020-06-19_11-24-215xncry9c/checkpoint_6/\r\n```\r\n\r\nThe error appears to be somewhat random since it only appears after quite some iterations\r\n\r\nThe error can be reproduced in this [colab notebook](https://colab.research.google.com/drive/1-o896bEUm7DTvS24Do0btlqbSHre49MH?usp=sharing). **It is not a COLAB related issue since the same problem arises on our own server.**\r\n\r\n@richardliaw Is this related to #8772 ?\n", "before_files": [{"content": "# coding: utf-8\nimport heapq\nimport logging\n\nfrom ray.tune.result import TRAINING_ITERATION\n\nlogger = logging.getLogger(__name__)\n\n\nclass Checkpoint:\n \"\"\"Describes a checkpoint of trial state.\n\n Checkpoint may be saved in different storage.\n\n Attributes:\n storage (str): Storage type.\n value (str): If storage==MEMORY, it is a Python object.\n If storage==PERSISTENT, it is a path to persistent storage,\n or a future that will be resolved to such a path.\n \"\"\"\n\n MEMORY = \"memory\"\n PERSISTENT = \"persistent\"\n\n def __init__(self, storage, value, result=None):\n self.storage = storage\n self.value = value\n self.result = result or {}\n\n @staticmethod\n def from_object(value=None):\n \"\"\"Creates a checkpoint from a Python object.\"\"\"\n return Checkpoint(Checkpoint.MEMORY, value)\n\n @property\n def is_ready(self):\n \"\"\"Returns whether the checkpoint is ready to be used for restoration.\n\n A PERSISTENT checkpoint is considered ready once its value is resolved\n to an actual path. 
MEMORY checkpoints are always considered ready since\n they are transient.\n \"\"\"\n if self.storage == Checkpoint.PERSISTENT:\n return isinstance(self.value, str)\n return self.storage == Checkpoint.MEMORY\n\n\nclass QueueItem:\n def __init__(self, priority, value):\n self.priority = priority\n self.value = value\n\n def __lt__(self, other):\n return self.priority < other.priority\n\n\nclass CheckpointManager:\n \"\"\"Manages checkpoints on the driver for a trial.\"\"\"\n\n def __init__(self, keep_checkpoints_num, checkpoint_score_attr, delete_fn):\n \"\"\"Initializes a new CheckpointManager.\n\n `newest_persistent_checkpoint` and `newest_memory_checkpoint` are\n initialized to Checkpoint objects with values of None.\n\n Args:\n keep_checkpoints_num (int): Keep at least this many checkpoints.\n checkpoint_score_attr (str): Attribute to use to determine which\n checkpoints to keep.\n delete_fn (function): Function that deletes checkpoints. Must be\n idempotent.\n \"\"\"\n self.keep_checkpoints_num = keep_checkpoints_num or float(\"inf\")\n assert self.keep_checkpoints_num > 0, (\n \"keep_checkpoints_num must be greater than 0.\")\n self._checkpoint_score_desc = checkpoint_score_attr.startswith(\"min-\")\n if self._checkpoint_score_desc:\n self._checkpoint_score_attr = checkpoint_score_attr[4:]\n else:\n self._checkpoint_score_attr = checkpoint_score_attr\n\n self.delete = delete_fn\n self.newest_persistent_checkpoint = Checkpoint(Checkpoint.PERSISTENT,\n None)\n self.newest_memory_checkpoint = Checkpoint(Checkpoint.MEMORY, None)\n self._best_checkpoints = []\n self._membership = set()\n\n @property\n def newest_checkpoint(self):\n \"\"\"Returns the newest checkpoint (based on training iteration).\"\"\"\n newest_checkpoint = max(\n [self.newest_persistent_checkpoint, self.newest_memory_checkpoint],\n key=lambda c: c.result.get(TRAINING_ITERATION, -1))\n return newest_checkpoint\n\n def on_checkpoint(self, checkpoint):\n \"\"\"Starts tracking checkpoint metadata on checkpoint.\n\n Sets the newest checkpoint. For PERSISTENT checkpoints: Deletes\n previous checkpoint as long as it isn't one of the best ones. Also\n deletes the worst checkpoint if at capacity.\n\n Args:\n checkpoint (Checkpoint): Trial state checkpoint.\n \"\"\"\n if checkpoint.storage == Checkpoint.MEMORY:\n self.newest_memory_checkpoint = checkpoint\n return\n\n old_checkpoint = self.newest_persistent_checkpoint\n self.newest_persistent_checkpoint = checkpoint\n\n # Remove the old checkpoint if it isn't one of the best ones.\n if old_checkpoint.value and old_checkpoint not in self._membership:\n self.delete(old_checkpoint)\n\n try:\n queue_item = QueueItem(self._priority(checkpoint), checkpoint)\n except KeyError:\n logger.error(\"Result dict has no key: {}. \"\n \"checkpoint_score_attr must be set to a key in the \"\n \"result dict.\".format(self._checkpoint_score_attr))\n return\n\n if len(self._best_checkpoints) < self.keep_checkpoints_num:\n heapq.heappush(self._best_checkpoints, queue_item)\n self._membership.add(checkpoint)\n elif queue_item.priority >= self._best_checkpoints[0].priority:\n worst = heapq.heappushpop(self._best_checkpoints, queue_item).value\n self._membership.add(checkpoint)\n if worst in self._membership:\n self._membership.remove(worst)\n # Don't delete the newest checkpoint. 
It will be deleted on the\n # next on_checkpoint() call since it isn't in self._membership.\n if worst != checkpoint:\n self.delete(worst)\n\n def best_checkpoints(self):\n \"\"\"Returns best PERSISTENT checkpoints, sorted by score.\"\"\"\n checkpoints = sorted(self._best_checkpoints, key=lambda c: c.priority)\n return [queue_item.value for queue_item in checkpoints]\n\n def _priority(self, checkpoint):\n priority = checkpoint.result[self._checkpoint_score_attr]\n return -priority if self._checkpoint_score_desc else priority\n\n def __getstate__(self):\n state = self.__dict__.copy()\n # Avoid serializing lambda since it may capture cyclical dependencies.\n state.pop(\"delete\")\n return state\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n self.delete = None\n", "path": "python/ray/tune/checkpoint_manager.py"}], "after_files": [{"content": "# coding: utf-8\nimport heapq\nimport logging\n\nfrom ray.tune.result import TRAINING_ITERATION\n\nlogger = logging.getLogger(__name__)\n\n\nclass Checkpoint:\n \"\"\"Describes a checkpoint of trial state.\n\n Checkpoint may be saved in different storage.\n\n Attributes:\n storage (str): Storage type.\n value (str): If storage==MEMORY, it is a Python object.\n If storage==PERSISTENT, it is a path to persistent storage,\n or a future that will be resolved to such a path.\n \"\"\"\n\n MEMORY = \"memory\"\n PERSISTENT = \"persistent\"\n\n def __init__(self, storage, value, result=None):\n self.storage = storage\n self.value = value\n self.result = result or {}\n\n @staticmethod\n def from_object(value=None):\n \"\"\"Creates a checkpoint from a Python object.\"\"\"\n return Checkpoint(Checkpoint.MEMORY, value)\n\n @property\n def is_ready(self):\n \"\"\"Returns whether the checkpoint is ready to be used for restoration.\n\n A PERSISTENT checkpoint is considered ready once its value is resolved\n to an actual path. MEMORY checkpoints are always considered ready since\n they are transient.\n \"\"\"\n if self.storage == Checkpoint.PERSISTENT:\n return isinstance(self.value, str)\n return self.storage == Checkpoint.MEMORY\n\n\nclass QueueItem:\n def __init__(self, priority, value):\n self.priority = priority\n self.value = value\n\n def __lt__(self, other):\n return self.priority < other.priority\n\n\nclass CheckpointManager:\n \"\"\"Manages checkpoints on the driver for a trial.\"\"\"\n\n def __init__(self, keep_checkpoints_num, checkpoint_score_attr, delete_fn):\n \"\"\"Initializes a new CheckpointManager.\n\n `newest_persistent_checkpoint` and `newest_memory_checkpoint` are\n initialized to Checkpoint objects with values of None.\n\n Args:\n keep_checkpoints_num (int): Keep at least this many checkpoints.\n checkpoint_score_attr (str): Attribute to use to determine which\n checkpoints to keep.\n delete_fn (function): Function that deletes checkpoints. 
Must be\n idempotent.\n \"\"\"\n self.keep_checkpoints_num = keep_checkpoints_num or float(\"inf\")\n assert self.keep_checkpoints_num > 0, (\n \"keep_checkpoints_num must be greater than 0.\")\n self._checkpoint_score_desc = checkpoint_score_attr.startswith(\"min-\")\n if self._checkpoint_score_desc:\n self._checkpoint_score_attr = checkpoint_score_attr[4:]\n else:\n self._checkpoint_score_attr = checkpoint_score_attr\n\n self.delete = delete_fn\n self.newest_persistent_checkpoint = Checkpoint(Checkpoint.PERSISTENT,\n None)\n self.newest_memory_checkpoint = Checkpoint(Checkpoint.MEMORY, None)\n self._best_checkpoints = []\n self._membership = set()\n\n @property\n def newest_checkpoint(self):\n \"\"\"Returns the newest checkpoint (based on training iteration).\"\"\"\n newest_checkpoint = max(\n [self.newest_persistent_checkpoint, self.newest_memory_checkpoint],\n key=lambda c: c.result.get(TRAINING_ITERATION, -1))\n return newest_checkpoint\n\n def on_checkpoint(self, checkpoint):\n \"\"\"Starts tracking checkpoint metadata on checkpoint.\n\n Sets the newest checkpoint. For PERSISTENT checkpoints: Deletes\n previous checkpoint as long as it isn't one of the best ones. Also\n deletes the worst checkpoint if at capacity.\n\n Args:\n checkpoint (Checkpoint): Trial state checkpoint.\n \"\"\"\n if checkpoint.storage == Checkpoint.MEMORY:\n self.newest_memory_checkpoint = checkpoint\n return\n\n old_checkpoint = self.newest_persistent_checkpoint\n\n if old_checkpoint.value == checkpoint.value:\n return\n\n self.newest_persistent_checkpoint = checkpoint\n\n # Remove the old checkpoint if it isn't one of the best ones.\n if old_checkpoint.value and old_checkpoint not in self._membership:\n self.delete(old_checkpoint)\n\n try:\n queue_item = QueueItem(self._priority(checkpoint), checkpoint)\n except KeyError:\n logger.error(\"Result dict has no key: {}. \"\n \"checkpoint_score_attr must be set to a key in the \"\n \"result dict.\".format(self._checkpoint_score_attr))\n return\n\n if len(self._best_checkpoints) < self.keep_checkpoints_num:\n heapq.heappush(self._best_checkpoints, queue_item)\n self._membership.add(checkpoint)\n elif queue_item.priority >= self._best_checkpoints[0].priority:\n worst = heapq.heappushpop(self._best_checkpoints, queue_item).value\n self._membership.add(checkpoint)\n if worst in self._membership:\n self._membership.remove(worst)\n # Don't delete the newest checkpoint. It will be deleted on the\n # next on_checkpoint() call since it isn't in self._membership.\n if worst != checkpoint:\n self.delete(worst)\n\n def best_checkpoints(self):\n \"\"\"Returns best PERSISTENT checkpoints, sorted by score.\"\"\"\n checkpoints = sorted(self._best_checkpoints, key=lambda c: c.priority)\n return [queue_item.value for queue_item in checkpoints]\n\n def _priority(self, checkpoint):\n priority = checkpoint.result[self._checkpoint_score_attr]\n return -priority if self._checkpoint_score_desc else priority\n\n def __getstate__(self):\n state = self.__dict__.copy()\n # Avoid serializing lambda since it may capture cyclical dependencies.\n state.pop(\"delete\")\n return state\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n self.delete = None\n", "path": "python/ray/tune/checkpoint_manager.py"}]} | 2,319 | 119 |
gh_patches_debug_17290 | rasdani/github-patches | git_diff | joke2k__faker-919 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Brazilian RG (identity card)
Add a generator for the Brazilian RG (identity card).
### Steps to reproduce
fake = Faker('pt_Br')
fake.rg()
### Expected behavior
The generated value should follow these rules:
https://www.ngmatematica.com/2014/02/como-determinar-o-digito-verificador-do.html
8 digits + 1 checksum digit
### Actual behavior
New feature
--- END ISSUE ---
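To make the expected rule concrete: the eight base digits are weighted by 2 through 9, summed, and the check digit is derived modulo 11, with 10 mapping to "X" and 11 mapping to 0. The sketch below follows the convention used in the patch further down (the example RG number is made up):

```python
# Check-digit rule sketch for the Brazilian RG (weights 2..9, mod 11).
def rg_check_digit(digits):
    """digits: the 8 base digits of the RG as a list of ints."""
    total = sum(weight * digit for weight, digit in zip(range(2, 10), digits))
    remainder = 11 - (total % 11)
    if remainder == 10:
        return "X"
    if remainder == 11:
        return "0"
    return str(remainder)


# 2*2 + 4*3 + 6*4 + 7*5 + 8*6 + 1*7 + 3*8 + 1*9 = 163; 11 - (163 % 11) = 2
assert rg_check_digit([2, 4, 6, 7, 8, 1, 3, 1]) == "2"
```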
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/ssn/pt_BR/__init__.py`
Content:
```
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4 from .. import Provider as SsnProvider
5
6
7 def checksum(digits):
8 """
9 Returns the checksum of CPF digits.
10 References to the algorithm:
11 https://pt.wikipedia.org/wiki/Cadastro_de_pessoas_f%C3%ADsicas#Algoritmo
12 https://metacpan.org/source/MAMAWE/Algorithm-CheckDigits-v1.3.0/lib/Algorithm/CheckDigits/M11_004.pm
13 """
14 s = 0
15 p = len(digits) + 1
16 for i in range(0, len(digits)):
17 s += digits[i] * p
18 p -= 1
19
20 reminder = s % 11
21 if reminder == 0 or reminder == 1:
22 return 0
23 else:
24 return 11 - reminder
25
26
27 class Provider(SsnProvider):
28 """
29 Provider for Brazilian SSN also known in Brazil as CPF.
30 There are two methods Provider.ssn and Provider.cpf
31 The snn returns a valid number with numbers only
32 The cpf return a valid number formatted with brazilian mask. eg nnn.nnn.nnn-nn
33 """
34
35 def ssn(self):
36 digits = self.generator.random.sample(range(10), 9)
37
38 dv = checksum(digits)
39 digits.append(dv)
40 digits.append(checksum(digits))
41
42 return ''.join(map(str, digits))
43
44 def cpf(self):
45 c = self.ssn()
46 return c[:3] + '.' + c[3:6] + '.' + c[6:9] + '-' + c[9:]
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/faker/providers/ssn/pt_BR/__init__.py b/faker/providers/ssn/pt_BR/__init__.py
--- a/faker/providers/ssn/pt_BR/__init__.py
+++ b/faker/providers/ssn/pt_BR/__init__.py
@@ -1,6 +1,7 @@
# coding=utf-8
from __future__ import unicode_literals
+
from .. import Provider as SsnProvider
@@ -44,3 +45,22 @@
def cpf(self):
c = self.ssn()
return c[:3] + '.' + c[3:6] + '.' + c[6:9] + '-' + c[9:]
+
+ def rg(self):
+ """
+ Brazilian RG, return plain numbers.
+ Check: https://www.ngmatematica.com/2014/02/como-determinar-o-digito-verificador-do.html
+ """
+
+ digits = self.generator.random.sample(range(0, 9), 8)
+ checksum = sum(i * digits[i - 2] for i in range(2, 10))
+ last_digit = 11 - (checksum % 11)
+
+ if last_digit == 10:
+ digits.append('X')
+ elif last_digit == 11:
+ digits.append(0)
+ else:
+ digits.append(last_digit)
+
+ return ''.join(map(str, digits))
| {"golden_diff": "diff --git a/faker/providers/ssn/pt_BR/__init__.py b/faker/providers/ssn/pt_BR/__init__.py\n--- a/faker/providers/ssn/pt_BR/__init__.py\n+++ b/faker/providers/ssn/pt_BR/__init__.py\n@@ -1,6 +1,7 @@\n # coding=utf-8\n \n from __future__ import unicode_literals\n+\n from .. import Provider as SsnProvider\n \n \n@@ -44,3 +45,22 @@\n def cpf(self):\n c = self.ssn()\n return c[:3] + '.' + c[3:6] + '.' + c[6:9] + '-' + c[9:]\n+\n+ def rg(self):\n+ \"\"\"\n+ Brazilian RG, return plain numbers.\n+ Check: https://www.ngmatematica.com/2014/02/como-determinar-o-digito-verificador-do.html\n+ \"\"\"\n+\n+ digits = self.generator.random.sample(range(0, 9), 8)\n+ checksum = sum(i * digits[i - 2] for i in range(2, 10))\n+ last_digit = 11 - (checksum % 11)\n+\n+ if last_digit == 10:\n+ digits.append('X')\n+ elif last_digit == 11:\n+ digits.append(0)\n+ else:\n+ digits.append(last_digit)\n+\n+ return ''.join(map(str, digits))\n", "issue": "Brazilian RG (identity card)\nAdd Generator to Brazilian RG (identity card)\r\n\r\n### Steps to reproduce\r\nfake = Faker('pt_Br')\r\nfake.rg()\r\n\r\n### Expected behavior\r\nreturn like this rules:\r\nhttps://www.ngmatematica.com/2014/02/como-determinar-o-digito-verificador-do.html\r\n8 digits + 1 checksum digit\r\n### Actual behavior\r\nNew feature\r\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\nfrom .. import Provider as SsnProvider\n\n\ndef checksum(digits):\n \"\"\"\n Returns the checksum of CPF digits.\n References to the algorithm:\n https://pt.wikipedia.org/wiki/Cadastro_de_pessoas_f%C3%ADsicas#Algoritmo\n https://metacpan.org/source/MAMAWE/Algorithm-CheckDigits-v1.3.0/lib/Algorithm/CheckDigits/M11_004.pm\n \"\"\"\n s = 0\n p = len(digits) + 1\n for i in range(0, len(digits)):\n s += digits[i] * p\n p -= 1\n\n reminder = s % 11\n if reminder == 0 or reminder == 1:\n return 0\n else:\n return 11 - reminder\n\n\nclass Provider(SsnProvider):\n \"\"\"\n Provider for Brazilian SSN also known in Brazil as CPF.\n There are two methods Provider.ssn and Provider.cpf\n The snn returns a valid number with numbers only\n The cpf return a valid number formatted with brazilian mask. eg nnn.nnn.nnn-nn\n \"\"\"\n\n def ssn(self):\n digits = self.generator.random.sample(range(10), 9)\n\n dv = checksum(digits)\n digits.append(dv)\n digits.append(checksum(digits))\n\n return ''.join(map(str, digits))\n\n def cpf(self):\n c = self.ssn()\n return c[:3] + '.' + c[3:6] + '.' + c[6:9] + '-' + c[9:]\n", "path": "faker/providers/ssn/pt_BR/__init__.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nfrom .. import Provider as SsnProvider\n\n\ndef checksum(digits):\n \"\"\"\n Returns the checksum of CPF digits.\n References to the algorithm:\n https://pt.wikipedia.org/wiki/Cadastro_de_pessoas_f%C3%ADsicas#Algoritmo\n https://metacpan.org/source/MAMAWE/Algorithm-CheckDigits-v1.3.0/lib/Algorithm/CheckDigits/M11_004.pm\n \"\"\"\n s = 0\n p = len(digits) + 1\n for i in range(0, len(digits)):\n s += digits[i] * p\n p -= 1\n\n reminder = s % 11\n if reminder == 0 or reminder == 1:\n return 0\n else:\n return 11 - reminder\n\n\nclass Provider(SsnProvider):\n \"\"\"\n Provider for Brazilian SSN also known in Brazil as CPF.\n There are two methods Provider.ssn and Provider.cpf\n The snn returns a valid number with numbers only\n The cpf return a valid number formatted with brazilian mask. 
eg nnn.nnn.nnn-nn\n \"\"\"\n\n def ssn(self):\n digits = self.generator.random.sample(range(10), 9)\n\n dv = checksum(digits)\n digits.append(dv)\n digits.append(checksum(digits))\n\n return ''.join(map(str, digits))\n\n def cpf(self):\n c = self.ssn()\n return c[:3] + '.' + c[3:6] + '.' + c[6:9] + '-' + c[9:]\n\n def rg(self):\n \"\"\"\n Brazilian RG, return plain numbers.\n Check: https://www.ngmatematica.com/2014/02/como-determinar-o-digito-verificador-do.html\n \"\"\"\n\n digits = self.generator.random.sample(range(0, 9), 8)\n checksum = sum(i * digits[i - 2] for i in range(2, 10))\n last_digit = 11 - (checksum % 11)\n\n if last_digit == 10:\n digits.append('X')\n elif last_digit == 11:\n digits.append(0)\n else:\n digits.append(last_digit)\n\n return ''.join(map(str, digits))\n", "path": "faker/providers/ssn/pt_BR/__init__.py"}]} | 797 | 324 |
gh_patches_debug_34130 | rasdani/github-patches | git_diff | azavea__raster-vision-1560 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve Dataset.from_uris methods
When using the `from_uris` methods (such as in `SemanticSegmentationSlidingWindowGeoDataset`), it's easy to forget to pass in an important argument due to the use of kwargs. For example, size and stride are needed, and `label_vector_default_class_id` defaults to None which counterintuitively removes all the vectors. We should fix these and related problems.
This issue was originally noted in https://github.com/azavea/raster-vision/pull/1476
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py`
Content:
```
1 from typing import TYPE_CHECKING, Dict, Optional
2 from copy import deepcopy
3
4 from rastervision.core.data.vector_transformer import VectorTransformer
5 from rastervision.core.data.vector_transformer.label_maker.filter import (
6 create_filter)
7 from rastervision.core.data.utils.geojson import features_to_geojson
8
9 if TYPE_CHECKING:
10 from rastervision.core.data import ClassConfig, CRSTransformer
11
12
13 class ClassInferenceTransformer(VectorTransformer):
14 """Infers missing class_ids from GeoJSON features.
15
16 Rules:
17 1) If class_id is in feature['properties'], use it.
18 2) If class_config is set and class_name or label are in
19 feature['properties'] and in class_config, use corresponding
20 class_id.
21 3) If class_id_to_filter is set and filter is true when applied to
22 feature, use corresponding class_id.
23 4) Otherwise, return the default_class_id
24 """
25
26 def __init__(self,
27 default_class_id: Optional[int],
28 class_config: Optional['ClassConfig'] = None,
29 class_id_to_filter: Optional[Dict[int, list]] = None):
30 self.class_config = class_config
31 self.class_id_to_filter = class_id_to_filter
32 self.default_class_id = default_class_id
33
34 if self.class_id_to_filter is not None:
35 self.class_id_to_filter = {}
36 for class_id, filter_exp in class_id_to_filter.items():
37 self.class_id_to_filter[int(class_id)] = create_filter(
38 filter_exp)
39
40 @staticmethod
41 def infer_feature_class_id(
42 feature: dict,
43 default_class_id: Optional[int],
44 class_config: Optional['ClassConfig'] = None,
45 class_id_to_filter: Optional[Dict[int, list]] = None
46 ) -> Optional[int]:
47 """Infer the class_id for a GeoJSON feature.
48
49 Rules:
50 1) If class_id is in feature['properties'], use it.
51 2) If class_config is set and class_name or label are in
52 feature['properties'] and in class_config, use corresponding
53 class_id.
54 3) If class_id_to_filter is set and filter is true when applied to
55 feature, use corresponding class_id.
56 4) Otherwise, return the default_class_id.
57
58 Args:
59 feature (dict): GeoJSON feature.
60
61 Returns:
62 Optional[int]: Inferred class ID.
63 """
64 class_id = feature.get('properties', {}).get('class_id')
65 if class_id is not None:
66 return class_id
67
68 if class_config is not None:
69 class_name = feature.get('properties', {}).get('class_name')
70 if class_name in class_config.names:
71 return class_config.names.index(class_name)
72
73 label = feature.get('properties', {}).get('label')
74 if label in class_config.names:
75 return class_config.names.index(label)
76
77 if class_id_to_filter is not None:
78 for class_id, filter_fn in class_id_to_filter.items():
79 if filter_fn(feature):
80 return class_id
81
82 return default_class_id
83
84 def transform(self,
85 geojson: dict,
86 crs_transformer: Optional['CRSTransformer'] = None) -> dict:
87 """Add class_id to feature properties and drop features with no class.
88
89 For each feature in geojson, the class_id is inferred and is set into
90 feature['properties']. If the class_id is None (because none of the
91 rules apply and the default_class_id is None), the feature is dropped.
92 """
93 new_features = []
94 for feature in geojson['features']:
95 class_id = self.infer_feature_class_id(
96 feature,
97 default_class_id=self.default_class_id,
98 class_config=self.class_config,
99 class_id_to_filter=self.class_id_to_filter)
100 if class_id is not None:
101 feature = deepcopy(feature)
102 properties = feature.get('properties', {})
103 properties['class_id'] = class_id
104 feature['properties'] = properties
105 new_features.append(feature)
106 new_geojson = features_to_geojson(new_features)
107 return new_geojson
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py b/rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py
--- a/rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py
+++ b/rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py
@@ -1,5 +1,6 @@
from typing import TYPE_CHECKING, Dict, Optional
from copy import deepcopy
+import logging
from rastervision.core.data.vector_transformer import VectorTransformer
from rastervision.core.data.vector_transformer.label_maker.filter import (
@@ -9,6 +10,8 @@
if TYPE_CHECKING:
from rastervision.core.data import ClassConfig, CRSTransformer
+log = logging.getLogger(__name__)
+
class ClassInferenceTransformer(VectorTransformer):
"""Infers missing class_ids from GeoJSON features.
@@ -91,6 +94,7 @@
rules apply and the default_class_id is None), the feature is dropped.
"""
new_features = []
+ warned = False
for feature in geojson['features']:
class_id = self.infer_feature_class_id(
feature,
@@ -103,5 +107,13 @@
properties['class_id'] = class_id
feature['properties'] = properties
new_features.append(feature)
+ elif not warned:
+ log.warning(
+ 'ClassInferenceTransformer is dropping vector features because '
+ 'class_id cannot be inferred. To avoid this behavior, '
+ 'set default_class_id to a non-None value in '
+ 'ClassInferenceTransformer.')
+ warned = True
+
new_geojson = features_to_geojson(new_features)
return new_geojson
| {"golden_diff": "diff --git a/rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py b/rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py\n--- a/rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py\n+++ b/rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py\n@@ -1,5 +1,6 @@\n from typing import TYPE_CHECKING, Dict, Optional\n from copy import deepcopy\n+import logging\n \n from rastervision.core.data.vector_transformer import VectorTransformer\n from rastervision.core.data.vector_transformer.label_maker.filter import (\n@@ -9,6 +10,8 @@\n if TYPE_CHECKING:\n from rastervision.core.data import ClassConfig, CRSTransformer\n \n+log = logging.getLogger(__name__)\n+\n \n class ClassInferenceTransformer(VectorTransformer):\n \"\"\"Infers missing class_ids from GeoJSON features.\n@@ -91,6 +94,7 @@\n rules apply and the default_class_id is None), the feature is dropped.\n \"\"\"\n new_features = []\n+ warned = False\n for feature in geojson['features']:\n class_id = self.infer_feature_class_id(\n feature,\n@@ -103,5 +107,13 @@\n properties['class_id'] = class_id\n feature['properties'] = properties\n new_features.append(feature)\n+ elif not warned:\n+ log.warning(\n+ 'ClassInferenceTransformer is dropping vector features because '\n+ 'class_id cannot be inferred. To avoid this behavior, '\n+ 'set default_class_id to a non-None value in '\n+ 'ClassInferenceTransformer.')\n+ warned = True\n+\n new_geojson = features_to_geojson(new_features)\n return new_geojson\n", "issue": "Improve Dataset.from_uris methods\nWhen using the `from_uris` methods (such as in `SemanticSegmentationSlidingWindowGeoDataset`), it's easy to forget to pass in an important argument due to the use of kwargs. For example, size and stride are needed, and `label_vector_default_class_id` defaults to None which counterintuitively removes all the vectors. 
We should fix these and related problems.\r\n\r\nThis issue was originally noted in https://github.com/azavea/raster-vision/pull/1476\r\n\r\n\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Dict, Optional\nfrom copy import deepcopy\n\nfrom rastervision.core.data.vector_transformer import VectorTransformer\nfrom rastervision.core.data.vector_transformer.label_maker.filter import (\n create_filter)\nfrom rastervision.core.data.utils.geojson import features_to_geojson\n\nif TYPE_CHECKING:\n from rastervision.core.data import ClassConfig, CRSTransformer\n\n\nclass ClassInferenceTransformer(VectorTransformer):\n \"\"\"Infers missing class_ids from GeoJSON features.\n\n Rules:\n 1) If class_id is in feature['properties'], use it.\n 2) If class_config is set and class_name or label are in\n feature['properties'] and in class_config, use corresponding\n class_id.\n 3) If class_id_to_filter is set and filter is true when applied to\n feature, use corresponding class_id.\n 4) Otherwise, return the default_class_id\n \"\"\"\n\n def __init__(self,\n default_class_id: Optional[int],\n class_config: Optional['ClassConfig'] = None,\n class_id_to_filter: Optional[Dict[int, list]] = None):\n self.class_config = class_config\n self.class_id_to_filter = class_id_to_filter\n self.default_class_id = default_class_id\n\n if self.class_id_to_filter is not None:\n self.class_id_to_filter = {}\n for class_id, filter_exp in class_id_to_filter.items():\n self.class_id_to_filter[int(class_id)] = create_filter(\n filter_exp)\n\n @staticmethod\n def infer_feature_class_id(\n feature: dict,\n default_class_id: Optional[int],\n class_config: Optional['ClassConfig'] = None,\n class_id_to_filter: Optional[Dict[int, list]] = None\n ) -> Optional[int]:\n \"\"\"Infer the class_id for a GeoJSON feature.\n\n Rules:\n 1) If class_id is in feature['properties'], use it.\n 2) If class_config is set and class_name or label are in\n feature['properties'] and in class_config, use corresponding\n class_id.\n 3) If class_id_to_filter is set and filter is true when applied to\n feature, use corresponding class_id.\n 4) Otherwise, return the default_class_id.\n\n Args:\n feature (dict): GeoJSON feature.\n\n Returns:\n Optional[int]: Inferred class ID.\n \"\"\"\n class_id = feature.get('properties', {}).get('class_id')\n if class_id is not None:\n return class_id\n\n if class_config is not None:\n class_name = feature.get('properties', {}).get('class_name')\n if class_name in class_config.names:\n return class_config.names.index(class_name)\n\n label = feature.get('properties', {}).get('label')\n if label in class_config.names:\n return class_config.names.index(label)\n\n if class_id_to_filter is not None:\n for class_id, filter_fn in class_id_to_filter.items():\n if filter_fn(feature):\n return class_id\n\n return default_class_id\n\n def transform(self,\n geojson: dict,\n crs_transformer: Optional['CRSTransformer'] = None) -> dict:\n \"\"\"Add class_id to feature properties and drop features with no class.\n\n For each feature in geojson, the class_id is inferred and is set into\n feature['properties']. 
If the class_id is None (because none of the\n rules apply and the default_class_id is None), the feature is dropped.\n \"\"\"\n new_features = []\n for feature in geojson['features']:\n class_id = self.infer_feature_class_id(\n feature,\n default_class_id=self.default_class_id,\n class_config=self.class_config,\n class_id_to_filter=self.class_id_to_filter)\n if class_id is not None:\n feature = deepcopy(feature)\n properties = feature.get('properties', {})\n properties['class_id'] = class_id\n feature['properties'] = properties\n new_features.append(feature)\n new_geojson = features_to_geojson(new_features)\n return new_geojson\n", "path": "rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py"}], "after_files": [{"content": "from typing import TYPE_CHECKING, Dict, Optional\nfrom copy import deepcopy\nimport logging\n\nfrom rastervision.core.data.vector_transformer import VectorTransformer\nfrom rastervision.core.data.vector_transformer.label_maker.filter import (\n create_filter)\nfrom rastervision.core.data.utils.geojson import features_to_geojson\n\nif TYPE_CHECKING:\n from rastervision.core.data import ClassConfig, CRSTransformer\n\nlog = logging.getLogger(__name__)\n\n\nclass ClassInferenceTransformer(VectorTransformer):\n \"\"\"Infers missing class_ids from GeoJSON features.\n\n Rules:\n 1) If class_id is in feature['properties'], use it.\n 2) If class_config is set and class_name or label are in\n feature['properties'] and in class_config, use corresponding\n class_id.\n 3) If class_id_to_filter is set and filter is true when applied to\n feature, use corresponding class_id.\n 4) Otherwise, return the default_class_id\n \"\"\"\n\n def __init__(self,\n default_class_id: Optional[int],\n class_config: Optional['ClassConfig'] = None,\n class_id_to_filter: Optional[Dict[int, list]] = None):\n self.class_config = class_config\n self.class_id_to_filter = class_id_to_filter\n self.default_class_id = default_class_id\n\n if self.class_id_to_filter is not None:\n self.class_id_to_filter = {}\n for class_id, filter_exp in class_id_to_filter.items():\n self.class_id_to_filter[int(class_id)] = create_filter(\n filter_exp)\n\n @staticmethod\n def infer_feature_class_id(\n feature: dict,\n default_class_id: Optional[int],\n class_config: Optional['ClassConfig'] = None,\n class_id_to_filter: Optional[Dict[int, list]] = None\n ) -> Optional[int]:\n \"\"\"Infer the class_id for a GeoJSON feature.\n\n Rules:\n 1) If class_id is in feature['properties'], use it.\n 2) If class_config is set and class_name or label are in\n feature['properties'] and in class_config, use corresponding\n class_id.\n 3) If class_id_to_filter is set and filter is true when applied to\n feature, use corresponding class_id.\n 4) Otherwise, return the default_class_id.\n\n Args:\n feature (dict): GeoJSON feature.\n\n Returns:\n Optional[int]: Inferred class ID.\n \"\"\"\n class_id = feature.get('properties', {}).get('class_id')\n if class_id is not None:\n return class_id\n\n if class_config is not None:\n class_name = feature.get('properties', {}).get('class_name')\n if class_name in class_config.names:\n return class_config.names.index(class_name)\n\n label = feature.get('properties', {}).get('label')\n if label in class_config.names:\n return class_config.names.index(label)\n\n if class_id_to_filter is not None:\n for class_id, filter_fn in class_id_to_filter.items():\n if filter_fn(feature):\n return class_id\n\n return default_class_id\n\n def transform(self,\n geojson: dict,\n 
crs_transformer: Optional['CRSTransformer'] = None) -> dict:\n \"\"\"Add class_id to feature properties and drop features with no class.\n\n For each feature in geojson, the class_id is inferred and is set into\n feature['properties']. If the class_id is None (because none of the\n rules apply and the default_class_id is None), the feature is dropped.\n \"\"\"\n new_features = []\n warned = False\n for feature in geojson['features']:\n class_id = self.infer_feature_class_id(\n feature,\n default_class_id=self.default_class_id,\n class_config=self.class_config,\n class_id_to_filter=self.class_id_to_filter)\n if class_id is not None:\n feature = deepcopy(feature)\n properties = feature.get('properties', {})\n properties['class_id'] = class_id\n feature['properties'] = properties\n new_features.append(feature)\n elif not warned:\n log.warning(\n 'ClassInferenceTransformer is dropping vector features because '\n 'class_id cannot be inferred. To avoid this behavior, '\n 'set default_class_id to a non-None value in '\n 'ClassInferenceTransformer.')\n warned = True\n\n new_geojson = features_to_geojson(new_features)\n return new_geojson\n", "path": "rastervision_core/rastervision/core/data/vector_transformer/class_inference_transformer.py"}]} | 1,494 | 404 |
gh_patches_debug_29012 | rasdani/github-patches | git_diff | mlflow__mlflow-4002 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[FR] AzureBlobArtifactRepository connection using Service Principal credentials
## Willingness to contribute
The MLflow Community encourages new feature contributions. Would you or another member of your organization be willing to contribute an implementation of this feature (either as an MLflow Plugin or an enhancement to the MLflow code base)?
- [x] Yes. I can contribute this feature independently.
- [x] Yes. I would be willing to contribute this feature with guidance from the MLflow community.
- [ ] No. I cannot contribute this feature at this time.
## Proposal Summary
Allow the user to connect with the AzureBlobArtifactRepository using service principal credentials taken from the following environment variables:
* AZURE_TENANT_ID
* AZURE_CLIENT_ID
* AZURE_CLIENT_SECRET
## Motivation
- What is the use case for this feature?
Having the AZURE_STORAGE_CONNECTION_STRING or AZURE_STORAGE_ACCESS_KEY variables set will give the user of that environment complete control over the whole storage account. This is not always desirable in a situation where a storage account has multiple blob containers and the users of one blob container should not have access to another user's container.
- Why is this use case valuable to support for MLflow users in general?
It gives the user more options to choose from while connecting with their artifact repository.
- Why is this use case valuable to support for your project(s) or organization
My organization hosts multiple tracking servers (with corresponding artifact repositories) in which groups of users should be separated. Allowing the connection via service principal credentials gives us the ability to assign permissions on a container to a service principal.
- Why is it currently difficult to achieve this use case? (please be as specific as possible about why related MLflow features and components are insufficient)
The AzureBlobArtifactRepository requires either the AZURE_STORAGE_CONNECTION_STRING or AZURE_STORAGE_ACCESS_KEY to be set, which gives the user full control of the whole storage account.
### What component(s), interfaces, languages, and integrations does this feature affect?
Components
- [x] `area/artifacts`: Artifact stores and artifact logging
- [ ] `area/build`: Build and test infrastructure for MLflow
- [x] `area/docs`: MLflow documentation pages
- [ ] `area/examples`: Example code
- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry
- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors
- [ ] `area/projects`: MLproject format, project running backends
- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs
- [ ] `area/server-infra`: MLflow server, JavaScript dev server
- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging
Interfaces
- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting
- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models
- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry
- [ ] `area/windows`: Windows support
Languages
- [ ] `language/r`: R APIs and clients
- [ ] `language/java`: Java APIs and clients
- [ ] `language/new`: Proposals for new client languages
Integrations
- [ ] `integrations/azure`: Azure and Azure ML integrations
- [ ] `integrations/sagemaker`: SageMaker integrations
- [ ] `integrations/databricks`: Databricks integrations
## Details
The solution would entail adding another elif branch in the AzureBlobArtifactRepository __init__ function, that checks for the AZURE_TENANT_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET environment variables, creates a [ClientSecretCredential](https://docs.microsoft.com/en-us/python/api/azure-identity/azure.identity.clientsecretcredential?view=azure-python) and uses it to create the BlobServiceClient.
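For illustration, a minimal sketch of what the proposed branch would build (the helper name is hypothetical; it only shows the `azure-identity` calls the proposal refers to, not the final implementation):

```python
# Hypothetical sketch of the proposal above -- not the final implementation.
import os

from azure.identity import ClientSecretCredential
from azure.storage.blob import BlobServiceClient


def _client_from_service_principal(account):
    """Build a BlobServiceClient from AZURE_TENANT_ID / AZURE_CLIENT_ID / AZURE_CLIENT_SECRET."""
    credential = ClientSecretCredential(
        tenant_id=os.environ["AZURE_TENANT_ID"],
        client_id=os.environ["AZURE_CLIENT_ID"],
        client_secret=os.environ["AZURE_CLIENT_SECRET"],
    )
    account_url = "https://{account}.blob.core.windows.net".format(account=account)
    return BlobServiceClient(account_url=account_url, credential=credential)
```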
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlflow/store/artifact/azure_blob_artifact_repo.py`
Content:
```
1 import os
2 import posixpath
3 import re
4 import urllib.parse
5
6 from mlflow.entities import FileInfo
7 from mlflow.exceptions import MlflowException
8 from mlflow.store.artifact.artifact_repo import ArtifactRepository
9
10
11 class AzureBlobArtifactRepository(ArtifactRepository):
12 """
13 Stores artifacts on Azure Blob Storage.
14
15 This repository is used with URIs of the form
16 ``wasbs://<container-name>@<ystorage-account-name>.blob.core.windows.net/<path>``,
17 following the same URI scheme as Hadoop on Azure blob storage. It requires that your Azure
18 storage access key be available in the environment variable ``AZURE_STORAGE_ACCESS_KEY``.
19 """
20
21 def __init__(self, artifact_uri, client=None):
22 super().__init__(artifact_uri)
23
24 # Allow override for testing
25 if client:
26 self.client = client
27 return
28
29 from azure.storage.blob import BlobServiceClient
30
31 (_, account, _) = AzureBlobArtifactRepository.parse_wasbs_uri(artifact_uri)
32 if "AZURE_STORAGE_CONNECTION_STRING" in os.environ:
33 self.client = BlobServiceClient.from_connection_string(
34 conn_str=os.environ.get("AZURE_STORAGE_CONNECTION_STRING")
35 )
36 elif "AZURE_STORAGE_ACCESS_KEY" in os.environ:
37 account_url = "https://{account}.blob.core.windows.net".format(account=account)
38 self.client = BlobServiceClient(
39 account_url=account_url, credential=os.environ.get("AZURE_STORAGE_ACCESS_KEY")
40 )
41 else:
42 raise Exception(
43 "You need to set one of AZURE_STORAGE_CONNECTION_STRING or "
44 "AZURE_STORAGE_ACCESS_KEY to access Azure storage."
45 )
46
47 @staticmethod
48 def parse_wasbs_uri(uri):
49 """Parse a wasbs:// URI, returning (container, storage_account, path)."""
50 parsed = urllib.parse.urlparse(uri)
51 if parsed.scheme != "wasbs":
52 raise Exception("Not a WASBS URI: %s" % uri)
53 match = re.match("([^@]+)@([^.]+)\\.blob\\.core\\.windows\\.net", parsed.netloc)
54 if match is None:
55 raise Exception(
56 "WASBS URI must be of the form " "<container>@<account>.blob.core.windows.net"
57 )
58 container = match.group(1)
59 storage_account = match.group(2)
60 path = parsed.path
61 if path.startswith("/"):
62 path = path[1:]
63 return container, storage_account, path
64
65 def log_artifact(self, local_file, artifact_path=None):
66 (container, _, dest_path) = self.parse_wasbs_uri(self.artifact_uri)
67 container_client = self.client.get_container_client(container)
68 if artifact_path:
69 dest_path = posixpath.join(dest_path, artifact_path)
70 dest_path = posixpath.join(dest_path, os.path.basename(local_file))
71 with open(local_file, "rb") as file:
72 container_client.upload_blob(dest_path, file)
73
74 def log_artifacts(self, local_dir, artifact_path=None):
75 (container, _, dest_path) = self.parse_wasbs_uri(self.artifact_uri)
76 container_client = self.client.get_container_client(container)
77 if artifact_path:
78 dest_path = posixpath.join(dest_path, artifact_path)
79 local_dir = os.path.abspath(local_dir)
80 for (root, _, filenames) in os.walk(local_dir):
81 upload_path = dest_path
82 if root != local_dir:
83 rel_path = os.path.relpath(root, local_dir)
84 upload_path = posixpath.join(dest_path, rel_path)
85 for f in filenames:
86 remote_file_path = posixpath.join(upload_path, f)
87 local_file_path = os.path.join(root, f)
88 with open(local_file_path, "rb") as file:
89 container_client.upload_blob(remote_file_path, file)
90
91 def list_artifacts(self, path=None):
92 # Newer versions of `azure-storage-blob` (>= 12.4.0) provide a public
93 # `azure.storage.blob.BlobPrefix` object to signify that a blob is a directory,
94 # while older versions only expose this API internally as
95 # `azure.storage.blob._models.BlobPrefix`
96 try:
97 from azure.storage.blob import BlobPrefix
98 except ImportError:
99 from azure.storage.blob._models import BlobPrefix
100
101 (container, _, artifact_path) = self.parse_wasbs_uri(self.artifact_uri)
102 container_client = self.client.get_container_client(container)
103 dest_path = artifact_path
104 if path:
105 dest_path = posixpath.join(dest_path, path)
106 infos = []
107 prefix = dest_path if dest_path.endswith("/") else dest_path + "/"
108 results = container_client.walk_blobs(name_starts_with=prefix)
109 for r in results:
110 if not r.name.startswith(artifact_path):
111 raise MlflowException(
112 "The name of the listed Azure blob does not begin with the specified"
113 " artifact path. Artifact path: {artifact_path}. Blob name:"
114 " {blob_name}".format(artifact_path=artifact_path, blob_name=r.name)
115 )
116 if isinstance(r, BlobPrefix): # This is a prefix for items in a subdirectory
117 subdir = posixpath.relpath(path=r.name, start=artifact_path)
118 if subdir.endswith("/"):
119 subdir = subdir[:-1]
120 infos.append(FileInfo(subdir, True, None))
121 else: # Just a plain old blob
122 file_name = posixpath.relpath(path=r.name, start=artifact_path)
123 infos.append(FileInfo(file_name, False, r.size))
124 # The list_artifacts API expects us to return an empty list if the
125 # the path references a single file.
126 rel_path = dest_path[len(artifact_path) + 1 :]
127 if (len(infos) == 1) and not infos[0].is_dir and (infos[0].path == rel_path):
128 return []
129 return sorted(infos, key=lambda f: f.path)
130
131 def _download_file(self, remote_file_path, local_path):
132 (container, _, remote_root_path) = self.parse_wasbs_uri(self.artifact_uri)
133 container_client = self.client.get_container_client(container)
134 remote_full_path = posixpath.join(remote_root_path, remote_file_path)
135 with open(local_path, "wb") as file:
136 container_client.download_blob(remote_full_path).readinto(file)
137
138 def delete_artifacts(self, artifact_path=None):
139 raise MlflowException("Not implemented yet")
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlflow/store/artifact/azure_blob_artifact_repo.py b/mlflow/store/artifact/azure_blob_artifact_repo.py
--- a/mlflow/store/artifact/azure_blob_artifact_repo.py
+++ b/mlflow/store/artifact/azure_blob_artifact_repo.py
@@ -14,8 +14,10 @@
This repository is used with URIs of the form
``wasbs://<container-name>@<ystorage-account-name>.blob.core.windows.net/<path>``,
- following the same URI scheme as Hadoop on Azure blob storage. It requires that your Azure
- storage access key be available in the environment variable ``AZURE_STORAGE_ACCESS_KEY``.
+ following the same URI scheme as Hadoop on Azure blob storage. It requires either that:
+ - Azure storage connection string is in the env var ``AZURE_STORAGE_CONNECTION_STRING``
+ - Azure storage access key is in the env var ``AZURE_STORAGE_ACCESS_KEY``
+ - DefaultAzureCredential is configured
"""
def __init__(self, artifact_uri, client=None):
@@ -39,9 +41,17 @@
account_url=account_url, credential=os.environ.get("AZURE_STORAGE_ACCESS_KEY")
)
else:
- raise Exception(
- "You need to set one of AZURE_STORAGE_CONNECTION_STRING or "
- "AZURE_STORAGE_ACCESS_KEY to access Azure storage."
+ try:
+ from azure.identity import DefaultAzureCredential
+ except ImportError as exc:
+ raise ImportError(
+ "Using DefaultAzureCredential requires the azure-identity package. "
+ "Please install it via: pip install azure-identity"
+ ) from exc
+
+ account_url = "https://{account}.blob.core.windows.net".format(account=account)
+ self.client = BlobServiceClient(
+ account_url=account_url, credential=DefaultAzureCredential()
)
@staticmethod
| {"golden_diff": "diff --git a/mlflow/store/artifact/azure_blob_artifact_repo.py b/mlflow/store/artifact/azure_blob_artifact_repo.py\n--- a/mlflow/store/artifact/azure_blob_artifact_repo.py\n+++ b/mlflow/store/artifact/azure_blob_artifact_repo.py\n@@ -14,8 +14,10 @@\n \n This repository is used with URIs of the form\n ``wasbs://<container-name>@<ystorage-account-name>.blob.core.windows.net/<path>``,\n- following the same URI scheme as Hadoop on Azure blob storage. It requires that your Azure\n- storage access key be available in the environment variable ``AZURE_STORAGE_ACCESS_KEY``.\n+ following the same URI scheme as Hadoop on Azure blob storage. It requires either that:\n+ - Azure storage connection string is in the env var ``AZURE_STORAGE_CONNECTION_STRING``\n+ - Azure storage access key is in the env var ``AZURE_STORAGE_ACCESS_KEY``\n+ - DefaultAzureCredential is configured\n \"\"\"\n \n def __init__(self, artifact_uri, client=None):\n@@ -39,9 +41,17 @@\n account_url=account_url, credential=os.environ.get(\"AZURE_STORAGE_ACCESS_KEY\")\n )\n else:\n- raise Exception(\n- \"You need to set one of AZURE_STORAGE_CONNECTION_STRING or \"\n- \"AZURE_STORAGE_ACCESS_KEY to access Azure storage.\"\n+ try:\n+ from azure.identity import DefaultAzureCredential\n+ except ImportError as exc:\n+ raise ImportError(\n+ \"Using DefaultAzureCredential requires the azure-identity package. \"\n+ \"Please install it via: pip install azure-identity\"\n+ ) from exc\n+\n+ account_url = \"https://{account}.blob.core.windows.net\".format(account=account)\n+ self.client = BlobServiceClient(\n+ account_url=account_url, credential=DefaultAzureCredential()\n )\n \n @staticmethod\n", "issue": "[FR] AzureBlobArtifactRepository connection using Service Principal credentials\n## Willingness to contribute\r\nThe MLflow Community encourages new feature contributions. Would you or another member of your organization be willing to contribute an implementation of this feature (either as an MLflow Plugin or an enhancement to the MLflow code base)?\r\n\r\n- [x] Yes. I can contribute this feature independently.\r\n- [x] Yes. I would be willing to contribute this feature with guidance from the MLflow community.\r\n- [ ] No. I cannot contribute this feature at this time.\r\n\r\n## Proposal Summary\r\n\r\nAllow the user to connect with the AzureBlobArtifactRepository using service principal credentials taken from the following environment variables:\r\n* AZURE_TENANT_ID\r\n* AZURE_CLIENT_ID\r\n* AZURE_CLIENT_SECRET\r\n\r\n## Motivation\r\n- What is the use case for this feature?\r\nHaving the AZURE_STORAGE_CONNECTION_STRING or AZURE_STORAGE_ACCESS_KEY variables set will give the user of that environment complete control over the whole storage account. This is not always desirable in a situation where a storage account has multiple blob containers and the users of one blob container should not have access to another user's container. \r\n\r\n- Why is this use case valuable to support for MLflow users in general?\r\nIt gives the user more options to choose from while connecting with their artifact repository.\r\n\r\n- Why is this use case valuable to support for your project(s) or organization\r\nMy organizations hosts multiple tracking servers (with corresponding artifact repositories) in which groups of users should be separated. Allowing the connection via service principal credentials gives us the ability assign permissions on a container to a service principal\r\n\r\n- Why is it currently difficult to achieve this use case? 
(please be as specific as possible about why related MLflow features and components are insufficient)\r\nThe AzureBlobArtifactRepository requires either the AZURE_STORAGE_CONNECTION_STRING or AZURE_STORAGE_ACCESS_KEY to be set, which gives the user full control of the whole storage account.\r\n\r\n### What component(s), interfaces, languages, and integrations does this feature affect?\r\nComponents \r\n- [x] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [x] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs\r\n- [ ] `area/server-infra`: MLflow server, JavaScript dev server\r\n- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\nInterfaces\r\n- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\nLanguages \r\n- [ ] `language/r`: R APIs and clients\r\n- [ ] `language/java`: Java APIs and clients\r\n- [ ] `language/new`: Proposals for new client languages\r\n\r\nIntegrations\r\n- [ ] `integrations/azure`: Azure and Azure ML integrations\r\n- [ ] `integrations/sagemaker`: SageMaker integrations\r\n- [ ] `integrations/databricks`: Databricks integrations\r\n\r\n## Details\r\n\r\nThe solution would entail adding another elif branch in the AzureBlobArtifactRepository __init__ function, that checks for the AZURE_TENANT_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET environment variables, creates a [ClientSecretCredential](https://docs.microsoft.com/en-us/python/api/azure-identity/azure.identity.clientsecretcredential?view=azure-python) and uses it to create the BlobServiceClient.\r\n\r\n\n", "before_files": [{"content": "import os\nimport posixpath\nimport re\nimport urllib.parse\n\nfrom mlflow.entities import FileInfo\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.store.artifact.artifact_repo import ArtifactRepository\n\n\nclass AzureBlobArtifactRepository(ArtifactRepository):\n \"\"\"\n Stores artifacts on Azure Blob Storage.\n\n This repository is used with URIs of the form\n ``wasbs://<container-name>@<ystorage-account-name>.blob.core.windows.net/<path>``,\n following the same URI scheme as Hadoop on Azure blob storage. 
It requires that your Azure\n storage access key be available in the environment variable ``AZURE_STORAGE_ACCESS_KEY``.\n \"\"\"\n\n def __init__(self, artifact_uri, client=None):\n super().__init__(artifact_uri)\n\n # Allow override for testing\n if client:\n self.client = client\n return\n\n from azure.storage.blob import BlobServiceClient\n\n (_, account, _) = AzureBlobArtifactRepository.parse_wasbs_uri(artifact_uri)\n if \"AZURE_STORAGE_CONNECTION_STRING\" in os.environ:\n self.client = BlobServiceClient.from_connection_string(\n conn_str=os.environ.get(\"AZURE_STORAGE_CONNECTION_STRING\")\n )\n elif \"AZURE_STORAGE_ACCESS_KEY\" in os.environ:\n account_url = \"https://{account}.blob.core.windows.net\".format(account=account)\n self.client = BlobServiceClient(\n account_url=account_url, credential=os.environ.get(\"AZURE_STORAGE_ACCESS_KEY\")\n )\n else:\n raise Exception(\n \"You need to set one of AZURE_STORAGE_CONNECTION_STRING or \"\n \"AZURE_STORAGE_ACCESS_KEY to access Azure storage.\"\n )\n\n @staticmethod\n def parse_wasbs_uri(uri):\n \"\"\"Parse a wasbs:// URI, returning (container, storage_account, path).\"\"\"\n parsed = urllib.parse.urlparse(uri)\n if parsed.scheme != \"wasbs\":\n raise Exception(\"Not a WASBS URI: %s\" % uri)\n match = re.match(\"([^@]+)@([^.]+)\\\\.blob\\\\.core\\\\.windows\\\\.net\", parsed.netloc)\n if match is None:\n raise Exception(\n \"WASBS URI must be of the form \" \"<container>@<account>.blob.core.windows.net\"\n )\n container = match.group(1)\n storage_account = match.group(2)\n path = parsed.path\n if path.startswith(\"/\"):\n path = path[1:]\n return container, storage_account, path\n\n def log_artifact(self, local_file, artifact_path=None):\n (container, _, dest_path) = self.parse_wasbs_uri(self.artifact_uri)\n container_client = self.client.get_container_client(container)\n if artifact_path:\n dest_path = posixpath.join(dest_path, artifact_path)\n dest_path = posixpath.join(dest_path, os.path.basename(local_file))\n with open(local_file, \"rb\") as file:\n container_client.upload_blob(dest_path, file)\n\n def log_artifacts(self, local_dir, artifact_path=None):\n (container, _, dest_path) = self.parse_wasbs_uri(self.artifact_uri)\n container_client = self.client.get_container_client(container)\n if artifact_path:\n dest_path = posixpath.join(dest_path, artifact_path)\n local_dir = os.path.abspath(local_dir)\n for (root, _, filenames) in os.walk(local_dir):\n upload_path = dest_path\n if root != local_dir:\n rel_path = os.path.relpath(root, local_dir)\n upload_path = posixpath.join(dest_path, rel_path)\n for f in filenames:\n remote_file_path = posixpath.join(upload_path, f)\n local_file_path = os.path.join(root, f)\n with open(local_file_path, \"rb\") as file:\n container_client.upload_blob(remote_file_path, file)\n\n def list_artifacts(self, path=None):\n # Newer versions of `azure-storage-blob` (>= 12.4.0) provide a public\n # `azure.storage.blob.BlobPrefix` object to signify that a blob is a directory,\n # while older versions only expose this API internally as\n # `azure.storage.blob._models.BlobPrefix`\n try:\n from azure.storage.blob import BlobPrefix\n except ImportError:\n from azure.storage.blob._models import BlobPrefix\n\n (container, _, artifact_path) = self.parse_wasbs_uri(self.artifact_uri)\n container_client = self.client.get_container_client(container)\n dest_path = artifact_path\n if path:\n dest_path = posixpath.join(dest_path, path)\n infos = []\n prefix = dest_path if dest_path.endswith(\"/\") else dest_path + \"/\"\n 
results = container_client.walk_blobs(name_starts_with=prefix)\n for r in results:\n if not r.name.startswith(artifact_path):\n raise MlflowException(\n \"The name of the listed Azure blob does not begin with the specified\"\n \" artifact path. Artifact path: {artifact_path}. Blob name:\"\n \" {blob_name}\".format(artifact_path=artifact_path, blob_name=r.name)\n )\n if isinstance(r, BlobPrefix): # This is a prefix for items in a subdirectory\n subdir = posixpath.relpath(path=r.name, start=artifact_path)\n if subdir.endswith(\"/\"):\n subdir = subdir[:-1]\n infos.append(FileInfo(subdir, True, None))\n else: # Just a plain old blob\n file_name = posixpath.relpath(path=r.name, start=artifact_path)\n infos.append(FileInfo(file_name, False, r.size))\n # The list_artifacts API expects us to return an empty list if the\n # the path references a single file.\n rel_path = dest_path[len(artifact_path) + 1 :]\n if (len(infos) == 1) and not infos[0].is_dir and (infos[0].path == rel_path):\n return []\n return sorted(infos, key=lambda f: f.path)\n\n def _download_file(self, remote_file_path, local_path):\n (container, _, remote_root_path) = self.parse_wasbs_uri(self.artifact_uri)\n container_client = self.client.get_container_client(container)\n remote_full_path = posixpath.join(remote_root_path, remote_file_path)\n with open(local_path, \"wb\") as file:\n container_client.download_blob(remote_full_path).readinto(file)\n\n def delete_artifacts(self, artifact_path=None):\n raise MlflowException(\"Not implemented yet\")\n", "path": "mlflow/store/artifact/azure_blob_artifact_repo.py"}], "after_files": [{"content": "import os\nimport posixpath\nimport re\nimport urllib.parse\n\nfrom mlflow.entities import FileInfo\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.store.artifact.artifact_repo import ArtifactRepository\n\n\nclass AzureBlobArtifactRepository(ArtifactRepository):\n \"\"\"\n Stores artifacts on Azure Blob Storage.\n\n This repository is used with URIs of the form\n ``wasbs://<container-name>@<ystorage-account-name>.blob.core.windows.net/<path>``,\n following the same URI scheme as Hadoop on Azure blob storage. It requires either that:\n - Azure storage connection string is in the env var ``AZURE_STORAGE_CONNECTION_STRING``\n - Azure storage access key is in the env var ``AZURE_STORAGE_ACCESS_KEY``\n - DefaultAzureCredential is configured\n \"\"\"\n\n def __init__(self, artifact_uri, client=None):\n super().__init__(artifact_uri)\n\n # Allow override for testing\n if client:\n self.client = client\n return\n\n from azure.storage.blob import BlobServiceClient\n\n (_, account, _) = AzureBlobArtifactRepository.parse_wasbs_uri(artifact_uri)\n if \"AZURE_STORAGE_CONNECTION_STRING\" in os.environ:\n self.client = BlobServiceClient.from_connection_string(\n conn_str=os.environ.get(\"AZURE_STORAGE_CONNECTION_STRING\")\n )\n elif \"AZURE_STORAGE_ACCESS_KEY\" in os.environ:\n account_url = \"https://{account}.blob.core.windows.net\".format(account=account)\n self.client = BlobServiceClient(\n account_url=account_url, credential=os.environ.get(\"AZURE_STORAGE_ACCESS_KEY\")\n )\n else:\n try:\n from azure.identity import DefaultAzureCredential\n except ImportError as exc:\n raise ImportError(\n \"Using DefaultAzureCredential requires the azure-identity package. 
\"\n \"Please install it via: pip install azure-identity\"\n ) from exc\n\n account_url = \"https://{account}.blob.core.windows.net\".format(account=account)\n self.client = BlobServiceClient(\n account_url=account_url, credential=DefaultAzureCredential()\n )\n\n @staticmethod\n def parse_wasbs_uri(uri):\n \"\"\"Parse a wasbs:// URI, returning (container, storage_account, path).\"\"\"\n parsed = urllib.parse.urlparse(uri)\n if parsed.scheme != \"wasbs\":\n raise Exception(\"Not a WASBS URI: %s\" % uri)\n match = re.match(\"([^@]+)@([^.]+)\\\\.blob\\\\.core\\\\.windows\\\\.net\", parsed.netloc)\n if match is None:\n raise Exception(\n \"WASBS URI must be of the form \" \"<container>@<account>.blob.core.windows.net\"\n )\n container = match.group(1)\n storage_account = match.group(2)\n path = parsed.path\n if path.startswith(\"/\"):\n path = path[1:]\n return container, storage_account, path\n\n def log_artifact(self, local_file, artifact_path=None):\n (container, _, dest_path) = self.parse_wasbs_uri(self.artifact_uri)\n container_client = self.client.get_container_client(container)\n if artifact_path:\n dest_path = posixpath.join(dest_path, artifact_path)\n dest_path = posixpath.join(dest_path, os.path.basename(local_file))\n with open(local_file, \"rb\") as file:\n container_client.upload_blob(dest_path, file)\n\n def log_artifacts(self, local_dir, artifact_path=None):\n (container, _, dest_path) = self.parse_wasbs_uri(self.artifact_uri)\n container_client = self.client.get_container_client(container)\n if artifact_path:\n dest_path = posixpath.join(dest_path, artifact_path)\n local_dir = os.path.abspath(local_dir)\n for (root, _, filenames) in os.walk(local_dir):\n upload_path = dest_path\n if root != local_dir:\n rel_path = os.path.relpath(root, local_dir)\n upload_path = posixpath.join(dest_path, rel_path)\n for f in filenames:\n remote_file_path = posixpath.join(upload_path, f)\n local_file_path = os.path.join(root, f)\n with open(local_file_path, \"rb\") as file:\n container_client.upload_blob(remote_file_path, file)\n\n def list_artifacts(self, path=None):\n # Newer versions of `azure-storage-blob` (>= 12.4.0) provide a public\n # `azure.storage.blob.BlobPrefix` object to signify that a blob is a directory,\n # while older versions only expose this API internally as\n # `azure.storage.blob._models.BlobPrefix`\n try:\n from azure.storage.blob import BlobPrefix\n except ImportError:\n from azure.storage.blob._models import BlobPrefix\n\n (container, _, artifact_path) = self.parse_wasbs_uri(self.artifact_uri)\n container_client = self.client.get_container_client(container)\n dest_path = artifact_path\n if path:\n dest_path = posixpath.join(dest_path, path)\n infos = []\n prefix = dest_path if dest_path.endswith(\"/\") else dest_path + \"/\"\n results = container_client.walk_blobs(name_starts_with=prefix)\n for r in results:\n if not r.name.startswith(artifact_path):\n raise MlflowException(\n \"The name of the listed Azure blob does not begin with the specified\"\n \" artifact path. Artifact path: {artifact_path}. 
Blob name:\"\n \" {blob_name}\".format(artifact_path=artifact_path, blob_name=r.name)\n )\n if isinstance(r, BlobPrefix): # This is a prefix for items in a subdirectory\n subdir = posixpath.relpath(path=r.name, start=artifact_path)\n if subdir.endswith(\"/\"):\n subdir = subdir[:-1]\n infos.append(FileInfo(subdir, True, None))\n else: # Just a plain old blob\n file_name = posixpath.relpath(path=r.name, start=artifact_path)\n infos.append(FileInfo(file_name, False, r.size))\n # The list_artifacts API expects us to return an empty list if the\n # the path references a single file.\n rel_path = dest_path[len(artifact_path) + 1 :]\n if (len(infos) == 1) and not infos[0].is_dir and (infos[0].path == rel_path):\n return []\n return sorted(infos, key=lambda f: f.path)\n\n def _download_file(self, remote_file_path, local_path):\n (container, _, remote_root_path) = self.parse_wasbs_uri(self.artifact_uri)\n container_client = self.client.get_container_client(container)\n remote_full_path = posixpath.join(remote_root_path, remote_file_path)\n with open(local_path, \"wb\") as file:\n container_client.download_blob(remote_full_path).readinto(file)\n\n def delete_artifacts(self, artifact_path=None):\n raise MlflowException(\"Not implemented yet\")\n", "path": "mlflow/store/artifact/azure_blob_artifact_repo.py"}]} | 2,790 | 414 |