Dataset columns (name, dtype, and the viewer's value statistics):

| column | dtype | stats |
|---|---|---|
| problem_id | string | lengths 18-22 |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | lengths 13-58 |
| prompt | string | lengths 1.71k-18.9k |
| golden_diff | string | lengths 145-5.13k |
| verification_info | string | lengths 465-23.6k |
| num_tokens_prompt | int64 | 556-4.1k |
| num_tokens_diff | int64 | 47-1.02k |

The sample rows below are reproduced in flattened form, with `|` separating the column values in the order listed above.
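For orientation, a minimal sketch of how this dump could be loaded programmatically. It assumes the rows come from the `rasdani/github-patches` dataset on the Hugging Face Hub (the repo id shown in the `source` column), that the `datasets` library is installed, and that a `train` split exists; none of this is stated in the dump beyond the repo id.

```python
# Sketch only: the dataset id is taken from the `source` column; the split name is an assumption.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"])   # e.g. gh_patches_debug_44613, encode__starlette-1377
print(row["prompt"][:300])        # issue statement + partial code base + patch-format instructions
print(row["golden_diff"][:300])   # reference fix as a unified diff
```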
problem_id: gh_patches_debug_44613 | source: rasdani/github-patches | task_type: git_diff | in_source_id: encode__starlette-1377

prompt: You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFiles middleware doesn't follow symlinks
### Checklist
- [x] The bug is reproducible against the latest release and/or `master`.
- [x] There are no similar issues or pull requests to fix it yet.
### Describe the bug
The StaticFiles middleware is checking the `os.realpath` of a file and returning a 404 for symlinks that lead outside the static directory.
### To reproduce
1. create a minimal app with a staticfiles middleware
1. put a symlink in your static directory. the link's target must be above the static directory.
1. you'll get a 404
### Expected behavior
Support symlinks in static directory.
The use case for symlinks in static is to target frontend assets that are being generated in file-watch mode.
### Actual behavior
404.
### Debugging material
It's happening here:
https://github.com/encode/starlette/blob/b95acea973c20eea3e7cbbca42d09b1f5d4a3412/starlette/staticfiles.py#L147-L149
### Environment
- OS: linux
- Python version: 3.7.5
- Starlette version: 0.13.8
### Additional context
I'm happy to post a PR for this if useful, ideally adding a bool param to the StaticFiles middleware that allows symlinks.
</issue>
<code>
[start of starlette/staticfiles.py]
1 import importlib.util
2 import os
3 import stat
4 import typing
5 from email.utils import parsedate
6
7 import anyio
8
9 from starlette.datastructures import URL, Headers
10 from starlette.exceptions import HTTPException
11 from starlette.responses import FileResponse, RedirectResponse, Response
12 from starlette.types import Receive, Scope, Send
13
14 PathLike = typing.Union[str, "os.PathLike[str]"]
15
16
17 class NotModifiedResponse(Response):
18 NOT_MODIFIED_HEADERS = (
19 "cache-control",
20 "content-location",
21 "date",
22 "etag",
23 "expires",
24 "vary",
25 )
26
27 def __init__(self, headers: Headers):
28 super().__init__(
29 status_code=304,
30 headers={
31 name: value
32 for name, value in headers.items()
33 if name in self.NOT_MODIFIED_HEADERS
34 },
35 )
36
37
38 class StaticFiles:
39 def __init__(
40 self,
41 *,
42 directory: typing.Optional[PathLike] = None,
43 packages: typing.Optional[
44 typing.List[typing.Union[str, typing.Tuple[str, str]]]
45 ] = None,
46 html: bool = False,
47 check_dir: bool = True,
48 ) -> None:
49 self.directory = directory
50 self.packages = packages
51 self.all_directories = self.get_directories(directory, packages)
52 self.html = html
53 self.config_checked = False
54 if check_dir and directory is not None and not os.path.isdir(directory):
55 raise RuntimeError(f"Directory '{directory}' does not exist")
56
57 def get_directories(
58 self,
59 directory: typing.Optional[PathLike] = None,
60 packages: typing.Optional[
61 typing.List[typing.Union[str, typing.Tuple[str, str]]]
62 ] = None,
63 ) -> typing.List[PathLike]:
64 """
65 Given `directory` and `packages` arguments, return a list of all the
66 directories that should be used for serving static files from.
67 """
68 directories = []
69 if directory is not None:
70 directories.append(directory)
71
72 for package in packages or []:
73 if isinstance(package, tuple):
74 package, statics_dir = package
75 else:
76 statics_dir = "statics"
77 spec = importlib.util.find_spec(package)
78 assert spec is not None, f"Package {package!r} could not be found."
79 assert spec.origin is not None, f"Package {package!r} could not be found."
80 package_directory = os.path.normpath(
81 os.path.join(spec.origin, "..", statics_dir)
82 )
83 assert os.path.isdir(
84 package_directory
85 ), f"Directory '{statics_dir!r}' in package {package!r} could not be found."
86 directories.append(package_directory)
87
88 return directories
89
90 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
91 """
92 The ASGI entry point.
93 """
94 assert scope["type"] == "http"
95
96 if not self.config_checked:
97 await self.check_config()
98 self.config_checked = True
99
100 path = self.get_path(scope)
101 response = await self.get_response(path, scope)
102 await response(scope, receive, send)
103
104 def get_path(self, scope: Scope) -> str:
105 """
106 Given the ASGI scope, return the `path` string to serve up,
107 with OS specific path separators, and any '..', '.' components removed.
108 """
109 return os.path.normpath(os.path.join(*scope["path"].split("/")))
110
111 async def get_response(self, path: str, scope: Scope) -> Response:
112 """
113 Returns an HTTP response, given the incoming path, method and request headers.
114 """
115 if scope["method"] not in ("GET", "HEAD"):
116 raise HTTPException(status_code=405)
117
118 try:
119 full_path, stat_result = await anyio.to_thread.run_sync(
120 self.lookup_path, path
121 )
122 except PermissionError:
123 raise HTTPException(status_code=401)
124 except OSError:
125 raise
126
127 if stat_result and stat.S_ISREG(stat_result.st_mode):
128 # We have a static file to serve.
129 return self.file_response(full_path, stat_result, scope)
130
131 elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:
132 # We're in HTML mode, and have got a directory URL.
133 # Check if we have 'index.html' file to serve.
134 index_path = os.path.join(path, "index.html")
135 full_path, stat_result = await anyio.to_thread.run_sync(
136 self.lookup_path, index_path
137 )
138 if stat_result is not None and stat.S_ISREG(stat_result.st_mode):
139 if not scope["path"].endswith("/"):
140 # Directory URLs should redirect to always end in "/".
141 url = URL(scope=scope)
142 url = url.replace(path=url.path + "/")
143 return RedirectResponse(url=url)
144 return self.file_response(full_path, stat_result, scope)
145
146 if self.html:
147 # Check for '404.html' if we're in HTML mode.
148 full_path, stat_result = await anyio.to_thread.run_sync(
149 self.lookup_path, "404.html"
150 )
151 if stat_result and stat.S_ISREG(stat_result.st_mode):
152 return FileResponse(
153 full_path,
154 stat_result=stat_result,
155 method=scope["method"],
156 status_code=404,
157 )
158 raise HTTPException(status_code=404)
159
160 def lookup_path(
161 self, path: str
162 ) -> typing.Tuple[str, typing.Optional[os.stat_result]]:
163 for directory in self.all_directories:
164 full_path = os.path.realpath(os.path.join(directory, path))
165 directory = os.path.realpath(directory)
166 if os.path.commonprefix([full_path, directory]) != directory:
167 # Don't allow misbehaving clients to break out of the static files
168 # directory.
169 continue
170 try:
171 return full_path, os.stat(full_path)
172 except (FileNotFoundError, NotADirectoryError):
173 continue
174 return "", None
175
176 def file_response(
177 self,
178 full_path: PathLike,
179 stat_result: os.stat_result,
180 scope: Scope,
181 status_code: int = 200,
182 ) -> Response:
183 method = scope["method"]
184 request_headers = Headers(scope=scope)
185
186 response = FileResponse(
187 full_path, status_code=status_code, stat_result=stat_result, method=method
188 )
189 if self.is_not_modified(response.headers, request_headers):
190 return NotModifiedResponse(response.headers)
191 return response
192
193 async def check_config(self) -> None:
194 """
195 Perform a one-off configuration check that StaticFiles is actually
196 pointed at a directory, so that we can raise loud errors rather than
197 just returning 404 responses.
198 """
199 if self.directory is None:
200 return
201
202 try:
203 stat_result = await anyio.to_thread.run_sync(os.stat, self.directory)
204 except FileNotFoundError:
205 raise RuntimeError(
206 f"StaticFiles directory '{self.directory}' does not exist."
207 )
208 if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):
209 raise RuntimeError(
210 f"StaticFiles path '{self.directory}' is not a directory."
211 )
212
213 def is_not_modified(
214 self, response_headers: Headers, request_headers: Headers
215 ) -> bool:
216 """
217 Given the request and response headers, return `True` if an HTTP
218 "Not Modified" response could be returned instead.
219 """
220 try:
221 if_none_match = request_headers["if-none-match"]
222 etag = response_headers["etag"]
223 if if_none_match == etag:
224 return True
225 except KeyError:
226 pass
227
228 try:
229 if_modified_since = parsedate(request_headers["if-modified-since"])
230 last_modified = parsedate(response_headers["last-modified"])
231 if (
232 if_modified_since is not None
233 and last_modified is not None
234 and if_modified_since >= last_modified
235 ):
236 return True
237 except KeyError:
238 pass
239
240 return False
241
[end of starlette/staticfiles.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py
--- a/starlette/staticfiles.py
+++ b/starlette/staticfiles.py
@@ -3,6 +3,7 @@
import stat
import typing
from email.utils import parsedate
+from pathlib import Path
import anyio
@@ -51,7 +52,7 @@
self.all_directories = self.get_directories(directory, packages)
self.html = html
self.config_checked = False
- if check_dir and directory is not None and not os.path.isdir(directory):
+ if check_dir and directory is not None and not Path(directory).is_dir():
raise RuntimeError(f"Directory '{directory}' does not exist")
def get_directories(
@@ -77,11 +78,9 @@
spec = importlib.util.find_spec(package)
assert spec is not None, f"Package {package!r} could not be found."
assert spec.origin is not None, f"Package {package!r} could not be found."
- package_directory = os.path.normpath(
- os.path.join(spec.origin, "..", statics_dir)
- )
- assert os.path.isdir(
- package_directory
+ package_directory = Path(spec.origin).joinpath("..", statics_dir).resolve()
+ assert (
+ package_directory.is_dir()
), f"Directory '{statics_dir!r}' in package {package!r} could not be found."
directories.append(package_directory)
@@ -101,14 +100,14 @@
response = await self.get_response(path, scope)
await response(scope, receive, send)
- def get_path(self, scope: Scope) -> str:
+ def get_path(self, scope: Scope) -> Path:
"""
Given the ASGI scope, return the `path` string to serve up,
with OS specific path separators, and any '..', '.' components removed.
"""
- return os.path.normpath(os.path.join(*scope["path"].split("/")))
+ return Path(*scope["path"].split("/"))
- async def get_response(self, path: str, scope: Scope) -> Response:
+ async def get_response(self, path: Path, scope: Scope) -> Response:
"""
Returns an HTTP response, given the incoming path, method and request headers.
"""
@@ -131,7 +130,7 @@
elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:
# We're in HTML mode, and have got a directory URL.
# Check if we have 'index.html' file to serve.
- index_path = os.path.join(path, "index.html")
+ index_path = path.joinpath("index.html")
full_path, stat_result = await anyio.to_thread.run_sync(
self.lookup_path, index_path
)
@@ -158,20 +157,25 @@
raise HTTPException(status_code=404)
def lookup_path(
- self, path: str
- ) -> typing.Tuple[str, typing.Optional[os.stat_result]]:
+ self, path: Path
+ ) -> typing.Tuple[Path, typing.Optional[os.stat_result]]:
for directory in self.all_directories:
- full_path = os.path.realpath(os.path.join(directory, path))
- directory = os.path.realpath(directory)
- if os.path.commonprefix([full_path, directory]) != directory:
- # Don't allow misbehaving clients to break out of the static files
- # directory.
- continue
+ original_path = Path(directory).joinpath(path)
+ full_path = original_path.resolve()
+ directory = Path(directory).resolve()
try:
- return full_path, os.stat(full_path)
+ stat_result = os.lstat(original_path)
+ full_path.relative_to(directory)
+ return full_path, stat_result
+ except ValueError:
+ # Allow clients to break out of the static files directory
+ # if following symlinks.
+ if stat.S_ISLNK(stat_result.st_mode):
+ stat_result = os.lstat(full_path)
+ return full_path, stat_result
except (FileNotFoundError, NotADirectoryError):
continue
- return "", None
+ return Path(), None
def file_response(
self,
| {"golden_diff": "diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py\n--- a/starlette/staticfiles.py\n+++ b/starlette/staticfiles.py\n@@ -3,6 +3,7 @@\n import stat\n import typing\n from email.utils import parsedate\n+from pathlib import Path\n \n import anyio\n \n@@ -51,7 +52,7 @@\n self.all_directories = self.get_directories(directory, packages)\n self.html = html\n self.config_checked = False\n- if check_dir and directory is not None and not os.path.isdir(directory):\n+ if check_dir and directory is not None and not Path(directory).is_dir():\n raise RuntimeError(f\"Directory '{directory}' does not exist\")\n \n def get_directories(\n@@ -77,11 +78,9 @@\n spec = importlib.util.find_spec(package)\n assert spec is not None, f\"Package {package!r} could not be found.\"\n assert spec.origin is not None, f\"Package {package!r} could not be found.\"\n- package_directory = os.path.normpath(\n- os.path.join(spec.origin, \"..\", statics_dir)\n- )\n- assert os.path.isdir(\n- package_directory\n+ package_directory = Path(spec.origin).joinpath(\"..\", statics_dir).resolve()\n+ assert (\n+ package_directory.is_dir()\n ), f\"Directory '{statics_dir!r}' in package {package!r} could not be found.\"\n directories.append(package_directory)\n \n@@ -101,14 +100,14 @@\n response = await self.get_response(path, scope)\n await response(scope, receive, send)\n \n- def get_path(self, scope: Scope) -> str:\n+ def get_path(self, scope: Scope) -> Path:\n \"\"\"\n Given the ASGI scope, return the `path` string to serve up,\n with OS specific path separators, and any '..', '.' components removed.\n \"\"\"\n- return os.path.normpath(os.path.join(*scope[\"path\"].split(\"/\")))\n+ return Path(*scope[\"path\"].split(\"/\"))\n \n- async def get_response(self, path: str, scope: Scope) -> Response:\n+ async def get_response(self, path: Path, scope: Scope) -> Response:\n \"\"\"\n Returns an HTTP response, given the incoming path, method and request headers.\n \"\"\"\n@@ -131,7 +130,7 @@\n elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:\n # We're in HTML mode, and have got a directory URL.\n # Check if we have 'index.html' file to serve.\n- index_path = os.path.join(path, \"index.html\")\n+ index_path = path.joinpath(\"index.html\")\n full_path, stat_result = await anyio.to_thread.run_sync(\n self.lookup_path, index_path\n )\n@@ -158,20 +157,25 @@\n raise HTTPException(status_code=404)\n \n def lookup_path(\n- self, path: str\n- ) -> typing.Tuple[str, typing.Optional[os.stat_result]]:\n+ self, path: Path\n+ ) -> typing.Tuple[Path, typing.Optional[os.stat_result]]:\n for directory in self.all_directories:\n- full_path = os.path.realpath(os.path.join(directory, path))\n- directory = os.path.realpath(directory)\n- if os.path.commonprefix([full_path, directory]) != directory:\n- # Don't allow misbehaving clients to break out of the static files\n- # directory.\n- continue\n+ original_path = Path(directory).joinpath(path)\n+ full_path = original_path.resolve()\n+ directory = Path(directory).resolve()\n try:\n- return full_path, os.stat(full_path)\n+ stat_result = os.lstat(original_path)\n+ full_path.relative_to(directory)\n+ return full_path, stat_result\n+ except ValueError:\n+ # Allow clients to break out of the static files directory\n+ # if following symlinks.\n+ if stat.S_ISLNK(stat_result.st_mode):\n+ stat_result = os.lstat(full_path)\n+ return full_path, stat_result\n except (FileNotFoundError, NotADirectoryError):\n continue\n- return \"\", None\n+ return Path(), None\n \n def 
file_response(\n self,\n", "issue": "StaticFiles middleware doesn't follow symlinks\n### Checklist\r\n\r\n- [x] The bug is reproducible against the latest release and/or `master`.\r\n- [x] There are no similar issues or pull requests to fix it yet.\r\n\r\n### Describe the bug\r\n\r\nThe StaticFiles middleware is checking the `os.realpath` of a file and returning a 404 for symlinks that lead outside the static directory.\r\n\r\n### To reproduce\r\n\r\n1. create a minimal app with a staticfiles middleware\r\n1. put a symlink in your static directory. the link's target must be above the static directory.\r\n1. you'll get a 404\r\n\r\n### Expected behavior\r\n\r\nSupport symlinks in static directory.\r\n\r\nThe use case for symlinks in static is to target frontend assets that are being generated in file-watch mode.\r\n\r\n### Actual behavior\r\n\r\n404.\r\n\r\n### Debugging material\r\n\r\nIt's happening here:\r\nhttps://github.com/encode/starlette/blob/b95acea973c20eea3e7cbbca42d09b1f5d4a3412/starlette/staticfiles.py#L147-L149\r\n\r\n### Environment\r\n\r\n- OS: linux\r\n- Python version: 3.7.5\r\n- Starlette version: 0.13.8\r\n\r\n### Additional context\r\n\r\nI'm happy to post a PR for this if useful, ideally adding a bool param to the StaticFiles middleware that allows symlinks.\n", "before_files": [{"content": "import importlib.util\nimport os\nimport stat\nimport typing\nfrom email.utils import parsedate\n\nimport anyio\n\nfrom starlette.datastructures import URL, Headers\nfrom starlette.exceptions import HTTPException\nfrom starlette.responses import FileResponse, RedirectResponse, Response\nfrom starlette.types import Receive, Scope, Send\n\nPathLike = typing.Union[str, \"os.PathLike[str]\"]\n\n\nclass NotModifiedResponse(Response):\n NOT_MODIFIED_HEADERS = (\n \"cache-control\",\n \"content-location\",\n \"date\",\n \"etag\",\n \"expires\",\n \"vary\",\n )\n\n def __init__(self, headers: Headers):\n super().__init__(\n status_code=304,\n headers={\n name: value\n for name, value in headers.items()\n if name in self.NOT_MODIFIED_HEADERS\n },\n )\n\n\nclass StaticFiles:\n def __init__(\n self,\n *,\n directory: typing.Optional[PathLike] = None,\n packages: typing.Optional[\n typing.List[typing.Union[str, typing.Tuple[str, str]]]\n ] = None,\n html: bool = False,\n check_dir: bool = True,\n ) -> None:\n self.directory = directory\n self.packages = packages\n self.all_directories = self.get_directories(directory, packages)\n self.html = html\n self.config_checked = False\n if check_dir and directory is not None and not os.path.isdir(directory):\n raise RuntimeError(f\"Directory '{directory}' does not exist\")\n\n def get_directories(\n self,\n directory: typing.Optional[PathLike] = None,\n packages: typing.Optional[\n typing.List[typing.Union[str, typing.Tuple[str, str]]]\n ] = None,\n ) -> typing.List[PathLike]:\n \"\"\"\n Given `directory` and `packages` arguments, return a list of all the\n directories that should be used for serving static files from.\n \"\"\"\n directories = []\n if directory is not None:\n directories.append(directory)\n\n for package in packages or []:\n if isinstance(package, tuple):\n package, statics_dir = package\n else:\n statics_dir = \"statics\"\n spec = importlib.util.find_spec(package)\n assert spec is not None, f\"Package {package!r} could not be found.\"\n assert spec.origin is not None, f\"Package {package!r} could not be found.\"\n package_directory = os.path.normpath(\n os.path.join(spec.origin, \"..\", statics_dir)\n )\n assert os.path.isdir(\n 
package_directory\n ), f\"Directory '{statics_dir!r}' in package {package!r} could not be found.\"\n directories.append(package_directory)\n\n return directories\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"\n The ASGI entry point.\n \"\"\"\n assert scope[\"type\"] == \"http\"\n\n if not self.config_checked:\n await self.check_config()\n self.config_checked = True\n\n path = self.get_path(scope)\n response = await self.get_response(path, scope)\n await response(scope, receive, send)\n\n def get_path(self, scope: Scope) -> str:\n \"\"\"\n Given the ASGI scope, return the `path` string to serve up,\n with OS specific path separators, and any '..', '.' components removed.\n \"\"\"\n return os.path.normpath(os.path.join(*scope[\"path\"].split(\"/\")))\n\n async def get_response(self, path: str, scope: Scope) -> Response:\n \"\"\"\n Returns an HTTP response, given the incoming path, method and request headers.\n \"\"\"\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n raise HTTPException(status_code=405)\n\n try:\n full_path, stat_result = await anyio.to_thread.run_sync(\n self.lookup_path, path\n )\n except PermissionError:\n raise HTTPException(status_code=401)\n except OSError:\n raise\n\n if stat_result and stat.S_ISREG(stat_result.st_mode):\n # We have a static file to serve.\n return self.file_response(full_path, stat_result, scope)\n\n elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:\n # We're in HTML mode, and have got a directory URL.\n # Check if we have 'index.html' file to serve.\n index_path = os.path.join(path, \"index.html\")\n full_path, stat_result = await anyio.to_thread.run_sync(\n self.lookup_path, index_path\n )\n if stat_result is not None and stat.S_ISREG(stat_result.st_mode):\n if not scope[\"path\"].endswith(\"/\"):\n # Directory URLs should redirect to always end in \"/\".\n url = URL(scope=scope)\n url = url.replace(path=url.path + \"/\")\n return RedirectResponse(url=url)\n return self.file_response(full_path, stat_result, scope)\n\n if self.html:\n # Check for '404.html' if we're in HTML mode.\n full_path, stat_result = await anyio.to_thread.run_sync(\n self.lookup_path, \"404.html\"\n )\n if stat_result and stat.S_ISREG(stat_result.st_mode):\n return FileResponse(\n full_path,\n stat_result=stat_result,\n method=scope[\"method\"],\n status_code=404,\n )\n raise HTTPException(status_code=404)\n\n def lookup_path(\n self, path: str\n ) -> typing.Tuple[str, typing.Optional[os.stat_result]]:\n for directory in self.all_directories:\n full_path = os.path.realpath(os.path.join(directory, path))\n directory = os.path.realpath(directory)\n if os.path.commonprefix([full_path, directory]) != directory:\n # Don't allow misbehaving clients to break out of the static files\n # directory.\n continue\n try:\n return full_path, os.stat(full_path)\n except (FileNotFoundError, NotADirectoryError):\n continue\n return \"\", None\n\n def file_response(\n self,\n full_path: PathLike,\n stat_result: os.stat_result,\n scope: Scope,\n status_code: int = 200,\n ) -> Response:\n method = scope[\"method\"]\n request_headers = Headers(scope=scope)\n\n response = FileResponse(\n full_path, status_code=status_code, stat_result=stat_result, method=method\n )\n if self.is_not_modified(response.headers, request_headers):\n return NotModifiedResponse(response.headers)\n return response\n\n async def check_config(self) -> None:\n \"\"\"\n Perform a one-off configuration check that StaticFiles is actually\n pointed at a directory, 
so that we can raise loud errors rather than\n just returning 404 responses.\n \"\"\"\n if self.directory is None:\n return\n\n try:\n stat_result = await anyio.to_thread.run_sync(os.stat, self.directory)\n except FileNotFoundError:\n raise RuntimeError(\n f\"StaticFiles directory '{self.directory}' does not exist.\"\n )\n if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):\n raise RuntimeError(\n f\"StaticFiles path '{self.directory}' is not a directory.\"\n )\n\n def is_not_modified(\n self, response_headers: Headers, request_headers: Headers\n ) -> bool:\n \"\"\"\n Given the request and response headers, return `True` if an HTTP\n \"Not Modified\" response could be returned instead.\n \"\"\"\n try:\n if_none_match = request_headers[\"if-none-match\"]\n etag = response_headers[\"etag\"]\n if if_none_match == etag:\n return True\n except KeyError:\n pass\n\n try:\n if_modified_since = parsedate(request_headers[\"if-modified-since\"])\n last_modified = parsedate(response_headers[\"last-modified\"])\n if (\n if_modified_since is not None\n and last_modified is not None\n and if_modified_since >= last_modified\n ):\n return True\n except KeyError:\n pass\n\n return False\n", "path": "starlette/staticfiles.py"}]} | 3,213 | 950 |
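Between the first and second sample rows, a brief standalone illustration (not part of the dataset) of the core idea in the golden diff above: `Path.resolve()` combined with `relative_to()` keeps the path-traversal guard while allowing a symlink placed inside the static directory to point elsewhere on disk. The function and variable names below are hypothetical, and this is a sketch rather than Starlette's actual implementation.

```python
# Sketch of the pathlib-based containment check used by the fix above; names are hypothetical.
import os
import stat
from pathlib import Path


def lookup(directory: str, requested: str):
    original = Path(directory).joinpath(requested)
    resolved = original.resolve()         # follows symlinks
    base = Path(directory).resolve()
    try:
        stat_result = os.lstat(original)  # stat the entry inside the static dir itself
        resolved.relative_to(base)        # raises ValueError if resolved lies outside base
        return resolved, stat_result
    except ValueError:
        # Resolved target is outside the static directory; allow it only when the
        # entry inside the directory is a symlink the operator placed there.
        if stat.S_ISLNK(stat_result.st_mode):
            return resolved, os.lstat(resolved)
        return None, None
    except FileNotFoundError:
        return None, None
```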
problem_id: gh_patches_debug_24494 | source: rasdani/github-patches | task_type: git_diff | in_source_id: pyjanitor-devs__pyjanitor-11

prompt: You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
clean_names breaks for MultiIndex Columns
With MultiIndex columns, `clean_names` raises an `expected str not tuple` type error.
If you instead used
`df.rename(columns=lambda x: x.lower().replace('_', ''))`,
this would work for both standard and MultiIndex DataFrames.
Can do PR if required.
</issue>
<code>
[start of janitor/functions.py]
1 import datetime as dt
2 from functools import reduce
3
4 import pandas as pd
5
6 from .errors import JanitorError
7
8 import re
9
10
11 def clean_names(df):
12 """
13 Clean column names.
14
15 Takes all column names, converts them to lowercase, then replaces all
16 spaces with underscores.
17
18 Functional usage example:
19
20 .. code-block:: python
21
22 df = clean_names(df)
23
24 Method chaining example:
25
26 .. code-block:: python
27
28 df = pd.DataFrame(...)
29 df = jn.DataFrame(df).clean_names()
30
31 :param df: The pandas DataFrame object.
32 :returns: A pandas DataFrame.
33 """
34 columns = [(c.lower()
35 .replace(' ', '_')
36 .replace('/', '_')
37 .replace(':', '_')
38 .replace("'", '')
39 .replace('’', '')
40 .replace(',', '_')
41 .replace('?', '_')
42 .replace('-', '_')
43 .replace('(', '_')
44 .replace(')', '_')
45 .replace('.', '_')
46 ) for c in df.columns]
47
48 newcolumns = []
49 for col in columns:
50 # NOTE: Replace repeating underscores with single ones
51 newcol = re.sub('[_]{2,}', '_', col)
52 newcolumns.append(newcol)
53 df.columns = newcolumns
54 return df
55
56
57 def remove_empty(df):
58 """
59 Drop all rows and columns that are completely null.
60
61 Implementation is shamelessly copied from `StackOverflow`_.
62
63 .. _StackOverflow: https://stackoverflow.com/questions/38884538/python-pandas-find-all-rows-where-all-values-are-nan # noqa: E501
64
65 Functional usage example:
66
67 .. code-block:: python
68
69 df = remove_empty(df)
70
71 Method chaining example:
72
73 .. code-block:: python
74
75 df = pd.DataFrame(...)
76 df = jn.DataFrame(df).remove_empty()
77
78 :param df: The pandas DataFrame object.
79 :returns: A pandas DataFrame.
80 """
81
82 nanrows = df.index[df.isnull().all(axis=1)]
83 df.drop(index=nanrows, inplace=True)
84
85 nancols = df.columns[df.isnull().all(axis=0)]
86 df.drop(columns=nancols, inplace=True)
87
88 return df
89
90
91 def get_dupes(df, columns=None):
92 """
93 Returns all duplicate rows.
94
95 Functional usage example:
96
97 .. code-block:: python
98
99 get_dupes(df)
100
101 Method chaining example:
102
103 .. code-block:: python
104
105 df = pd.DataFrame(...)
106 jn.DataFrame(df).get_dupes()
107
108 :param df: The pandas DataFrame object.
109 :param str/iterable columns: (optional) A column name or an iterable (list
110 or tuple) of column names. Following pandas API, this only considers
111 certain columns for identifying duplicates. Defaults to using all
112 columns.
113 :returns: The duplicate rows, as a pandas DataFrame.
114 """
115 dupes = df.duplicated(subset=columns, keep=False)
116 return df[dupes == True] # noqa: E712
117
118
119 def encode_categorical(df, columns):
120 """
121 Encode the specified columns as categorical.
122
123 Functional usage example:
124
125 .. code-block:: python
126
127 encode_categorical(df, columns="my_categorical_column") # one way
128
129 Method chaining example:
130
131 .. code-block:: python
132
133 df = pd.DataFrame(...)
134 categorical_cols = ['col1', 'col2', 'col4']
135 jn.DataFrame(df).encode_categorical(columns=categorical_cols)
136
137 :param df: The pandas DataFrame object.
138 :param str/iterable columns: A column name or an iterable (list or tuple)
139 of column names.
140 :returns: A pandas DataFrame
141 """
142 if isinstance(columns, list) or isinstance(columns, tuple):
143 for col in columns:
144 assert col in df.columns, \
145 JanitorError("{col} missing from dataframe columns!".format(col=col)) # noqa: E501
146 df[col] = pd.Categorical(df[col])
147 elif isinstance(columns, str):
148 df[columns] = pd.Categorical(df[columns])
149 else:
150 raise JanitorError('kwarg `columns` must be a string or iterable!')
151 return df
152
153
154 def get_features_targets(df, target_columns, feature_columns=None):
155 """
156 Get the features and targets as separate DataFrames/Series.
157
158 The behaviour is as such:
159
160 - `target_columns` is mandatory.
161 - If `feature_columns` is present, then we will respect the column names
162 inside there.
163 - If `feature_columns` is not passed in, then we will assume that the
164 rest of the columns are feature columns, and return them.
165
166 Functional usage example:
167
168 .. code-block:: python
169
170 X, y = get_features_targets(df, target_columns="measurement")
171
172 Method chaining example:
173
174 .. code-block:: python
175
176 df = pd.DataFrame(...)
177 target_cols = ['output1', 'output2']
178 X, y = jn.DataFrame(df).get_features_targets(target_columns=target_cols) # noqa: E501
179
180 :param df: The pandas DataFrame object.
181 :param str/iterable target_columns: Either a column name or an iterable
182 (list or tuple) of column names that are the target(s) to be predicted.
183 :param str/iterable feature_columns: (optional) The column name or iterable
184 of column names that are the features (a.k.a. predictors) used to
185 predict the targets.
186 :returns: (X, Y) the feature matrix (X) and the target matrix (Y). Both are
187 pandas DataFrames.
188 """
189 Y = df[target_columns]
190
191 if feature_columns:
192 X = df[feature_columns]
193 else:
194 if isinstance(target_columns, str):
195 xcols = [c for c in df.columns if target_columns != c]
196 elif (isinstance(target_columns, list)
197 or isinstance(target_columns, tuple)):
198 xcols = [c for c in df.columns if c not in target_columns]
199 X = df[xcols]
200 return X, Y
201
202
203 def rename_column(df, old, new):
204 """
205 Rename a column in place.
206
207 Functional usage example:
208
209 .. code-block:: python
210
211 df = rename_column("old_column_name", "new_column_name")
212
213 Method chaining example:
214
215 .. code-block:: python
216
217 df = pd.DataFrame(...)
218 df = jn.DataFrame(df).rename_column("old_column_name", "new_column_name") # noqa: E501
219
220 This is just syntactic sugar/a convenience function for renaming one column
221 at a time. If you are convinced that there are multiple columns in need of
222 changing, then use the :py:meth:`pandas.DataFrame.rename` method.
223
224 :param str old: The old column name.
225 :param str new: The new column name.
226 :returns: A pandas DataFrame.
227 """
228 return df.rename(columns={old: new})
229
230
231 def coalesce(df, columns, new_column_name):
232 """
233 Coalesces two or more columns of data in order of column names provided.
234
235 Functional usage example:
236
237 .. code-block:: python
238
239 df = coalesce(df, columns=['col1', 'col2'])
240
241 Method chaining example:
242
243 .. code-block:: python
244
245 df = pd.DataFrame(...)
246 df = jn.DataFrame(df).coalesce(['col1', 'col2'])
247
248
249 The result of this function is that we take the first non-null value across
250 rows.
251
252 This is more syntactic diabetes! For R users, this should look familiar to
253 `dplyr`'s `coalesce` function; for Python users, the interface
254 should be more intuitive than the :py:meth:`pandas.Series.combine_first`
255 method (which we're just using internally anyways).
256
257 :param df: A pandas DataFrame.
258 :param columns: A list of column names.
259 :param str new_column_name: The new column name after combining.
260 :returns: A pandas DataFrame.
261 """
262 series = [df[c] for c in columns]
263
264 def _coalesce(series1, series2):
265 return series1.combine_first(series2)
266 df = df.drop(columns=columns)
267 df[new_column_name] = reduce(_coalesce, series) # noqa: F821
268 return df
269
270
271 def convert_excel_date(df, column):
272 """
273 Convert Excel's serial date format into Python datetime format.
274
275 Implementation is also from `Stack Overflow`.
276
277 .. _Stack Overflow: https://stackoverflow.com/questions/38454403/convert-excel-style-date-with-pandas # noqa: E501
278
279 Functional usage example:
280
281 .. code-block:: python
282
283 df = convert_excel_date(df, column='date')
284
285 Method chaining example:
286
287 .. code-block:: python
288
289 df = pd.DataFrame(...)
290 df = jn.DataFrame(df).convert_excel_date('date')
291
292 :param df: A pandas DataFrame.
293 :param str column: A column name.
294 :returns: A pandas DataFrame with corrected dates.
295 """
296 df[column] = (pd.TimedeltaIndex(df[column], unit='d')
297 + dt.datetime(1899, 12, 30))
298 return df
299
300
301 def fill_empty(df, columns, value):
302 """
303 Fill `NaN` values in specified columns with a given value.
304
305 Super sugary syntax that wraps :py:meth:`pandas.DataFrame.fillna`.
306
307 Functional usage example:
308
309 .. code-block:: python
310
311 df = fill_empty(df, columns=['col1', 'col2'], value=0)
312
313 Method chaining example:
314
315 .. code-block:: python
316
317 df = pd.DataFrame(...)
318 df = jn.DataFrame(df).fill_empty(df, columns='col1', value=0)
319
320 :param df: A pandas DataFrame.
321 :param columns: Either a `str` or `list` or `tuple`. If a string is passed
322 in, then only that column will be filled; if a list or tuple of strings
323 are passed in, then they will all be filled with the same value.
324 :param value: The value that replaces the `NaN` values.
325 """
326 if isinstance(columns, list) or isinstance(columns, tuple):
327 for col in columns:
328 assert col in df.columns, \
329 JanitorError("{col} missing from dataframe columns!".format(col=col)) # noqa: E501
330 df[col] = df[col].fillna(value)
331 elif isinstance(columns, str):
332 df[columns] = df[columns].fillna(value)
333 else:
334 raise JanitorError('kwarg `columns` must be a string or iterable!')
335
336 return df
337
[end of janitor/functions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/janitor/functions.py b/janitor/functions.py
--- a/janitor/functions.py
+++ b/janitor/functions.py
@@ -31,26 +31,22 @@
:param df: The pandas DataFrame object.
:returns: A pandas DataFrame.
"""
- columns = [(c.lower()
- .replace(' ', '_')
- .replace('/', '_')
- .replace(':', '_')
- .replace("'", '')
- .replace('’', '')
- .replace(',', '_')
- .replace('?', '_')
- .replace('-', '_')
- .replace('(', '_')
- .replace(')', '_')
- .replace('.', '_')
- ) for c in df.columns]
-
- newcolumns = []
- for col in columns:
- # NOTE: Replace repeating underscores with single ones
- newcol = re.sub('[_]{2,}', '_', col)
- newcolumns.append(newcol)
- df.columns = newcolumns
+ df = df.rename(
+ columns=lambda x: x.lower()
+ .replace(' ', '_')
+ .replace('/', '_')
+ .replace(':', '_')
+ .replace("'", '')
+ .replace('’', '')
+ .replace(',', '_')
+ .replace('?', '_')
+ .replace('-', '_')
+ .replace('(', '_')
+ .replace(')', '_')
+ .replace('.', '_')
+ )
+
+ df = df.rename(columns=lambda x: re.sub('_+', '_', x))
return df
| {"golden_diff": "diff --git a/janitor/functions.py b/janitor/functions.py\n--- a/janitor/functions.py\n+++ b/janitor/functions.py\n@@ -31,26 +31,22 @@\n :param df: The pandas DataFrame object.\n :returns: A pandas DataFrame.\n \"\"\"\n- columns = [(c.lower()\n- .replace(' ', '_')\n- .replace('/', '_')\n- .replace(':', '_')\n- .replace(\"'\", '')\n- .replace('\u2019', '')\n- .replace(',', '_')\n- .replace('?', '_')\n- .replace('-', '_')\n- .replace('(', '_')\n- .replace(')', '_')\n- .replace('.', '_')\n- ) for c in df.columns]\n-\n- newcolumns = []\n- for col in columns:\n- # NOTE: Replace repeating underscores with single ones\n- newcol = re.sub('[_]{2,}', '_', col)\n- newcolumns.append(newcol)\n- df.columns = newcolumns\n+ df = df.rename(\n+ columns=lambda x: x.lower()\n+ .replace(' ', '_')\n+ .replace('/', '_')\n+ .replace(':', '_')\n+ .replace(\"'\", '')\n+ .replace('\u2019', '')\n+ .replace(',', '_')\n+ .replace('?', '_')\n+ .replace('-', '_')\n+ .replace('(', '_')\n+ .replace(')', '_')\n+ .replace('.', '_')\n+ )\n+\n+ df = df.rename(columns=lambda x: re.sub('_+', '_', x))\n return df\n", "issue": "clean_names breaks for MultiIndex Columns\nMultiIndex columns returns `expected str not tuple` type error.\r\n\r\nIf you instead used,\r\n`df.rename(columns=lambda x: x.lower().replace('_', ''))`\r\nthis would work for standard and MultiIndex DataFrames.\r\n\r\nCan do PR if required.\n", "before_files": [{"content": "import datetime as dt\nfrom functools import reduce\n\nimport pandas as pd\n\nfrom .errors import JanitorError\n\nimport re\n\n\ndef clean_names(df):\n \"\"\"\n Clean column names.\n\n Takes all column names, converts them to lowercase, then replaces all\n spaces with underscores.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = clean_names(df)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).clean_names()\n\n :param df: The pandas DataFrame object.\n :returns: A pandas DataFrame.\n \"\"\"\n columns = [(c.lower()\n .replace(' ', '_')\n .replace('/', '_')\n .replace(':', '_')\n .replace(\"'\", '')\n .replace('\u2019', '')\n .replace(',', '_')\n .replace('?', '_')\n .replace('-', '_')\n .replace('(', '_')\n .replace(')', '_')\n .replace('.', '_')\n ) for c in df.columns]\n\n newcolumns = []\n for col in columns:\n # NOTE: Replace repeating underscores with single ones\n newcol = re.sub('[_]{2,}', '_', col)\n newcolumns.append(newcol)\n df.columns = newcolumns\n return df\n\n\ndef remove_empty(df):\n \"\"\"\n Drop all rows and columns that are completely null.\n\n Implementation is shamelessly copied from `StackOverflow`_.\n\n .. _StackOverflow: https://stackoverflow.com/questions/38884538/python-pandas-find-all-rows-where-all-values-are-nan # noqa: E501\n\n Functional usage example:\n\n .. code-block:: python\n\n df = remove_empty(df)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).remove_empty()\n\n :param df: The pandas DataFrame object.\n :returns: A pandas DataFrame.\n \"\"\"\n\n nanrows = df.index[df.isnull().all(axis=1)]\n df.drop(index=nanrows, inplace=True)\n\n nancols = df.columns[df.isnull().all(axis=0)]\n df.drop(columns=nancols, inplace=True)\n\n return df\n\n\ndef get_dupes(df, columns=None):\n \"\"\"\n Returns all duplicate rows.\n\n Functional usage example:\n\n .. code-block:: python\n\n get_dupes(df)\n\n Method chaining example:\n\n .. 
code-block:: python\n\n df = pd.DataFrame(...)\n jn.DataFrame(df).get_dupes()\n\n :param df: The pandas DataFrame object.\n :param str/iterable columns: (optional) A column name or an iterable (list\n or tuple) of column names. Following pandas API, this only considers\n certain columns for identifying duplicates. Defaults to using all\n columns.\n :returns: The duplicate rows, as a pandas DataFrame.\n \"\"\"\n dupes = df.duplicated(subset=columns, keep=False)\n return df[dupes == True] # noqa: E712\n\n\ndef encode_categorical(df, columns):\n \"\"\"\n Encode the specified columns as categorical.\n\n Functional usage example:\n\n .. code-block:: python\n\n encode_categorical(df, columns=\"my_categorical_column\") # one way\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n categorical_cols = ['col1', 'col2', 'col4']\n jn.DataFrame(df).encode_categorical(columns=categorical_cols)\n\n :param df: The pandas DataFrame object.\n :param str/iterable columns: A column name or an iterable (list or tuple)\n of column names.\n :returns: A pandas DataFrame\n \"\"\"\n if isinstance(columns, list) or isinstance(columns, tuple):\n for col in columns:\n assert col in df.columns, \\\n JanitorError(\"{col} missing from dataframe columns!\".format(col=col)) # noqa: E501\n df[col] = pd.Categorical(df[col])\n elif isinstance(columns, str):\n df[columns] = pd.Categorical(df[columns])\n else:\n raise JanitorError('kwarg `columns` must be a string or iterable!')\n return df\n\n\ndef get_features_targets(df, target_columns, feature_columns=None):\n \"\"\"\n Get the features and targets as separate DataFrames/Series.\n\n The behaviour is as such:\n\n - `target_columns` is mandatory.\n - If `feature_columns` is present, then we will respect the column names\n inside there.\n - If `feature_columns` is not passed in, then we will assume that the\n rest of the columns are feature columns, and return them.\n\n Functional usage example:\n\n .. code-block:: python\n\n X, y = get_features_targets(df, target_columns=\"measurement\")\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n target_cols = ['output1', 'output2']\n X, y = jn.DataFrame(df).get_features_targets(target_columns=target_cols) # noqa: E501\n\n :param df: The pandas DataFrame object.\n :param str/iterable target_columns: Either a column name or an iterable\n (list or tuple) of column names that are the target(s) to be predicted.\n :param str/iterable feature_columns: (optional) The column name or iterable\n of column names that are the features (a.k.a. predictors) used to\n predict the targets.\n :returns: (X, Y) the feature matrix (X) and the target matrix (Y). Both are\n pandas DataFrames.\n \"\"\"\n Y = df[target_columns]\n\n if feature_columns:\n X = df[feature_columns]\n else:\n if isinstance(target_columns, str):\n xcols = [c for c in df.columns if target_columns != c]\n elif (isinstance(target_columns, list)\n or isinstance(target_columns, tuple)):\n xcols = [c for c in df.columns if c not in target_columns]\n X = df[xcols]\n return X, Y\n\n\ndef rename_column(df, old, new):\n \"\"\"\n Rename a column in place.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = rename_column(\"old_column_name\", \"new_column_name\")\n\n Method chaining example:\n\n .. 
code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).rename_column(\"old_column_name\", \"new_column_name\") # noqa: E501\n\n This is just syntactic sugar/a convenience function for renaming one column\n at a time. If you are convinced that there are multiple columns in need of\n changing, then use the :py:meth:`pandas.DataFrame.rename` method.\n\n :param str old: The old column name.\n :param str new: The new column name.\n :returns: A pandas DataFrame.\n \"\"\"\n return df.rename(columns={old: new})\n\n\ndef coalesce(df, columns, new_column_name):\n \"\"\"\n Coalesces two or more columns of data in order of column names provided.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = coalesce(df, columns=['col1', 'col2'])\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).coalesce(['col1', 'col2'])\n\n\n The result of this function is that we take the first non-null value across\n rows.\n\n This is more syntactic diabetes! For R users, this should look familiar to\n `dplyr`'s `coalesce` function; for Python users, the interface\n should be more intuitive than the :py:meth:`pandas.Series.combine_first`\n method (which we're just using internally anyways).\n\n :param df: A pandas DataFrame.\n :param columns: A list of column names.\n :param str new_column_name: The new column name after combining.\n :returns: A pandas DataFrame.\n \"\"\"\n series = [df[c] for c in columns]\n\n def _coalesce(series1, series2):\n return series1.combine_first(series2)\n df = df.drop(columns=columns)\n df[new_column_name] = reduce(_coalesce, series) # noqa: F821\n return df\n\n\ndef convert_excel_date(df, column):\n \"\"\"\n Convert Excel's serial date format into Python datetime format.\n\n Implementation is also from `Stack Overflow`.\n\n .. _Stack Overflow: https://stackoverflow.com/questions/38454403/convert-excel-style-date-with-pandas # noqa: E501\n\n Functional usage example:\n\n .. code-block:: python\n\n df = convert_excel_date(df, column='date')\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).convert_excel_date('date')\n\n :param df: A pandas DataFrame.\n :param str column: A column name.\n :returns: A pandas DataFrame with corrected dates.\n \"\"\"\n df[column] = (pd.TimedeltaIndex(df[column], unit='d')\n + dt.datetime(1899, 12, 30))\n return df\n\n\ndef fill_empty(df, columns, value):\n \"\"\"\n Fill `NaN` values in specified columns with a given value.\n\n Super sugary syntax that wraps :py:meth:`pandas.DataFrame.fillna`.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = fill_empty(df, columns=['col1', 'col2'], value=0)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).fill_empty(df, columns='col1', value=0)\n\n :param df: A pandas DataFrame.\n :param columns: Either a `str` or `list` or `tuple`. 
If a string is passed\n in, then only that column will be filled; if a list or tuple of strings\n are passed in, then they will all be filled with the same value.\n :param value: The value that replaces the `NaN` values.\n \"\"\"\n if isinstance(columns, list) or isinstance(columns, tuple):\n for col in columns:\n assert col in df.columns, \\\n JanitorError(\"{col} missing from dataframe columns!\".format(col=col)) # noqa: E501\n df[col] = df[col].fillna(value)\n elif isinstance(columns, str):\n df[columns] = df[columns].fillna(value)\n else:\n raise JanitorError('kwarg `columns` must be a string or iterable!')\n\n return df\n", "path": "janitor/functions.py"}]} | 3,867 | 344 |
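Between the second and third sample rows, a short standalone illustration (not part of the dataset) of the MultiIndex failure mode described in the pyjanitor issue: iterating over `df.columns` yields tuples, so string methods fail, while `DataFrame.rename` with a callable applies the function to each label within every level. The column names are made up, and the exact exception text depends on how the tuple labels are consumed.

```python
# Illustration only: made-up column names; shows why rename(columns=callable) handles MultiIndex.
import pandas as pd

df = pd.DataFrame(
    [[1, 2], [3, 4]],
    columns=pd.MultiIndex.from_tuples([("Group A", "First Col"), ("Group A", "Second Col")]),
)

try:
    df.columns = [c.lower().replace(" ", "_") for c in df.columns]  # c is a tuple here
except AttributeError as exc:
    print(exc)  # 'tuple' object has no attribute 'lower'

cleaned = df.rename(columns=lambda x: x.lower().replace(" ", "_"))
print(cleaned.columns.tolist())  # [('group_a', 'first_col'), ('group_a', 'second_col')]
```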
problem_id: gh_patches_debug_13860 | source: rasdani/github-patches | task_type: git_diff | in_source_id: pre-commit__pre-commit-83

prompt: You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pre-commit crashes when running during unresolved merge conflict
I intentionally forced the following by making two branches conflict and then editing a file on that branch. `pre-commit` should fail-fast in a merge conflict situation.
```
$ git diff --exit-code
diff --cc foo.txt
index 8ff26e7,c148433..0000000
--- a/foo.txt
+++ b/foo.txt
@@@ -1,4 -1,5 +1,11 @@@
asdf
++<<<<<<< HEAD
+fdsa
+yeah
+yeah
++=======
+ asdf
+ asdf
+ asdf
+
++>>>>>>> derp
diff --git a/git_code_debt/generate.py b/git_code_debt/generate.py
index 12ceec6..967506e 100644
--- a/git_code_debt/generate.py
+++ b/git_code_debt/generate.py
@@ -12,6 +12,7 @@ from git_code_debt.logic import get_previous_sha
from git_code_debt.logic import insert_metric_values
from git_code_debt.repo_parser import RepoParser
+
def get_metrics(diff, metric_parsers):
def get_all_metrics(file_diff_stats):
for metric_parser_cls in metric_parsers:
(py_env)[anthony@anthony-VirtualBox git-code-debt (herp|MERGING)]$ echo $?
1
(py_env)[anthony@anthony-VirtualBox git-code-debt (herp|MERGING)]$ pre-commit
[WARNING] Unstaged files detected.
[INFO] Stashing unstaged files to /tmp/git-code-debt/.pre-commit-files/patch1397455577.
Traceback (most recent call last):
File "/tmp/git-code-debt/py_env/bin/pre-commit", line 9, in <module>
load_entry_point('pre-commit==0.0.0', 'console_scripts', 'pre-commit')()
File "/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/util.py", line 52, in wrapper
return func(argv)
File "/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/run.py", line 59, in run
return commands.run(runner, args)
File "/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/commands.py", line 242, in run
with ctx:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/staged_files_only.py", line 35, in staged_files_only
cmd_runner.run(['git', 'checkout', '--', '.'])
File "/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py", line 77, in run
returncode, replaced_cmd, retcode, output=(stdout, stderr),
pre_commit.prefixed_command_runner.CalledProcessError: Command: ['git', 'checkout', '--', '.']
Return code: 1
Expected return code: 0
Output: (u'', u"error: path 'foo.txt' is unmerged\n")
(py_env)[anthony@anthony-VirtualBox git-code-debt (herp|MERGING)]$
```
</issue>
<code>
[start of pre_commit/commands.py]
1 from __future__ import print_function
2
3 import logging
4 import os
5 import pkg_resources
6 import shutil
7 import stat
8 import subprocess
9 import sys
10 from asottile.ordereddict import OrderedDict
11 from asottile.yaml import ordered_dump
12 from asottile.yaml import ordered_load
13 from plumbum import local
14
15 import pre_commit.constants as C
16 from pre_commit import git
17 from pre_commit import color
18 from pre_commit.clientlib.validate_config import CONFIG_JSON_SCHEMA
19 from pre_commit.clientlib.validate_config import load_config
20 from pre_commit.jsonschema_extensions import remove_defaults
21 from pre_commit.logging_handler import LoggingHandler
22 from pre_commit.repository import Repository
23 from pre_commit.staged_files_only import staged_files_only
24 from pre_commit.util import noop_context
25
26
27 logger = logging.getLogger('pre_commit')
28
29 COLS = int(subprocess.Popen(['tput', 'cols'], stdout=subprocess.PIPE).communicate()[0])
30
31 PASS_FAIL_LENGTH = 6
32
33
34 def install(runner):
35 """Install the pre-commit hooks."""
36 pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')
37 with open(runner.pre_commit_path, 'w') as pre_commit_file_obj:
38 pre_commit_file_obj.write(open(pre_commit_file).read())
39
40 original_mode = os.stat(runner.pre_commit_path).st_mode
41 os.chmod(
42 runner.pre_commit_path,
43 original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,
44 )
45
46 print('pre-commit installed at {0}'.format(runner.pre_commit_path))
47
48 return 0
49
50
51 def uninstall(runner):
52 """Uninstall the pre-commit hooks."""
53 if os.path.exists(runner.pre_commit_path):
54 os.remove(runner.pre_commit_path)
55 print('pre-commit uninstalled')
56 return 0
57
58
59 class RepositoryCannotBeUpdatedError(RuntimeError):
60 pass
61
62
63 def _update_repository(repo_config):
64 """Updates a repository to the tip of `master`. If the repository cannot
65 be updated because a hook that is configured does not exist in `master`,
66 this raises a RepositoryCannotBeUpdatedError
67
68 Args:
69 repo_config - A config for a repository
70 """
71 repo = Repository(repo_config)
72
73 with repo.in_checkout():
74 local['git']['fetch']()
75 head_sha = local['git']['rev-parse', 'origin/master']().strip()
76
77 # Don't bother trying to update if our sha is the same
78 if head_sha == repo_config['sha']:
79 return repo_config
80
81 # Construct a new config with the head sha
82 new_config = OrderedDict(repo_config)
83 new_config['sha'] = head_sha
84 new_repo = Repository(new_config)
85
86 # See if any of our hooks were deleted with the new commits
87 hooks = set(repo.hooks.keys())
88 hooks_missing = hooks - (hooks & set(new_repo.manifest.keys()))
89 if hooks_missing:
90 raise RepositoryCannotBeUpdatedError(
91 'Cannot update because the tip of master is missing these hooks:\n'
92 '{0}'.format(', '.join(sorted(hooks_missing)))
93 )
94
95 return remove_defaults([new_config], CONFIG_JSON_SCHEMA)[0]
96
97
98 def autoupdate(runner):
99 """Auto-update the pre-commit config to the latest versions of repos."""
100 retv = 0
101 output_configs = []
102 changed = False
103
104 input_configs = load_config(
105 runner.config_file_path,
106 load_strategy=ordered_load,
107 )
108
109 for repo_config in input_configs:
110 print('Updating {0}...'.format(repo_config['repo']), end='')
111 try:
112 new_repo_config = _update_repository(repo_config)
113 except RepositoryCannotBeUpdatedError as error:
114 print(error.args[0])
115 output_configs.append(repo_config)
116 retv = 1
117 continue
118
119 if new_repo_config['sha'] != repo_config['sha']:
120 changed = True
121 print(
122 'updating {0} -> {1}.'.format(
123 repo_config['sha'], new_repo_config['sha'],
124 )
125 )
126 output_configs.append(new_repo_config)
127 else:
128 print('already up to date.')
129 output_configs.append(repo_config)
130
131 if changed:
132 with open(runner.config_file_path, 'w') as config_file:
133 config_file.write(
134 ordered_dump(output_configs, **C.YAML_DUMP_KWARGS)
135 )
136
137 return retv
138
139
140 def clean(runner):
141 if os.path.exists(runner.hooks_workspace_path):
142 shutil.rmtree(runner.hooks_workspace_path)
143 print('Cleaned {0}.'.format(runner.hooks_workspace_path))
144 return 0
145
146
147 def _run_single_hook(runner, repository, hook_id, args, write):
148 if args.all_files:
149 get_filenames = git.get_all_files_matching
150 else:
151 get_filenames = git.get_staged_files_matching
152
153 hook = repository.hooks[hook_id]
154
155 filenames = get_filenames(hook['files'], hook['exclude'])
156 if not filenames:
157 no_files_msg = '(no files to check) '
158 skipped_msg = 'Skipped'
159 write(
160 '{0}{1}{2}{3}\n'.format(
161 hook['name'],
162 '.' * (
163 COLS -
164 len(hook['name']) -
165 len(no_files_msg) -
166 len(skipped_msg) -
167 6
168 ),
169 no_files_msg,
170 color.format_color(skipped_msg, color.TURQUOISE, args.color),
171 )
172 )
173 return 0
174
175 # Print the hook and the dots first in case the hook takes hella long to
176 # run.
177 write(
178 '{0}{1}'.format(
179 hook['name'],
180 '.' * (COLS - len(hook['name']) - PASS_FAIL_LENGTH - 6),
181 ),
182 )
183 sys.stdout.flush()
184
185 retcode, stdout, stderr = repository.run_hook(
186 runner.cmd_runner,
187 hook_id,
188 filenames,
189 )
190
191 if retcode != repository.hooks[hook_id]['expected_return_value']:
192 retcode = 1
193 print_color = color.RED
194 pass_fail = 'Failed'
195 else:
196 retcode = 0
197 print_color = color.GREEN
198 pass_fail = 'Passed'
199
200 write(color.format_color(pass_fail, print_color, args.color) + '\n')
201
202 if (stdout or stderr) and (retcode or args.verbose):
203 write('\n')
204 for output in (stdout, stderr):
205 if output.strip():
206 write(output.strip() + '\n')
207 write('\n')
208
209 return retcode
210
211
212 def _run_hooks(runner, args, write):
213 """Actually run the hooks."""
214 retval = 0
215
216 for repo in runner.repositories:
217 for hook_id in repo.hooks:
218 retval |= _run_single_hook(runner, repo, hook_id, args, write=write)
219
220 return retval
221
222
223 def _run_hook(runner, hook_id, args, write):
224 for repo in runner.repositories:
225 if hook_id in repo.hooks:
226 return _run_single_hook(runner, repo, hook_id, args, write=write)
227 else:
228 write('No hook with id `{0}`\n'.format(hook_id))
229 return 1
230
231
232 def run(runner, args, write=sys.stdout.write):
233 # Set up our logging handler
234 logger.addHandler(LoggingHandler(args.color, write=write))
235 logger.setLevel(logging.INFO)
236
237 if args.no_stash or args.all_files:
238 ctx = noop_context()
239 else:
240 ctx = staged_files_only(runner.cmd_runner)
241
242 with ctx:
243 if args.hook:
244 return _run_hook(runner, args.hook, args, write=write)
245 else:
246 return _run_hooks(runner, args, write=write)
247
[end of pre_commit/commands.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/commands.py b/pre_commit/commands.py
--- a/pre_commit/commands.py
+++ b/pre_commit/commands.py
@@ -229,11 +229,21 @@
return 1
+def _has_unmerged_paths(runner):
+ _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])
+ return bool(stdout.strip())
+
+
def run(runner, args, write=sys.stdout.write):
# Set up our logging handler
logger.addHandler(LoggingHandler(args.color, write=write))
logger.setLevel(logging.INFO)
+ # Check if we have unresolved merge conflict files and fail fast.
+ if _has_unmerged_paths(runner):
+ logger.error('Unmerged files. Resolve before committing.')
+ return 1
+
if args.no_stash or args.all_files:
ctx = noop_context()
else:
| {"golden_diff": "diff --git a/pre_commit/commands.py b/pre_commit/commands.py\n--- a/pre_commit/commands.py\n+++ b/pre_commit/commands.py\n@@ -229,11 +229,21 @@\n return 1\n \n \n+def _has_unmerged_paths(runner):\n+ _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])\n+ return bool(stdout.strip())\n+\n+\n def run(runner, args, write=sys.stdout.write):\n # Set up our logging handler\n logger.addHandler(LoggingHandler(args.color, write=write))\n logger.setLevel(logging.INFO)\n \n+ # Check if we have unresolved merge conflict files and fail fast.\n+ if _has_unmerged_paths(runner):\n+ logger.error('Unmerged files. Resolve before committing.')\n+ return 1\n+\n if args.no_stash or args.all_files:\n ctx = noop_context()\n else:\n", "issue": "pre-commit crashes when running during unresolved merge conflict\nI intentionally forced the following by making two branches conflict and then editing a file on that branch. `pre-commit` should fail-fast in a merge conflict situation.\n\n```\n$ git diff --exit-code\ndiff --cc foo.txt\nindex 8ff26e7,c148433..0000000\n--- a/foo.txt\n+++ b/foo.txt\n@@@ -1,4 -1,5 +1,11 @@@\n asdf\n++<<<<<<< HEAD\n +fdsa\n +yeah\n +yeah\n++=======\n+ asdf\n+ asdf\n+ asdf\n+ \n++>>>>>>> derp\ndiff --git a/git_code_debt/generate.py b/git_code_debt/generate.py\nindex 12ceec6..967506e 100644\n--- a/git_code_debt/generate.py\n+++ b/git_code_debt/generate.py\n@@ -12,6 +12,7 @@ from git_code_debt.logic import get_previous_sha\n from git_code_debt.logic import insert_metric_values\n from git_code_debt.repo_parser import RepoParser\n\n+\n def get_metrics(diff, metric_parsers):\n def get_all_metrics(file_diff_stats):\n for metric_parser_cls in metric_parsers:\n(py_env)[anthony@anthony-VirtualBox git-code-debt (herp|MERGING)]$ echo $?\n1\n(py_env)[anthony@anthony-VirtualBox git-code-debt (herp|MERGING)]$ pre-commit \n[WARNING] Unstaged files detected.\n[INFO] Stashing unstaged files to /tmp/git-code-debt/.pre-commit-files/patch1397455577.\nTraceback (most recent call last):\n File \"/tmp/git-code-debt/py_env/bin/pre-commit\", line 9, in <module>\n load_entry_point('pre-commit==0.0.0', 'console_scripts', 'pre-commit')()\n File \"/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/util.py\", line 52, in wrapper\n return func(argv)\n File \"/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/run.py\", line 59, in run\n return commands.run(runner, args)\n File \"/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/commands.py\", line 242, in run\n with ctx:\n File \"/usr/lib/python2.7/contextlib.py\", line 17, in __enter__\n return self.gen.next()\n File \"/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/staged_files_only.py\", line 35, in staged_files_only\n cmd_runner.run(['git', 'checkout', '--', '.'])\n File \"/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py\", line 77, in run\n returncode, replaced_cmd, retcode, output=(stdout, stderr),\npre_commit.prefixed_command_runner.CalledProcessError: Command: ['git', 'checkout', '--', '.']\nReturn code: 1\nExpected return code: 0\nOutput: (u'', u\"error: path 'foo.txt' is unmerged\\n\")\n\n(py_env)[anthony@anthony-VirtualBox git-code-debt (herp|MERGING)]$ \n```\n\n", "before_files": [{"content": "from __future__ import print_function\n\nimport logging\nimport os\nimport pkg_resources\nimport shutil\nimport stat\nimport subprocess\nimport sys\nfrom asottile.ordereddict import OrderedDict\nfrom 
asottile.yaml import ordered_dump\nfrom asottile.yaml import ordered_load\nfrom plumbum import local\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit import color\nfrom pre_commit.clientlib.validate_config import CONFIG_JSON_SCHEMA\nfrom pre_commit.clientlib.validate_config import load_config\nfrom pre_commit.jsonschema_extensions import remove_defaults\nfrom pre_commit.logging_handler import LoggingHandler\nfrom pre_commit.repository import Repository\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\nCOLS = int(subprocess.Popen(['tput', 'cols'], stdout=subprocess.PIPE).communicate()[0])\n\nPASS_FAIL_LENGTH = 6\n\n\ndef install(runner):\n \"\"\"Install the pre-commit hooks.\"\"\"\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\n with open(runner.pre_commit_path, 'w') as pre_commit_file_obj:\n pre_commit_file_obj.write(open(pre_commit_file).read())\n\n original_mode = os.stat(runner.pre_commit_path).st_mode\n os.chmod(\n runner.pre_commit_path,\n original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,\n )\n\n print('pre-commit installed at {0}'.format(runner.pre_commit_path))\n\n return 0\n\n\ndef uninstall(runner):\n \"\"\"Uninstall the pre-commit hooks.\"\"\"\n if os.path.exists(runner.pre_commit_path):\n os.remove(runner.pre_commit_path)\n print('pre-commit uninstalled')\n return 0\n\n\nclass RepositoryCannotBeUpdatedError(RuntimeError):\n pass\n\n\ndef _update_repository(repo_config):\n \"\"\"Updates a repository to the tip of `master`. If the repository cannot\n be updated because a hook that is configured does not exist in `master`,\n this raises a RepositoryCannotBeUpdatedError\n\n Args:\n repo_config - A config for a repository\n \"\"\"\n repo = Repository(repo_config)\n\n with repo.in_checkout():\n local['git']['fetch']()\n head_sha = local['git']['rev-parse', 'origin/master']().strip()\n\n # Don't bother trying to update if our sha is the same\n if head_sha == repo_config['sha']:\n return repo_config\n\n # Construct a new config with the head sha\n new_config = OrderedDict(repo_config)\n new_config['sha'] = head_sha\n new_repo = Repository(new_config)\n\n # See if any of our hooks were deleted with the new commits\n hooks = set(repo.hooks.keys())\n hooks_missing = hooks - (hooks & set(new_repo.manifest.keys()))\n if hooks_missing:\n raise RepositoryCannotBeUpdatedError(\n 'Cannot update because the tip of master is missing these hooks:\\n'\n '{0}'.format(', '.join(sorted(hooks_missing)))\n )\n\n return remove_defaults([new_config], CONFIG_JSON_SCHEMA)[0]\n\n\ndef autoupdate(runner):\n \"\"\"Auto-update the pre-commit config to the latest versions of repos.\"\"\"\n retv = 0\n output_configs = []\n changed = False\n\n input_configs = load_config(\n runner.config_file_path,\n load_strategy=ordered_load,\n )\n\n for repo_config in input_configs:\n print('Updating {0}...'.format(repo_config['repo']), end='')\n try:\n new_repo_config = _update_repository(repo_config)\n except RepositoryCannotBeUpdatedError as error:\n print(error.args[0])\n output_configs.append(repo_config)\n retv = 1\n continue\n\n if new_repo_config['sha'] != repo_config['sha']:\n changed = True\n print(\n 'updating {0} -> {1}.'.format(\n repo_config['sha'], new_repo_config['sha'],\n )\n )\n output_configs.append(new_repo_config)\n else:\n print('already up to date.')\n output_configs.append(repo_config)\n\n if changed:\n with 
open(runner.config_file_path, 'w') as config_file:\n config_file.write(\n ordered_dump(output_configs, **C.YAML_DUMP_KWARGS)\n )\n\n return retv\n\n\ndef clean(runner):\n if os.path.exists(runner.hooks_workspace_path):\n shutil.rmtree(runner.hooks_workspace_path)\n print('Cleaned {0}.'.format(runner.hooks_workspace_path))\n return 0\n\n\ndef _run_single_hook(runner, repository, hook_id, args, write):\n if args.all_files:\n get_filenames = git.get_all_files_matching\n else:\n get_filenames = git.get_staged_files_matching\n\n hook = repository.hooks[hook_id]\n\n filenames = get_filenames(hook['files'], hook['exclude'])\n if not filenames:\n no_files_msg = '(no files to check) '\n skipped_msg = 'Skipped'\n write(\n '{0}{1}{2}{3}\\n'.format(\n hook['name'],\n '.' * (\n COLS -\n len(hook['name']) -\n len(no_files_msg) -\n len(skipped_msg) -\n 6\n ),\n no_files_msg,\n color.format_color(skipped_msg, color.TURQUOISE, args.color),\n )\n )\n return 0\n\n # Print the hook and the dots first in case the hook takes hella long to\n # run.\n write(\n '{0}{1}'.format(\n hook['name'],\n '.' * (COLS - len(hook['name']) - PASS_FAIL_LENGTH - 6),\n ),\n )\n sys.stdout.flush()\n\n retcode, stdout, stderr = repository.run_hook(\n runner.cmd_runner,\n hook_id,\n filenames,\n )\n\n if retcode != repository.hooks[hook_id]['expected_return_value']:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n else:\n retcode = 0\n print_color = color.GREEN\n pass_fail = 'Passed'\n\n write(color.format_color(pass_fail, print_color, args.color) + '\\n')\n\n if (stdout or stderr) and (retcode or args.verbose):\n write('\\n')\n for output in (stdout, stderr):\n if output.strip():\n write(output.strip() + '\\n')\n write('\\n')\n\n return retcode\n\n\ndef _run_hooks(runner, args, write):\n \"\"\"Actually run the hooks.\"\"\"\n retval = 0\n\n for repo in runner.repositories:\n for hook_id in repo.hooks:\n retval |= _run_single_hook(runner, repo, hook_id, args, write=write)\n\n return retval\n\n\ndef _run_hook(runner, hook_id, args, write):\n for repo in runner.repositories:\n if hook_id in repo.hooks:\n return _run_single_hook(runner, repo, hook_id, args, write=write)\n else:\n write('No hook with id `{0}`\\n'.format(hook_id))\n return 1\n\n\ndef run(runner, args, write=sys.stdout.write):\n # Set up our logging handler\n logger.addHandler(LoggingHandler(args.color, write=write))\n logger.setLevel(logging.INFO)\n\n if args.no_stash or args.all_files:\n ctx = noop_context()\n else:\n ctx = staged_files_only(runner.cmd_runner)\n\n with ctx:\n if args.hook:\n return _run_hook(runner, args.hook, args, write=write)\n else:\n return _run_hooks(runner, args, write=write)\n", "path": "pre_commit/commands.py"}]} | 3,607 | 202 |
gh_patches_debug_26963 | rasdani/github-patches | git_diff | sktime__sktime-6439 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[DOC] Misleading docstring parameters for convert utility function of dask_to_pd and vice versa
#### Describe the issue linked to the documentation
The docstring of function `convert_dask_to_pandas` and `convert_pandas_to_dask` has been interchanged in terms of parameters and return object
```python
def convert_dask_to_pandas(obj):
"""Convert dask DataFrame to pandas DataFrame, preserving MultiIndex.
Parameters
----------
obj : pandas.DataFrame
Returns
-------
dask DataFrame
MultiIndex levels 0 .. -1 of X are converted to columns of name
__index__[indexname], where indexname is name of multiindex level,
or the integer index if the level has no name
other columns and column names are identical to those of X
"""
```
Here the type of parameter for the function should be `dask.DataFrame` while the return type should be `pandas.DataFrame`. There's similar issue in the docstring of `convert_pandas_to_dask`.
</issue>
<code>
[start of sktime/datatypes/_adapter/dask_to_pd.py]
1 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
2 """Converter utilities between dask and pandas, with multiindex convention.
3
4 Converts between:
5 pd.DataFrames with ordinary (single-level) index or pd.Multiindex, and
6 dask DataFrame
7
8 If pd.DataFrame has ordinary index, converts using dask compute/from_pandas
9
10 if pd.DataFrame has MultiIndex, converts and back-converts
11 MultiIndex columns to DataFrame columns with the name:
12 __index__[indexname], if level has a name indexname
13 __index__[index_iloc], if level has no indexname and is index_iloc-th level
14 index is replaced by a string index where tuples are replaced with str coerced elements
15 """
16 import pandas as pd
17
18 from sktime.datatypes._common import _req
19 from sktime.datatypes._common import _ret as ret
20
21
22 def _is_mi_col(x):
23 return isinstance(x, str) and x.startswith("__index__")
24
25
26 def get_mi_cols(obj):
27 """Get multiindex cols from a dask object.
28
29 Parameters
30 ----------
31 obj : dask DataFrame
32
33 Returns
34 -------
35 list of pandas index elements
36 all column index elements of obj that start with __index__
37 i.e., columns that are interpreted as multiindex columns in the correspondence
38 """
39 return [x for x in obj.columns if _is_mi_col(x)]
40
41
42 def convert_dask_to_pandas(obj):
43 """Convert dask DataFrame to pandas DataFrame, preserving MultiIndex.
44
45 Parameters
46 ----------
47 obj : pandas.DataFrame
48
49 Returns
50 -------
51 dask DataFrame
52 MultiIndex levels 0 .. -1 of X are converted to columns of name
53 __index__[indexname], where indexname is name of multiindex level,
54 or the integer index if the level has no name
55 other columns and column names are identical to those of X
56 """
57 obj = obj.compute()
58
59 def mi_name(x):
60 return x.split("__index__")[1]
61
62 def mi_names(names):
63 new_names = [mi_name(x) for x in names]
64 for i, name in enumerate(new_names):
65 if name == str(i):
66 new_names[i] = None
67 return new_names
68
69 multi_cols = get_mi_cols(obj)
70
71 # if has multi-index cols, move to pandas MultiIndex
72 if len(multi_cols) > 0:
73 obj = obj.set_index(multi_cols)
74
75 names = obj.index.names
76 new_names = mi_names(names)
77 new_names = new_names
78
79 obj.index.names = new_names
80
81 return obj
82
83
84 def convert_pandas_to_dask(obj, npartitions=1, chunksize=None, sort=True):
85 """Convert pandas DataFrame to dask DataFrame, preserving MultiIndex.
86
87 Parameters
88 ----------
89 obj : dask DataFrame
90 npartitions : int or None, optional, default = 1
91 npartitions passed to dask from_pandas when converting obj to dask
92 chunksize : int or None, optional, default = None
93 chunksize passed to dask from_pandas when converting obj to dask
94 sort : bool, optional, default = True
95 sort passed to dask from_pandas when converting obj to dask
96
97 Returns
98 -------
99 pandas.DataFrame
100 MultiIndex levels 0 .. -1 of X are converted to columns of name
101 __index__[indexname], where indexname is name of multiindex level,
102 or the integer index if the level has no name
103 other columns and column names are identical to those of X
104 """
105 from dask.dataframe import from_pandas
106
107 def dask_mi_names(names):
108 res = list(names).copy()
109 for i, name in enumerate(names):
110 if name is None:
111 res[i] = str(i)
112 return [f"__index__{x}" for x in res]
113
114 if isinstance(obj.index, pd.MultiIndex):
115 names = obj.index.names
116 new_names = dask_mi_names(names)
117 new_index = [str(x) for x in obj.index]
118
119 obj = obj.copy()
120 obj.index.names = new_names
121 obj = obj.reset_index()
122 obj.index = new_index
123
124 obj = from_pandas(obj, npartitions=npartitions, chunksize=chunksize, sort=sort)
125
126 return obj
127
128
129 def check_dask_frame(
130 obj, return_metadata=False, var_name="obj", freq_set_check=False, scitype="Series"
131 ):
132 """Check dask frame, generic for sktime check format."""
133 import dask
134
135 metadata = {}
136
137 if not isinstance(obj, dask.dataframe.core.DataFrame):
138 msg = f"{var_name} must be a dask DataFrame, found {type(obj)}"
139 return ret(False, msg, None, return_metadata)
140
141 # we now know obj is a dask DataFrame
142
143 index_cols = get_mi_cols(obj)
144
145 # check right number of cols depending on scitype
146 if scitype == "Series":
147 cols_msg = (
148 f"{var_name} must have exactly one index column, "
149 f"found {len(index_cols)}, namely: {index_cols}"
150 )
151 right_no_index_cols = len(index_cols) <= 1
152 elif scitype == "Panel":
153 cols_msg = (
154 f"{var_name} must have exactly two index columns, "
155 f"found {len(index_cols)}, namely: {index_cols}"
156 )
157 right_no_index_cols = len(index_cols) == 2
158 elif scitype == "Hierarchical":
159 cols_msg = (
160 f"{var_name} must have three or more index columns, "
161 f"found {len(index_cols)}, namely: {index_cols}"
162 )
163 right_no_index_cols = len(index_cols) >= 3
164 else:
165 return RuntimeError(
166 'scitype arg of check_dask_frame must be one of strings "Series", '
167 f'"Panel", or "Hierarchical", but found {scitype}'
168 )
169
170 if not right_no_index_cols:
171 # dask series should have at most one __index__ col
172 return ret(False, cols_msg, None, return_metadata)
173
174 if _req("is_empty", return_metadata):
175 metadata["is_empty"] = len(obj.index) < 1 or len(obj.columns) < 1
176 if _req("is_univariate", return_metadata):
177 metadata["is_univariate"] = len(obj.columns) == 1
178 if _req("n_features", return_metadata):
179 metadata["n_features"] = len(obj.columns)
180 if _req("feature_names", return_metadata):
181 metadata["feature_names"] = obj.columns.to_list()
182
183 # check that columns are unique
184 if not obj.columns.is_unique:
185 msg = f"{var_name} must have unique column indices, but found {obj.columns}"
186 return ret(False, msg, None, return_metadata)
187
188 # check whether the time index is of valid type
189 # if not is_in_valid_index_types(index):
190 # msg = (
191 # f"{type(index)} is not supported for {var_name}, use "
192 # f"one of {VALID_INDEX_TYPES} or integer index instead."
193 # )
194 # return ret(False, msg, None, return_metadata)
195
196 # Check time index is ordered in time
197 if not obj.index.is_monotonic_increasing.compute():
198 msg = (
199 f"The (time) index of {var_name} must be sorted "
200 f"monotonically increasing, but found: {obj.index}"
201 )
202 return ret(False, msg, None, return_metadata)
203
204 if freq_set_check and isinstance(obj.index, pd.DatetimeIndex):
205 if obj.index.freq is None:
206 msg = f"{var_name} has DatetimeIndex, but no freq attribute set."
207 return ret(False, msg, None, return_metadata)
208
209 # check whether index is equally spaced or if there are any nans
210 # compute only if needed
211 if _req("is_equally_spaced", return_metadata):
212 # todo: logic for equal spacing
213 metadata["is_equally_spaced"] = True
214 if _req("has_nans", return_metadata):
215 metadata["has_nans"] = obj.isnull().values.any().compute()
216
217 if scitype in ["Panel", "Hierarchical"]:
218 if _req("n_instances", return_metadata):
219 instance_cols = index_cols[:-1]
220 metadata["n_instances"] = len(obj[instance_cols].drop_duplicates())
221
222 if scitype in ["Hierarchical"]:
223 if _req("n_panels", return_metadata):
224 panel_cols = index_cols[:-2]
225 metadata["n_panels"] = len(obj[panel_cols].drop_duplicates())
226
227 return ret(True, None, metadata, return_metadata)
228
[end of sktime/datatypes/_adapter/dask_to_pd.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sktime/datatypes/_adapter/dask_to_pd.py b/sktime/datatypes/_adapter/dask_to_pd.py
--- a/sktime/datatypes/_adapter/dask_to_pd.py
+++ b/sktime/datatypes/_adapter/dask_to_pd.py
@@ -44,11 +44,11 @@
Parameters
----------
- obj : pandas.DataFrame
+ obj : dask DataFrame
Returns
-------
- dask DataFrame
+ pandas.DataFrame
MultiIndex levels 0 .. -1 of X are converted to columns of name
__index__[indexname], where indexname is name of multiindex level,
or the integer index if the level has no name
@@ -86,7 +86,7 @@
Parameters
----------
- obj : dask DataFrame
+ obj : pandas.DataFrame
npartitions : int or None, optional, default = 1
npartitions passed to dask from_pandas when converting obj to dask
chunksize : int or None, optional, default = None
@@ -96,7 +96,7 @@
Returns
-------
- pandas.DataFrame
+ dask DataFrame
MultiIndex levels 0 .. -1 of X are converted to columns of name
__index__[indexname], where indexname is name of multiindex level,
or the integer index if the level has no name
| {"golden_diff": "diff --git a/sktime/datatypes/_adapter/dask_to_pd.py b/sktime/datatypes/_adapter/dask_to_pd.py\n--- a/sktime/datatypes/_adapter/dask_to_pd.py\n+++ b/sktime/datatypes/_adapter/dask_to_pd.py\n@@ -44,11 +44,11 @@\n \n Parameters\n ----------\n- obj : pandas.DataFrame\n+ obj : dask DataFrame\n \n Returns\n -------\n- dask DataFrame\n+ pandas.DataFrame\n MultiIndex levels 0 .. -1 of X are converted to columns of name\n __index__[indexname], where indexname is name of multiindex level,\n or the integer index if the level has no name\n@@ -86,7 +86,7 @@\n \n Parameters\n ----------\n- obj : dask DataFrame\n+ obj : pandas.DataFrame\n npartitions : int or None, optional, default = 1\n npartitions passed to dask from_pandas when converting obj to dask\n chunksize : int or None, optional, default = None\n@@ -96,7 +96,7 @@\n \n Returns\n -------\n- pandas.DataFrame\n+ dask DataFrame\n MultiIndex levels 0 .. -1 of X are converted to columns of name\n __index__[indexname], where indexname is name of multiindex level,\n or the integer index if the level has no name\n", "issue": "[DOC] Misleading docstring parameters for convert utility function of dask_to_pd and vice versa\n#### Describe the issue linked to the documentation\r\nThe docstring of function `convert_dask_to_pandas` and `convert_pandas_to_dask` has been interchanged in terms of parameters and return object\r\n\r\n```python\r\ndef convert_dask_to_pandas(obj):\r\n \"\"\"Convert dask DataFrame to pandas DataFrame, preserving MultiIndex.\r\n\r\n Parameters\r\n ----------\r\n obj : pandas.DataFrame\r\n\r\n Returns\r\n -------\r\n dask DataFrame\r\n MultiIndex levels 0 .. -1 of X are converted to columns of name\r\n __index__[indexname], where indexname is name of multiindex level,\r\n or the integer index if the level has no name\r\n other columns and column names are identical to those of X\r\n \"\"\"\r\n```\r\n\r\nHere the type of parameter for the function should be `dask.DataFrame` while the return type should be `pandas.DataFrame`. 
There's similar issue in the docstring of `convert_pandas_to_dask`.\n", "before_files": [{"content": "# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Converter utilities between dask and pandas, with multiindex convention.\n\nConverts between:\npd.DataFrames with ordinary (single-level) index or pd.Multiindex, and\ndask DataFrame\n\nIf pd.DataFrame has ordinary index, converts using dask compute/from_pandas\n\nif pd.DataFrame has MultiIndex, converts and back-converts\nMultiIndex columns to DataFrame columns with the name:\n __index__[indexname], if level has a name indexname\n __index__[index_iloc], if level has no indexname and is index_iloc-th level\nindex is replaced by a string index where tuples are replaced with str coerced elements\n\"\"\"\nimport pandas as pd\n\nfrom sktime.datatypes._common import _req\nfrom sktime.datatypes._common import _ret as ret\n\n\ndef _is_mi_col(x):\n return isinstance(x, str) and x.startswith(\"__index__\")\n\n\ndef get_mi_cols(obj):\n \"\"\"Get multiindex cols from a dask object.\n\n Parameters\n ----------\n obj : dask DataFrame\n\n Returns\n -------\n list of pandas index elements\n all column index elements of obj that start with __index__\n i.e., columns that are interpreted as multiindex columns in the correspondence\n \"\"\"\n return [x for x in obj.columns if _is_mi_col(x)]\n\n\ndef convert_dask_to_pandas(obj):\n \"\"\"Convert dask DataFrame to pandas DataFrame, preserving MultiIndex.\n\n Parameters\n ----------\n obj : pandas.DataFrame\n\n Returns\n -------\n dask DataFrame\n MultiIndex levels 0 .. -1 of X are converted to columns of name\n __index__[indexname], where indexname is name of multiindex level,\n or the integer index if the level has no name\n other columns and column names are identical to those of X\n \"\"\"\n obj = obj.compute()\n\n def mi_name(x):\n return x.split(\"__index__\")[1]\n\n def mi_names(names):\n new_names = [mi_name(x) for x in names]\n for i, name in enumerate(new_names):\n if name == str(i):\n new_names[i] = None\n return new_names\n\n multi_cols = get_mi_cols(obj)\n\n # if has multi-index cols, move to pandas MultiIndex\n if len(multi_cols) > 0:\n obj = obj.set_index(multi_cols)\n\n names = obj.index.names\n new_names = mi_names(names)\n new_names = new_names\n\n obj.index.names = new_names\n\n return obj\n\n\ndef convert_pandas_to_dask(obj, npartitions=1, chunksize=None, sort=True):\n \"\"\"Convert pandas DataFrame to dask DataFrame, preserving MultiIndex.\n\n Parameters\n ----------\n obj : dask DataFrame\n npartitions : int or None, optional, default = 1\n npartitions passed to dask from_pandas when converting obj to dask\n chunksize : int or None, optional, default = None\n chunksize passed to dask from_pandas when converting obj to dask\n sort : bool, optional, default = True\n sort passed to dask from_pandas when converting obj to dask\n\n Returns\n -------\n pandas.DataFrame\n MultiIndex levels 0 .. 
-1 of X are converted to columns of name\n __index__[indexname], where indexname is name of multiindex level,\n or the integer index if the level has no name\n other columns and column names are identical to those of X\n \"\"\"\n from dask.dataframe import from_pandas\n\n def dask_mi_names(names):\n res = list(names).copy()\n for i, name in enumerate(names):\n if name is None:\n res[i] = str(i)\n return [f\"__index__{x}\" for x in res]\n\n if isinstance(obj.index, pd.MultiIndex):\n names = obj.index.names\n new_names = dask_mi_names(names)\n new_index = [str(x) for x in obj.index]\n\n obj = obj.copy()\n obj.index.names = new_names\n obj = obj.reset_index()\n obj.index = new_index\n\n obj = from_pandas(obj, npartitions=npartitions, chunksize=chunksize, sort=sort)\n\n return obj\n\n\ndef check_dask_frame(\n obj, return_metadata=False, var_name=\"obj\", freq_set_check=False, scitype=\"Series\"\n):\n \"\"\"Check dask frame, generic for sktime check format.\"\"\"\n import dask\n\n metadata = {}\n\n if not isinstance(obj, dask.dataframe.core.DataFrame):\n msg = f\"{var_name} must be a dask DataFrame, found {type(obj)}\"\n return ret(False, msg, None, return_metadata)\n\n # we now know obj is a dask DataFrame\n\n index_cols = get_mi_cols(obj)\n\n # check right number of cols depending on scitype\n if scitype == \"Series\":\n cols_msg = (\n f\"{var_name} must have exactly one index column, \"\n f\"found {len(index_cols)}, namely: {index_cols}\"\n )\n right_no_index_cols = len(index_cols) <= 1\n elif scitype == \"Panel\":\n cols_msg = (\n f\"{var_name} must have exactly two index columns, \"\n f\"found {len(index_cols)}, namely: {index_cols}\"\n )\n right_no_index_cols = len(index_cols) == 2\n elif scitype == \"Hierarchical\":\n cols_msg = (\n f\"{var_name} must have three or more index columns, \"\n f\"found {len(index_cols)}, namely: {index_cols}\"\n )\n right_no_index_cols = len(index_cols) >= 3\n else:\n return RuntimeError(\n 'scitype arg of check_dask_frame must be one of strings \"Series\", '\n f'\"Panel\", or \"Hierarchical\", but found {scitype}'\n )\n\n if not right_no_index_cols:\n # dask series should have at most one __index__ col\n return ret(False, cols_msg, None, return_metadata)\n\n if _req(\"is_empty\", return_metadata):\n metadata[\"is_empty\"] = len(obj.index) < 1 or len(obj.columns) < 1\n if _req(\"is_univariate\", return_metadata):\n metadata[\"is_univariate\"] = len(obj.columns) == 1\n if _req(\"n_features\", return_metadata):\n metadata[\"n_features\"] = len(obj.columns)\n if _req(\"feature_names\", return_metadata):\n metadata[\"feature_names\"] = obj.columns.to_list()\n\n # check that columns are unique\n if not obj.columns.is_unique:\n msg = f\"{var_name} must have unique column indices, but found {obj.columns}\"\n return ret(False, msg, None, return_metadata)\n\n # check whether the time index is of valid type\n # if not is_in_valid_index_types(index):\n # msg = (\n # f\"{type(index)} is not supported for {var_name}, use \"\n # f\"one of {VALID_INDEX_TYPES} or integer index instead.\"\n # )\n # return ret(False, msg, None, return_metadata)\n\n # Check time index is ordered in time\n if not obj.index.is_monotonic_increasing.compute():\n msg = (\n f\"The (time) index of {var_name} must be sorted \"\n f\"monotonically increasing, but found: {obj.index}\"\n )\n return ret(False, msg, None, return_metadata)\n\n if freq_set_check and isinstance(obj.index, pd.DatetimeIndex):\n if obj.index.freq is None:\n msg = f\"{var_name} has DatetimeIndex, but no freq attribute set.\"\n 
return ret(False, msg, None, return_metadata)\n\n # check whether index is equally spaced or if there are any nans\n # compute only if needed\n if _req(\"is_equally_spaced\", return_metadata):\n # todo: logic for equal spacing\n metadata[\"is_equally_spaced\"] = True\n if _req(\"has_nans\", return_metadata):\n metadata[\"has_nans\"] = obj.isnull().values.any().compute()\n\n if scitype in [\"Panel\", \"Hierarchical\"]:\n if _req(\"n_instances\", return_metadata):\n instance_cols = index_cols[:-1]\n metadata[\"n_instances\"] = len(obj[instance_cols].drop_duplicates())\n\n if scitype in [\"Hierarchical\"]:\n if _req(\"n_panels\", return_metadata):\n panel_cols = index_cols[:-2]\n metadata[\"n_panels\"] = len(obj[panel_cols].drop_duplicates())\n\n return ret(True, None, metadata, return_metadata)\n", "path": "sktime/datatypes/_adapter/dask_to_pd.py"}]} | 3,264 | 315 |
gh_patches_debug_8796 | rasdani/github-patches | git_diff | archlinux__archinstall-249 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: module 'pathlib' has no attribute 'Paht'
This is a serious typo in the code of luks.py
I tried to create an encrypted luks partitiom but end up with this error in the title
Please fix as soon as possible
</issue>
<code>
[start of archinstall/lib/luks.py]
1 import os
2 import shlex
3 import time
4 import pathlib
5 from .exceptions import *
6 from .general import *
7 from .disk import Partition
8 from .output import log, LOG_LEVELS
9 from .storage import storage
10
11 class luks2():
12 def __init__(self, partition, mountpoint, password, key_file=None, auto_unmount=False, *args, **kwargs):
13 self.password = password
14 self.partition = partition
15 self.mountpoint = mountpoint
16 self.args = args
17 self.kwargs = kwargs
18 self.key_file = key_file
19 self.auto_unmount = auto_unmount
20 self.filesystem = 'crypto_LUKS'
21 self.mapdev = None
22
23 def __enter__(self):
24 #if self.partition.allow_formatting:
25 # self.key_file = self.encrypt(self.partition, *self.args, **self.kwargs)
26 #else:
27 if not self.key_file:
28 self.key_file = f"/tmp/{os.path.basename(self.partition.path)}.disk_pw" # TODO: Make disk-pw-file randomly unique?
29
30 if type(self.password) != bytes:
31 self.password = bytes(self.password, 'UTF-8')
32
33 with open(self.key_file, 'wb') as fh:
34 fh.write(self.password)
35
36 return self.unlock(self.partition, self.mountpoint, self.key_file)
37
38 def __exit__(self, *args, **kwargs):
39 # TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager
40 if self.auto_unmount:
41 self.close()
42
43 if len(args) >= 2 and args[1]:
44 raise args[1]
45 return True
46
47 def encrypt(self, partition, password=None, key_size=512, hash_type='sha512', iter_time=10000, key_file=None):
48 if not self.partition.allow_formatting:
49 raise DiskError(f'Could not encrypt volume {self.partition} due to it having a formatting lock.')
50
51 log(f'Encrypting {partition} (This might take a while)', level=LOG_LEVELS.Info)
52
53 if not key_file:
54 if self.key_file:
55 key_file = self.key_file
56 else:
57 key_file = f"/tmp/{os.path.basename(self.partition.path)}.disk_pw" # TODO: Make disk-pw-file randomly unique?
58
59 if not password:
60 password = self.password
61
62 if type(password) != bytes:
63 password = bytes(password, 'UTF-8')
64
65 with open(key_file, 'wb') as fh:
66 fh.write(password)
67
68 cryptsetup_args = shlex.join([
69 '/usr/bin/cryptsetup',
70 '--batch-mode',
71 '--verbose',
72 '--type', 'luks2',
73 '--pbkdf', 'argon2i',
74 '--hash', hash_type,
75 '--key-size', str(key_size),
76 '--iter-time', str(iter_time),
77 '--key-file', os.path.abspath(key_file),
78 '--use-urandom',
79 'luksFormat', partition.path,
80 ])
81
82 try:
83 # Try to setup the crypt-device
84 cmd_handle = sys_command(cryptsetup_args)
85 except SysCallError as err:
86 if err.exit_code == 256:
87 log(f'{partition} is being used, trying to unmount and crypt-close the device and running one more attempt at encrypting the device.', level=LOG_LEVELS.Debug)
88 # Partition was in use, unmount it and try again
89 partition.unmount()
90
91 # Get crypt-information about the device by doing a reverse lookup starting with the partition path
92 # For instance: /dev/sda
93 devinfo = json.loads(b''.join(sys_command(f"lsblk --fs -J {partition.path}")).decode('UTF-8'))['blockdevices'][0]
94
95 # For each child (sub-partition/sub-device)
96 if len(children := devinfo.get('children', [])):
97 for child in children:
98 # Unmount the child location
99 if child_mountpoint := child.get('mountpoint', None):
100 log(f'Unmounting {child_mountpoint}', level=LOG_LEVELS.Debug)
101 sys_command(f"umount -R {child_mountpoint}")
102
103 # And close it if possible.
104 log(f"Closing crypt device {child['name']}", level=LOG_LEVELS.Debug)
105 sys_command(f"cryptsetup close {child['name']}")
106
107 # Then try again to set up the crypt-device
108 cmd_handle = sys_command(cryptsetup_args)
109 else:
110 raise err
111
112 if cmd_handle.exit_code != 0:
113 raise DiskError(f'Could not encrypt volume "{partition.path}": {cmd_output}')
114
115 return key_file
116
117 def unlock(self, partition, mountpoint, key_file):
118 """
119 Mounts a lukts2 compatible partition to a certain mountpoint.
120 Keyfile must be specified as there's no way to interact with the pw-prompt atm.
121
122 :param mountpoint: The name without absolute path, for instance "luksdev" will point to /dev/mapper/luksdev
123 :type mountpoint: str
124 """
125 from .disk import get_filesystem_type
126 if '/' in mountpoint:
127 os.path.basename(mountpoint) # TODO: Raise exception instead?
128
129 wait_timer = time.time()
130 while pathlib.Paht(partition.path).exists() is False and time.time() - wait_timer < 10:
131 time.sleep(0.025)
132
133 sys_command(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2')
134 if os.path.islink(f'/dev/mapper/{mountpoint}'):
135 self.mapdev = f'/dev/mapper/{mountpoint}'
136 unlocked_partition = Partition(self.mapdev, None, encrypted=True, filesystem=get_filesystem_type(self.mapdev), autodetect_filesystem=False)
137 unlocked_partition.allow_formatting = self.partition.allow_formatting
138 return unlocked_partition
139
140 def close(self, mountpoint=None):
141 if not mountpoint:
142 mountpoint = self.mapdev
143
144 sys_command(f'/usr/bin/cryptsetup close {self.mapdev}')
145 return os.path.islink(self.mapdev) is False
146
147 def format(self, path):
148 if (handle := sys_command(f"/usr/bin/cryptsetup -q -v luksErase {path}")).exit_code != 0:
149 raise DiskError(f'Could not format {path} with {self.filesystem} because: {b"".join(handle)}')
150
[end of archinstall/lib/luks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/archinstall/lib/luks.py b/archinstall/lib/luks.py
--- a/archinstall/lib/luks.py
+++ b/archinstall/lib/luks.py
@@ -127,7 +127,7 @@
os.path.basename(mountpoint) # TODO: Raise exception instead?
wait_timer = time.time()
- while pathlib.Paht(partition.path).exists() is False and time.time() - wait_timer < 10:
+ while pathlib.Path(partition.path).exists() is False and time.time() - wait_timer < 10:
time.sleep(0.025)
sys_command(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2')
| {"golden_diff": "diff --git a/archinstall/lib/luks.py b/archinstall/lib/luks.py\n--- a/archinstall/lib/luks.py\n+++ b/archinstall/lib/luks.py\n@@ -127,7 +127,7 @@\n \t\t\tos.path.basename(mountpoint) # TODO: Raise exception instead?\n \n \t\twait_timer = time.time()\n-\t\twhile pathlib.Paht(partition.path).exists() is False and time.time() - wait_timer < 10:\n+\t\twhile pathlib.Path(partition.path).exists() is False and time.time() - wait_timer < 10:\n \t\t\ttime.sleep(0.025)\n \n \t\tsys_command(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2')\n", "issue": "AttributeError: module 'pathlib' has no attribute 'Paht'\nThis is a serious typo in the code of luks.py\n\nI tried to create an encrypted luks partitiom but end up with this error in the title\n\nPlease fix as soon as possible\n\n", "before_files": [{"content": "import os\nimport shlex\nimport time\nimport pathlib\nfrom .exceptions import *\nfrom .general import *\nfrom .disk import Partition\nfrom .output import log, LOG_LEVELS\nfrom .storage import storage\n\nclass luks2():\n\tdef __init__(self, partition, mountpoint, password, key_file=None, auto_unmount=False, *args, **kwargs):\n\t\tself.password = password\n\t\tself.partition = partition\n\t\tself.mountpoint = mountpoint\n\t\tself.args = args\n\t\tself.kwargs = kwargs\n\t\tself.key_file = key_file\n\t\tself.auto_unmount = auto_unmount\n\t\tself.filesystem = 'crypto_LUKS'\n\t\tself.mapdev = None\n\n\tdef __enter__(self):\n\t\t#if self.partition.allow_formatting:\n\t\t#\tself.key_file = self.encrypt(self.partition, *self.args, **self.kwargs)\n\t\t#else:\n\t\tif not self.key_file:\n\t\t\tself.key_file = f\"/tmp/{os.path.basename(self.partition.path)}.disk_pw\" # TODO: Make disk-pw-file randomly unique?\n\t\t\n\t\tif type(self.password) != bytes:\n\t\t\tself.password = bytes(self.password, 'UTF-8')\n\n\t\twith open(self.key_file, 'wb') as fh:\n\t\t\tfh.write(self.password)\n\n\t\treturn self.unlock(self.partition, self.mountpoint, self.key_file)\n\n\tdef __exit__(self, *args, **kwargs):\n\t\t# TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager\n\t\tif self.auto_unmount:\n\t\t\tself.close()\n\n\t\tif len(args) >= 2 and args[1]:\n\t\t\traise args[1]\n\t\treturn True\n\n\tdef encrypt(self, partition, password=None, key_size=512, hash_type='sha512', iter_time=10000, key_file=None):\n\t\tif not self.partition.allow_formatting:\n\t\t\traise DiskError(f'Could not encrypt volume {self.partition} due to it having a formatting lock.')\n\n\t\tlog(f'Encrypting {partition} (This might take a while)', level=LOG_LEVELS.Info)\n\n\t\tif not key_file:\n\t\t\tif self.key_file:\n\t\t\t\tkey_file = self.key_file\n\t\t\telse:\n\t\t\t\tkey_file = f\"/tmp/{os.path.basename(self.partition.path)}.disk_pw\" # TODO: Make disk-pw-file randomly unique?\n\n\t\tif not password:\n\t\t\tpassword = self.password\n\n\t\tif type(password) != bytes:\n\t\t\tpassword = bytes(password, 'UTF-8')\n\n\t\twith open(key_file, 'wb') as fh:\n\t\t\tfh.write(password)\n\n\t\tcryptsetup_args = shlex.join([\n\t\t\t'/usr/bin/cryptsetup',\n\t\t\t'--batch-mode',\n\t\t\t'--verbose',\n\t\t\t'--type', 'luks2',\n\t\t\t'--pbkdf', 'argon2i',\n\t\t\t'--hash', hash_type,\n\t\t\t'--key-size', str(key_size),\n\t\t\t'--iter-time', str(iter_time),\n\t\t\t'--key-file', os.path.abspath(key_file),\n\t\t\t'--use-urandom',\n\t\t\t'luksFormat', partition.path,\n\t\t])\n\n\t\ttry:\n\t\t\t# Try to setup the crypt-device\n\t\t\tcmd_handle 
= sys_command(cryptsetup_args)\n\t\texcept SysCallError as err:\n\t\t\tif err.exit_code == 256:\n\t\t\t\tlog(f'{partition} is being used, trying to unmount and crypt-close the device and running one more attempt at encrypting the device.', level=LOG_LEVELS.Debug)\n\t\t\t\t# Partition was in use, unmount it and try again\n\t\t\t\tpartition.unmount()\n\n\t\t\t\t# Get crypt-information about the device by doing a reverse lookup starting with the partition path\n\t\t\t\t# For instance: /dev/sda\n\t\t\t\tdevinfo = json.loads(b''.join(sys_command(f\"lsblk --fs -J {partition.path}\")).decode('UTF-8'))['blockdevices'][0]\n\n\t\t\t\t# For each child (sub-partition/sub-device)\n\t\t\t\tif len(children := devinfo.get('children', [])):\n\t\t\t\t\tfor child in children:\n\t\t\t\t\t\t# Unmount the child location\n\t\t\t\t\t\tif child_mountpoint := child.get('mountpoint', None):\n\t\t\t\t\t\t\tlog(f'Unmounting {child_mountpoint}', level=LOG_LEVELS.Debug)\n\t\t\t\t\t\t\tsys_command(f\"umount -R {child_mountpoint}\")\n\n\t\t\t\t\t\t# And close it if possible.\n\t\t\t\t\t\tlog(f\"Closing crypt device {child['name']}\", level=LOG_LEVELS.Debug)\n\t\t\t\t\t\tsys_command(f\"cryptsetup close {child['name']}\")\n\n\t\t\t\t# Then try again to set up the crypt-device\n\t\t\t\tcmd_handle = sys_command(cryptsetup_args)\n\t\t\telse:\n\t\t\t\traise err\n\n\t\tif cmd_handle.exit_code != 0:\n\t\t\traise DiskError(f'Could not encrypt volume \"{partition.path}\": {cmd_output}')\n\t\n\t\treturn key_file\n\n\tdef unlock(self, partition, mountpoint, key_file):\n\t\t\"\"\"\n\t\tMounts a lukts2 compatible partition to a certain mountpoint.\n\t\tKeyfile must be specified as there's no way to interact with the pw-prompt atm.\n\n\t\t:param mountpoint: The name without absolute path, for instance \"luksdev\" will point to /dev/mapper/luksdev\n\t\t:type mountpoint: str\n\t\t\"\"\"\n\t\tfrom .disk import get_filesystem_type\n\t\tif '/' in mountpoint:\n\t\t\tos.path.basename(mountpoint) # TODO: Raise exception instead?\n\n\t\twait_timer = time.time()\n\t\twhile pathlib.Paht(partition.path).exists() is False and time.time() - wait_timer < 10:\n\t\t\ttime.sleep(0.025)\n\n\t\tsys_command(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2')\n\t\tif os.path.islink(f'/dev/mapper/{mountpoint}'):\n\t\t\tself.mapdev = f'/dev/mapper/{mountpoint}'\n\t\t\tunlocked_partition = Partition(self.mapdev, None, encrypted=True, filesystem=get_filesystem_type(self.mapdev), autodetect_filesystem=False)\n\t\t\tunlocked_partition.allow_formatting = self.partition.allow_formatting\n\t\t\treturn unlocked_partition\n\n\tdef close(self, mountpoint=None):\n\t\tif not mountpoint:\n\t\t\tmountpoint = self.mapdev\n\n\t\tsys_command(f'/usr/bin/cryptsetup close {self.mapdev}')\n\t\treturn os.path.islink(self.mapdev) is False\n\n\tdef format(self, path):\n\t\tif (handle := sys_command(f\"/usr/bin/cryptsetup -q -v luksErase {path}\")).exit_code != 0:\n\t\t\traise DiskError(f'Could not format {path} with {self.filesystem} because: {b\"\".join(handle)}')\n", "path": "archinstall/lib/luks.py"}]} | 2,410 | 176 |
gh_patches_debug_48760 | rasdani/github-patches | git_diff | pypa__virtualenv-1941 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Multiple processes competition on read/write update log
**Issue**
https://github.com/pypa/virtualenv/blob/0cd009b5a1338f66397f71c85a75f576a2f3eabf/src/virtualenv/app_data/via_disk_folder.py#L129-L141
When the file is opened for writing in another process, the JSON content can't be parsed, then the file will be removed. However, on Windows the removal will fail since the file is occupied by anther process.
**Environment**
Provide at least:
- OS: Windows 10
- ``pip list`` of the host python where ``virtualenv`` is installed:
```console
appdirs 1.4.4
argcomplete 1.11.1
backcall 0.1.0
better-exceptions 0.2.2
certifi 2020.4.5.1
chardet 3.0.4
colorama 0.4.3
decorator 4.4.2
distlib 0.3.1
filelock 3.0.12
idna 2.10
ipython 7.14.0
ipython-genutils 0.2.0
jedi 0.17.0
naipyext 0.5.2
parso 0.7.0
pickleshare 0.7.5
pip 20.1.1
pipenv 2020.8.13.dev0 d:\workspace\pipenv
pipx 0.15.1.3
prompt-toolkit 3.0.5
Pygments 2.6.1
requests 2.24.0
setuptools 46.4.0
six 1.14.0
traitlets 4.3.3
urllib3 1.25.10
userpath 1.3.0
virtualenv 20.0.31
virtualenv-clone 0.5.4
wcwidth 0.1.9
wheel 0.34.2
```
**Output of the virtual environment creation**
Make sure to run the creation with `-vvv --with-traceback`:
```pytb
Traceback (most recent call last):
File "C:\Users\runneradmin\.virtualenvs\pipenv-6Kr0DpZ2\lib\site-packages\virtualenv\seed\embed\via_app_data\via_app_data.py", line 94, in _get
do_periodic_update=self.periodic_update,
File "C:\Users\runneradmin\.virtualenvs\pipenv-6Kr0DpZ2\lib\site-packages\virtualenv\seed\wheels\acquire.py", line 25, in get_wheel
wheel = from_bundle(distribution, version, for_py_version, search_dirs, app_data, do_periodic_update)
File "C:\Users\runneradmin\.virtualenvs\pipenv-6Kr0DpZ2\lib\site-packages\virtualenv\seed\wheels\bundle.py", line 20, in from_bundle
wheel = periodic_update(distribution, for_py_version, wheel, search_dirs, app_data, do_periodic_update)
File "C:\Users\runneradmin\.virtualenvs\pipenv-6Kr0DpZ2\lib\site-packages\virtualenv\seed\wheels\periodic_update.py", line 41, in periodic_update
handle_auto_update(distribution, for_py_version, wheel, search_dirs, app_data)
File "C:\Users\runneradmin\.virtualenvs\pipenv-6Kr0DpZ2\lib\site-packages\virtualenv\seed\wheels\periodic_update.py", line 62, in handle_auto_update
u_log = UpdateLog.from_dict(embed_update_log.read())
File "C:\Users\runneradmin\.virtualenvs\pipenv-6Kr0DpZ2\lib\site-packages\virtualenv\app_data\via_disk_folder.py", line 140, in read
self.remove()
File "C:\Users\runneradmin\.virtualenvs\pipenv-6Kr0DpZ2\lib\site-packages\virtualenv\app_data\via_disk_folder.py", line 144, in remove
self.file.unlink()
File "C:\hostedtoolcache\windows\Python\3.6.8\x64\lib\pathlib.py", line 1284, in unlink
self._accessor.unlink(self)
File "C:\hostedtoolcache\windows\Python\3.6.8\x64\lib\pathlib.py", line 387, in wrapped
return strfunc(str(pathobj), *args)
PermissionError: [WinError 32] The process cannot access the file because it is being used by another process: 'C:\\Users\\runneradmin\\AppData\\Local\\pypa\\virtualenv\\wheel\\3.8\\embed\\1\\wheel.json'
PermissionError(13, 'The process cannot access the file because it is being used by another process')
RuntimeError: seed failed due to failing to download wheels wheel
```
To fix the issue, I prefer to change the writing into **atomic**, that is, before the writing is done, the content should be kept.
If that is an acceptable approach I can send a PR.
</issue>
<code>
[start of src/virtualenv/app_data/via_disk_folder.py]
1 # -*- coding: utf-8 -*-
2 """
3 A rough layout of the current storage goes as:
4
5 virtualenv-app-data
6 ├── py - <version> <cache information about python interpreters>
7 │ └── *.json/lock
8 ├── wheel <cache wheels used for seeding>
9 │ ├── house
10 │ │ └── *.whl <wheels downloaded go here>
11 │ └── <python major.minor> -> 3.9
12 │ ├── img-<version>
13 │ │ └── image
14 │ │ └── <install class> -> CopyPipInstall / SymlinkPipInstall
15 │ │ └── <wheel name> -> pip-20.1.1-py2.py3-none-any
16 │ └── embed
17 │ └── 1
18 │ └── *.json -> for every distribution contains data about newer embed versions and releases
19 └─── unzip <in zip app we cannot refer to some internal files, so first extract them>
20 └── <virtualenv version>
21 ├── py_info.py
22 ├── debug.py
23 └── _virtualenv.py
24 """
25 from __future__ import absolute_import, unicode_literals
26
27 import json
28 import logging
29 from abc import ABCMeta
30 from contextlib import contextmanager
31 from hashlib import sha256
32
33 import six
34
35 from virtualenv.util.lock import ReentrantFileLock
36 from virtualenv.util.path import safe_delete
37 from virtualenv.util.six import ensure_text
38 from virtualenv.util.zipapp import extract
39 from virtualenv.version import __version__
40
41 from .base import AppData, ContentStore
42
43
44 class AppDataDiskFolder(AppData):
45 """
46 Store the application data on the disk within a folder layout.
47 """
48
49 def __init__(self, folder):
50 self.lock = ReentrantFileLock(folder)
51
52 def __repr__(self):
53 return "{}".format(self.lock.path)
54
55 @property
56 def transient(self):
57 return False
58
59 def reset(self):
60 logging.debug("reset app data folder %s", self.lock.path)
61 safe_delete(self.lock.path)
62
63 def close(self):
64 """do nothing"""
65
66 @contextmanager
67 def locked(self, path):
68 path_lock = self.lock / path
69 with path_lock:
70 yield path_lock.path
71
72 @contextmanager
73 def extract(self, path, to_folder):
74 if to_folder is not None:
75 root = ReentrantFileLock(to_folder())
76 else:
77 root = self.lock / "unzip" / __version__
78 with root.lock_for_key(path.name):
79 dest = root.path / path.name
80 if not dest.exists():
81 extract(path, dest)
82 yield dest
83
84 @property
85 def py_info_at(self):
86 return self.lock / "py_info" / "1"
87
88 def py_info(self, path):
89 return PyInfoStoreDisk(self.py_info_at, path)
90
91 def py_info_clear(self):
92 """"""
93 py_info_folder = self.py_info_at
94 with py_info_folder:
95 for filename in py_info_folder.path.iterdir():
96 if filename.suffix == ".json":
97 with py_info_folder.lock_for_key(filename.stem):
98 if filename.exists():
99 filename.unlink()
100
101 def embed_update_log(self, distribution, for_py_version):
102 return EmbedDistributionUpdateStoreDisk(self.lock / "wheel" / for_py_version / "embed" / "1", distribution)
103
104 @property
105 def house(self):
106 path = self.lock.path / "wheel" / "house"
107 path.mkdir(parents=True, exist_ok=True)
108 return path
109
110 def wheel_image(self, for_py_version, name):
111 return self.lock.path / "wheel" / for_py_version / "image" / "1" / name
112
113
114 @six.add_metaclass(ABCMeta)
115 class JSONStoreDisk(ContentStore):
116 def __init__(self, in_folder, key, msg, msg_args):
117 self.in_folder = in_folder
118 self.key = key
119 self.msg = msg
120 self.msg_args = msg_args + (self.file,)
121
122 @property
123 def file(self):
124 return self.in_folder.path / "{}.json".format(self.key)
125
126 def exists(self):
127 return self.file.exists()
128
129 def read(self):
130 data, bad_format = None, False
131 try:
132 data = json.loads(self.file.read_text())
133 logging.debug("got {} from %s".format(self.msg), *self.msg_args)
134 return data
135 except ValueError:
136 bad_format = True
137 except Exception: # noqa
138 pass
139 if bad_format:
140 self.remove()
141 return None
142
143 def remove(self):
144 self.file.unlink()
145 logging.debug("removed {} at %s".format(self.msg), *self.msg_args)
146
147 @contextmanager
148 def locked(self):
149 with self.in_folder.lock_for_key(self.key):
150 yield
151
152 def write(self, content):
153 folder = self.file.parent
154 try:
155 folder.mkdir(parents=True, exist_ok=True)
156 except OSError:
157 pass
158 self.file.write_text(ensure_text(json.dumps(content, sort_keys=True, indent=2)))
159 logging.debug("wrote {} at %s".format(self.msg), *self.msg_args)
160
161
162 class PyInfoStoreDisk(JSONStoreDisk):
163 def __init__(self, in_folder, path):
164 key = sha256(str(path).encode("utf-8") if six.PY3 else str(path)).hexdigest()
165 super(PyInfoStoreDisk, self).__init__(in_folder, key, "python info of %s", (path,))
166
167
168 class EmbedDistributionUpdateStoreDisk(JSONStoreDisk):
169 def __init__(self, in_folder, distribution):
170 super(EmbedDistributionUpdateStoreDisk, self).__init__(
171 in_folder,
172 distribution,
173 "embed update of distribution %s",
174 (distribution,),
175 )
176
[end of src/virtualenv/app_data/via_disk_folder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/virtualenv/app_data/via_disk_folder.py b/src/virtualenv/app_data/via_disk_folder.py
--- a/src/virtualenv/app_data/via_disk_folder.py
+++ b/src/virtualenv/app_data/via_disk_folder.py
@@ -137,7 +137,10 @@
except Exception: # noqa
pass
if bad_format:
- self.remove()
+ try:
+ self.remove()
+ except OSError: # reading and writing on the same file may cause race on multiple processes
+ pass
return None
def remove(self):
| {"golden_diff": "diff --git a/src/virtualenv/app_data/via_disk_folder.py b/src/virtualenv/app_data/via_disk_folder.py\n--- a/src/virtualenv/app_data/via_disk_folder.py\n+++ b/src/virtualenv/app_data/via_disk_folder.py\n@@ -137,7 +137,10 @@\n except Exception: # noqa\n pass\n if bad_format:\n- self.remove()\n+ try:\n+ self.remove()\n+ except OSError: # reading and writing on the same file may cause race on multiple processes\n+ pass\n return None\n \n def remove(self):\n", "issue": "Multiple processes competition on read/write update log\n**Issue**\r\n\r\nhttps://github.com/pypa/virtualenv/blob/0cd009b5a1338f66397f71c85a75f576a2f3eabf/src/virtualenv/app_data/via_disk_folder.py#L129-L141\r\n\r\nWhen the file is opened for writing in another process, the JSON content can't be parsed, then the file will be removed. However, on Windows the removal will fail since the file is occupied by anther process.\r\n\r\n**Environment**\r\n\r\nProvide at least:\r\n- OS: Windows 10\r\n- ``pip list`` of the host python where ``virtualenv`` is installed:\r\n\r\n ```console\r\n appdirs 1.4.4\r\n argcomplete 1.11.1\r\n backcall 0.1.0\r\n better-exceptions 0.2.2\r\n certifi 2020.4.5.1\r\n chardet 3.0.4\r\n colorama 0.4.3\r\n decorator 4.4.2\r\n distlib 0.3.1\r\n filelock 3.0.12\r\n idna 2.10\r\n ipython 7.14.0\r\n ipython-genutils 0.2.0\r\n jedi 0.17.0\r\n naipyext 0.5.2\r\n parso 0.7.0\r\n pickleshare 0.7.5\r\n pip 20.1.1\r\n pipenv 2020.8.13.dev0 d:\\workspace\\pipenv\r\n pipx 0.15.1.3\r\n prompt-toolkit 3.0.5\r\n Pygments 2.6.1\r\n requests 2.24.0\r\n setuptools 46.4.0\r\n six 1.14.0\r\n traitlets 4.3.3\r\n urllib3 1.25.10\r\n userpath 1.3.0\r\n virtualenv 20.0.31\r\n virtualenv-clone 0.5.4\r\n wcwidth 0.1.9\r\n wheel 0.34.2\r\n ```\r\n\r\n**Output of the virtual environment creation**\r\n\r\nMake sure to run the creation with `-vvv --with-traceback`:\r\n\r\n```pytb\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\runneradmin\\.virtualenvs\\pipenv-6Kr0DpZ2\\lib\\site-packages\\virtualenv\\seed\\embed\\via_app_data\\via_app_data.py\", line 94, in _get\r\n do_periodic_update=self.periodic_update,\r\n File \"C:\\Users\\runneradmin\\.virtualenvs\\pipenv-6Kr0DpZ2\\lib\\site-packages\\virtualenv\\seed\\wheels\\acquire.py\", line 25, in get_wheel\r\n wheel = from_bundle(distribution, version, for_py_version, search_dirs, app_data, do_periodic_update)\r\n File \"C:\\Users\\runneradmin\\.virtualenvs\\pipenv-6Kr0DpZ2\\lib\\site-packages\\virtualenv\\seed\\wheels\\bundle.py\", line 20, in from_bundle\r\n wheel = periodic_update(distribution, for_py_version, wheel, search_dirs, app_data, do_periodic_update)\r\n File \"C:\\Users\\runneradmin\\.virtualenvs\\pipenv-6Kr0DpZ2\\lib\\site-packages\\virtualenv\\seed\\wheels\\periodic_update.py\", line 41, in periodic_update\r\n handle_auto_update(distribution, for_py_version, wheel, search_dirs, app_data)\r\n File \"C:\\Users\\runneradmin\\.virtualenvs\\pipenv-6Kr0DpZ2\\lib\\site-packages\\virtualenv\\seed\\wheels\\periodic_update.py\", line 62, in handle_auto_update\r\n u_log = UpdateLog.from_dict(embed_update_log.read())\r\n File \"C:\\Users\\runneradmin\\.virtualenvs\\pipenv-6Kr0DpZ2\\lib\\site-packages\\virtualenv\\app_data\\via_disk_folder.py\", line 140, in read\r\n self.remove()\r\n File \"C:\\Users\\runneradmin\\.virtualenvs\\pipenv-6Kr0DpZ2\\lib\\site-packages\\virtualenv\\app_data\\via_disk_folder.py\", line 144, in remove\r\n self.file.unlink()\r\n File \"C:\\hostedtoolcache\\windows\\Python\\3.6.8\\x64\\lib\\pathlib.py\", line 1284, in unlink\r\n 
self._accessor.unlink(self)\r\n File \"C:\\hostedtoolcache\\windows\\Python\\3.6.8\\x64\\lib\\pathlib.py\", line 387, in wrapped\r\n return strfunc(str(pathobj), *args)\r\nPermissionError: [WinError 32] The process cannot access the file because it is being used by another process: 'C:\\\\Users\\\\runneradmin\\\\AppData\\\\Local\\\\pypa\\\\virtualenv\\\\wheel\\\\3.8\\\\embed\\\\1\\\\wheel.json'\r\nPermissionError(13, 'The process cannot access the file because it is being used by another process')\r\nRuntimeError: seed failed due to failing to download wheels wheel\r\n```\r\n\r\nTo fix the issue, I prefer to change the writing into **atomic**, that is, before the writing is done, the content should be kept.\r\n\r\nIf that is an acceptable approach I can send a PR.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nA rough layout of the current storage goes as:\n\nvirtualenv-app-data\n\u251c\u2500\u2500 py - <version> <cache information about python interpreters>\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 *.json/lock\n\u251c\u2500\u2500 wheel <cache wheels used for seeding>\n\u2502 \u00a0 \u251c\u2500\u2500 house\n\u2502 \u2502 \u2514\u2500\u2500 *.whl <wheels downloaded go here>\n\u2502 \u2514\u2500\u2500 <python major.minor> -> 3.9\n\u2502 \u251c\u2500\u2500 img-<version>\n\u2502 \u2502 \u2514\u2500\u2500 image\n\u2502 \u2502 \u2514\u2500\u2500 <install class> -> CopyPipInstall / SymlinkPipInstall\n\u2502 \u2502 \u2514\u2500\u2500 <wheel name> -> pip-20.1.1-py2.py3-none-any\n\u2502 \u2514\u2500\u2500 embed\n\u2502 \u2514\u2500\u2500 1\n\u2502 \u2514\u2500\u2500 *.json -> for every distribution contains data about newer embed versions and releases\n\u2514\u2500\u2500\u2500 unzip <in zip app we cannot refer to some internal files, so first extract them>\n \u2514\u2500\u2500 <virtualenv version>\n \u251c\u2500\u2500 py_info.py\n \u251c\u2500\u2500 debug.py\n \u2514\u2500\u2500 _virtualenv.py\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport json\nimport logging\nfrom abc import ABCMeta\nfrom contextlib import contextmanager\nfrom hashlib import sha256\n\nimport six\n\nfrom virtualenv.util.lock import ReentrantFileLock\nfrom virtualenv.util.path import safe_delete\nfrom virtualenv.util.six import ensure_text\nfrom virtualenv.util.zipapp import extract\nfrom virtualenv.version import __version__\n\nfrom .base import AppData, ContentStore\n\n\nclass AppDataDiskFolder(AppData):\n \"\"\"\n Store the application data on the disk within a folder layout.\n \"\"\"\n\n def __init__(self, folder):\n self.lock = ReentrantFileLock(folder)\n\n def __repr__(self):\n return \"{}\".format(self.lock.path)\n\n @property\n def transient(self):\n return False\n\n def reset(self):\n logging.debug(\"reset app data folder %s\", self.lock.path)\n safe_delete(self.lock.path)\n\n def close(self):\n \"\"\"do nothing\"\"\"\n\n @contextmanager\n def locked(self, path):\n path_lock = self.lock / path\n with path_lock:\n yield path_lock.path\n\n @contextmanager\n def extract(self, path, to_folder):\n if to_folder is not None:\n root = ReentrantFileLock(to_folder())\n else:\n root = self.lock / \"unzip\" / __version__\n with root.lock_for_key(path.name):\n dest = root.path / path.name\n if not dest.exists():\n extract(path, dest)\n yield dest\n\n @property\n def py_info_at(self):\n return self.lock / \"py_info\" / \"1\"\n\n def py_info(self, path):\n return PyInfoStoreDisk(self.py_info_at, path)\n\n def py_info_clear(self):\n \"\"\"\"\"\"\n py_info_folder = self.py_info_at\n with 
py_info_folder:\n for filename in py_info_folder.path.iterdir():\n if filename.suffix == \".json\":\n with py_info_folder.lock_for_key(filename.stem):\n if filename.exists():\n filename.unlink()\n\n def embed_update_log(self, distribution, for_py_version):\n return EmbedDistributionUpdateStoreDisk(self.lock / \"wheel\" / for_py_version / \"embed\" / \"1\", distribution)\n\n @property\n def house(self):\n path = self.lock.path / \"wheel\" / \"house\"\n path.mkdir(parents=True, exist_ok=True)\n return path\n\n def wheel_image(self, for_py_version, name):\n return self.lock.path / \"wheel\" / for_py_version / \"image\" / \"1\" / name\n\n\[email protected]_metaclass(ABCMeta)\nclass JSONStoreDisk(ContentStore):\n def __init__(self, in_folder, key, msg, msg_args):\n self.in_folder = in_folder\n self.key = key\n self.msg = msg\n self.msg_args = msg_args + (self.file,)\n\n @property\n def file(self):\n return self.in_folder.path / \"{}.json\".format(self.key)\n\n def exists(self):\n return self.file.exists()\n\n def read(self):\n data, bad_format = None, False\n try:\n data = json.loads(self.file.read_text())\n logging.debug(\"got {} from %s\".format(self.msg), *self.msg_args)\n return data\n except ValueError:\n bad_format = True\n except Exception: # noqa\n pass\n if bad_format:\n self.remove()\n return None\n\n def remove(self):\n self.file.unlink()\n logging.debug(\"removed {} at %s\".format(self.msg), *self.msg_args)\n\n @contextmanager\n def locked(self):\n with self.in_folder.lock_for_key(self.key):\n yield\n\n def write(self, content):\n folder = self.file.parent\n try:\n folder.mkdir(parents=True, exist_ok=True)\n except OSError:\n pass\n self.file.write_text(ensure_text(json.dumps(content, sort_keys=True, indent=2)))\n logging.debug(\"wrote {} at %s\".format(self.msg), *self.msg_args)\n\n\nclass PyInfoStoreDisk(JSONStoreDisk):\n def __init__(self, in_folder, path):\n key = sha256(str(path).encode(\"utf-8\") if six.PY3 else str(path)).hexdigest()\n super(PyInfoStoreDisk, self).__init__(in_folder, key, \"python info of %s\", (path,))\n\n\nclass EmbedDistributionUpdateStoreDisk(JSONStoreDisk):\n def __init__(self, in_folder, distribution):\n super(EmbedDistributionUpdateStoreDisk, self).__init__(\n in_folder,\n distribution,\n \"embed update of distribution %s\",\n (distribution,),\n )\n", "path": "src/virtualenv/app_data/via_disk_folder.py"}]} | 3,535 | 135 |
gh_patches_debug_28668 | rasdani/github-patches | git_diff | dotkom__onlineweb4-1888 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot save user profile with an empty zip code
## What kind of an issue is this?
- [x] Bug report
## What is the expected behaviour?
That you can save the form and receive guidance without having to input a ZIP code.
## What is the current behaviour?
If you don't have a ZIP code in the field, the form doesn't validate and you get a runtime error, which doesn't tell the user why it didn't work.
## How do you reproduce this problem?
Go to my profile, empty the ZIP code field and save.
</issue>
<code>
[start of apps/profiles/forms.py]
1 # -*- coding: utf-8 -*-
2 import re
3
4 from django import forms
5 from django.contrib import auth
6 from django.utils.translation import ugettext as _
7
8 from apps.authentication.models import OnlineUser, Position
9 from apps.profiles.models import Privacy
10
11
12 class ProfileForm(forms.ModelForm):
13 class Meta(object):
14 model = OnlineUser
15
16 fields = [
17 'nickname',
18 'website',
19 'phone_number',
20 'address',
21 'zip_code',
22 'allergies',
23 'compiled',
24 'bio',
25 'gender',
26 'github',
27 'linkedin'
28 ]
29 widgets = {
30 'allergies': forms.Textarea(attrs={'id': 'allergies'}),
31 'gender': forms.Select(attrs={'class': 'form-control', 'id': 'gender'}),
32 'bio': forms.Textarea(attrs={'id': 'bio'}),
33 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}),
34 }
35
36 def clean(self):
37 super(ProfileForm, self).clean()
38
39 cleaned_data = self.cleaned_data
40
41 # ZIP code digits only
42 zip_code = cleaned_data['zip_code']
43 if len(zip_code) != 0 and not re.match(r'\d{4}', zip_code):
44 self._errors['zip_code'] = self.error_class([_("Postnummer må bestå av fire siffer.")])
45
46 return cleaned_data
47
48
49 class PrivacyForm(forms.ModelForm):
50 class Meta(object):
51 model = Privacy
52 exclude = ['user', 'expose_nickname']
53
54
55 class MailSettingsForm(forms.ModelForm):
56 class Meta(object):
57 model = OnlineUser
58 fields = ['infomail', ]
59
60
61 class PositionForm(forms.ModelForm):
62 class Meta(object):
63 model = Position
64 exclude = ['user']
65 widgets = {
66 'committee': forms.Select(attrs={'class': 'form-control'}),
67 'position': forms.Select(attrs={'class': 'form-control'}),
68 }
69
70 def clean(self):
71 super(PositionForm, self).clean()
72
73 range_compiler = re.compile(r'\d{4}-\d{4}')
74 year_range = self.cleaned_data['period']
75
76 # If it doesn't match the format YYYY-YYYY
77 if not range_compiler.match(year_range):
78 self._errors['period'] = self.error_class(
79 [_('Feil format. Dobbelsjekk at input er på formatet YYYY-YYYY.')]
80 )
81 return self.cleaned_data
82
83 years = year_range.split('-')
84
85 # If somewhat they fucked up input, we don't want None-shit after the split.
86 if not years[0] or not years[1]:
87 self._errors['period'] = self.error_class([_('Feil format. Dobbelsjekk input.')])
88 return self.cleaned_data
89
90 # If first year is larger than latter, or the diff is more than one, fail.
91 if (int(years[0]) > int(years[1])) or (int(years[1]) - int(years[0])) > 1:
92 self._errors['period'] = self.error_class([_('Ikke gyldig års-intervall. Bare ett år er tillat.')])
93
94 return self.cleaned_data
95
96
97 class MembershipSettingsForm(forms.ModelForm):
98 def __init__(self, *args, **kwargs):
99 super(MembershipSettingsForm, self).__init__(*args, **kwargs)
100 self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'
101
102 class Meta(object):
103 model = OnlineUser
104 fields = ['field_of_study', 'started_date']
105
106 widgets = {
107 'started_date': forms.TextInput(attrs={'placeholder': 'YYYY-MM-DD'}),
108 }
109
110
111 class InternalServicesForm(forms.Form):
112 ow4_password = forms.CharField(widget=forms.PasswordInput(), label=_(u"Online passord"))
113 services_password = forms.CharField(widget=forms.PasswordInput(), label=_(u"Ønsket service passord"))
114 current_user = None
115
116 def clean(self):
117 super(InternalServicesForm, self).clean()
118 if self.is_valid():
119 cleaned_data = self.cleaned_data
120
121 # User object relation here
122 user = auth.authenticate(username=self.current_user.username, password=cleaned_data['ow4_password'])
123
124 if user is None or user.id != self.current_user.id:
125 self._errors['ow4_password'] = self.error_class([_(u"Passordet er ikke korrekt.")])
126
127 return cleaned_data
128
[end of apps/profiles/forms.py]
[start of apps/authentication/templatetags/gravatar_url_resolver.py]
1 import hashlib
2 import urllib
3
4 from django import template
5 from django.conf import settings
6
7 register = template.Library()
8
9
10 @register.assignment_tag(takes_context=True)
11 def gravatar_url(context, user, size):
12 prefix = "https://" if context['request'].is_secure() else "http://"
13 default = "%s%s%s_%s.png" % (
14 prefix,
15 context['request'].META['HTTP_HOST'],
16 settings.DEFAULT_PROFILE_PICTURE_PREFIX,
17 user.gender
18 )
19
20 grav_url = "https://www.gravatar.com/avatar/" + hashlib.md5(user.email.encode()).hexdigest() + "?"
21 grav_url += urllib.parse.urlencode({'d': default, 's': str(size)})
22
23 return grav_url
24
[end of apps/authentication/templatetags/gravatar_url_resolver.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/authentication/templatetags/gravatar_url_resolver.py b/apps/authentication/templatetags/gravatar_url_resolver.py
--- a/apps/authentication/templatetags/gravatar_url_resolver.py
+++ b/apps/authentication/templatetags/gravatar_url_resolver.py
@@ -12,7 +12,7 @@
prefix = "https://" if context['request'].is_secure() else "http://"
default = "%s%s%s_%s.png" % (
prefix,
- context['request'].META['HTTP_HOST'],
+ context['request'].META.get('HTTP_HOST', 'localhost'),
settings.DEFAULT_PROFILE_PICTURE_PREFIX,
user.gender
)
diff --git a/apps/profiles/forms.py b/apps/profiles/forms.py
--- a/apps/profiles/forms.py
+++ b/apps/profiles/forms.py
@@ -8,6 +8,8 @@
from apps.authentication.models import OnlineUser, Position
from apps.profiles.models import Privacy
+ZIP_CODE_VALIDATION_ERROR = "Postnummer må bestå av fire siffer."
+
class ProfileForm(forms.ModelForm):
class Meta(object):
@@ -33,17 +35,10 @@
'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}),
}
- def clean(self):
- super(ProfileForm, self).clean()
-
- cleaned_data = self.cleaned_data
-
- # ZIP code digits only
- zip_code = cleaned_data['zip_code']
- if len(zip_code) != 0 and not re.match(r'\d{4}', zip_code):
- self._errors['zip_code'] = self.error_class([_("Postnummer må bestå av fire siffer.")])
-
- return cleaned_data
+ def clean_zip_code(self):
+ zip_code = self.cleaned_data['zip_code']
+ if zip_code and len(zip_code) != 0 and not re.match(r'\d{4}', zip_code):
+ self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR)
class PrivacyForm(forms.ModelForm):
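The forms part of the patch above moves the ZIP-code check into Django's per-field `clean_<field>()` hook and reports problems with `add_error()` rather than raising from `clean()`. A compact, self-contained sketch of that pattern (illustrative only — the form, field, and message below are invented, not taken from the repository):

```python
# Illustrative sketch of Django's per-field validation hook (not code from the repository).
import re

from django import forms


class ExampleProfileForm(forms.Form):
    zip_code = forms.CharField(required=False)

    def clean_zip_code(self):
        # Django calls clean_<fieldname>() for each field during form.is_valid().
        zip_code = self.cleaned_data.get("zip_code")
        if zip_code and not re.match(r"\d{4}", zip_code):
            # add_error() attaches a field-level error instead of raising from clean(),
            # so an empty or missing value never trips the length/regex check.
            self.add_error("zip_code", "Postal code must be four digits.")
        return zip_code
```

With `required=False`, an empty value short-circuits the check, so the form validates cleanly instead of hitting a runtime error — the behaviour the issue asks for.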
| {"golden_diff": "diff --git a/apps/authentication/templatetags/gravatar_url_resolver.py b/apps/authentication/templatetags/gravatar_url_resolver.py\n--- a/apps/authentication/templatetags/gravatar_url_resolver.py\n+++ b/apps/authentication/templatetags/gravatar_url_resolver.py\n@@ -12,7 +12,7 @@\n prefix = \"https://\" if context['request'].is_secure() else \"http://\"\n default = \"%s%s%s_%s.png\" % (\n prefix,\n- context['request'].META['HTTP_HOST'],\n+ context['request'].META.get('HTTP_HOST', 'localhost'),\n settings.DEFAULT_PROFILE_PICTURE_PREFIX,\n user.gender\n )\ndiff --git a/apps/profiles/forms.py b/apps/profiles/forms.py\n--- a/apps/profiles/forms.py\n+++ b/apps/profiles/forms.py\n@@ -8,6 +8,8 @@\n from apps.authentication.models import OnlineUser, Position\n from apps.profiles.models import Privacy\n \n+ZIP_CODE_VALIDATION_ERROR = \"Postnummer m\u00e5 best\u00e5 av fire siffer.\"\n+\n \n class ProfileForm(forms.ModelForm):\n class Meta(object):\n@@ -33,17 +35,10 @@\n 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}),\n }\n \n- def clean(self):\n- super(ProfileForm, self).clean()\n-\n- cleaned_data = self.cleaned_data\n-\n- # ZIP code digits only\n- zip_code = cleaned_data['zip_code']\n- if len(zip_code) != 0 and not re.match(r'\\d{4}', zip_code):\n- self._errors['zip_code'] = self.error_class([_(\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n-\n- return cleaned_data\n+ def clean_zip_code(self):\n+ zip_code = self.cleaned_data['zip_code']\n+ if zip_code and len(zip_code) != 0 and not re.match(r'\\d{4}', zip_code):\n+ self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR)\n \n \n class PrivacyForm(forms.ModelForm):\n", "issue": "Cannot save user profile with an empty zip code\n## What kind of an issue is this?\r\n\r\n- [x] Bug report\r\n\r\n## What is the expected behaviour?\r\n\r\nThat you can save the form and receive guidance without having to input a ZIP code.\r\n\r\n## What is the current behaviour?\r\n\r\nIf you don't have a ZIP code in the field the form doesn't validate and you get a runtime error, which doesn't tell the user why it didn't work.\r\n\r\n## How do you reproduce this problem? 
\r\n\r\nGo to my profile, empty the ZIP code field and save.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport re\n\nfrom django import forms\nfrom django.contrib import auth\nfrom django.utils.translation import ugettext as _\n\nfrom apps.authentication.models import OnlineUser, Position\nfrom apps.profiles.models import Privacy\n\n\nclass ProfileForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n\n fields = [\n 'nickname',\n 'website',\n 'phone_number',\n 'address',\n 'zip_code',\n 'allergies',\n 'compiled',\n 'bio',\n 'gender',\n 'github',\n 'linkedin'\n ]\n widgets = {\n 'allergies': forms.Textarea(attrs={'id': 'allergies'}),\n 'gender': forms.Select(attrs={'class': 'form-control', 'id': 'gender'}),\n 'bio': forms.Textarea(attrs={'id': 'bio'}),\n 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}),\n }\n\n def clean(self):\n super(ProfileForm, self).clean()\n\n cleaned_data = self.cleaned_data\n\n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n if len(zip_code) != 0 and not re.match(r'\\d{4}', zip_code):\n self._errors['zip_code'] = self.error_class([_(\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n\n return cleaned_data\n\n\nclass PrivacyForm(forms.ModelForm):\n class Meta(object):\n model = Privacy\n exclude = ['user', 'expose_nickname']\n\n\nclass MailSettingsForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n fields = ['infomail', ]\n\n\nclass PositionForm(forms.ModelForm):\n class Meta(object):\n model = Position\n exclude = ['user']\n widgets = {\n 'committee': forms.Select(attrs={'class': 'form-control'}),\n 'position': forms.Select(attrs={'class': 'form-control'}),\n }\n\n def clean(self):\n super(PositionForm, self).clean()\n\n range_compiler = re.compile(r'\\d{4}-\\d{4}')\n year_range = self.cleaned_data['period']\n\n # If it doesn't match the format YYYY-YYYY\n if not range_compiler.match(year_range):\n self._errors['period'] = self.error_class(\n [_('Feil format. Dobbelsjekk at input er p\u00e5 formatet YYYY-YYYY.')]\n )\n return self.cleaned_data\n\n years = year_range.split('-')\n\n # If somewhat they fucked up input, we don't want None-shit after the split.\n if not years[0] or not years[1]:\n self._errors['period'] = self.error_class([_('Feil format. Dobbelsjekk input.')])\n return self.cleaned_data\n\n # If first year is larger than latter, or the diff is more than one, fail.\n if (int(years[0]) > int(years[1])) or (int(years[1]) - int(years[0])) > 1:\n self._errors['period'] = self.error_class([_('Ikke gyldig \u00e5rs-intervall. 
Bare ett \u00e5r er tillat.')])\n\n return self.cleaned_data\n\n\nclass MembershipSettingsForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(MembershipSettingsForm, self).__init__(*args, **kwargs)\n self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'\n\n class Meta(object):\n model = OnlineUser\n fields = ['field_of_study', 'started_date']\n\n widgets = {\n 'started_date': forms.TextInput(attrs={'placeholder': 'YYYY-MM-DD'}),\n }\n\n\nclass InternalServicesForm(forms.Form):\n ow4_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"Online passord\"))\n services_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"\u00d8nsket service passord\"))\n current_user = None\n\n def clean(self):\n super(InternalServicesForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # User object relation here\n user = auth.authenticate(username=self.current_user.username, password=cleaned_data['ow4_password'])\n\n if user is None or user.id != self.current_user.id:\n self._errors['ow4_password'] = self.error_class([_(u\"Passordet er ikke korrekt.\")])\n\n return cleaned_data\n", "path": "apps/profiles/forms.py"}, {"content": "import hashlib\nimport urllib\n\nfrom django import template\nfrom django.conf import settings\n\nregister = template.Library()\n\n\[email protected]_tag(takes_context=True)\ndef gravatar_url(context, user, size):\n prefix = \"https://\" if context['request'].is_secure() else \"http://\"\n default = \"%s%s%s_%s.png\" % (\n prefix,\n context['request'].META['HTTP_HOST'],\n settings.DEFAULT_PROFILE_PICTURE_PREFIX,\n user.gender\n )\n\n grav_url = \"https://www.gravatar.com/avatar/\" + hashlib.md5(user.email.encode()).hexdigest() + \"?\"\n grav_url += urllib.parse.urlencode({'d': default, 's': str(size)})\n\n return grav_url\n", "path": "apps/authentication/templatetags/gravatar_url_resolver.py"}]} | 2,113 | 439 |
gh_patches_debug_33077 | rasdani/github-patches | git_diff | certbot__certbot-8898 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
apache authenticator should authenticate all duplicate vhosts
See https://community.letsencrypt.org/t/cant-renew-certificate/153274/3?u=_az. I've seen this a few other times as well.
The Apache authenticator needs to authenticate all of the virtual hosts which could match the requested domain, including duplicates and HTTPS ones.
#6729 fixed this for the most part, but there are seemingly still some edge cases left over.
Let's investigate and fix those.
</issue>
<code>
[start of certbot-apache/certbot_apache/_internal/http_01.py]
1 """A class that performs HTTP-01 challenges for Apache"""
2 import errno
3 import logging
4 from typing import List
5 from typing import Set
6
7 from certbot import errors
8 from certbot.compat import filesystem
9 from certbot.compat import os
10 from certbot.plugins import common
11 from certbot_apache._internal.obj import VirtualHost # pylint: disable=unused-import
12 from certbot_apache._internal.parser import get_aug_path
13
14 logger = logging.getLogger(__name__)
15
16
17 class ApacheHttp01(common.ChallengePerformer):
18 """Class that performs HTTP-01 challenges within the Apache configurator."""
19
20 CONFIG_TEMPLATE22_PRE = """\
21 RewriteEngine on
22 RewriteRule ^/\\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ {0}/$1 [L]
23
24 """
25 CONFIG_TEMPLATE22_POST = """\
26 <Directory {0}>
27 Order Allow,Deny
28 Allow from all
29 </Directory>
30 <Location /.well-known/acme-challenge>
31 Order Allow,Deny
32 Allow from all
33 </Location>
34 """
35
36 CONFIG_TEMPLATE24_PRE = """\
37 RewriteEngine on
38 RewriteRule ^/\\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ {0}/$1 [END]
39 """
40 CONFIG_TEMPLATE24_POST = """\
41 <Directory {0}>
42 Require all granted
43 </Directory>
44 <Location /.well-known/acme-challenge>
45 Require all granted
46 </Location>
47 """
48
49 def __init__(self, *args, **kwargs):
50 super().__init__(*args, **kwargs)
51 self.challenge_conf_pre = os.path.join(
52 self.configurator.conf("challenge-location"),
53 "le_http_01_challenge_pre.conf")
54 self.challenge_conf_post = os.path.join(
55 self.configurator.conf("challenge-location"),
56 "le_http_01_challenge_post.conf")
57 self.challenge_dir = os.path.join(
58 self.configurator.config.work_dir,
59 "http_challenges")
60 self.moded_vhosts: Set[VirtualHost] = set()
61
62 def perform(self):
63 """Perform all HTTP-01 challenges."""
64 if not self.achalls:
65 return []
66 # Save any changes to the configuration as a precaution
67 # About to make temporary changes to the config
68 self.configurator.save("Changes before challenge setup", True)
69
70 self.configurator.ensure_listen(str(
71 self.configurator.config.http01_port))
72 self.prepare_http01_modules()
73
74 responses = self._set_up_challenges()
75
76 self._mod_config()
77 # Save reversible changes
78 self.configurator.save("HTTP Challenge", True)
79
80 return responses
81
82 def prepare_http01_modules(self):
83 """Make sure that we have the needed modules available for http01"""
84
85 if self.configurator.conf("handle-modules"):
86 needed_modules = ["rewrite"]
87 if self.configurator.version < (2, 4):
88 needed_modules.append("authz_host")
89 else:
90 needed_modules.append("authz_core")
91 for mod in needed_modules:
92 if mod + "_module" not in self.configurator.parser.modules:
93 self.configurator.enable_mod(mod, temp=True)
94
95 def _mod_config(self):
96 selected_vhosts: List[VirtualHost] = []
97 http_port = str(self.configurator.config.http01_port)
98 for chall in self.achalls:
99 # Search for matching VirtualHosts
100 for vh in self._matching_vhosts(chall.domain):
101 selected_vhosts.append(vh)
102
103 # Ensure that we have one or more VirtualHosts that we can continue
104 # with. (one that listens to port configured with --http-01-port)
105 found = False
106 for vhost in selected_vhosts:
107 if any(a.is_wildcard() or a.get_port() == http_port for a in vhost.addrs):
108 found = True
109
110 if not found:
111 for vh in self._relevant_vhosts():
112 selected_vhosts.append(vh)
113
114 # Add the challenge configuration
115 for vh in selected_vhosts:
116 self._set_up_include_directives(vh)
117
118 self.configurator.reverter.register_file_creation(
119 True, self.challenge_conf_pre)
120 self.configurator.reverter.register_file_creation(
121 True, self.challenge_conf_post)
122
123 if self.configurator.version < (2, 4):
124 config_template_pre = self.CONFIG_TEMPLATE22_PRE
125 config_template_post = self.CONFIG_TEMPLATE22_POST
126 else:
127 config_template_pre = self.CONFIG_TEMPLATE24_PRE
128 config_template_post = self.CONFIG_TEMPLATE24_POST
129
130 config_text_pre = config_template_pre.format(self.challenge_dir)
131 config_text_post = config_template_post.format(self.challenge_dir)
132
133 logger.debug("writing a pre config file with text:\n %s", config_text_pre)
134 with open(self.challenge_conf_pre, "w") as new_conf:
135 new_conf.write(config_text_pre)
136 logger.debug("writing a post config file with text:\n %s", config_text_post)
137 with open(self.challenge_conf_post, "w") as new_conf:
138 new_conf.write(config_text_post)
139
140 def _matching_vhosts(self, domain):
141 """Return all VirtualHost objects that have the requested domain name or
142 a wildcard name that would match the domain in ServerName or ServerAlias
143 directive.
144 """
145 matching_vhosts = []
146 for vhost in self.configurator.vhosts:
147 if self.configurator.domain_in_names(vhost.get_names(), domain):
148 # domain_in_names also matches the exact names, so no need
149 # to check "domain in vhost.get_names()" explicitly here
150 matching_vhosts.append(vhost)
151
152 return matching_vhosts
153
154 def _relevant_vhosts(self):
155 http01_port = str(self.configurator.config.http01_port)
156 relevant_vhosts = []
157 for vhost in self.configurator.vhosts:
158 if any(a.is_wildcard() or a.get_port() == http01_port for a in vhost.addrs):
159 if not vhost.ssl:
160 relevant_vhosts.append(vhost)
161 if not relevant_vhosts:
162 raise errors.PluginError(
163 "Unable to find a virtual host listening on port {0} which is"
164 " currently needed for Certbot to prove to the CA that you"
165 " control your domain. Please add a virtual host for port"
166 " {0}.".format(http01_port))
167
168 return relevant_vhosts
169
170 def _set_up_challenges(self):
171 if not os.path.isdir(self.challenge_dir):
172 old_umask = filesystem.umask(0o022)
173 try:
174 filesystem.makedirs(self.challenge_dir, 0o755)
175 except OSError as exception:
176 if exception.errno not in (errno.EEXIST, errno.EISDIR):
177 raise errors.PluginError(
178 "Couldn't create root for http-01 challenge")
179 finally:
180 filesystem.umask(old_umask)
181
182 responses = []
183 for achall in self.achalls:
184 responses.append(self._set_up_challenge(achall))
185
186 return responses
187
188 def _set_up_challenge(self, achall):
189 response, validation = achall.response_and_validation()
190
191 name = os.path.join(self.challenge_dir, achall.chall.encode("token"))
192
193 self.configurator.reverter.register_file_creation(True, name)
194 with open(name, 'wb') as f:
195 f.write(validation.encode())
196 filesystem.chmod(name, 0o644)
197
198 return response
199
200 def _set_up_include_directives(self, vhost):
201 """Includes override configuration to the beginning and to the end of
202 VirtualHost. Note that this include isn't added to Augeas search tree"""
203
204 if vhost not in self.moded_vhosts:
205 logger.debug(
206 "Adding a temporary challenge validation Include for name: %s in: %s",
207 vhost.name, vhost.filep)
208 self.configurator.parser.add_dir_beginning(
209 vhost.path, "Include", self.challenge_conf_pre)
210 self.configurator.parser.add_dir(
211 vhost.path, "Include", self.challenge_conf_post)
212
213 if not vhost.enabled:
214 self.configurator.parser.add_dir(
215 get_aug_path(self.configurator.parser.loc["default"]),
216 "Include", vhost.filep)
217
218 self.moded_vhosts.add(vhost)
219
[end of certbot-apache/certbot_apache/_internal/http_01.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/certbot-apache/certbot_apache/_internal/http_01.py b/certbot-apache/certbot_apache/_internal/http_01.py
--- a/certbot-apache/certbot_apache/_internal/http_01.py
+++ b/certbot-apache/certbot_apache/_internal/http_01.py
@@ -95,10 +95,10 @@
def _mod_config(self):
selected_vhosts: List[VirtualHost] = []
http_port = str(self.configurator.config.http01_port)
+
+ # Search for VirtualHosts matching by name
for chall in self.achalls:
- # Search for matching VirtualHosts
- for vh in self._matching_vhosts(chall.domain):
- selected_vhosts.append(vh)
+ selected_vhosts += self._matching_vhosts(chall.domain)
# Ensure that we have one or more VirtualHosts that we can continue
# with. (one that listens to port configured with --http-01-port)
@@ -107,9 +107,13 @@
if any(a.is_wildcard() or a.get_port() == http_port for a in vhost.addrs):
found = True
- if not found:
- for vh in self._relevant_vhosts():
- selected_vhosts.append(vh)
+ # If there's at least one elgible VirtualHost, also add all unnamed VirtualHosts
+ # because they might match at runtime (#8890)
+ if found:
+ selected_vhosts += self._unnamed_vhosts()
+ # Otherwise, add every Virtualhost which listens on the right port
+ else:
+ selected_vhosts += self._relevant_vhosts()
# Add the challenge configuration
for vh in selected_vhosts:
@@ -167,6 +171,10 @@
return relevant_vhosts
+ def _unnamed_vhosts(self) -> List[VirtualHost]:
+ """Return all VirtualHost objects with no ServerName"""
+ return [vh for vh in self.configurator.vhosts if vh.name is None]
+
def _set_up_challenges(self):
if not os.path.isdir(self.challenge_dir):
old_umask = filesystem.umask(0o022)
| {"golden_diff": "diff --git a/certbot-apache/certbot_apache/_internal/http_01.py b/certbot-apache/certbot_apache/_internal/http_01.py\n--- a/certbot-apache/certbot_apache/_internal/http_01.py\n+++ b/certbot-apache/certbot_apache/_internal/http_01.py\n@@ -95,10 +95,10 @@\n def _mod_config(self):\n selected_vhosts: List[VirtualHost] = []\n http_port = str(self.configurator.config.http01_port)\n+\n+ # Search for VirtualHosts matching by name\n for chall in self.achalls:\n- # Search for matching VirtualHosts\n- for vh in self._matching_vhosts(chall.domain):\n- selected_vhosts.append(vh)\n+ selected_vhosts += self._matching_vhosts(chall.domain)\n \n # Ensure that we have one or more VirtualHosts that we can continue\n # with. (one that listens to port configured with --http-01-port)\n@@ -107,9 +107,13 @@\n if any(a.is_wildcard() or a.get_port() == http_port for a in vhost.addrs):\n found = True\n \n- if not found:\n- for vh in self._relevant_vhosts():\n- selected_vhosts.append(vh)\n+ # If there's at least one elgible VirtualHost, also add all unnamed VirtualHosts\n+ # because they might match at runtime (#8890)\n+ if found:\n+ selected_vhosts += self._unnamed_vhosts()\n+ # Otherwise, add every Virtualhost which listens on the right port\n+ else:\n+ selected_vhosts += self._relevant_vhosts()\n \n # Add the challenge configuration\n for vh in selected_vhosts:\n@@ -167,6 +171,10 @@\n \n return relevant_vhosts\n \n+ def _unnamed_vhosts(self) -> List[VirtualHost]:\n+ \"\"\"Return all VirtualHost objects with no ServerName\"\"\"\n+ return [vh for vh in self.configurator.vhosts if vh.name is None]\n+\n def _set_up_challenges(self):\n if not os.path.isdir(self.challenge_dir):\n old_umask = filesystem.umask(0o022)\n", "issue": "apache authenticator should authenticate all duplicate vhosts\nSee https://community.letsencrypt.org/t/cant-renew-certificate/153274/3?u=_az. 
I've seen this a few other times as well.\r\n\r\nThe Apache authenticator needs to authenticate all of the virtual hosts which could match the requested domain, including duplicates and HTTPS ones.\r\n\r\n#6729 fixed this for the most part, but there are seemingly still some edge cases left over.\r\n\r\nLet's investigate and fix those.\n", "before_files": [{"content": "\"\"\"A class that performs HTTP-01 challenges for Apache\"\"\"\nimport errno\nimport logging\nfrom typing import List\nfrom typing import Set\n\nfrom certbot import errors\nfrom certbot.compat import filesystem\nfrom certbot.compat import os\nfrom certbot.plugins import common\nfrom certbot_apache._internal.obj import VirtualHost # pylint: disable=unused-import\nfrom certbot_apache._internal.parser import get_aug_path\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApacheHttp01(common.ChallengePerformer):\n \"\"\"Class that performs HTTP-01 challenges within the Apache configurator.\"\"\"\n\n CONFIG_TEMPLATE22_PRE = \"\"\"\\\n RewriteEngine on\n RewriteRule ^/\\\\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ {0}/$1 [L]\n\n \"\"\"\n CONFIG_TEMPLATE22_POST = \"\"\"\\\n <Directory {0}>\n Order Allow,Deny\n Allow from all\n </Directory>\n <Location /.well-known/acme-challenge>\n Order Allow,Deny\n Allow from all\n </Location>\n \"\"\"\n\n CONFIG_TEMPLATE24_PRE = \"\"\"\\\n RewriteEngine on\n RewriteRule ^/\\\\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ {0}/$1 [END]\n \"\"\"\n CONFIG_TEMPLATE24_POST = \"\"\"\\\n <Directory {0}>\n Require all granted\n </Directory>\n <Location /.well-known/acme-challenge>\n Require all granted\n </Location>\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.challenge_conf_pre = os.path.join(\n self.configurator.conf(\"challenge-location\"),\n \"le_http_01_challenge_pre.conf\")\n self.challenge_conf_post = os.path.join(\n self.configurator.conf(\"challenge-location\"),\n \"le_http_01_challenge_post.conf\")\n self.challenge_dir = os.path.join(\n self.configurator.config.work_dir,\n \"http_challenges\")\n self.moded_vhosts: Set[VirtualHost] = set()\n\n def perform(self):\n \"\"\"Perform all HTTP-01 challenges.\"\"\"\n if not self.achalls:\n return []\n # Save any changes to the configuration as a precaution\n # About to make temporary changes to the config\n self.configurator.save(\"Changes before challenge setup\", True)\n\n self.configurator.ensure_listen(str(\n self.configurator.config.http01_port))\n self.prepare_http01_modules()\n\n responses = self._set_up_challenges()\n\n self._mod_config()\n # Save reversible changes\n self.configurator.save(\"HTTP Challenge\", True)\n\n return responses\n\n def prepare_http01_modules(self):\n \"\"\"Make sure that we have the needed modules available for http01\"\"\"\n\n if self.configurator.conf(\"handle-modules\"):\n needed_modules = [\"rewrite\"]\n if self.configurator.version < (2, 4):\n needed_modules.append(\"authz_host\")\n else:\n needed_modules.append(\"authz_core\")\n for mod in needed_modules:\n if mod + \"_module\" not in self.configurator.parser.modules:\n self.configurator.enable_mod(mod, temp=True)\n\n def _mod_config(self):\n selected_vhosts: List[VirtualHost] = []\n http_port = str(self.configurator.config.http01_port)\n for chall in self.achalls:\n # Search for matching VirtualHosts\n for vh in self._matching_vhosts(chall.domain):\n selected_vhosts.append(vh)\n\n # Ensure that we have one or more VirtualHosts that we can continue\n # with. 
(one that listens to port configured with --http-01-port)\n found = False\n for vhost in selected_vhosts:\n if any(a.is_wildcard() or a.get_port() == http_port for a in vhost.addrs):\n found = True\n\n if not found:\n for vh in self._relevant_vhosts():\n selected_vhosts.append(vh)\n\n # Add the challenge configuration\n for vh in selected_vhosts:\n self._set_up_include_directives(vh)\n\n self.configurator.reverter.register_file_creation(\n True, self.challenge_conf_pre)\n self.configurator.reverter.register_file_creation(\n True, self.challenge_conf_post)\n\n if self.configurator.version < (2, 4):\n config_template_pre = self.CONFIG_TEMPLATE22_PRE\n config_template_post = self.CONFIG_TEMPLATE22_POST\n else:\n config_template_pre = self.CONFIG_TEMPLATE24_PRE\n config_template_post = self.CONFIG_TEMPLATE24_POST\n\n config_text_pre = config_template_pre.format(self.challenge_dir)\n config_text_post = config_template_post.format(self.challenge_dir)\n\n logger.debug(\"writing a pre config file with text:\\n %s\", config_text_pre)\n with open(self.challenge_conf_pre, \"w\") as new_conf:\n new_conf.write(config_text_pre)\n logger.debug(\"writing a post config file with text:\\n %s\", config_text_post)\n with open(self.challenge_conf_post, \"w\") as new_conf:\n new_conf.write(config_text_post)\n\n def _matching_vhosts(self, domain):\n \"\"\"Return all VirtualHost objects that have the requested domain name or\n a wildcard name that would match the domain in ServerName or ServerAlias\n directive.\n \"\"\"\n matching_vhosts = []\n for vhost in self.configurator.vhosts:\n if self.configurator.domain_in_names(vhost.get_names(), domain):\n # domain_in_names also matches the exact names, so no need\n # to check \"domain in vhost.get_names()\" explicitly here\n matching_vhosts.append(vhost)\n\n return matching_vhosts\n\n def _relevant_vhosts(self):\n http01_port = str(self.configurator.config.http01_port)\n relevant_vhosts = []\n for vhost in self.configurator.vhosts:\n if any(a.is_wildcard() or a.get_port() == http01_port for a in vhost.addrs):\n if not vhost.ssl:\n relevant_vhosts.append(vhost)\n if not relevant_vhosts:\n raise errors.PluginError(\n \"Unable to find a virtual host listening on port {0} which is\"\n \" currently needed for Certbot to prove to the CA that you\"\n \" control your domain. Please add a virtual host for port\"\n \" {0}.\".format(http01_port))\n\n return relevant_vhosts\n\n def _set_up_challenges(self):\n if not os.path.isdir(self.challenge_dir):\n old_umask = filesystem.umask(0o022)\n try:\n filesystem.makedirs(self.challenge_dir, 0o755)\n except OSError as exception:\n if exception.errno not in (errno.EEXIST, errno.EISDIR):\n raise errors.PluginError(\n \"Couldn't create root for http-01 challenge\")\n finally:\n filesystem.umask(old_umask)\n\n responses = []\n for achall in self.achalls:\n responses.append(self._set_up_challenge(achall))\n\n return responses\n\n def _set_up_challenge(self, achall):\n response, validation = achall.response_and_validation()\n\n name = os.path.join(self.challenge_dir, achall.chall.encode(\"token\"))\n\n self.configurator.reverter.register_file_creation(True, name)\n with open(name, 'wb') as f:\n f.write(validation.encode())\n filesystem.chmod(name, 0o644)\n\n return response\n\n def _set_up_include_directives(self, vhost):\n \"\"\"Includes override configuration to the beginning and to the end of\n VirtualHost. 
Note that this include isn't added to Augeas search tree\"\"\"\n\n if vhost not in self.moded_vhosts:\n logger.debug(\n \"Adding a temporary challenge validation Include for name: %s in: %s\",\n vhost.name, vhost.filep)\n self.configurator.parser.add_dir_beginning(\n vhost.path, \"Include\", self.challenge_conf_pre)\n self.configurator.parser.add_dir(\n vhost.path, \"Include\", self.challenge_conf_post)\n\n if not vhost.enabled:\n self.configurator.parser.add_dir(\n get_aug_path(self.configurator.parser.loc[\"default\"]),\n \"Include\", vhost.filep)\n\n self.moded_vhosts.add(vhost)\n", "path": "certbot-apache/certbot_apache/_internal/http_01.py"}]} | 3,038 | 515 |
gh_patches_debug_41884 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-2358 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Generated manifests for OpenSearch 2.x need custom JAVA_HOME
### Is your feature request related to a problem? Please describe
Automation opened a PR to generate the OpenSearch 3.0 manifest. That manifest was missing the `JAVA_HOME` setting, which had been added manually in https://github.com/opensearch-project/opensearch-build/pull/2042.
### Describe the solution you'd like
Add a way to customize generated manifests for various products/versions, and add `JAVA_HOME` to the OpenSearch ones.
### Describe alternatives you've considered
Keep doing it manually.
### Additional context
_No response_
</issue>
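One way to support that kind of per-product, per-version customization is to keep template manifests on disk, pick the one matching the major version, and fall back to a shared default. The sketch below is purely illustrative — the directory layout and names are invented, not taken from the project:

```python
# Illustrative only: resolve a manifest template for a given product prefix and version,
# preferring a major-version-specific template (e.g. "3.x") over a shared default.
import os


def resolve_template(templates_root: str, prefix: str, version: str) -> str:
    major_folder = version.split(".")[0] + ".x"  # "3.0.0" -> "3.x"
    candidate = os.path.join(templates_root, prefix, major_folder, "manifest.yml")
    if os.path.exists(candidate):
        return candidate
    return os.path.join(templates_root, prefix, "default", "manifest.yml")
```

A version-specific template can then carry extras such as a `JAVA_HOME` entry for OpenSearch while other products keep the default.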
<code>
[start of src/manifests_workflow/input_manifests.py]
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 import glob
8 import logging
9 import os
10 import re
11 from abc import abstractmethod
12 from typing import Dict, List, Type, Union
13
14 from manifests.input_manifest import InputManifest
15 from manifests.manifests import Manifests
16 from manifests_workflow.component_opensearch import ComponentOpenSearch
17 from manifests_workflow.component_opensearch_dashboards_min import ComponentOpenSearchDashboardsMin
18 from manifests_workflow.component_opensearch_min import ComponentOpenSearchMin
19 from system.temporary_directory import TemporaryDirectory
20
21
22 class InputManifests(Manifests):
23 def __init__(self, name: str) -> None:
24 self.name = name
25 self.prefix = name.lower().replace(" ", "-")
26 super().__init__(InputManifest, InputManifests.files(self.prefix))
27
28 @classmethod
29 def manifests_path(self) -> str:
30 return os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..", "manifests"))
31
32 @classmethod
33 def jenkins_path(self) -> str:
34 return os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..", "jenkins"))
35
36 @classmethod
37 def cron_jenkinsfile(self) -> str:
38 return os.path.join(self.jenkins_path(), "check-for-build.jenkinsfile")
39
40 @classmethod
41 def files(self, name: str) -> List:
42 results = []
43 for filename in glob.glob(os.path.join(self.manifests_path(), f"**/{name}-*.yml")):
44 # avoids the -maven manifest
45 match = re.search(rf"^{name}-([0-9.]*).yml$", os.path.basename(filename))
46 if match:
47 results.append(filename)
48 return results
49
50 @abstractmethod
51 def update(self, min_klass: Union[Type[ComponentOpenSearchMin], Type[ComponentOpenSearchDashboardsMin]], component_klass: Type[ComponentOpenSearch], keep: bool = False) -> None:
52 known_versions = self.versions
53 logging.info(f"Known versions: {known_versions}")
54 main_versions: Dict = {}
55 with TemporaryDirectory(keep=keep, chdir=True) as work_dir:
56 logging.info(f"Checking out components into {work_dir.name}")
57
58 # check out and build #main, 1.x, etc.
59 branches = min_klass.branches()
60
61 logging.info(f"Checking {self.name} {branches} branches")
62 for branch in branches:
63 c = min_klass.checkout(
64 path=os.path.join(work_dir.name, self.name.replace(" ", ""), branch),
65 branch=branch,
66 )
67
68 version = c.version
69 logging.info(f"{self.name}#{branch} is version {version}")
70 if version not in main_versions.keys():
71 main_versions[version] = [c]
72
73 if component_klass is not None:
74 # components can increment their own version first without incrementing min
75 manifest = self.latest
76 logging.info(f"Examining components in the latest manifest of {manifest.build.name} ({manifest.build.version})")
77 for component in manifest.components.values():
78 if component.name == self.name:
79 continue
80
81 logging.info(f"Checking out {component.name}#main")
82 component = component_klass.checkout(
83 name=component.name,
84 path=os.path.join(work_dir.name, component.name),
85 opensearch_version=manifest.build.version,
86 branch="main",
87 )
88
89 component_version = component.version
90 if component_version:
91 release_version = ".".join(component_version.split(".")[:3])
92 if release_version not in main_versions.keys():
93 main_versions[release_version] = []
94 main_versions[release_version].append(component)
95 logging.info(f"{component.name}#main is version {release_version} (from {component_version})")
96
97 # summarize
98 logging.info("Found versions on main:")
99 for main_version in main_versions.keys():
100 for component in main_versions[main_version]:
101 logging.info(f" {component.name}={main_version}")
102
103 # generate new manifests
104 for release_version in sorted(main_versions.keys() - known_versions):
105 self.write_manifest(release_version, main_versions[release_version])
106 self.add_to_cron(release_version)
107
108 def create_manifest(self, version: str, components: List = []) -> InputManifest:
109 image_map = {
110 "opensearch": "opensearchstaging/ci-runner:ci-runner-centos7-opensearch-build-v2",
111 "opensearch-dashboards": "opensearchstaging/ci-runner:ci-runner-centos7-opensearch-dashboards-build-v2"
112 }
113
114 data: Dict = {
115 "schema-version": "1.0",
116 "build": {
117 "name": self.name,
118 "version": version
119 },
120 "ci": {
121 "image": {
122 "name": image_map[self.prefix]
123 }
124 },
125 "components": [],
126 }
127
128 for component in components:
129 logging.info(f" Adding {component.name}")
130 data["components"].append(component.to_dict())
131
132 return InputManifest(data)
133
134 def write_manifest(self, version: str, components: List = []) -> None:
135 logging.info(f"Creating new version: {version}")
136 manifest = self.create_manifest(version, components)
137 manifest_dir = os.path.join(self.manifests_path(), version)
138 os.makedirs(manifest_dir, exist_ok=True)
139 manifest_path = os.path.join(manifest_dir, f"{self.prefix}-{version}.yml")
140 manifest.to_file(manifest_path)
141 logging.info(f"Wrote {manifest_path}")
142
143 def add_to_cron(self, version: str) -> None:
144 logging.info(f"Adding new version to cron: {version}")
145 jenkinsfile = self.cron_jenkinsfile()
146 with open(jenkinsfile, "r") as f:
147 data = f.read()
148
149 cron_entry = f"H 1 * * * %INPUT_MANIFEST={version}/{self.prefix}-{version}.yml;TARGET_JOB_NAME=distribution-build-{self.prefix}\n"
150
151 if cron_entry in data:
152 raise ValueError(f"{jenkinsfile} already contains an entry for {self.prefix} {version}")
153
154 data = data.replace(
155 "parameterizedCron '''\n",
156 f"parameterizedCron '''\n{' ' * 12}{cron_entry}"
157 )
158
159 with open(jenkinsfile, "w") as f:
160 f.write(data)
161
162 logging.info(f"Wrote {jenkinsfile}")
163
[end of src/manifests_workflow/input_manifests.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/manifests_workflow/input_manifests.py b/src/manifests_workflow/input_manifests.py
--- a/src/manifests_workflow/input_manifests.py
+++ b/src/manifests_workflow/input_manifests.py
@@ -11,7 +11,7 @@
from abc import abstractmethod
from typing import Dict, List, Type, Union
-from manifests.input_manifest import InputManifest
+from manifests.input_manifest import InputComponents, InputManifest
from manifests.manifests import Manifests
from manifests_workflow.component_opensearch import ComponentOpenSearch
from manifests_workflow.component_opensearch_dashboards_min import ComponentOpenSearchDashboardsMin
@@ -48,7 +48,12 @@
return results
@abstractmethod
- def update(self, min_klass: Union[Type[ComponentOpenSearchMin], Type[ComponentOpenSearchDashboardsMin]], component_klass: Type[ComponentOpenSearch], keep: bool = False) -> None:
+ def update(
+ self,
+ min_klass: Union[Type[ComponentOpenSearchMin], Type[ComponentOpenSearchDashboardsMin]],
+ component_klass: Type[ComponentOpenSearch],
+ keep: bool = False,
+ ) -> None:
known_versions = self.versions
logging.info(f"Known versions: {known_versions}")
main_versions: Dict = {}
@@ -106,30 +111,23 @@
self.add_to_cron(release_version)
def create_manifest(self, version: str, components: List = []) -> InputManifest:
- image_map = {
- "opensearch": "opensearchstaging/ci-runner:ci-runner-centos7-opensearch-build-v2",
- "opensearch-dashboards": "opensearchstaging/ci-runner:ci-runner-centos7-opensearch-dashboards-build-v2"
- }
-
- data: Dict = {
- "schema-version": "1.0",
- "build": {
- "name": self.name,
- "version": version
- },
- "ci": {
- "image": {
- "name": image_map[self.prefix]
- }
- },
- "components": [],
- }
+ templates_base_path = os.path.join(self.manifests_path(), "templates")
+ template_version_folder = version.split(".")[0] + ".x"
+ template_full_path = os.path.join(templates_base_path, self.prefix, template_version_folder, "manifest.yml")
+ if not os.path.exists(template_full_path):
+ template_full_path = os.path.join(templates_base_path, self.prefix, "default", "manifest.yml")
+
+ manifest = InputManifest.from_file(open(template_full_path))
+
+ manifest.build.version = version
+ manifests_components = []
for component in components:
logging.info(f" Adding {component.name}")
- data["components"].append(component.to_dict())
+ manifests_components.append(component.to_dict())
- return InputManifest(data)
+ manifest.components = InputComponents(manifests_components) # type: ignore
+ return manifest
def write_manifest(self, version: str, components: List = []) -> None:
logging.info(f"Creating new version: {version}")
@@ -151,10 +149,7 @@
if cron_entry in data:
raise ValueError(f"{jenkinsfile} already contains an entry for {self.prefix} {version}")
- data = data.replace(
- "parameterizedCron '''\n",
- f"parameterizedCron '''\n{' ' * 12}{cron_entry}"
- )
+ data = data.replace("parameterizedCron '''\n", f"parameterizedCron '''\n{' ' * 12}{cron_entry}")
with open(jenkinsfile, "w") as f:
f.write(data)
| {"golden_diff": "diff --git a/src/manifests_workflow/input_manifests.py b/src/manifests_workflow/input_manifests.py\n--- a/src/manifests_workflow/input_manifests.py\n+++ b/src/manifests_workflow/input_manifests.py\n@@ -11,7 +11,7 @@\n from abc import abstractmethod\n from typing import Dict, List, Type, Union\n \n-from manifests.input_manifest import InputManifest\n+from manifests.input_manifest import InputComponents, InputManifest\n from manifests.manifests import Manifests\n from manifests_workflow.component_opensearch import ComponentOpenSearch\n from manifests_workflow.component_opensearch_dashboards_min import ComponentOpenSearchDashboardsMin\n@@ -48,7 +48,12 @@\n return results\n \n @abstractmethod\n- def update(self, min_klass: Union[Type[ComponentOpenSearchMin], Type[ComponentOpenSearchDashboardsMin]], component_klass: Type[ComponentOpenSearch], keep: bool = False) -> None:\n+ def update(\n+ self,\n+ min_klass: Union[Type[ComponentOpenSearchMin], Type[ComponentOpenSearchDashboardsMin]],\n+ component_klass: Type[ComponentOpenSearch],\n+ keep: bool = False,\n+ ) -> None:\n known_versions = self.versions\n logging.info(f\"Known versions: {known_versions}\")\n main_versions: Dict = {}\n@@ -106,30 +111,23 @@\n self.add_to_cron(release_version)\n \n def create_manifest(self, version: str, components: List = []) -> InputManifest:\n- image_map = {\n- \"opensearch\": \"opensearchstaging/ci-runner:ci-runner-centos7-opensearch-build-v2\",\n- \"opensearch-dashboards\": \"opensearchstaging/ci-runner:ci-runner-centos7-opensearch-dashboards-build-v2\"\n- }\n-\n- data: Dict = {\n- \"schema-version\": \"1.0\",\n- \"build\": {\n- \"name\": self.name,\n- \"version\": version\n- },\n- \"ci\": {\n- \"image\": {\n- \"name\": image_map[self.prefix]\n- }\n- },\n- \"components\": [],\n- }\n+ templates_base_path = os.path.join(self.manifests_path(), \"templates\")\n+ template_version_folder = version.split(\".\")[0] + \".x\"\n+ template_full_path = os.path.join(templates_base_path, self.prefix, template_version_folder, \"manifest.yml\")\n+ if not os.path.exists(template_full_path):\n+ template_full_path = os.path.join(templates_base_path, self.prefix, \"default\", \"manifest.yml\")\n+\n+ manifest = InputManifest.from_file(open(template_full_path))\n+\n+ manifest.build.version = version\n+ manifests_components = []\n \n for component in components:\n logging.info(f\" Adding {component.name}\")\n- data[\"components\"].append(component.to_dict())\n+ manifests_components.append(component.to_dict())\n \n- return InputManifest(data)\n+ manifest.components = InputComponents(manifests_components) # type: ignore\n+ return manifest\n \n def write_manifest(self, version: str, components: List = []) -> None:\n logging.info(f\"Creating new version: {version}\")\n@@ -151,10 +149,7 @@\n if cron_entry in data:\n raise ValueError(f\"{jenkinsfile} already contains an entry for {self.prefix} {version}\")\n \n- data = data.replace(\n- \"parameterizedCron '''\\n\",\n- f\"parameterizedCron '''\\n{' ' * 12}{cron_entry}\"\n- )\n+ data = data.replace(\"parameterizedCron '''\\n\", f\"parameterizedCron '''\\n{' ' * 12}{cron_entry}\")\n \n with open(jenkinsfile, \"w\") as f:\n f.write(data)\n", "issue": "Generated manifests for OpenSearch 2.x need custom JAVA_HOME\n### Is your feature request related to a problem? Please describe\n\nAutomation opened a PR to generate the OpenSearch 3.0 manifest. 
That manifest was missing `JAVA_HOME` that was added manually in https://github.com/opensearch-project/opensearch-build/pull/2042.\n\n### Describe the solution you'd like\n\nAdd a way to customize generated manifests for various products/versions, add `JAVA_HOME` to the OpenSearch ones.\n\n### Describe alternatives you've considered\n\nKeep doing it manually.\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport glob\nimport logging\nimport os\nimport re\nfrom abc import abstractmethod\nfrom typing import Dict, List, Type, Union\n\nfrom manifests.input_manifest import InputManifest\nfrom manifests.manifests import Manifests\nfrom manifests_workflow.component_opensearch import ComponentOpenSearch\nfrom manifests_workflow.component_opensearch_dashboards_min import ComponentOpenSearchDashboardsMin\nfrom manifests_workflow.component_opensearch_min import ComponentOpenSearchMin\nfrom system.temporary_directory import TemporaryDirectory\n\n\nclass InputManifests(Manifests):\n def __init__(self, name: str) -> None:\n self.name = name\n self.prefix = name.lower().replace(\" \", \"-\")\n super().__init__(InputManifest, InputManifests.files(self.prefix))\n\n @classmethod\n def manifests_path(self) -> str:\n return os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"manifests\"))\n\n @classmethod\n def jenkins_path(self) -> str:\n return os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"jenkins\"))\n\n @classmethod\n def cron_jenkinsfile(self) -> str:\n return os.path.join(self.jenkins_path(), \"check-for-build.jenkinsfile\")\n\n @classmethod\n def files(self, name: str) -> List:\n results = []\n for filename in glob.glob(os.path.join(self.manifests_path(), f\"**/{name}-*.yml\")):\n # avoids the -maven manifest\n match = re.search(rf\"^{name}-([0-9.]*).yml$\", os.path.basename(filename))\n if match:\n results.append(filename)\n return results\n\n @abstractmethod\n def update(self, min_klass: Union[Type[ComponentOpenSearchMin], Type[ComponentOpenSearchDashboardsMin]], component_klass: Type[ComponentOpenSearch], keep: bool = False) -> None:\n known_versions = self.versions\n logging.info(f\"Known versions: {known_versions}\")\n main_versions: Dict = {}\n with TemporaryDirectory(keep=keep, chdir=True) as work_dir:\n logging.info(f\"Checking out components into {work_dir.name}\")\n\n # check out and build #main, 1.x, etc.\n branches = min_klass.branches()\n\n logging.info(f\"Checking {self.name} {branches} branches\")\n for branch in branches:\n c = min_klass.checkout(\n path=os.path.join(work_dir.name, self.name.replace(\" \", \"\"), branch),\n branch=branch,\n )\n\n version = c.version\n logging.info(f\"{self.name}#{branch} is version {version}\")\n if version not in main_versions.keys():\n main_versions[version] = [c]\n\n if component_klass is not None:\n # components can increment their own version first without incrementing min\n manifest = self.latest\n logging.info(f\"Examining components in the latest manifest of {manifest.build.name} ({manifest.build.version})\")\n for component in manifest.components.values():\n if component.name == self.name:\n continue\n\n logging.info(f\"Checking out {component.name}#main\")\n component = component_klass.checkout(\n name=component.name,\n path=os.path.join(work_dir.name, 
component.name),\n opensearch_version=manifest.build.version,\n branch=\"main\",\n )\n\n component_version = component.version\n if component_version:\n release_version = \".\".join(component_version.split(\".\")[:3])\n if release_version not in main_versions.keys():\n main_versions[release_version] = []\n main_versions[release_version].append(component)\n logging.info(f\"{component.name}#main is version {release_version} (from {component_version})\")\n\n # summarize\n logging.info(\"Found versions on main:\")\n for main_version in main_versions.keys():\n for component in main_versions[main_version]:\n logging.info(f\" {component.name}={main_version}\")\n\n # generate new manifests\n for release_version in sorted(main_versions.keys() - known_versions):\n self.write_manifest(release_version, main_versions[release_version])\n self.add_to_cron(release_version)\n\n def create_manifest(self, version: str, components: List = []) -> InputManifest:\n image_map = {\n \"opensearch\": \"opensearchstaging/ci-runner:ci-runner-centos7-opensearch-build-v2\",\n \"opensearch-dashboards\": \"opensearchstaging/ci-runner:ci-runner-centos7-opensearch-dashboards-build-v2\"\n }\n\n data: Dict = {\n \"schema-version\": \"1.0\",\n \"build\": {\n \"name\": self.name,\n \"version\": version\n },\n \"ci\": {\n \"image\": {\n \"name\": image_map[self.prefix]\n }\n },\n \"components\": [],\n }\n\n for component in components:\n logging.info(f\" Adding {component.name}\")\n data[\"components\"].append(component.to_dict())\n\n return InputManifest(data)\n\n def write_manifest(self, version: str, components: List = []) -> None:\n logging.info(f\"Creating new version: {version}\")\n manifest = self.create_manifest(version, components)\n manifest_dir = os.path.join(self.manifests_path(), version)\n os.makedirs(manifest_dir, exist_ok=True)\n manifest_path = os.path.join(manifest_dir, f\"{self.prefix}-{version}.yml\")\n manifest.to_file(manifest_path)\n logging.info(f\"Wrote {manifest_path}\")\n\n def add_to_cron(self, version: str) -> None:\n logging.info(f\"Adding new version to cron: {version}\")\n jenkinsfile = self.cron_jenkinsfile()\n with open(jenkinsfile, \"r\") as f:\n data = f.read()\n\n cron_entry = f\"H 1 * * * %INPUT_MANIFEST={version}/{self.prefix}-{version}.yml;TARGET_JOB_NAME=distribution-build-{self.prefix}\\n\"\n\n if cron_entry in data:\n raise ValueError(f\"{jenkinsfile} already contains an entry for {self.prefix} {version}\")\n\n data = data.replace(\n \"parameterizedCron '''\\n\",\n f\"parameterizedCron '''\\n{' ' * 12}{cron_entry}\"\n )\n\n with open(jenkinsfile, \"w\") as f:\n f.write(data)\n\n logging.info(f\"Wrote {jenkinsfile}\")\n", "path": "src/manifests_workflow/input_manifests.py"}]} | 2,481 | 849 |
gh_patches_debug_39707 | rasdani/github-patches | git_diff | kubeflow__pipelines-4831 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
VolumeOp doesn't support GC after workflow deletion
The PVC created from VolumeOp is not automatically recycled after workflow is deleted.
I wonder if owner reference can work with PVC when it is set with workflow's reference. If it works, we can provide an option in volumeop to set it when creating the PVC.
If it doesn't work, we should provide a way to either use Argo's volume API or support a delete PVC op in exit handler.
VolumeOp doesn't support GC after workflow deletion
The PVC created from VolumeOp is not automatically recycled after workflow is deleted.
I wonder if owner reference can work with PVC when it is set with workflow's reference. If it works, we can provide an option in volumeop to set it when creating the PVC.
If it doesn't work, we should provide a way to either use Argo's volume API or support a delete PVC op in exit handler.
</issue>
<code>
[start of sdk/python/kfp/dsl/_resource_op.py]
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Dict, List, Optional
16 import warnings
17
18 from ._container_op import BaseOp, ContainerOp
19 from . import _pipeline_param
20
21
22 class Resource(object):
23 """
24 A wrapper over Argo ResourceTemplate definition object
25 (io.argoproj.workflow.v1alpha1.ResourceTemplate)
26 which is used to represent the `resource` property in argo's workflow
27 template (io.argoproj.workflow.v1alpha1.Template).
28 """
29 swagger_types = {
30 "action": "str",
31 "merge_strategy": "str",
32 "success_condition": "str",
33 "failure_condition": "str",
34 "manifest": "str",
35 "flags": "list[str]"
36 }
37 openapi_types = {
38 "action": "str",
39 "merge_strategy": "str",
40 "success_condition": "str",
41 "failure_condition": "str",
42 "manifest": "str",
43 "flags": "list[str]"
44 }
45 attribute_map = {
46 "action": "action",
47 "merge_strategy": "mergeStrategy",
48 "success_condition": "successCondition",
49 "failure_condition": "failureCondition",
50 "manifest": "manifest",
51 "flags": "flags"
52 }
53
54 def __init__(self,
55 action: str = None,
56 merge_strategy: str = None,
57 success_condition: str = None,
58 failure_condition: str = None,
59 manifest: str = None,
60 flags: Optional[List[str]] = None):
61 """Create a new instance of Resource"""
62 self.action = action
63 self.merge_strategy = merge_strategy
64 self.success_condition = success_condition
65 self.failure_condition = failure_condition
66 self.manifest = manifest
67 self.flags = flags
68
69
70 class ResourceOp(BaseOp):
71 """Represents an op which will be translated into a resource template.
72
73 TODO(https://github.com/kubeflow/pipelines/issues/4822): Determine the
74 stability level of this feature.
75
76 Args:
77 k8s_resource: A k8s resource which will be submitted to the cluster
78 action: One of "create"/"delete"/"apply"/"patch" (default is "create")
79 merge_strategy: The merge strategy for the "apply" action
80 success_condition: The successCondition of the template
81 failure_condition: The failureCondition of the template
82 For more info see:
83 https://github.com/argoproj/argo/blob/master/examples/k8s-jobs.yaml
84 attribute_outputs: Maps output labels to resource's json paths,
85 similarly to file_outputs of ContainerOp
86 kwargs: name, sidecars. See BaseOp definition
87
88 Raises:
89 ValueError: if not inside a pipeline
90 if the name is an invalid string
91 if no k8s_resource is provided
92 if merge_strategy is set without "apply" action
93 """
94
95 def __init__(self,
96 k8s_resource=None,
97 action: str = "create",
98 merge_strategy: str = None,
99 success_condition: str = None,
100 failure_condition: str = None,
101 attribute_outputs: Optional[Dict[str, str]] = None,
102 flags: Optional[List[str]] = None,
103 **kwargs):
104
105 super().__init__(**kwargs)
106 self.attrs_with_pipelineparams = list(self.attrs_with_pipelineparams)
107 self.attrs_with_pipelineparams.extend(
108 ["_resource", "k8s_resource", "attribute_outputs"])
109
110 if k8s_resource is None:
111 raise ValueError("You need to provide a k8s_resource.")
112
113 if action == "delete":
114 warnings.warn(
115 'Please use `kubernetes_resource_delete_op` instead of '
116 '`ResourceOp(action="delete")`',
117 DeprecationWarning)
118
119 if merge_strategy and action != "apply":
120 raise ValueError("You can't set merge_strategy when action != 'apply'")
121
122 # if action is delete, there should not be any outputs, success_condition,
123 # and failure_condition
124 if action == "delete" and (success_condition or failure_condition or
125 attribute_outputs):
126 raise ValueError(
127 "You can't set success_condition, failure_condition, or "
128 "attribute_outputs when action == 'delete'"
129 )
130
131 if action == "delete" and flags is None:
132 flags = ["--wait=false"]
133 init_resource = {
134 "action": action,
135 "merge_strategy": merge_strategy,
136 "success_condition": success_condition,
137 "failure_condition": failure_condition,
138 "flags": flags
139 }
140 # `resource` prop in `io.argoproj.workflow.v1alpha1.Template`
141 self._resource = Resource(**init_resource)
142
143 self.k8s_resource = k8s_resource
144
145 # if action is delete, there should not be any outputs, success_condition,
146 # and failure_condition
147 if action == "delete":
148 self.attribute_outputs = {}
149 self.outputs = {}
150 self.output = None
151 return
152
153 # Set attribute_outputs
154 extra_attribute_outputs = \
155 attribute_outputs if attribute_outputs else {}
156 self.attribute_outputs = \
157 self.attribute_outputs if hasattr(self, "attribute_outputs") \
158 else {}
159 self.attribute_outputs.update(extra_attribute_outputs)
160 # Add name and manifest if not specified by the user
161 if "name" not in self.attribute_outputs:
162 self.attribute_outputs["name"] = "{.metadata.name}"
163 if "manifest" not in self.attribute_outputs:
164 self.attribute_outputs["manifest"] = "{}"
165
166 # Set outputs
167 self.outputs = {
168 name: _pipeline_param.PipelineParam(name, op_name=self.name)
169 for name in self.attribute_outputs.keys()
170 }
171 # If user set a single attribute_output, set self.output as that
172 # parameter, else set it as the resource name
173 self.output = self.outputs["name"]
174 if len(extra_attribute_outputs) == 1:
175 self.output = self.outputs[list(extra_attribute_outputs)[0]]
176
177 @property
178 def resource(self):
179 """`Resource` object that represents the `resource` property in `io.argoproj.workflow.v1alpha1.Template`.
180 """
181 return self._resource
182
183 def delete(self, flags: Optional[List[str]] = None):
184 """Returns a ResourceOp which deletes the resource."""
185 if self.resource.action == "delete":
186 raise ValueError("This operation is already a resource deletion.")
187
188 if isinstance(self.k8s_resource, dict):
189 kind = self.k8s_resource["kind"]
190 else:
191 kind = self.k8s_resource.kind
192
193 return kubernetes_resource_delete_op(
194 name=self.outputs["name"], kind=kind, flags=flags or ["--wait=false"])
195
196
197 def kubernetes_resource_delete_op(
198 name: str,
199 kind: str,
200 namespace: str = None,
201 flags: Optional[List[str]] = None,
202 ) -> ContainerOp:
203 """Operation that deletes a Kubernetes resource.
204
205 Outputs:
206 name: The name of the deleted resource
207 """
208
209 command = [
210 "kubectl", "delete",
211 str(kind),
212 str(name), "--ignore-not-found", "--output", "name"
213 ]
214 if namespace:
215 command.extend(["--namespace", str(namespace)])
216 if flags:
217 command.extend(flags)
218
219 result = ContainerOp(
220 name="kubernetes_resource_delete",
221 image="gcr.io/cloud-builders/kubectl",
222 command=command,
223 )
224 return result
225
[end of sdk/python/kfp/dsl/_resource_op.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/kfp/dsl/_resource_op.py b/sdk/python/kfp/dsl/_resource_op.py
--- a/sdk/python/kfp/dsl/_resource_op.py
+++ b/sdk/python/kfp/dsl/_resource_op.py
@@ -31,6 +31,7 @@
"merge_strategy": "str",
"success_condition": "str",
"failure_condition": "str",
+ "set_owner_reference": "bool",
"manifest": "str",
"flags": "list[str]"
}
@@ -39,6 +40,7 @@
"merge_strategy": "str",
"success_condition": "str",
"failure_condition": "str",
+ "set_owner_reference": "bool",
"manifest": "str",
"flags": "list[str]"
}
@@ -47,6 +49,7 @@
"merge_strategy": "mergeStrategy",
"success_condition": "successCondition",
"failure_condition": "failureCondition",
+ "set_owner_reference": "setOwnerReference",
"manifest": "manifest",
"flags": "flags"
}
@@ -56,6 +59,7 @@
merge_strategy: str = None,
success_condition: str = None,
failure_condition: str = None,
+ set_owner_reference: bool = None,
manifest: str = None,
flags: Optional[List[str]] = None):
"""Create a new instance of Resource"""
@@ -63,6 +67,7 @@
self.merge_strategy = merge_strategy
self.success_condition = success_condition
self.failure_condition = failure_condition
+ self.set_owner_reference = set_owner_reference
self.manifest = manifest
self.flags = flags
@@ -98,6 +103,7 @@
merge_strategy: str = None,
success_condition: str = None,
failure_condition: str = None,
+ set_owner_reference: bool = None,
attribute_outputs: Optional[Dict[str, str]] = None,
flags: Optional[List[str]] = None,
**kwargs):
@@ -135,6 +141,7 @@
"merge_strategy": merge_strategy,
"success_condition": success_condition,
"failure_condition": failure_condition,
+ "set_owner_reference": set_owner_reference,
"flags": flags
}
# `resource` prop in `io.argoproj.workflow.v1alpha1.Template`
| {"golden_diff": "diff --git a/sdk/python/kfp/dsl/_resource_op.py b/sdk/python/kfp/dsl/_resource_op.py\n--- a/sdk/python/kfp/dsl/_resource_op.py\n+++ b/sdk/python/kfp/dsl/_resource_op.py\n@@ -31,6 +31,7 @@\n \"merge_strategy\": \"str\",\n \"success_condition\": \"str\",\n \"failure_condition\": \"str\",\n+ \"set_owner_reference\": \"bool\",\n \"manifest\": \"str\",\n \"flags\": \"list[str]\"\n }\n@@ -39,6 +40,7 @@\n \"merge_strategy\": \"str\",\n \"success_condition\": \"str\",\n \"failure_condition\": \"str\",\n+ \"set_owner_reference\": \"bool\",\n \"manifest\": \"str\",\n \"flags\": \"list[str]\"\n }\n@@ -47,6 +49,7 @@\n \"merge_strategy\": \"mergeStrategy\",\n \"success_condition\": \"successCondition\",\n \"failure_condition\": \"failureCondition\",\n+ \"set_owner_reference\": \"setOwnerReference\",\n \"manifest\": \"manifest\",\n \"flags\": \"flags\"\n }\n@@ -56,6 +59,7 @@\n merge_strategy: str = None,\n success_condition: str = None,\n failure_condition: str = None,\n+ set_owner_reference: bool = None,\n manifest: str = None,\n flags: Optional[List[str]] = None):\n \"\"\"Create a new instance of Resource\"\"\"\n@@ -63,6 +67,7 @@\n self.merge_strategy = merge_strategy\n self.success_condition = success_condition\n self.failure_condition = failure_condition\n+ self.set_owner_reference = set_owner_reference\n self.manifest = manifest\n self.flags = flags\n \n@@ -98,6 +103,7 @@\n merge_strategy: str = None,\n success_condition: str = None,\n failure_condition: str = None,\n+ set_owner_reference: bool = None,\n attribute_outputs: Optional[Dict[str, str]] = None,\n flags: Optional[List[str]] = None,\n **kwargs):\n@@ -135,6 +141,7 @@\n \"merge_strategy\": merge_strategy,\n \"success_condition\": success_condition,\n \"failure_condition\": failure_condition,\n+ \"set_owner_reference\": set_owner_reference,\n \"flags\": flags\n }\n # `resource` prop in `io.argoproj.workflow.v1alpha1.Template`\n", "issue": "VolumeOp doesn't support GC after workflow deletion\nThe PVC created from VolumeOp is not automatically recycled after workflow is deleted.\r\n\r\nI wonder if owner reference can work with PVC when it is set with workflow's reference. If it works, we can provide an option in volumeop to set it when creating the PVC.\r\n\r\nIf it doesn't work, we should provide a way to either use Argo's volume API or support a delete PVC op in exit handler.\nVolumeOp doesn't support GC after workflow deletion\nThe PVC created from VolumeOp is not automatically recycled after workflow is deleted.\r\n\r\nI wonder if owner reference can work with PVC when it is set with workflow's reference. 
If it works, we can provide an option in volumeop to set it when creating the PVC.\r\n\r\nIf it doesn't work, we should provide a way to either use Argo's volume API or support a delete PVC op in exit handler.\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, List, Optional\nimport warnings\n\nfrom ._container_op import BaseOp, ContainerOp\nfrom . import _pipeline_param\n\n\nclass Resource(object):\n \"\"\"\n A wrapper over Argo ResourceTemplate definition object\n (io.argoproj.workflow.v1alpha1.ResourceTemplate)\n which is used to represent the `resource` property in argo's workflow\n template (io.argoproj.workflow.v1alpha1.Template).\n \"\"\"\n swagger_types = {\n \"action\": \"str\",\n \"merge_strategy\": \"str\",\n \"success_condition\": \"str\",\n \"failure_condition\": \"str\",\n \"manifest\": \"str\",\n \"flags\": \"list[str]\"\n }\n openapi_types = {\n \"action\": \"str\",\n \"merge_strategy\": \"str\",\n \"success_condition\": \"str\",\n \"failure_condition\": \"str\",\n \"manifest\": \"str\",\n \"flags\": \"list[str]\"\n }\n attribute_map = {\n \"action\": \"action\",\n \"merge_strategy\": \"mergeStrategy\",\n \"success_condition\": \"successCondition\",\n \"failure_condition\": \"failureCondition\",\n \"manifest\": \"manifest\",\n \"flags\": \"flags\"\n }\n\n def __init__(self,\n action: str = None,\n merge_strategy: str = None,\n success_condition: str = None,\n failure_condition: str = None,\n manifest: str = None,\n flags: Optional[List[str]] = None):\n \"\"\"Create a new instance of Resource\"\"\"\n self.action = action\n self.merge_strategy = merge_strategy\n self.success_condition = success_condition\n self.failure_condition = failure_condition\n self.manifest = manifest\n self.flags = flags\n\n\nclass ResourceOp(BaseOp):\n \"\"\"Represents an op which will be translated into a resource template.\n\n TODO(https://github.com/kubeflow/pipelines/issues/4822): Determine the\n stability level of this feature.\n\n Args:\n k8s_resource: A k8s resource which will be submitted to the cluster\n action: One of \"create\"/\"delete\"/\"apply\"/\"patch\" (default is \"create\")\n merge_strategy: The merge strategy for the \"apply\" action\n success_condition: The successCondition of the template\n failure_condition: The failureCondition of the template\n For more info see:\n https://github.com/argoproj/argo/blob/master/examples/k8s-jobs.yaml\n attribute_outputs: Maps output labels to resource's json paths,\n similarly to file_outputs of ContainerOp\n kwargs: name, sidecars. 
See BaseOp definition\n\n Raises:\n ValueError: if not inside a pipeline\n if the name is an invalid string\n if no k8s_resource is provided\n if merge_strategy is set without \"apply\" action\n \"\"\"\n\n def __init__(self,\n k8s_resource=None,\n action: str = \"create\",\n merge_strategy: str = None,\n success_condition: str = None,\n failure_condition: str = None,\n attribute_outputs: Optional[Dict[str, str]] = None,\n flags: Optional[List[str]] = None,\n **kwargs):\n\n super().__init__(**kwargs)\n self.attrs_with_pipelineparams = list(self.attrs_with_pipelineparams)\n self.attrs_with_pipelineparams.extend(\n [\"_resource\", \"k8s_resource\", \"attribute_outputs\"])\n\n if k8s_resource is None:\n raise ValueError(\"You need to provide a k8s_resource.\")\n\n if action == \"delete\":\n warnings.warn(\n 'Please use `kubernetes_resource_delete_op` instead of '\n '`ResourceOp(action=\"delete\")`',\n DeprecationWarning)\n\n if merge_strategy and action != \"apply\":\n raise ValueError(\"You can't set merge_strategy when action != 'apply'\")\n\n # if action is delete, there should not be any outputs, success_condition,\n # and failure_condition\n if action == \"delete\" and (success_condition or failure_condition or\n attribute_outputs):\n raise ValueError(\n \"You can't set success_condition, failure_condition, or \"\n \"attribute_outputs when action == 'delete'\"\n )\n\n if action == \"delete\" and flags is None:\n flags = [\"--wait=false\"]\n init_resource = {\n \"action\": action,\n \"merge_strategy\": merge_strategy,\n \"success_condition\": success_condition,\n \"failure_condition\": failure_condition,\n \"flags\": flags\n }\n # `resource` prop in `io.argoproj.workflow.v1alpha1.Template`\n self._resource = Resource(**init_resource)\n\n self.k8s_resource = k8s_resource\n\n # if action is delete, there should not be any outputs, success_condition,\n # and failure_condition\n if action == \"delete\":\n self.attribute_outputs = {}\n self.outputs = {}\n self.output = None\n return\n\n # Set attribute_outputs\n extra_attribute_outputs = \\\n attribute_outputs if attribute_outputs else {}\n self.attribute_outputs = \\\n self.attribute_outputs if hasattr(self, \"attribute_outputs\") \\\n else {}\n self.attribute_outputs.update(extra_attribute_outputs)\n # Add name and manifest if not specified by the user\n if \"name\" not in self.attribute_outputs:\n self.attribute_outputs[\"name\"] = \"{.metadata.name}\"\n if \"manifest\" not in self.attribute_outputs:\n self.attribute_outputs[\"manifest\"] = \"{}\"\n\n # Set outputs\n self.outputs = {\n name: _pipeline_param.PipelineParam(name, op_name=self.name)\n for name in self.attribute_outputs.keys()\n }\n # If user set a single attribute_output, set self.output as that\n # parameter, else set it as the resource name\n self.output = self.outputs[\"name\"]\n if len(extra_attribute_outputs) == 1:\n self.output = self.outputs[list(extra_attribute_outputs)[0]]\n\n @property\n def resource(self):\n \"\"\"`Resource` object that represents the `resource` property in `io.argoproj.workflow.v1alpha1.Template`.\n \"\"\"\n return self._resource\n\n def delete(self, flags: Optional[List[str]] = None):\n \"\"\"Returns a ResourceOp which deletes the resource.\"\"\"\n if self.resource.action == \"delete\":\n raise ValueError(\"This operation is already a resource deletion.\")\n\n if isinstance(self.k8s_resource, dict):\n kind = self.k8s_resource[\"kind\"]\n else:\n kind = self.k8s_resource.kind\n\n return kubernetes_resource_delete_op(\n name=self.outputs[\"name\"], 
kind=kind, flags=flags or [\"--wait=false\"])\n\n\ndef kubernetes_resource_delete_op(\n name: str,\n kind: str,\n namespace: str = None,\n flags: Optional[List[str]] = None,\n) -> ContainerOp:\n \"\"\"Operation that deletes a Kubernetes resource.\n\n Outputs:\n name: The name of the deleted resource\n \"\"\"\n\n command = [\n \"kubectl\", \"delete\",\n str(kind),\n str(name), \"--ignore-not-found\", \"--output\", \"name\"\n ]\n if namespace:\n command.extend([\"--namespace\", str(namespace)])\n if flags:\n command.extend(flags)\n\n result = ContainerOp(\n name=\"kubernetes_resource_delete\",\n image=\"gcr.io/cloud-builders/kubectl\",\n command=command,\n )\n return result\n", "path": "sdk/python/kfp/dsl/_resource_op.py"}]} | 3,008 | 528 |
gh_patches_debug_1984 | rasdani/github-patches | git_diff | mkdocs__mkdocs-694 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
current_page.ancestors only contains direct ancestor and not the full path of the page
I'm using the mkdocs theme and tried to enhance it with a breadcrumb trail. The page navigation is created automatically by mkdocs (I don't use the pages confguration since I have almost 300 pages).
I copied and adapted the `breadcrumbs.html` file from the readthedocs theme and integrated it in `content.html`:
```
<ol class="breadcrumb">
<li><a href="{{ homepage_url }}">Docs</a></li>
{% if current_page %}
{% for doc in current_page.ancestors %}
{% if doc.link %}
<li><a href="{{ doc.link|e }}">{{ doc.title }}</a></li>
{% else %}
<li>{{ doc.title }}</li>
{% endif %}
{% endfor %}
{% endif %}
{% if current_page %}<li>{{ current_page.title }}</li>{% endif %}
</ol>
```
My file path (starting from the `docs_dir`) is:
`beheerteam/diensten/algemeen/ActiveDirectory.md`
The generated breadcrumb trail is:
`Docs/algemeen/ActiveDirectory`
`algemeen` is the only part that originates from the loop `for doc in current_page.ancestors`.
Maybe this is a stupid question or it is just not possible, but I couldn't find i in the documentation and I'm just starting with mkdocs and couldn't understand the source on how this works.
</issue>
<code>
[start of mkdocs/nav.py]
1 # coding: utf-8
2
3 """
4 Deals with generating the site-wide navigation.
5
6 This consists of building a set of interlinked page and header objects.
7 """
8
9 from __future__ import unicode_literals
10 import datetime
11 import logging
12 import os
13
14 from mkdocs import utils, exceptions
15
16 log = logging.getLogger(__name__)
17
18
19 def filename_to_title(filename):
20 """
21 Automatically generate a default title, given a filename.
22 """
23 if utils.is_homepage(filename):
24 return 'Home'
25
26 return utils.filename_to_title(filename)
27
28
29 class SiteNavigation(object):
30 def __init__(self, pages_config, use_directory_urls=True):
31 self.url_context = URLContext()
32 self.file_context = FileContext()
33 self.nav_items, self.pages = _generate_site_navigation(
34 pages_config, self.url_context, use_directory_urls)
35 self.homepage = self.pages[0] if self.pages else None
36 self.use_directory_urls = use_directory_urls
37
38 def __str__(self):
39 return ''.join([str(item) for item in self])
40
41 def __iter__(self):
42 return iter(self.nav_items)
43
44 def walk_pages(self):
45 """
46 Returns each page in the site in turn.
47
48 Additionally this sets the active status of the pages and headers,
49 in the site navigation, so that the rendered navbar can correctly
50 highlight the currently active page and/or header item.
51 """
52 page = self.homepage
53 page.set_active()
54 self.url_context.set_current_url(page.abs_url)
55 self.file_context.set_current_path(page.input_path)
56 yield page
57 while page.next_page:
58 page.set_active(False)
59 page = page.next_page
60 page.set_active()
61 self.url_context.set_current_url(page.abs_url)
62 self.file_context.set_current_path(page.input_path)
63 yield page
64 page.set_active(False)
65
66 @property
67 def source_files(self):
68 if not hasattr(self, '_source_files'):
69 self._source_files = set([page.input_path for page in self.pages])
70 return self._source_files
71
72
73 class URLContext(object):
74 """
75 The URLContext is used to ensure that we can generate the appropriate
76 relative URLs to other pages from any given page in the site.
77
78 We use relative URLs so that static sites can be deployed to any location
79 without having to specify what the path component on the host will be
80 if the documentation is not hosted at the root path.
81 """
82
83 def __init__(self):
84 self.base_path = '/'
85
86 def set_current_url(self, current_url):
87 self.base_path = os.path.dirname(current_url)
88
89 def make_relative(self, url):
90 """
91 Given a URL path return it as a relative URL,
92 given the context of the current page.
93 """
94 suffix = '/' if (url.endswith('/') and len(url) > 1) else ''
95 # Workaround for bug on `os.path.relpath()` in Python 2.6
96 if self.base_path == '/':
97 if url == '/':
98 # Workaround for static assets
99 return '.'
100 return url.lstrip('/')
101 # Under Python 2.6, relative_path adds an extra '/' at the end.
102 relative_path = os.path.relpath(url, start=self.base_path)
103 relative_path = relative_path.rstrip('/') + suffix
104
105 return utils.path_to_url(relative_path)
106
107
108 class FileContext(object):
109 """
110 The FileContext is used to ensure that we can generate the appropriate
111 full path for other pages given their relative path from a particular page.
112
113 This is used when we have relative hyperlinks in the documentation, so that
114 we can ensure that they point to markdown documents that actually exist
115 in the `pages` config.
116 """
117 def __init__(self):
118 self.current_file = None
119 self.base_path = ''
120
121 def set_current_path(self, current_path):
122 self.current_file = current_path
123 self.base_path = os.path.dirname(current_path)
124
125 def make_absolute(self, path):
126 """
127 Given a relative file path return it as a POSIX-style
128 absolute filepath, given the context of the current page.
129 """
130 return os.path.normpath(os.path.join(self.base_path, path))
131
132
133 class Page(object):
134 def __init__(self, title, url, path, url_context):
135
136 self.title = title
137 self.abs_url = url
138 self.active = False
139 self.url_context = url_context
140 self.update_date = datetime.datetime.now().strftime("%Y-%m-%d")
141
142 # Relative paths to the input markdown file and output html file.
143 self.input_path = path
144 self.output_path = utils.get_html_path(path)
145
146 # Links to related pages
147 self.previous_page = None
148 self.next_page = None
149 self.ancestors = []
150
151 @property
152 def url(self):
153 return self.url_context.make_relative(self.abs_url)
154
155 @property
156 def is_homepage(self):
157 return utils.is_homepage(self.input_path)
158
159 @property
160 def is_top_level(self):
161 return len(self.ancestors) == 0
162
163 def __str__(self):
164 return self.indent_print()
165
166 def indent_print(self, depth=0):
167 indent = ' ' * depth
168 active_marker = ' [*]' if self.active else ''
169 title = self.title if (self.title is not None) else '[blank]'
170 return '%s%s - %s%s\n' % (indent, title, self.abs_url, active_marker)
171
172 def set_active(self, active=True):
173 self.active = active
174 for ancestor in self.ancestors:
175 ancestor.set_active(active)
176
177
178 class Header(object):
179 def __init__(self, title, children):
180 self.title, self.children = title, children
181 self.active = False
182 self.ancestors = []
183
184 def __str__(self):
185 return self.indent_print()
186
187 @property
188 def is_top_level(self):
189 return len(self.ancestors) == 0
190
191 def indent_print(self, depth=0):
192 indent = ' ' * depth
193 active_marker = ' [*]' if self.active else ''
194 ret = '%s%s%s\n' % (indent, self.title, active_marker)
195 for item in self.children:
196 ret += item.indent_print(depth + 1)
197 return ret
198
199 def set_active(self, active=True):
200 self.active = active
201 for ancestor in self.ancestors:
202 ancestor.set_active(active)
203
204
205 def _path_to_page(path, title, url_context, use_directory_urls):
206 if title is None:
207 title = filename_to_title(path.split(os.path.sep)[-1])
208 url = utils.get_url_path(path, use_directory_urls)
209 return Page(title=title, url=url, path=path,
210 url_context=url_context)
211
212
213 def _follow(config_line, url_context, use_dir_urls, header=None, title=None):
214
215 if isinstance(config_line, utils.string_types):
216 path = os.path.normpath(config_line)
217 page = _path_to_page(path, title, url_context, use_dir_urls)
218
219 if header:
220 page.ancestors = [header]
221 header.children.append(page)
222
223 yield page
224 raise StopIteration
225
226 elif not isinstance(config_line, dict):
227 msg = ("Line in 'page' config is of type {0}, dict or string "
228 "expected. Config: {1}").format(type(config_line), config_line)
229 raise exceptions.ConfigurationError(msg)
230
231 if len(config_line) > 1:
232 raise exceptions.ConfigurationError(
233 "Page configs should be in the format 'name: markdown.md'. The "
234 "config contains an invalid entry: {0}".format(config_line))
235 elif len(config_line) == 0:
236 log.warning("Ignoring empty line in the pages config.")
237 raise StopIteration
238
239 next_cat_or_title, subpages_or_path = next(iter(config_line.items()))
240
241 if isinstance(subpages_or_path, utils.string_types):
242 path = subpages_or_path
243 for sub in _follow(path, url_context, use_dir_urls, header=header, title=next_cat_or_title):
244 yield sub
245 raise StopIteration
246
247 elif not isinstance(subpages_or_path, list):
248 msg = ("Line in 'page' config is of type {0}, list or string "
249 "expected for sub pages. Config: {1}"
250 ).format(type(config_line), config_line)
251 raise exceptions.ConfigurationError(msg)
252
253 next_header = Header(title=next_cat_or_title, children=[])
254 if header:
255 next_header.ancestors = [header]
256 header.children.append(next_header)
257 yield next_header
258
259 subpages = subpages_or_path
260
261 for subpage in subpages:
262 for sub in _follow(subpage, url_context, use_dir_urls, next_header):
263 yield sub
264
265
266 def _generate_site_navigation(pages_config, url_context, use_dir_urls=True):
267 """
268 Returns a list of Page and Header instances that represent the
269 top level site navigation.
270 """
271 nav_items = []
272 pages = []
273
274 previous = None
275
276 for config_line in pages_config:
277
278 for page_or_header in _follow(
279 config_line, url_context, use_dir_urls):
280
281 if isinstance(page_or_header, Header):
282
283 if page_or_header.is_top_level:
284 nav_items.append(page_or_header)
285
286 elif isinstance(page_or_header, Page):
287
288 if page_or_header.is_top_level:
289 nav_items.append(page_or_header)
290
291 pages.append(page_or_header)
292
293 if previous:
294 page_or_header.previous_page = previous
295 previous.next_page = page_or_header
296 previous = page_or_header
297
298 if len(pages) == 0:
299 raise exceptions.ConfigurationError(
300 "No pages found in the pages config. "
301 "Remove it entirely to enable automatic page discovery.")
302
303 return (nav_items, pages)
304
[end of mkdocs/nav.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mkdocs/nav.py b/mkdocs/nav.py
--- a/mkdocs/nav.py
+++ b/mkdocs/nav.py
@@ -217,7 +217,7 @@
page = _path_to_page(path, title, url_context, use_dir_urls)
if header:
- page.ancestors = [header]
+ page.ancestors = header.ancestors + [header, ]
header.children.append(page)
yield page
| {"golden_diff": "diff --git a/mkdocs/nav.py b/mkdocs/nav.py\n--- a/mkdocs/nav.py\n+++ b/mkdocs/nav.py\n@@ -217,7 +217,7 @@\n page = _path_to_page(path, title, url_context, use_dir_urls)\n \n if header:\n- page.ancestors = [header]\n+ page.ancestors = header.ancestors + [header, ]\n header.children.append(page)\n \n yield page\n", "issue": "current_page.ancestors only contains direct ancestor and not the full path of the page\nI'm using the mkdocs theme and tried to enhance it with a breadcrumb trail. The page navigation is created automatically by mkdocs (I don't use the pages confguration since I have almost 300 pages).\nI copied and adapted the `breadcrumbs.html` file from the readthedocs theme and integrated it in `content.html`:\n\n```\n<ol class=\"breadcrumb\">\n<li><a href=\"{{ homepage_url }}\">Docs</a></li>\n{% if current_page %}\n {% for doc in current_page.ancestors %}\n {% if doc.link %}\n <li><a href=\"{{ doc.link|e }}\">{{ doc.title }}</a></li>\n {% else %}\n <li>{{ doc.title }}</li>\n {% endif %}\n {% endfor %}\n{% endif %}\n{% if current_page %}<li>{{ current_page.title }}</li>{% endif %}\n</ol>\n```\n\nMy file path (starting from the `docs_dir`) is:\n`beheerteam/diensten/algemeen/ActiveDirectory.md`\nThe generated breadcrumb trail is:\n`Docs/algemeen/ActiveDirectory`\n\n`algemeen` is the only part that originates from the loop `for doc in current_page.ancestors`.\n\nMaybe this is a stupid question or it is just not possible, but I couldn't find i in the documentation and I'm just starting with mkdocs and couldn't understand the source on how this works.\n\n", "before_files": [{"content": "# coding: utf-8\n\n\"\"\"\nDeals with generating the site-wide navigation.\n\nThis consists of building a set of interlinked page and header objects.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport datetime\nimport logging\nimport os\n\nfrom mkdocs import utils, exceptions\n\nlog = logging.getLogger(__name__)\n\n\ndef filename_to_title(filename):\n \"\"\"\n Automatically generate a default title, given a filename.\n \"\"\"\n if utils.is_homepage(filename):\n return 'Home'\n\n return utils.filename_to_title(filename)\n\n\nclass SiteNavigation(object):\n def __init__(self, pages_config, use_directory_urls=True):\n self.url_context = URLContext()\n self.file_context = FileContext()\n self.nav_items, self.pages = _generate_site_navigation(\n pages_config, self.url_context, use_directory_urls)\n self.homepage = self.pages[0] if self.pages else None\n self.use_directory_urls = use_directory_urls\n\n def __str__(self):\n return ''.join([str(item) for item in self])\n\n def __iter__(self):\n return iter(self.nav_items)\n\n def walk_pages(self):\n \"\"\"\n Returns each page in the site in turn.\n\n Additionally this sets the active status of the pages and headers,\n in the site navigation, so that the rendered navbar can correctly\n highlight the currently active page and/or header item.\n \"\"\"\n page = self.homepage\n page.set_active()\n self.url_context.set_current_url(page.abs_url)\n self.file_context.set_current_path(page.input_path)\n yield page\n while page.next_page:\n page.set_active(False)\n page = page.next_page\n page.set_active()\n self.url_context.set_current_url(page.abs_url)\n self.file_context.set_current_path(page.input_path)\n yield page\n page.set_active(False)\n\n @property\n def source_files(self):\n if not hasattr(self, '_source_files'):\n self._source_files = set([page.input_path for page in self.pages])\n return self._source_files\n\n\nclass 
URLContext(object):\n \"\"\"\n The URLContext is used to ensure that we can generate the appropriate\n relative URLs to other pages from any given page in the site.\n\n We use relative URLs so that static sites can be deployed to any location\n without having to specify what the path component on the host will be\n if the documentation is not hosted at the root path.\n \"\"\"\n\n def __init__(self):\n self.base_path = '/'\n\n def set_current_url(self, current_url):\n self.base_path = os.path.dirname(current_url)\n\n def make_relative(self, url):\n \"\"\"\n Given a URL path return it as a relative URL,\n given the context of the current page.\n \"\"\"\n suffix = '/' if (url.endswith('/') and len(url) > 1) else ''\n # Workaround for bug on `os.path.relpath()` in Python 2.6\n if self.base_path == '/':\n if url == '/':\n # Workaround for static assets\n return '.'\n return url.lstrip('/')\n # Under Python 2.6, relative_path adds an extra '/' at the end.\n relative_path = os.path.relpath(url, start=self.base_path)\n relative_path = relative_path.rstrip('/') + suffix\n\n return utils.path_to_url(relative_path)\n\n\nclass FileContext(object):\n \"\"\"\n The FileContext is used to ensure that we can generate the appropriate\n full path for other pages given their relative path from a particular page.\n\n This is used when we have relative hyperlinks in the documentation, so that\n we can ensure that they point to markdown documents that actually exist\n in the `pages` config.\n \"\"\"\n def __init__(self):\n self.current_file = None\n self.base_path = ''\n\n def set_current_path(self, current_path):\n self.current_file = current_path\n self.base_path = os.path.dirname(current_path)\n\n def make_absolute(self, path):\n \"\"\"\n Given a relative file path return it as a POSIX-style\n absolute filepath, given the context of the current page.\n \"\"\"\n return os.path.normpath(os.path.join(self.base_path, path))\n\n\nclass Page(object):\n def __init__(self, title, url, path, url_context):\n\n self.title = title\n self.abs_url = url\n self.active = False\n self.url_context = url_context\n self.update_date = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n\n # Relative paths to the input markdown file and output html file.\n self.input_path = path\n self.output_path = utils.get_html_path(path)\n\n # Links to related pages\n self.previous_page = None\n self.next_page = None\n self.ancestors = []\n\n @property\n def url(self):\n return self.url_context.make_relative(self.abs_url)\n\n @property\n def is_homepage(self):\n return utils.is_homepage(self.input_path)\n\n @property\n def is_top_level(self):\n return len(self.ancestors) == 0\n\n def __str__(self):\n return self.indent_print()\n\n def indent_print(self, depth=0):\n indent = ' ' * depth\n active_marker = ' [*]' if self.active else ''\n title = self.title if (self.title is not None) else '[blank]'\n return '%s%s - %s%s\\n' % (indent, title, self.abs_url, active_marker)\n\n def set_active(self, active=True):\n self.active = active\n for ancestor in self.ancestors:\n ancestor.set_active(active)\n\n\nclass Header(object):\n def __init__(self, title, children):\n self.title, self.children = title, children\n self.active = False\n self.ancestors = []\n\n def __str__(self):\n return self.indent_print()\n\n @property\n def is_top_level(self):\n return len(self.ancestors) == 0\n\n def indent_print(self, depth=0):\n indent = ' ' * depth\n active_marker = ' [*]' if self.active else ''\n ret = '%s%s%s\\n' % (indent, self.title, active_marker)\n for item in 
self.children:\n ret += item.indent_print(depth + 1)\n return ret\n\n def set_active(self, active=True):\n self.active = active\n for ancestor in self.ancestors:\n ancestor.set_active(active)\n\n\ndef _path_to_page(path, title, url_context, use_directory_urls):\n if title is None:\n title = filename_to_title(path.split(os.path.sep)[-1])\n url = utils.get_url_path(path, use_directory_urls)\n return Page(title=title, url=url, path=path,\n url_context=url_context)\n\n\ndef _follow(config_line, url_context, use_dir_urls, header=None, title=None):\n\n if isinstance(config_line, utils.string_types):\n path = os.path.normpath(config_line)\n page = _path_to_page(path, title, url_context, use_dir_urls)\n\n if header:\n page.ancestors = [header]\n header.children.append(page)\n\n yield page\n raise StopIteration\n\n elif not isinstance(config_line, dict):\n msg = (\"Line in 'page' config is of type {0}, dict or string \"\n \"expected. Config: {1}\").format(type(config_line), config_line)\n raise exceptions.ConfigurationError(msg)\n\n if len(config_line) > 1:\n raise exceptions.ConfigurationError(\n \"Page configs should be in the format 'name: markdown.md'. The \"\n \"config contains an invalid entry: {0}\".format(config_line))\n elif len(config_line) == 0:\n log.warning(\"Ignoring empty line in the pages config.\")\n raise StopIteration\n\n next_cat_or_title, subpages_or_path = next(iter(config_line.items()))\n\n if isinstance(subpages_or_path, utils.string_types):\n path = subpages_or_path\n for sub in _follow(path, url_context, use_dir_urls, header=header, title=next_cat_or_title):\n yield sub\n raise StopIteration\n\n elif not isinstance(subpages_or_path, list):\n msg = (\"Line in 'page' config is of type {0}, list or string \"\n \"expected for sub pages. Config: {1}\"\n ).format(type(config_line), config_line)\n raise exceptions.ConfigurationError(msg)\n\n next_header = Header(title=next_cat_or_title, children=[])\n if header:\n next_header.ancestors = [header]\n header.children.append(next_header)\n yield next_header\n\n subpages = subpages_or_path\n\n for subpage in subpages:\n for sub in _follow(subpage, url_context, use_dir_urls, next_header):\n yield sub\n\n\ndef _generate_site_navigation(pages_config, url_context, use_dir_urls=True):\n \"\"\"\n Returns a list of Page and Header instances that represent the\n top level site navigation.\n \"\"\"\n nav_items = []\n pages = []\n\n previous = None\n\n for config_line in pages_config:\n\n for page_or_header in _follow(\n config_line, url_context, use_dir_urls):\n\n if isinstance(page_or_header, Header):\n\n if page_or_header.is_top_level:\n nav_items.append(page_or_header)\n\n elif isinstance(page_or_header, Page):\n\n if page_or_header.is_top_level:\n nav_items.append(page_or_header)\n\n pages.append(page_or_header)\n\n if previous:\n page_or_header.previous_page = previous\n previous.next_page = page_or_header\n previous = page_or_header\n\n if len(pages) == 0:\n raise exceptions.ConfigurationError(\n \"No pages found in the pages config. \"\n \"Remove it entirely to enable automatic page discovery.\")\n\n return (nav_items, pages)\n", "path": "mkdocs/nav.py"}]} | 3,793 | 109 |
gh_patches_debug_25459 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-948 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
anytime_fitness.py null values
The scraper currently includes address2 whether it is null or not, resulting in ", None" being appended to many (most?) of the addr:full fields.
</issue>
<code>
[start of locations/spiders/anytime_fitness.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 from locations.items import GeojsonPointItem
4 import json
5
6 class AnytimeFitnessSpider(scrapy.Spider):
7 name = 'anytime_fitness'
8 allowed_domains = ['www.anytimefitness.com']
9
10 def start_requests(self):
11 url = 'https://www.anytimefitness.com/wp-content/uploads/gyms.json'
12 yield scrapy.Request(url, callback=self.parse)
13
14 def parse(self, response):
15 gyms = json.loads(response.body_as_unicode())
16
17 for gym in gyms:
18 yield GeojsonPointItem(
19 lat = gym['latitude'],
20 lon = gym['longitude'],
21 addr_full = '{}, {}'.format(gym['content']['address'], gym['content']['address2']),
22 city = gym['content']['city'],
23 phone = gym['content']['phone'],
24 state = gym['content']['state_abbr'],
25 postcode = gym['content']['zip'],
26 ref = gym['content']['url'],
27 country = gym['content']['country']
28 )
29
30
31
32
[end of locations/spiders/anytime_fitness.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/anytime_fitness.py b/locations/spiders/anytime_fitness.py
--- a/locations/spiders/anytime_fitness.py
+++ b/locations/spiders/anytime_fitness.py
@@ -2,6 +2,8 @@
import scrapy
from locations.items import GeojsonPointItem
import json
+import html
+
class AnytimeFitnessSpider(scrapy.Spider):
name = 'anytime_fitness'
@@ -18,14 +20,13 @@
yield GeojsonPointItem(
lat = gym['latitude'],
lon = gym['longitude'],
- addr_full = '{}, {}'.format(gym['content']['address'], gym['content']['address2']),
+ addr_full = ", ".join(filter(None, [gym['content']['address'], gym['content']['address2']])),
city = gym['content']['city'],
phone = gym['content']['phone'],
state = gym['content']['state_abbr'],
postcode = gym['content']['zip'],
ref = gym['content']['url'],
- country = gym['content']['country']
+ country = gym['content']['country'],
+ name = html.unescape(gym['content']['title']),
+ extras = {"number": gym['content']['number']}
)
-
-
-
| {"golden_diff": "diff --git a/locations/spiders/anytime_fitness.py b/locations/spiders/anytime_fitness.py\n--- a/locations/spiders/anytime_fitness.py\n+++ b/locations/spiders/anytime_fitness.py\n@@ -2,6 +2,8 @@\n import scrapy\n from locations.items import GeojsonPointItem\n import json\n+import html\n+\n \n class AnytimeFitnessSpider(scrapy.Spider):\n name = 'anytime_fitness'\n@@ -18,14 +20,13 @@\n yield GeojsonPointItem(\n lat = gym['latitude'],\n lon = gym['longitude'],\n- addr_full = '{}, {}'.format(gym['content']['address'], gym['content']['address2']),\n+ addr_full = \", \".join(filter(None, [gym['content']['address'], gym['content']['address2']])),\n city = gym['content']['city'],\n phone = gym['content']['phone'],\n state = gym['content']['state_abbr'],\n postcode = gym['content']['zip'],\n ref = gym['content']['url'],\n- country = gym['content']['country']\n+ country = gym['content']['country'],\n+ name = html.unescape(gym['content']['title']),\n+ extras = {\"number\": gym['content']['number']}\n )\n- \n- \n-\n", "issue": "anytime_fitness.py null values\nThe scraper currently includes address2 whether it is null or not, resulting in \", None\" being appended to many (most?) of the addr:full fields.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nimport json\n\nclass AnytimeFitnessSpider(scrapy.Spider):\n name = 'anytime_fitness'\n allowed_domains = ['www.anytimefitness.com']\n\n def start_requests(self):\n url = 'https://www.anytimefitness.com/wp-content/uploads/gyms.json'\n yield scrapy.Request(url, callback=self.parse)\n\n def parse(self, response):\n gyms = json.loads(response.body_as_unicode())\n\n for gym in gyms:\n yield GeojsonPointItem(\n lat = gym['latitude'],\n lon = gym['longitude'],\n addr_full = '{}, {}'.format(gym['content']['address'], gym['content']['address2']),\n city = gym['content']['city'],\n phone = gym['content']['phone'],\n state = gym['content']['state_abbr'],\n postcode = gym['content']['zip'],\n ref = gym['content']['url'],\n country = gym['content']['country']\n )\n \n \n \n", "path": "locations/spiders/anytime_fitness.py"}]} | 855 | 283 |
gh_patches_debug_29017 | rasdani/github-patches | git_diff | pwndbg__pwndbg-750 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Help: disasm with duplicate visual pointing references
I'm learning about RE and just find out pwndbg.
Using splitmind (saw it in FEATURES readme) with a custom setup of window/pane splitting, I got some "duplicate" visual references pointing to the same instruction in DISASM section.
I don't know if it's the right place to ask for help or not, sorry if it's not the right one

I'm playing aroung with this:
https://crackmes.one/crackme/5d22b9d833c5d410dc4d0c9f
```
Gdb: 10.0.50.20200307-git
Python: 3.8.2 (default, Apr 8 2020, 14:31:25) [GCC 9.3.0]
Pwndbg: 1.1.0 build: 3cf9b31
Capstone: 4.0.1024
Unicorn: 1.0.1
alacritty: 0.4.2
tmux: 3.1
zsh: zsh 5.8 (x86_64-pc-linux-gnu)
OS:
DISTRIB_ID=ManjaroLinux
DISTRIB_RELEASE=20.0
DISTRIB_CODENAME=Lysia
DISTRIB_DESCRIPTION="Manjaro Linux"
```
My .gdbinit
```
source /home/lucasbertin/Projetos/OSS/pwndbg/gdbinit.py
set context-clear-screen on
set follow-fork-mode parent
source /home/lucasbertin/Projetos/OSS/splitmind/gdbinit.py
python
import splitmind
(splitmind.Mind()
.tell_splitter(show_titles=True)
.tell_splitter(set_title="Main")
.right(display="backtrace", size="25%")
.above(of="main", display="disasm", banner="top", size="85%")
.right(of="disasm", display="legend")
.below(of="legend", display="stack")
.show("regs", on="legend")
.below(of="backtrace", cmd='tty; tail -f /dev/null', clearing=False)
.below(cmd="python3")
).build(nobanner=True)
end
set context-code-lines 30
set context-source-code-lines 30
set context-sections "regs args code disasm stack backtrace"
```
Any tips or hints are welcome, thanks!!
</issue>
<code>
[start of pwndbg/commands/nearpc.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from __future__ import absolute_import
4 from __future__ import division
5 from __future__ import print_function
6 from __future__ import unicode_literals
7
8 import argparse
9 import codecs
10
11 import gdb
12 from capstone import *
13
14 import pwndbg.arguments
15 import pwndbg.color
16 import pwndbg.color.context as C
17 import pwndbg.color.disasm as D
18 import pwndbg.color.nearpc as N
19 import pwndbg.color.theme
20 import pwndbg.config
21 import pwndbg.disasm
22 import pwndbg.functions
23 import pwndbg.ida
24 import pwndbg.regs
25 import pwndbg.strings
26 import pwndbg.symbol
27 import pwndbg.ui
28 import pwndbg.vmmap
29 from pwndbg.color import message
30
31
32 def ljust_padding(lst):
33 longest_len = max(map(len, lst)) if lst else 0
34 return [s.ljust(longest_len) for s in lst]
35
36 nearpc_branch_marker = pwndbg.color.theme.Parameter('nearpc-branch-marker', ' ↓', 'branch marker line for nearpc command')
37 nearpc_branch_marker_contiguous = pwndbg.color.theme.Parameter('nearpc-branch-marker-contiguous', ' ', 'contiguous branch marker line for nearpc command')
38 pwndbg.color.theme.Parameter('highlight-pc', True, 'whether to highlight the current instruction')
39 pwndbg.color.theme.Parameter('nearpc-prefix', '►', 'prefix marker for nearpc command')
40 pwndbg.config.Parameter('left-pad-disasm', True, 'whether to left-pad disassembly')
41 nearpc_lines = pwndbg.config.Parameter('nearpc-lines', 10, 'number of additional lines to print for the nearpc command')
42 show_args = pwndbg.config.Parameter('nearpc-show-args', True, 'show call arguments below instruction')
43
44 parser = argparse.ArgumentParser(description='''Disassemble near a specified address.''')
45 parser.add_argument("pc", type=int, nargs="?", default=None, help="Address to dissassemble near.")
46 parser.add_argument("lines", type=int, nargs="?", default=None, help="Number of lines to show on either side of the address.")
47 #parser.add_argument("to_string", type=bool, nargs="?", default=False, help="Whether to print it or not.") #TODO make sure this should not be exposed
48 parser.add_argument("emulate", type=bool, nargs="?", default=False, help="Whether to emulate instructions to find the next ones or just linearly disassemble.")
49 @pwndbg.commands.ArgparsedCommand(parser)
50 @pwndbg.commands.OnlyWhenRunning
51 def nearpc(pc=None, lines=None, to_string=False, emulate=False):
52 """
53 Disassemble near a specified address.
54 """
55
56 # Repeating nearpc (pressing enter) makes it show next addresses
57 # (writing nearpc explicitly again will reset its state)
58 if nearpc.repeat:
59 pc = nearpc.next_pc
60
61 result = []
62
63 # Fix the case where we only have one argument, and
64 # it's a small value.
65 if lines is None and (pc is None or int(pc) < 0x100):
66 lines = pc
67 pc = None
68
69 if pc is None:
70 pc = pwndbg.regs.pc
71
72 if lines is None:
73 lines = nearpc_lines // 2
74
75 pc = int(pc)
76 lines = int(lines)
77
78 # Check whether we can even read this address
79 if not pwndbg.memory.peek(pc):
80 result.append(message.error('Invalid address %#x' % pc))
81
82 # # Load source data if it's available
83 # pc_to_linenos = collections.defaultdict(lambda: [])
84 # lineno_to_src = {}
85 # frame = gdb.selected_frame()
86 # if frame:
87 # sal = frame.find_sal()
88 # if sal:
89 # symtab = sal.symtab
90 # objfile = symtab.objfile
91 # sourcefilename = symtab.filename
92 # with open(sourcefilename, 'r') as sourcefile:
93 # lineno_to_src = {i:l for i,l in enumerate(sourcefile.readlines())}
94
95 # for line in symtab.linetable():
96 # pc_to_linenos[line.pc].append(line.line)
97 instructions = pwndbg.disasm.near(pc, lines, emulate=emulate, show_prev_insns=not nearpc.repeat)
98
99 if pwndbg.memory.peek(pc) and not instructions:
100 result.append(message.error('Invalid instructions at %#x' % pc))
101
102 # In case $pc is in a new map we don't know about,
103 # this will trigger an exploratory search.
104 pwndbg.vmmap.find(pc)
105
106 # Gather all addresses and symbols for each instruction
107 symbols = [pwndbg.symbol.get(i.address) for i in instructions]
108 addresses = ['%#x' % i.address for i in instructions]
109
110 nearpc.next_pc = instructions[-1].address + instructions[-1].size if instructions else 0
111
112 # Format the symbol name for each instruction
113 symbols = ['<%s> ' % sym if sym else '' for sym in symbols]
114
115 # Pad out all of the symbols and addresses
116 if pwndbg.config.left_pad_disasm and not nearpc.repeat:
117 symbols = ljust_padding(symbols)
118 addresses = ljust_padding(addresses)
119
120 prev = None
121
122 # Print out each instruction
123 for address_str, symbol, instr in zip(addresses, symbols, instructions):
124 asm = D.instruction(instr)
125 prefix_sign = pwndbg.config.nearpc_prefix
126
127 # Show prefix only on the specified address and don't show it while in repeat-mode
128 show_prefix = instr.address == pc and not nearpc.repeat
129 prefix = ' %s' % (prefix_sign if show_prefix else ' ' * len(prefix_sign))
130 prefix = N.prefix(prefix)
131
132 pre = pwndbg.ida.Anterior(instr.address)
133 if pre:
134 result.append(N.ida_anterior(pre))
135
136 # Colorize address and symbol if not highlighted
137 # symbol is fetched from gdb and it can be e.g. '<main+8>'
138 if instr.address != pc or not pwndbg.config.highlight_pc or nearpc.repeat:
139 address_str = N.address(address_str)
140 symbol = N.symbol(symbol)
141 elif pwndbg.config.highlight_pc:
142 prefix = C.highlight(prefix)
143 address_str = C.highlight(address_str)
144 symbol = C.highlight(symbol)
145
146 line = ' '.join((prefix, address_str, symbol, asm))
147
148 # If there was a branch before this instruction which was not
149 # contiguous, put in some ellipses.
150 if prev and prev.address + prev.size != instr.address:
151 result.append(N.branch_marker('%s' % nearpc_branch_marker))
152
153 # Otherwise if it's a branch and it *is* contiguous, just put
154 # and empty line.
155 elif prev and any(g in prev.groups for g in (CS_GRP_CALL, CS_GRP_JUMP, CS_GRP_RET)):
156 if len('%s' % nearpc_branch_marker_contiguous) > 0:
157 result.append('%s' % nearpc_branch_marker_contiguous)
158
159 # For syscall instructions, put the name on the side
160 if instr.address == pc:
161 syscall_name = pwndbg.arguments.get_syscall_name(instr)
162 if syscall_name:
163 line += ' <%s>' % N.syscall_name(syscall_name)
164
165 result.append(line)
166
167 # For call instructions, attempt to resolve the target and
168 # determine the number of arguments.
169 if show_args:
170 result.extend(['%8s%s' % ('', arg) for arg in pwndbg.arguments.format_args(instruction=instr)])
171
172 prev = instr
173
174 if not to_string:
175 print('\n'.join(result))
176
177 return result
178
179
180 parser = argparse.ArgumentParser(description='''Like nearpc, but will emulate instructions from the current $PC forward.''')
181 parser.add_argument("pc", type=int, nargs="?", default=None, help="Address to emulate near.")
182 parser.add_argument("lines", type=int, nargs="?", default=None, help="Number of lines to show on either side of the address.")
183 @pwndbg.commands.ArgparsedCommand(parser)
184 @pwndbg.commands.OnlyWhenRunning
185 def emulate(pc=None, lines=None, to_string=False, emulate=True):
186 """
187 Like nearpc, but will emulate instructions from the current $PC forward.
188 """
189 nearpc.repeat = emulate_command.repeat
190 return nearpc(pc, lines, to_string, emulate)
191
192
193 emulate_command = emulate
194
195
196 parser = argparse.ArgumentParser(description='''Compatibility layer for PEDA's pdisass command.''')
197 parser.add_argument("pc", type=int, nargs="?", default=None, help="Address to disassemble near.")
198 parser.add_argument("lines", type=int, nargs="?", default=None, help="Number of lines to show on either side of the address.")
199 @pwndbg.commands.ArgparsedCommand(parser)
200 @pwndbg.commands.OnlyWhenRunning
201 def pdisass(pc=None, lines=None, to_string=False):
202 """
203 Compatibility layer for PEDA's pdisass command
204 """
205 nearpc.repeat = pdisass.repeat
206 return nearpc(pc, lines, to_string, False)
207
208
209 nearpc.next_pc = 0
210
[end of pwndbg/commands/nearpc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwndbg/commands/nearpc.py b/pwndbg/commands/nearpc.py
--- a/pwndbg/commands/nearpc.py
+++ b/pwndbg/commands/nearpc.py
@@ -119,13 +119,16 @@
prev = None
+ first_pc = True
+
# Print out each instruction
for address_str, symbol, instr in zip(addresses, symbols, instructions):
asm = D.instruction(instr)
prefix_sign = pwndbg.config.nearpc_prefix
# Show prefix only on the specified address and don't show it while in repeat-mode
- show_prefix = instr.address == pc and not nearpc.repeat
+ # or when showing current instruction for the second time
+ show_prefix = instr.address == pc and not nearpc.repeat and first_pc
prefix = ' %s' % (prefix_sign if show_prefix else ' ' * len(prefix_sign))
prefix = N.prefix(prefix)
@@ -138,10 +141,11 @@
if instr.address != pc or not pwndbg.config.highlight_pc or nearpc.repeat:
address_str = N.address(address_str)
symbol = N.symbol(symbol)
- elif pwndbg.config.highlight_pc:
+ elif pwndbg.config.highlight_pc and first_pc:
prefix = C.highlight(prefix)
address_str = C.highlight(address_str)
symbol = C.highlight(symbol)
+ first_pc = False
line = ' '.join((prefix, address_str, symbol, asm))
| {"golden_diff": "diff --git a/pwndbg/commands/nearpc.py b/pwndbg/commands/nearpc.py\n--- a/pwndbg/commands/nearpc.py\n+++ b/pwndbg/commands/nearpc.py\n@@ -119,13 +119,16 @@\n \n prev = None\n \n+ first_pc = True\n+\n # Print out each instruction\n for address_str, symbol, instr in zip(addresses, symbols, instructions):\n asm = D.instruction(instr)\n prefix_sign = pwndbg.config.nearpc_prefix\n \n # Show prefix only on the specified address and don't show it while in repeat-mode\n- show_prefix = instr.address == pc and not nearpc.repeat\n+ # or when showing current instruction for the second time\n+ show_prefix = instr.address == pc and not nearpc.repeat and first_pc\n prefix = ' %s' % (prefix_sign if show_prefix else ' ' * len(prefix_sign))\n prefix = N.prefix(prefix)\n \n@@ -138,10 +141,11 @@\n if instr.address != pc or not pwndbg.config.highlight_pc or nearpc.repeat:\n address_str = N.address(address_str)\n symbol = N.symbol(symbol)\n- elif pwndbg.config.highlight_pc:\n+ elif pwndbg.config.highlight_pc and first_pc:\n prefix = C.highlight(prefix)\n address_str = C.highlight(address_str)\n symbol = C.highlight(symbol)\n+ first_pc = False\n \n line = ' '.join((prefix, address_str, symbol, asm))\n", "issue": "Help: disasm with duplicate visual pointing references\nI'm learning about RE and just find out pwndbg.\r\n\r\nUsing splitmind (saw it in FEATURES readme) with a custom setup of window/pane splitting, I got some \"duplicate\" visual references pointing to the same instruction in DISASM section.\r\n\r\nI don't know if it's the right place to ask for help or not, sorry if it's not the right one \r\n\r\n\r\n\r\nI'm playing aroung with this:\r\nhttps://crackmes.one/crackme/5d22b9d833c5d410dc4d0c9f\r\n\r\n```\r\nGdb: 10.0.50.20200307-git\r\nPython: 3.8.2 (default, Apr 8 2020, 14:31:25) [GCC 9.3.0]\r\nPwndbg: 1.1.0 build: 3cf9b31\r\nCapstone: 4.0.1024\r\nUnicorn: 1.0.1\r\nalacritty: 0.4.2\r\ntmux: 3.1\r\nzsh: zsh 5.8 (x86_64-pc-linux-gnu)\r\nOS:\r\nDISTRIB_ID=ManjaroLinux\r\nDISTRIB_RELEASE=20.0\r\nDISTRIB_CODENAME=Lysia\r\nDISTRIB_DESCRIPTION=\"Manjaro Linux\"\r\n```\r\n\r\nMy .gdbinit\r\n```\r\nsource /home/lucasbertin/Projetos/OSS/pwndbg/gdbinit.py\r\nset context-clear-screen on\r\nset follow-fork-mode parent\r\n\r\nsource /home/lucasbertin/Projetos/OSS/splitmind/gdbinit.py\r\npython\r\nimport splitmind\r\n(splitmind.Mind()\r\n .tell_splitter(show_titles=True)\r\n .tell_splitter(set_title=\"Main\")\r\n\r\n .right(display=\"backtrace\", size=\"25%\")\r\n\r\n .above(of=\"main\", display=\"disasm\", banner=\"top\", size=\"85%\")\r\n .right(of=\"disasm\", display=\"legend\")\r\n .below(of=\"legend\", display=\"stack\")\r\n\r\n .show(\"regs\", on=\"legend\")\r\n .below(of=\"backtrace\", cmd='tty; tail -f /dev/null', clearing=False)\r\n .below(cmd=\"python3\")\r\n).build(nobanner=True)\r\nend\r\n\r\nset context-code-lines 30\r\nset context-source-code-lines 30\r\nset context-sections \"regs args code disasm stack backtrace\"\r\n```\r\n\r\nAny tips or hints are welcome, thanks!!\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport codecs\n\nimport gdb\nfrom capstone import *\n\nimport pwndbg.arguments\nimport pwndbg.color\nimport pwndbg.color.context as C\nimport pwndbg.color.disasm as D\nimport pwndbg.color.nearpc as N\nimport pwndbg.color.theme\nimport pwndbg.config\nimport 
pwndbg.disasm\nimport pwndbg.functions\nimport pwndbg.ida\nimport pwndbg.regs\nimport pwndbg.strings\nimport pwndbg.symbol\nimport pwndbg.ui\nimport pwndbg.vmmap\nfrom pwndbg.color import message\n\n\ndef ljust_padding(lst):\n longest_len = max(map(len, lst)) if lst else 0\n return [s.ljust(longest_len) for s in lst]\n\nnearpc_branch_marker = pwndbg.color.theme.Parameter('nearpc-branch-marker', ' \u2193', 'branch marker line for nearpc command')\nnearpc_branch_marker_contiguous = pwndbg.color.theme.Parameter('nearpc-branch-marker-contiguous', ' ', 'contiguous branch marker line for nearpc command')\npwndbg.color.theme.Parameter('highlight-pc', True, 'whether to highlight the current instruction')\npwndbg.color.theme.Parameter('nearpc-prefix', '\u25ba', 'prefix marker for nearpc command')\npwndbg.config.Parameter('left-pad-disasm', True, 'whether to left-pad disassembly')\nnearpc_lines = pwndbg.config.Parameter('nearpc-lines', 10, 'number of additional lines to print for the nearpc command')\nshow_args = pwndbg.config.Parameter('nearpc-show-args', True, 'show call arguments below instruction')\n\nparser = argparse.ArgumentParser(description='''Disassemble near a specified address.''')\nparser.add_argument(\"pc\", type=int, nargs=\"?\", default=None, help=\"Address to dissassemble near.\")\nparser.add_argument(\"lines\", type=int, nargs=\"?\", default=None, help=\"Number of lines to show on either side of the address.\")\n#parser.add_argument(\"to_string\", type=bool, nargs=\"?\", default=False, help=\"Whether to print it or not.\") #TODO make sure this should not be exposed\nparser.add_argument(\"emulate\", type=bool, nargs=\"?\", default=False, help=\"Whether to emulate instructions to find the next ones or just linearly disassemble.\")\[email protected](parser)\[email protected]\ndef nearpc(pc=None, lines=None, to_string=False, emulate=False):\n \"\"\"\n Disassemble near a specified address.\n \"\"\"\n\n # Repeating nearpc (pressing enter) makes it show next addresses\n # (writing nearpc explicitly again will reset its state)\n if nearpc.repeat:\n pc = nearpc.next_pc\n\n result = []\n\n # Fix the case where we only have one argument, and\n # it's a small value.\n if lines is None and (pc is None or int(pc) < 0x100):\n lines = pc\n pc = None\n\n if pc is None:\n pc = pwndbg.regs.pc\n\n if lines is None:\n lines = nearpc_lines // 2\n\n pc = int(pc)\n lines = int(lines)\n\n # Check whether we can even read this address\n if not pwndbg.memory.peek(pc):\n result.append(message.error('Invalid address %#x' % pc))\n\n # # Load source data if it's available\n # pc_to_linenos = collections.defaultdict(lambda: [])\n # lineno_to_src = {}\n # frame = gdb.selected_frame()\n # if frame:\n # sal = frame.find_sal()\n # if sal:\n # symtab = sal.symtab\n # objfile = symtab.objfile\n # sourcefilename = symtab.filename\n # with open(sourcefilename, 'r') as sourcefile:\n # lineno_to_src = {i:l for i,l in enumerate(sourcefile.readlines())}\n\n # for line in symtab.linetable():\n # pc_to_linenos[line.pc].append(line.line)\n instructions = pwndbg.disasm.near(pc, lines, emulate=emulate, show_prev_insns=not nearpc.repeat)\n\n if pwndbg.memory.peek(pc) and not instructions:\n result.append(message.error('Invalid instructions at %#x' % pc))\n\n # In case $pc is in a new map we don't know about,\n # this will trigger an exploratory search.\n pwndbg.vmmap.find(pc)\n\n # Gather all addresses and symbols for each instruction\n symbols = [pwndbg.symbol.get(i.address) for i in instructions]\n addresses = ['%#x' % i.address 
for i in instructions]\n\n nearpc.next_pc = instructions[-1].address + instructions[-1].size if instructions else 0\n\n # Format the symbol name for each instruction\n symbols = ['<%s> ' % sym if sym else '' for sym in symbols]\n\n # Pad out all of the symbols and addresses\n if pwndbg.config.left_pad_disasm and not nearpc.repeat:\n symbols = ljust_padding(symbols)\n addresses = ljust_padding(addresses)\n\n prev = None\n\n # Print out each instruction\n for address_str, symbol, instr in zip(addresses, symbols, instructions):\n asm = D.instruction(instr)\n prefix_sign = pwndbg.config.nearpc_prefix\n\n # Show prefix only on the specified address and don't show it while in repeat-mode\n show_prefix = instr.address == pc and not nearpc.repeat\n prefix = ' %s' % (prefix_sign if show_prefix else ' ' * len(prefix_sign))\n prefix = N.prefix(prefix)\n\n pre = pwndbg.ida.Anterior(instr.address)\n if pre:\n result.append(N.ida_anterior(pre))\n\n # Colorize address and symbol if not highlighted\n # symbol is fetched from gdb and it can be e.g. '<main+8>'\n if instr.address != pc or not pwndbg.config.highlight_pc or nearpc.repeat:\n address_str = N.address(address_str)\n symbol = N.symbol(symbol)\n elif pwndbg.config.highlight_pc:\n prefix = C.highlight(prefix)\n address_str = C.highlight(address_str)\n symbol = C.highlight(symbol)\n\n line = ' '.join((prefix, address_str, symbol, asm))\n\n # If there was a branch before this instruction which was not\n # contiguous, put in some ellipses.\n if prev and prev.address + prev.size != instr.address:\n result.append(N.branch_marker('%s' % nearpc_branch_marker))\n\n # Otherwise if it's a branch and it *is* contiguous, just put\n # and empty line.\n elif prev and any(g in prev.groups for g in (CS_GRP_CALL, CS_GRP_JUMP, CS_GRP_RET)):\n if len('%s' % nearpc_branch_marker_contiguous) > 0:\n result.append('%s' % nearpc_branch_marker_contiguous)\n\n # For syscall instructions, put the name on the side\n if instr.address == pc:\n syscall_name = pwndbg.arguments.get_syscall_name(instr)\n if syscall_name:\n line += ' <%s>' % N.syscall_name(syscall_name)\n\n result.append(line)\n\n # For call instructions, attempt to resolve the target and\n # determine the number of arguments.\n if show_args:\n result.extend(['%8s%s' % ('', arg) for arg in pwndbg.arguments.format_args(instruction=instr)])\n\n prev = instr\n\n if not to_string:\n print('\\n'.join(result))\n\n return result\n\n\nparser = argparse.ArgumentParser(description='''Like nearpc, but will emulate instructions from the current $PC forward.''')\nparser.add_argument(\"pc\", type=int, nargs=\"?\", default=None, help=\"Address to emulate near.\")\nparser.add_argument(\"lines\", type=int, nargs=\"?\", default=None, help=\"Number of lines to show on either side of the address.\")\[email protected](parser)\[email protected]\ndef emulate(pc=None, lines=None, to_string=False, emulate=True):\n \"\"\"\n Like nearpc, but will emulate instructions from the current $PC forward.\n \"\"\"\n nearpc.repeat = emulate_command.repeat\n return nearpc(pc, lines, to_string, emulate)\n\n\nemulate_command = emulate\n\n\nparser = argparse.ArgumentParser(description='''Compatibility layer for PEDA's pdisass command.''')\nparser.add_argument(\"pc\", type=int, nargs=\"?\", default=None, help=\"Address to disassemble near.\")\nparser.add_argument(\"lines\", type=int, nargs=\"?\", default=None, help=\"Number of lines to show on either side of the address.\")\[email protected](parser)\[email protected]\ndef pdisass(pc=None, lines=None, 
to_string=False):\n \"\"\"\n Compatibility layer for PEDA's pdisass command\n \"\"\"\n nearpc.repeat = pdisass.repeat\n return nearpc(pc, lines, to_string, False)\n\n\nnearpc.next_pc = 0\n", "path": "pwndbg/commands/nearpc.py"}]} | 3,732 | 341 |
gh_patches_debug_9873 | rasdani/github-patches | git_diff | wright-group__WrightTools-992 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
collection.convert
Would like to support the syntax `collection.convert(unit)`, which would convert all contained data objects recursively.
</issue>
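A rough sketch of the requested behaviour, written as a standalone helper for illustration only. The name `convert_all` is hypothetical; the sketch assumes children are reachable through `item_names` / `__getitem__` as in the `Collection` class below, and that each Data object exposes a compatible `convert(units, verbose=...)` method.

```python
def convert_all(collection, units, verbose=True):
    # Hypothetical helper, not part of WrightTools: walk the Collection
    # tree and convert the units of every Data object found inside it.
    for name in collection.item_names:
        item = collection[name]
        if hasattr(item, "item_names"):  # nested Collection: recurse
            convert_all(item, units, verbose=verbose)
        else:  # Data object: convert its units
            item.convert(units, verbose=verbose)
```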
<code>
[start of WrightTools/collection/_collection.py]
1 """Collection."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import numpy as np
8
9 from .. import data as wt_data
10 from .. import exceptions as wt_exceptions
11 from .._group import Group
12
13
14 # --- define --------------------------------------------------------------------------------------
15
16
17 __all__ = ["Collection"]
18
19
20 # --- classes -------------------------------------------------------------------------------------
21
22
23 class Collection(Group):
24 """Nestable Collection of Data objects."""
25
26 class_name = "Collection"
27
28 def __iter__(self):
29 self.__n = 0
30 return self
31
32 def __len__(self):
33 return len(self.item_names)
34
35 def __next__(self):
36 if self.__n < len(self):
37 out = self.item_names[self.__n]
38 self.__n += 1
39 else:
40 raise StopIteration
41 return out
42
43 def __repr__(self):
44 return "<WrightTools.Collection '{0}' {1} at {2}>".format(
45 self.natural_name, self.item_names, "::".join([self.filepath, self.name])
46 )
47
48 def __getitem__(self, key):
49 if isinstance(key, int):
50 key = self.item_names[key]
51 if key == "":
52 return None
53 return super().__getitem__(key)
54
55 def __setitem__(self, key, value):
56 raise NotImplementedError
57
58 @property
59 def _leaf(self):
60 return self.natural_name
61
62 def _print_branch(self, prefix, depth, verbose):
63 for i, name in enumerate(self.item_names):
64 item = self[name]
65 if i + 1 == len(self.item_names):
66 s = prefix + "└── {0}: {1}".format(i, item._leaf)
67 p = prefix + " "
68 else:
69 s = prefix + "├── {0}: {1}".format(i, item._leaf)
70 p = prefix + "│ "
71 print(s)
72 if depth > 1 and hasattr(item, "_print_branch"):
73 item._print_branch(p, depth=depth - 1, verbose=verbose)
74
75 def create_collection(self, name="collection", position=None, **kwargs):
76 """Create a new child colleciton.
77
78 Parameters
79 ----------
80 name : string
81 Unique identifier.
82 position : integer (optional)
83 Location to insert. Default is None (append).
84 kwargs
85 Additional arguments to child collection instantiation.
86
87 Returns
88 -------
89 WrightTools Collection
90 New child.
91 """
92 if name in self.item_names:
93 wt_exceptions.ObjectExistsWarning.warn(name)
94 return self[name]
95 collection = Collection(
96 filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs
97 )
98 if position is not None:
99 self.attrs["item_names"] = np.insert(
100 self.attrs["item_names"][:-1], position, collection.natural_name.encode()
101 )
102 setattr(self, name, collection)
103 return collection
104
105 def create_data(self, name="data", position=None, **kwargs):
106 """Create a new child data.
107
108 Parameters
109 ----------
110 name : string
111 Unique identifier.
112 position : integer (optional)
113 Location to insert. Default is None (append).
114 kwargs
115 Additional arguments to child data instantiation.
116
117 Returns
118 -------
119 WrightTools Data
120 New child.
121 """
122 if name in self.item_names:
123 wt_exceptions.ObjectExistsWarning.warn(name)
124 return self[name]
125
126 if name == "":
127 data = None
128 natural_name = "".encode()
129 else:
130 data = wt_data.Data(
131 filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs
132 )
133 natural_name = data.natural_name.encode()
134 if position is not None:
135 self.attrs["item_names"] = np.insert(
136 self.attrs["item_names"][:-1], position, natural_name
137 )
138 setattr(self, name, data)
139 return data
140
141 def index(self):
142 """Index."""
143 raise NotImplementedError
144
145 def print_tree(self, depth=9, *, verbose=False):
146 """Print a ascii-formatted tree representation of the collection contents.
147
148 Parameters
149 ----------
150 depth : integer (optional)
151 Number of layers to include in the tree. Default is 9.
152 verbose : boolean (optional)
153 Toggle inclusion of extra information. Default is True.
154 """
155 print("{0} ({1})".format(self.natural_name, self.filepath))
156 self._print_branch("", depth=depth, verbose=verbose)
157
158 def flush(self):
159 """Ensure contents are written to file."""
160 for name in self.item_names:
161 item = self[name]
162 item.flush()
163 self.file.flush()
164
[end of WrightTools/collection/_collection.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/WrightTools/collection/_collection.py b/WrightTools/collection/_collection.py
--- a/WrightTools/collection/_collection.py
+++ b/WrightTools/collection/_collection.py
@@ -102,6 +102,18 @@
setattr(self, name, collection)
return collection
+ def convert(self, units, convert_variables=False, verbose=True):
+ """Convert units of a collection.
+ Parameters
+ ----------
+ units: string
+ Units to convert to.
+ """
+
+ for name in self.item_names:
+ item = self[name]
+ item.convert(units, convert_variables=convert_variables, verbose=verbose)
+
def create_data(self, name="data", position=None, **kwargs):
"""Create a new child data.
| {"golden_diff": "diff --git a/WrightTools/collection/_collection.py b/WrightTools/collection/_collection.py\n--- a/WrightTools/collection/_collection.py\n+++ b/WrightTools/collection/_collection.py\n@@ -102,6 +102,18 @@\n setattr(self, name, collection)\n return collection\n \n+ def convert(self, units, convert_variables=False, verbose=True):\n+ \"\"\"Convert units of a collection.\n+ Parameters\n+ ----------\n+ units: string\n+ Units to convert to.\n+ \"\"\"\n+\n+ for name in self.item_names:\n+ item = self[name]\n+ item.convert(units, convert_variables=convert_variables, verbose=verbose)\n+\n def create_data(self, name=\"data\", position=None, **kwargs):\n \"\"\"Create a new child data.\n", "issue": "collection.convert\nWould like to support syntax `collection.convert(unit)`.\r\n\r\nWould convert all contained data objects recursively.\n", "before_files": [{"content": "\"\"\"Collection.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nfrom .. import data as wt_data\nfrom .. import exceptions as wt_exceptions\nfrom .._group import Group\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"Collection\"]\n\n\n# --- classes -------------------------------------------------------------------------------------\n\n\nclass Collection(Group):\n \"\"\"Nestable Collection of Data objects.\"\"\"\n\n class_name = \"Collection\"\n\n def __iter__(self):\n self.__n = 0\n return self\n\n def __len__(self):\n return len(self.item_names)\n\n def __next__(self):\n if self.__n < len(self):\n out = self.item_names[self.__n]\n self.__n += 1\n else:\n raise StopIteration\n return out\n\n def __repr__(self):\n return \"<WrightTools.Collection '{0}' {1} at {2}>\".format(\n self.natural_name, self.item_names, \"::\".join([self.filepath, self.name])\n )\n\n def __getitem__(self, key):\n if isinstance(key, int):\n key = self.item_names[key]\n if key == \"\":\n return None\n return super().__getitem__(key)\n\n def __setitem__(self, key, value):\n raise NotImplementedError\n\n @property\n def _leaf(self):\n return self.natural_name\n\n def _print_branch(self, prefix, depth, verbose):\n for i, name in enumerate(self.item_names):\n item = self[name]\n if i + 1 == len(self.item_names):\n s = prefix + \"\u2514\u2500\u2500 {0}: {1}\".format(i, item._leaf)\n p = prefix + \" \"\n else:\n s = prefix + \"\u251c\u2500\u2500 {0}: {1}\".format(i, item._leaf)\n p = prefix + \"\u2502 \"\n print(s)\n if depth > 1 and hasattr(item, \"_print_branch\"):\n item._print_branch(p, depth=depth - 1, verbose=verbose)\n\n def create_collection(self, name=\"collection\", position=None, **kwargs):\n \"\"\"Create a new child colleciton.\n\n Parameters\n ----------\n name : string\n Unique identifier.\n position : integer (optional)\n Location to insert. 
Default is None (append).\n kwargs\n Additional arguments to child collection instantiation.\n\n Returns\n -------\n WrightTools Collection\n New child.\n \"\"\"\n if name in self.item_names:\n wt_exceptions.ObjectExistsWarning.warn(name)\n return self[name]\n collection = Collection(\n filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs\n )\n if position is not None:\n self.attrs[\"item_names\"] = np.insert(\n self.attrs[\"item_names\"][:-1], position, collection.natural_name.encode()\n )\n setattr(self, name, collection)\n return collection\n\n def create_data(self, name=\"data\", position=None, **kwargs):\n \"\"\"Create a new child data.\n\n Parameters\n ----------\n name : string\n Unique identifier.\n position : integer (optional)\n Location to insert. Default is None (append).\n kwargs\n Additional arguments to child data instantiation.\n\n Returns\n -------\n WrightTools Data\n New child.\n \"\"\"\n if name in self.item_names:\n wt_exceptions.ObjectExistsWarning.warn(name)\n return self[name]\n\n if name == \"\":\n data = None\n natural_name = \"\".encode()\n else:\n data = wt_data.Data(\n filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs\n )\n natural_name = data.natural_name.encode()\n if position is not None:\n self.attrs[\"item_names\"] = np.insert(\n self.attrs[\"item_names\"][:-1], position, natural_name\n )\n setattr(self, name, data)\n return data\n\n def index(self):\n \"\"\"Index.\"\"\"\n raise NotImplementedError\n\n def print_tree(self, depth=9, *, verbose=False):\n \"\"\"Print a ascii-formatted tree representation of the collection contents.\n\n Parameters\n ----------\n depth : integer (optional)\n Number of layers to include in the tree. Default is 9.\n verbose : boolean (optional)\n Toggle inclusion of extra information. Default is True.\n \"\"\"\n print(\"{0} ({1})\".format(self.natural_name, self.filepath))\n self._print_branch(\"\", depth=depth, verbose=verbose)\n\n def flush(self):\n \"\"\"Ensure contents are written to file.\"\"\"\n for name in self.item_names:\n item = self[name]\n item.flush()\n self.file.flush()\n", "path": "WrightTools/collection/_collection.py"}]} | 1,948 | 176 |
gh_patches_debug_51104 | rasdani/github-patches | git_diff | ivy-llc__ivy-14373 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
take
</issue>
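The one-line issue only names the function; assuming it refers to the usual `torch.take` semantics (the input is treated as flattened, then indexed), a plain-Python illustration of that behaviour is below. The helper name and sample values are hypothetical.

```python
def take_like(values, flat_indices):
    # Hypothetical illustration only: flatten row-major, then pick
    # elements by their flat index, mirroring torch.take semantics.
    flat = [x for row in values for x in row]
    return [flat[i] for i in flat_indices]

# take_like([[4, 5], [6, 7]], [0, 3]) -> [4, 7]
```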
<code>
[start of ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py]
1 # local
2 import ivy
3 from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
4
5
6 @to_ivy_arrays_and_back
7 def adjoint(input):
8 return ivy.adjoint(input)
9
10
11 @to_ivy_arrays_and_back
12 def cat(tensors, dim=0, *, out=None):
13 return ivy.concat(tensors, axis=dim, out=out)
14
15
16 @to_ivy_arrays_and_back
17 def chunk(input, chunks, dim=0):
18 if ivy.shape(input) == ():
19 return [input]
20 else:
21 dim_size = ivy.shape(input)[dim]
22 chunk_size = dim_size // chunks
23 if chunk_size == 0:
24 return ivy.split(input, num_or_size_splits=dim_size, axis=dim)
25 else:
26 remainder = dim_size % chunks
27 if remainder == 0:
28 return ivy.split(input, num_or_size_splits=chunks, axis=dim)
29 else:
30 return ivy.split(
31 input,
32 num_or_size_splits=tuple(
33 [chunk_size + remainder] + [chunk_size] * (chunks - 1)
34 ),
35 axis=dim,
36 )
37
38
39 @to_ivy_arrays_and_back
40 def concat(tensors, dim=0, *, out=None):
41 return ivy.concat(tensors, axis=dim, out=out)
42
43
44 @to_ivy_arrays_and_back
45 def gather(input, dim, index, *, sparse_grad=False, out=None):
46 if sparse_grad:
47 raise ivy.utils.exceptions.IvyException(
48 "Gather does not yet support the sparse grad functionality"
49 )
50
51 dim = dim % len(input.shape)
52 all_indices = ivy.argwhere(ivy.full(index.shape, True))
53 gather_locations = ivy.reshape(index, [ivy.prod(ivy.array(index.shape))])
54
55 gather_indices = []
56 for axis in range(len(index.shape)):
57 if axis == dim:
58 gather_indices.append(ivy.array(gather_locations, dtype=index.dtype))
59 else:
60 gather_indices.append(ivy.array(all_indices[:, axis], dtype=index.dtype))
61
62 gather_indices = ivy.stack(gather_indices, axis=-1)
63 gathered = ivy.gather_nd(input, gather_indices)
64 reshaped = ivy.reshape(gathered, index.shape)
65 return reshaped
66
67
68 @to_ivy_arrays_and_back
69 def nonzero(input, *, out=None, as_tuple=False):
70 ret = ivy.nonzero(input)
71 if as_tuple is False:
72 ret = ivy.matrix_transpose(ivy.stack(ret))
73
74 if ivy.exists(out):
75 return ivy.inplace_update(out, ret)
76 return ret
77
78
79 @to_ivy_arrays_and_back
80 def permute(input, dims):
81 return ivy.permute_dims(input, axes=dims)
82
83
84 @to_ivy_arrays_and_back
85 def reshape(input, shape):
86 return ivy.reshape(input, shape)
87
88
89 @to_ivy_arrays_and_back
90 def squeeze(input, dim):
91 if isinstance(dim, int) and input.ndim > 0:
92 if input.shape[dim] > 1:
93 return input
94 return ivy.squeeze(input, dim)
95
96
97 @to_ivy_arrays_and_back
98 def stack(tensors, dim=0, *, out=None):
99 return ivy.stack(tensors, axis=dim, out=out)
100
101
102 @to_ivy_arrays_and_back
103 def swapaxes(input, axis0, axis1):
104 return ivy.swapaxes(input, axis0, axis1)
105
106
107 @to_ivy_arrays_and_back
108 def swapdims(input, dim0, dim1):
109 return ivy.swapaxes(input, dim0, dim1)
110
111
112 @to_ivy_arrays_and_back
113 def transpose(input, dim0, dim1):
114 return ivy.swapaxes(input, dim0, dim1)
115
116
117 @to_ivy_arrays_and_back
118 def t(input):
119 if input.ndim > 2:
120 raise ivy.utils.exceptions.IvyException(
121 "t(input) expects a tensor with <= 2 dimensions, but self is %dD"
122 % input.ndim
123 )
124 if input.ndim == 2:
125 return ivy.swapaxes(input, 0, 1)
126 else:
127 return input
128
129
130 @to_ivy_arrays_and_back
131 def tile(input, dims):
132 try:
133 tup = tuple(dims)
134 except TypeError:
135 tup = (dims,)
136 d = len(tup)
137 res = 0
138 if len(input.shape) > len([dims]) - 1:
139 res = input
140 if d < input.ndim:
141 tup = (1,) * (input.ndim - d) + tup
142 res = ivy.tile(input, tup)
143
144 else:
145 res = ivy.tile(input, repeats=dims, out=None)
146 return res
147
148
149 @to_ivy_arrays_and_back
150 def unsqueeze(input, dim=0):
151 return ivy.expand_dims(input, axis=dim)
152
153
154 @to_ivy_arrays_and_back
155 def argwhere(input):
156 return ivy.argwhere(input)
157
158
159 @to_ivy_arrays_and_back
160 def movedim(input, source, destination):
161 return ivy.moveaxis(input, source, destination)
162
163
164 @to_ivy_arrays_and_back
165 def moveaxis(input, source, destination):
166 return ivy.moveaxis(input, source, destination)
167
168
169 @to_ivy_arrays_and_back
170 def hstack(tensors, *, out=None):
171 return ivy.hstack(tensors, out=out)
172
173
174 @to_ivy_arrays_and_back
175 def index_select(input, dim, index, *, out=None):
176 return ivy.gather(input, index, axis=dim, out=out)
177
178
179 @to_ivy_arrays_and_back
180 def dstack(tensors, *, out=None):
181 return ivy.dstack(tensors, out=out)
182
183
184 @to_ivy_arrays_and_back
185 def take_along_dim(input, indices, dim, *, out=None):
186 return ivy.take_along_axis(input, indices, dim, out=out)
187
188
189 @to_ivy_arrays_and_back
190 def vstack(tensors, *, out=None):
191 return ivy.vstack(tensors, out=out)
192
193
194 @to_ivy_arrays_and_back
195 def split(tensor, split_size_or_sections, dim=0):
196 if isinstance(split_size_or_sections, int):
197 split_size = split_size_or_sections
198 split_size_or_sections = [split_size] * (tensor.shape[dim] // split_size)
199 if tensor.shape[dim] % split_size:
200 split_size_or_sections.append(tensor.shape[dim] % split_size)
201 return tuple(
202 ivy.split(
203 tensor,
204 num_or_size_splits=split_size_or_sections,
205 axis=dim,
206 with_remainder=True,
207 )
208 )
209
210
211 @to_ivy_arrays_and_back
212 def tensor_split(input, indices_or_sections, dim=0):
213 if isinstance(indices_or_sections, (list, tuple)):
214 indices_or_sections = (
215 ivy.diff(indices_or_sections, prepend=[0], append=[input.shape[dim]])
216 .astype(ivy.int8)
217 .to_list()
218 )
219 return ivy.split(
220 input, num_or_size_splits=indices_or_sections, axis=dim, with_remainder=False
221 )
222
223
224 @to_ivy_arrays_and_back
225 def unbind(input, dim=0):
226 shape = list(input.shape)
227 shape.pop(dim)
228 return tuple([x.reshape(tuple(shape)) for x in split(input, 1, dim=dim)])
229
230
231 def _get_indices_or_sections(indices_or_sections, indices, sections):
232 if not ivy.exists(indices_or_sections):
233 if ivy.exists(indices) and not ivy.exists(sections):
234 indices_or_sections = indices
235 elif ivy.exists(sections) and not ivy.exists(indices):
236 indices_or_sections = sections
237 else:
238 raise ivy.utils.exception.IvyError(
239 "got invalid argument for indices_or_sections"
240 )
241 return indices_or_sections
242
243
244 @to_ivy_arrays_and_back
245 def dsplit(input, indices_or_sections=None, /, *, indices=None, sections=None):
246 indices_or_sections = _get_indices_or_sections(
247 indices_or_sections, indices, sections
248 )
249 return tuple(ivy.dsplit(input, indices_or_sections))
250
251
252 @to_ivy_arrays_and_back
253 def hsplit(input, indices_or_sections=None, /, *, indices=None, sections=None):
254 indices_or_sections = _get_indices_or_sections(
255 indices_or_sections, indices, sections
256 )
257 return tuple(ivy.hsplit(input, indices_or_sections))
258
259
260 @to_ivy_arrays_and_back
261 def vsplit(input, indices_or_sections=None, /, *, indices=None, sections=None):
262 indices_or_sections = _get_indices_or_sections(
263 indices_or_sections, indices, sections
264 )
265 return tuple(ivy.vsplit(input, indices_or_sections))
266
267
268 @to_ivy_arrays_and_back
269 def row_stack(tensors, *, out=None):
270 return ivy.vstack(tensors, out=out)
271
272
273 @to_ivy_arrays_and_back
274 def where(condition, input=None, other=None):
275 if not ivy.exists(input) and not ivy.exists(other):
276 return nonzero(condition, as_tuple=True)
277 return ivy.where(condition, input, other)
278
279
280 @to_ivy_arrays_and_back
281 def conj(input):
282 return ivy.conj(input)
283
284
285 @to_ivy_arrays_and_back
286 def index_add(input, dim, index, source, *, alpha=1, out=None):
287 input = ivy.swapaxes(input, dim, 0)
288 source = ivy.swapaxes(source, dim, 0)
289 _to_adds = []
290 index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda x: x[0]))
291 while index:
292 _curr_idx = index[0][0]
293 while len(_to_adds) < _curr_idx:
294 _to_adds.append(ivy.zeros_like(source[0]))
295 _to_add_cum = ivy.get_item(source, index[0][1])
296 while (1 < len(index)) and (index[0][0] == index[1][0]):
297 _to_add_cum = _to_add_cum + ivy.get_item(source, index.pop(1)[1])
298 index.pop(0)
299 _to_adds.append(_to_add_cum)
300 while len(_to_adds) < input.shape[0]:
301 _to_adds.append(ivy.zeros_like(source[0]))
302 _to_adds = ivy.stack(_to_adds)
303 if len(input.shape) < 2:
304 # Added this line due to the paddle backend treating scalars as 1-d arrays
305 _to_adds = ivy.flatten(_to_adds)
306
307 ret = ivy.add(input, _to_adds, alpha=alpha)
308 ret = ivy.swapaxes(ret, 0, dim, out=out)
309 return ret
310
311
312 @to_ivy_arrays_and_back
313 def index_copy(input, dim, index, source, *, out=None):
314 input = ivy.swapaxes(input, dim, 0)
315 source = ivy.swapaxes(source, dim, 0)
316 index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda x: x[0]))
317 res = []
318 while index:
319 _curr_idx = index[0][0]
320 for i in range(len(res), _curr_idx):
321 res.append(ivy.get_item(input, i))
322 while (1 < len(index)) and (index[0][0] == index[1][0]):
323 index.pop(0)
324 res.append(ivy.get_item(source, index[0][1]))
325 index.pop(0)
326 for i in range(len(res), input.shape[0]):
327 res.append(ivy.get_item(input, i))
328 res = ivy.stack(res)
329 if len(input.shape) < 2:
330 res = ivy.flatten(res)
331
332 return ivy.swapaxes(res, 0, dim, out=out)
333
334
335 @to_ivy_arrays_and_back
336 def masked_select(input, mask, out=None):
337 return ivy.flatten(input[mask], out=out)
338
[end of ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py b/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py
--- a/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py
+++ b/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py
@@ -335,3 +335,9 @@
@to_ivy_arrays_and_back
def masked_select(input, mask, out=None):
return ivy.flatten(input[mask], out=out)
+
+
+@to_ivy_arrays_and_back
+def take(input, index):
+ input = ivy.reshape(input, (-1,))
+ return ivy.gather(input, index, axis=0)
\ No newline at end of file
| {"golden_diff": "diff --git a/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py b/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py\n--- a/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py\n+++ b/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py\n@@ -335,3 +335,9 @@\n @to_ivy_arrays_and_back\r\n def masked_select(input, mask, out=None):\r\n return ivy.flatten(input[mask], out=out)\r\n+ \r\n+ \r\n+@to_ivy_arrays_and_back\r\n+def take(input, index):\r\n+ input = ivy.reshape(input, (-1,))\r\n+ return ivy.gather(input, index, axis=0) \n\\ No newline at end of file\n", "issue": "take\n\n", "before_files": [{"content": "# local\r\nimport ivy\r\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef adjoint(input):\r\n return ivy.adjoint(input)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef cat(tensors, dim=0, *, out=None):\r\n return ivy.concat(tensors, axis=dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef chunk(input, chunks, dim=0):\r\n if ivy.shape(input) == ():\r\n return [input]\r\n else:\r\n dim_size = ivy.shape(input)[dim]\r\n chunk_size = dim_size // chunks\r\n if chunk_size == 0:\r\n return ivy.split(input, num_or_size_splits=dim_size, axis=dim)\r\n else:\r\n remainder = dim_size % chunks\r\n if remainder == 0:\r\n return ivy.split(input, num_or_size_splits=chunks, axis=dim)\r\n else:\r\n return ivy.split(\r\n input,\r\n num_or_size_splits=tuple(\r\n [chunk_size + remainder] + [chunk_size] * (chunks - 1)\r\n ),\r\n axis=dim,\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef concat(tensors, dim=0, *, out=None):\r\n return ivy.concat(tensors, axis=dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef gather(input, dim, index, *, sparse_grad=False, out=None):\r\n if sparse_grad:\r\n raise ivy.utils.exceptions.IvyException(\r\n \"Gather does not yet support the sparse grad functionality\"\r\n )\r\n\r\n dim = dim % len(input.shape)\r\n all_indices = ivy.argwhere(ivy.full(index.shape, True))\r\n gather_locations = ivy.reshape(index, [ivy.prod(ivy.array(index.shape))])\r\n\r\n gather_indices = []\r\n for axis in range(len(index.shape)):\r\n if axis == dim:\r\n gather_indices.append(ivy.array(gather_locations, dtype=index.dtype))\r\n else:\r\n gather_indices.append(ivy.array(all_indices[:, axis], dtype=index.dtype))\r\n\r\n gather_indices = ivy.stack(gather_indices, axis=-1)\r\n gathered = ivy.gather_nd(input, gather_indices)\r\n reshaped = ivy.reshape(gathered, index.shape)\r\n return reshaped\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef nonzero(input, *, out=None, as_tuple=False):\r\n ret = ivy.nonzero(input)\r\n if as_tuple is False:\r\n ret = ivy.matrix_transpose(ivy.stack(ret))\r\n\r\n if ivy.exists(out):\r\n return ivy.inplace_update(out, ret)\r\n return ret\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef permute(input, dims):\r\n return ivy.permute_dims(input, axes=dims)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef reshape(input, shape):\r\n return ivy.reshape(input, shape)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef squeeze(input, dim):\r\n if isinstance(dim, int) and input.ndim > 0:\r\n if input.shape[dim] > 1:\r\n return input\r\n return ivy.squeeze(input, dim)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef stack(tensors, dim=0, *, out=None):\r\n return ivy.stack(tensors, axis=dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef swapaxes(input, axis0, axis1):\r\n return ivy.swapaxes(input, axis0, axis1)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef 
swapdims(input, dim0, dim1):\r\n return ivy.swapaxes(input, dim0, dim1)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef transpose(input, dim0, dim1):\r\n return ivy.swapaxes(input, dim0, dim1)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef t(input):\r\n if input.ndim > 2:\r\n raise ivy.utils.exceptions.IvyException(\r\n \"t(input) expects a tensor with <= 2 dimensions, but self is %dD\"\r\n % input.ndim\r\n )\r\n if input.ndim == 2:\r\n return ivy.swapaxes(input, 0, 1)\r\n else:\r\n return input\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef tile(input, dims):\r\n try:\r\n tup = tuple(dims)\r\n except TypeError:\r\n tup = (dims,)\r\n d = len(tup)\r\n res = 0\r\n if len(input.shape) > len([dims]) - 1:\r\n res = input\r\n if d < input.ndim:\r\n tup = (1,) * (input.ndim - d) + tup\r\n res = ivy.tile(input, tup)\r\n\r\n else:\r\n res = ivy.tile(input, repeats=dims, out=None)\r\n return res\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef unsqueeze(input, dim=0):\r\n return ivy.expand_dims(input, axis=dim)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef argwhere(input):\r\n return ivy.argwhere(input)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef movedim(input, source, destination):\r\n return ivy.moveaxis(input, source, destination)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef moveaxis(input, source, destination):\r\n return ivy.moveaxis(input, source, destination)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef hstack(tensors, *, out=None):\r\n return ivy.hstack(tensors, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef index_select(input, dim, index, *, out=None):\r\n return ivy.gather(input, index, axis=dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef dstack(tensors, *, out=None):\r\n return ivy.dstack(tensors, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef take_along_dim(input, indices, dim, *, out=None):\r\n return ivy.take_along_axis(input, indices, dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef vstack(tensors, *, out=None):\r\n return ivy.vstack(tensors, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef split(tensor, split_size_or_sections, dim=0):\r\n if isinstance(split_size_or_sections, int):\r\n split_size = split_size_or_sections\r\n split_size_or_sections = [split_size] * (tensor.shape[dim] // split_size)\r\n if tensor.shape[dim] % split_size:\r\n split_size_or_sections.append(tensor.shape[dim] % split_size)\r\n return tuple(\r\n ivy.split(\r\n tensor,\r\n num_or_size_splits=split_size_or_sections,\r\n axis=dim,\r\n with_remainder=True,\r\n )\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef tensor_split(input, indices_or_sections, dim=0):\r\n if isinstance(indices_or_sections, (list, tuple)):\r\n indices_or_sections = (\r\n ivy.diff(indices_or_sections, prepend=[0], append=[input.shape[dim]])\r\n .astype(ivy.int8)\r\n .to_list()\r\n )\r\n return ivy.split(\r\n input, num_or_size_splits=indices_or_sections, axis=dim, with_remainder=False\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef unbind(input, dim=0):\r\n shape = list(input.shape)\r\n shape.pop(dim)\r\n return tuple([x.reshape(tuple(shape)) for x in split(input, 1, dim=dim)])\r\n\r\n\r\ndef _get_indices_or_sections(indices_or_sections, indices, sections):\r\n if not ivy.exists(indices_or_sections):\r\n if ivy.exists(indices) and not ivy.exists(sections):\r\n indices_or_sections = indices\r\n elif ivy.exists(sections) and not ivy.exists(indices):\r\n indices_or_sections = sections\r\n else:\r\n raise ivy.utils.exception.IvyError(\r\n \"got invalid argument for indices_or_sections\"\r\n )\r\n return 
indices_or_sections\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef dsplit(input, indices_or_sections=None, /, *, indices=None, sections=None):\r\n indices_or_sections = _get_indices_or_sections(\r\n indices_or_sections, indices, sections\r\n )\r\n return tuple(ivy.dsplit(input, indices_or_sections))\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef hsplit(input, indices_or_sections=None, /, *, indices=None, sections=None):\r\n indices_or_sections = _get_indices_or_sections(\r\n indices_or_sections, indices, sections\r\n )\r\n return tuple(ivy.hsplit(input, indices_or_sections))\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef vsplit(input, indices_or_sections=None, /, *, indices=None, sections=None):\r\n indices_or_sections = _get_indices_or_sections(\r\n indices_or_sections, indices, sections\r\n )\r\n return tuple(ivy.vsplit(input, indices_or_sections))\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef row_stack(tensors, *, out=None):\r\n return ivy.vstack(tensors, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef where(condition, input=None, other=None):\r\n if not ivy.exists(input) and not ivy.exists(other):\r\n return nonzero(condition, as_tuple=True)\r\n return ivy.where(condition, input, other)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef conj(input):\r\n return ivy.conj(input)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef index_add(input, dim, index, source, *, alpha=1, out=None):\r\n input = ivy.swapaxes(input, dim, 0)\r\n source = ivy.swapaxes(source, dim, 0)\r\n _to_adds = []\r\n index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda x: x[0]))\r\n while index:\r\n _curr_idx = index[0][0]\r\n while len(_to_adds) < _curr_idx:\r\n _to_adds.append(ivy.zeros_like(source[0]))\r\n _to_add_cum = ivy.get_item(source, index[0][1])\r\n while (1 < len(index)) and (index[0][0] == index[1][0]):\r\n _to_add_cum = _to_add_cum + ivy.get_item(source, index.pop(1)[1])\r\n index.pop(0)\r\n _to_adds.append(_to_add_cum)\r\n while len(_to_adds) < input.shape[0]:\r\n _to_adds.append(ivy.zeros_like(source[0]))\r\n _to_adds = ivy.stack(_to_adds)\r\n if len(input.shape) < 2:\r\n # Added this line due to the paddle backend treating scalars as 1-d arrays\r\n _to_adds = ivy.flatten(_to_adds)\r\n\r\n ret = ivy.add(input, _to_adds, alpha=alpha)\r\n ret = ivy.swapaxes(ret, 0, dim, out=out)\r\n return ret\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef index_copy(input, dim, index, source, *, out=None):\r\n input = ivy.swapaxes(input, dim, 0)\r\n source = ivy.swapaxes(source, dim, 0)\r\n index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda x: x[0]))\r\n res = []\r\n while index:\r\n _curr_idx = index[0][0]\r\n for i in range(len(res), _curr_idx):\r\n res.append(ivy.get_item(input, i))\r\n while (1 < len(index)) and (index[0][0] == index[1][0]):\r\n index.pop(0)\r\n res.append(ivy.get_item(source, index[0][1]))\r\n index.pop(0)\r\n for i in range(len(res), input.shape[0]):\r\n res.append(ivy.get_item(input, i))\r\n res = ivy.stack(res)\r\n if len(input.shape) < 2:\r\n res = ivy.flatten(res)\r\n\r\n return ivy.swapaxes(res, 0, dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef masked_select(input, mask, out=None):\r\n return ivy.flatten(input[mask], out=out)\r\n", "path": "ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py"}]} | 4,083 | 184 |
gh_patches_debug_7299 | rasdani/github-patches | git_diff | litestar-org__litestar-1488 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
That assumption does not hold in general, especially for any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
</issue>
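A minimal illustration of the mismatch, using the stdlib `importlib.resources` (equivalent to the linked `importlib_resources` backport) and a hypothetical package name: `files()` returns a `Traversable`, which may live inside a zip or wheel, so there is no on-disk directory for a `DirectoryPath` check to validate.

```python
from importlib.resources import files

# "my_package" is a placeholder; its "static" data may be bundled inside
# a zip/wheel, so there is no real directory on the local filesystem.
static_root = files("my_package").joinpath("static")
index_bytes = static_root.joinpath("index.html").read_bytes()
```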
<code>
[start of litestar/contrib/htmx/response.py]
1 from __future__ import annotations
2
3 from dataclasses import dataclass, field
4 from typing import TYPE_CHECKING, Any, Generic, TypeVar
5 from urllib.parse import quote
6
7 from litestar import Litestar, MediaType, Request, Response
8 from litestar.contrib.htmx._utils import HTMX_STOP_POLLING, get_headers
9 from litestar.contrib.htmx.types import (
10 EventAfterType,
11 HtmxHeaderType,
12 LocationType,
13 PushUrlType,
14 ReSwapMethod,
15 TriggerEventType,
16 )
17 from litestar.response import TemplateResponse
18 from litestar.response_containers import ResponseContainer, Template
19 from litestar.status_codes import HTTP_200_OK
20
21 __all__ = (
22 "ClientRedirect",
23 "ClientRefresh",
24 "HTMXTemplate",
25 "HXLocation",
26 "HXStopPolling",
27 "PushUrl",
28 "ReplaceUrl",
29 "Reswap",
30 "Retarget",
31 "TriggerEvent",
32 )
33
34 if TYPE_CHECKING:
35 from litestar.background_tasks import BackgroundTask, BackgroundTasks
36 from litestar.datastructures import Cookie
37
38
39 # HTMX defined HTTP status code.
40 # Response carrying this status code will ask client to stop Polling.
41 T = TypeVar("T")
42
43
44 class HXStopPolling(Response):
45 """Stop HTMX client from Polling."""
46
47 def __init__(self) -> None:
48 """Initialize"""
49 super().__init__(content=None)
50 self.status_code = HTMX_STOP_POLLING
51
52
53 class ClientRedirect(Response):
54 """HTMX Response class to support client side redirect."""
55
56 def __init__(self, redirect_to: str) -> None:
57 """Set status code to 200 (required by HTMX), and pass redirect url."""
58 super().__init__(content=None, headers=get_headers(hx_headers=HtmxHeaderType(redirect=redirect_to)))
59 del self.headers["Location"]
60
61
62 class ClientRefresh(Response):
63 """Response to support HTMX client page refresh"""
64
65 def __init__(self) -> None:
66 """Set Status code to 200 and set headers."""
67 super().__init__(content=None, headers=get_headers(hx_headers=HtmxHeaderType(refresh=True)))
68
69
70 class PushUrl(Generic[T], Response[T]):
71 """Response to push new url into the history stack."""
72
73 def __init__(self, content: T, push_url: PushUrlType, **kwargs: Any) -> None:
74 """Initialize PushUrl."""
75 super().__init__(
76 content=content,
77 status_code=HTTP_200_OK,
78 headers=get_headers(hx_headers=HtmxHeaderType(push_url=push_url)),
79 **kwargs,
80 )
81
82
83 class ReplaceUrl(Generic[T], Response[T]):
84 """Response to replace url in the Browser Location bar."""
85
86 def __init__(self, content: T, replace_url: PushUrlType, **kwargs: Any) -> None:
87 """Initialize ReplaceUrl."""
88 super().__init__(
89 content=content,
90 status_code=HTTP_200_OK,
91 headers=get_headers(hx_headers=HtmxHeaderType(replace_url=replace_url)),
92 **kwargs,
93 )
94
95
96 class Reswap(Generic[T], Response[T]):
97 """Response to specify how the response will be swapped."""
98
99 def __init__(
100 self,
101 content: T,
102 method: ReSwapMethod,
103 **kwargs: Any,
104 ) -> None:
105 """Initialize Reswap."""
106 super().__init__(content=content, headers=get_headers(hx_headers=HtmxHeaderType(re_swap=method)), **kwargs)
107
108
109 class Retarget(Generic[T], Response[T]):
110 """Response to target different element on the page."""
111
112 def __init__(self, content: T, target: str, **kwargs: Any) -> None:
113 """Initialize Retarget."""
114 super().__init__(content=content, headers=get_headers(hx_headers=HtmxHeaderType(re_target=target)), **kwargs)
115
116
117 class TriggerEvent(Generic[T], Response[T]):
118 """Trigger Client side event."""
119
120 def __init__(
121 self,
122 content: T,
123 name: str,
124 after: EventAfterType,
125 params: dict[str, Any] | None = None,
126 **kwargs: Any,
127 ) -> None:
128 """Initialize TriggerEvent."""
129 event = TriggerEventType(name=name, params=params, after=after)
130 headers = get_headers(hx_headers=HtmxHeaderType(trigger_event=event))
131 super().__init__(content=content, headers=headers, **kwargs)
132
133
134 class HXLocation(Response):
135 """Client side redirect without full page reload."""
136
137 def __init__(
138 self,
139 redirect_to: str,
140 source: str | None = None,
141 event: str | None = None,
142 target: str | None = None,
143 swap: ReSwapMethod = None,
144 hx_headers: dict[str, Any] | None = None,
145 values: dict[str, str] | None = None,
146 **kwargs: Any,
147 ) -> None:
148 """Initialize HXLocation, Set status code to 200 (required by HTMX),
149 and pass redirect url.
150 """
151 super().__init__(
152 content=None,
153 headers={"Location": quote(redirect_to, safe="/#%[]=:;$&()+,!?*@'~")},
154 **kwargs,
155 )
156 spec: dict[str, Any] = get_headers(
157 hx_headers=HtmxHeaderType(
158 location=LocationType(
159 path=str(self.headers.get("Location")),
160 source=source,
161 event=event,
162 target=target,
163 swap=swap,
164 values=values,
165 hx_headers=hx_headers,
166 )
167 )
168 )
169 del self.headers["Location"]
170 self.headers.update(spec)
171
172
173 @dataclass
174 class HTMXTemplate(ResponseContainer[TemplateResponse]):
175 """HTMX template wrapper"""
176
177 name: str
178 """Path-like name for the template to be rendered, e.g. "index.html"."""
179 context: dict[str, Any] = field(default_factory=dict)
180     """A dictionary of key/value pairs to be passed to the template engine's render method.
181
182 Defaults to None.
183 """
184 background: BackgroundTask | BackgroundTasks | None = field(default=None)
185 """A :class:`BackgroundTask <.background_tasks.BackgroundTask>` instance or
186 :class:`BackgroundTasks <.background_tasks.BackgroundTasks>` to execute after the response is finished. Defaults to
187 ``None``.
188 """
189 headers: dict[str, Any] = field(default_factory=dict)
190 """A string/string dictionary of response headers.Header keys are insensitive. Defaults to ``None``."""
191 cookies: list[Cookie] = field(default_factory=list)
192 """A list of :class:`Cookies <.datastructures.Cookie>` to be set under the response ``Set-Cookie`` header. Defaults
193 to ``None``.
194 """
195 media_type: MediaType | str | None = field(default=None)
196 """If defined, overrides the media type configured in the route decorator."""
197 encoding: str = field(default="utf-8")
198 """The encoding to be used for the response headers."""
199 push_url: PushUrlType | None = field(default=None)
200 """Either a string value specifying a URL to push to browser history or ``False`` to prevent HTMX client from
201 pushing a url to browser history."""
202 re_swap: ReSwapMethod | None = field(default=None)
203 """Method value to instruct HTMX which swapping method to use."""
204 re_target: str | None = field(default=None)
205 """Value for 'id of target element' to apply changes to."""
206 trigger_event: str | None = field(default=None)
207 """Event name to trigger."""
208 params: dict[str, Any] | None = field(default=None)
209 """Dictionary of parameters if any required with trigger event parameter."""
210 after: EventAfterType | None = field(default=None)
211 """Changes to apply after ``receive``, ``settle`` or ``swap`` event."""
212
213 def to_response(
214 self,
215 headers: dict[str, Any],
216 media_type: MediaType | str,
217 status_code: int,
218 app: Litestar,
219 request: Request,
220 ) -> TemplateResponse:
221 """Add HTMX headers and return a :class:`TemplateResponse <.response.TemplateResponse>`."""
222
223 event: TriggerEventType | None = None
224 if self.trigger_event:
225 event = TriggerEventType(name=str(self.trigger_event), params=self.params, after=self.after)
226
227 hx_headers: dict[str, Any] = get_headers(
228 hx_headers=HtmxHeaderType(
229 push_url=self.push_url, re_swap=self.re_swap, re_target=self.re_target, trigger_event=event
230 )
231 )
232
233 template = Template(
234 name=self.name,
235 background=self.background,
236 encoding=self.encoding,
237 )
238
239 return template.to_response(
240 headers=hx_headers, media_type=media_type, app=app, status_code=status_code, request=request
241 )
242
[end of litestar/contrib/htmx/response.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/litestar/contrib/htmx/response.py b/litestar/contrib/htmx/response.py
--- a/litestar/contrib/htmx/response.py
+++ b/litestar/contrib/htmx/response.py
@@ -230,11 +230,7 @@
)
)
- template = Template(
- name=self.name,
- background=self.background,
- encoding=self.encoding,
- )
+ template = Template(name=self.name, background=self.background, context=self.context, encoding=self.encoding)
return template.to_response(
headers=hx_headers, media_type=media_type, app=app, status_code=status_code, request=request
| {"golden_diff": "diff --git a/litestar/contrib/htmx/response.py b/litestar/contrib/htmx/response.py\n--- a/litestar/contrib/htmx/response.py\n+++ b/litestar/contrib/htmx/response.py\n@@ -230,11 +230,7 @@\n )\n )\n \n- template = Template(\n- name=self.name,\n- background=self.background,\n- encoding=self.encoding,\n- )\n+ template = Template(name=self.name, background=self.background, context=self.context, encoding=self.encoding)\n \n return template.to_response(\n headers=hx_headers, media_type=media_type, app=app, status_code=status_code, request=request\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom dataclasses import dataclass, field\nfrom typing import TYPE_CHECKING, Any, Generic, TypeVar\nfrom urllib.parse import quote\n\nfrom litestar import Litestar, MediaType, Request, Response\nfrom litestar.contrib.htmx._utils import HTMX_STOP_POLLING, get_headers\nfrom litestar.contrib.htmx.types import (\n EventAfterType,\n HtmxHeaderType,\n LocationType,\n PushUrlType,\n ReSwapMethod,\n TriggerEventType,\n)\nfrom litestar.response import TemplateResponse\nfrom litestar.response_containers import ResponseContainer, Template\nfrom litestar.status_codes import HTTP_200_OK\n\n__all__ = (\n \"ClientRedirect\",\n \"ClientRefresh\",\n \"HTMXTemplate\",\n \"HXLocation\",\n \"HXStopPolling\",\n \"PushUrl\",\n \"ReplaceUrl\",\n \"Reswap\",\n \"Retarget\",\n \"TriggerEvent\",\n)\n\nif TYPE_CHECKING:\n from litestar.background_tasks import BackgroundTask, BackgroundTasks\n from litestar.datastructures import Cookie\n\n\n# HTMX defined HTTP status code.\n# Response carrying this status code will ask client to stop Polling.\nT = TypeVar(\"T\")\n\n\nclass HXStopPolling(Response):\n \"\"\"Stop HTMX client from Polling.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize\"\"\"\n super().__init__(content=None)\n self.status_code = HTMX_STOP_POLLING\n\n\nclass ClientRedirect(Response):\n \"\"\"HTMX Response class to support client side redirect.\"\"\"\n\n def __init__(self, redirect_to: str) -> None:\n \"\"\"Set status code to 200 (required by HTMX), and pass redirect url.\"\"\"\n super().__init__(content=None, headers=get_headers(hx_headers=HtmxHeaderType(redirect=redirect_to)))\n del self.headers[\"Location\"]\n\n\nclass ClientRefresh(Response):\n \"\"\"Response to support HTMX client page refresh\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Set Status code to 200 and set headers.\"\"\"\n super().__init__(content=None, headers=get_headers(hx_headers=HtmxHeaderType(refresh=True)))\n\n\nclass PushUrl(Generic[T], Response[T]):\n \"\"\"Response to push new url into the history stack.\"\"\"\n\n def __init__(self, content: T, push_url: PushUrlType, **kwargs: Any) -> None:\n \"\"\"Initialize PushUrl.\"\"\"\n super().__init__(\n content=content,\n status_code=HTTP_200_OK,\n 
headers=get_headers(hx_headers=HtmxHeaderType(push_url=push_url)),\n **kwargs,\n )\n\n\nclass ReplaceUrl(Generic[T], Response[T]):\n \"\"\"Response to replace url in the Browser Location bar.\"\"\"\n\n def __init__(self, content: T, replace_url: PushUrlType, **kwargs: Any) -> None:\n \"\"\"Initialize ReplaceUrl.\"\"\"\n super().__init__(\n content=content,\n status_code=HTTP_200_OK,\n headers=get_headers(hx_headers=HtmxHeaderType(replace_url=replace_url)),\n **kwargs,\n )\n\n\nclass Reswap(Generic[T], Response[T]):\n \"\"\"Response to specify how the response will be swapped.\"\"\"\n\n def __init__(\n self,\n content: T,\n method: ReSwapMethod,\n **kwargs: Any,\n ) -> None:\n \"\"\"Initialize Reswap.\"\"\"\n super().__init__(content=content, headers=get_headers(hx_headers=HtmxHeaderType(re_swap=method)), **kwargs)\n\n\nclass Retarget(Generic[T], Response[T]):\n \"\"\"Response to target different element on the page.\"\"\"\n\n def __init__(self, content: T, target: str, **kwargs: Any) -> None:\n \"\"\"Initialize Retarget.\"\"\"\n super().__init__(content=content, headers=get_headers(hx_headers=HtmxHeaderType(re_target=target)), **kwargs)\n\n\nclass TriggerEvent(Generic[T], Response[T]):\n \"\"\"Trigger Client side event.\"\"\"\n\n def __init__(\n self,\n content: T,\n name: str,\n after: EventAfterType,\n params: dict[str, Any] | None = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"Initialize TriggerEvent.\"\"\"\n event = TriggerEventType(name=name, params=params, after=after)\n headers = get_headers(hx_headers=HtmxHeaderType(trigger_event=event))\n super().__init__(content=content, headers=headers, **kwargs)\n\n\nclass HXLocation(Response):\n \"\"\"Client side redirect without full page reload.\"\"\"\n\n def __init__(\n self,\n redirect_to: str,\n source: str | None = None,\n event: str | None = None,\n target: str | None = None,\n swap: ReSwapMethod = None,\n hx_headers: dict[str, Any] | None = None,\n values: dict[str, str] | None = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"Initialize HXLocation, Set status code to 200 (required by HTMX),\n and pass redirect url.\n \"\"\"\n super().__init__(\n content=None,\n headers={\"Location\": quote(redirect_to, safe=\"/#%[]=:;$&()+,!?*@'~\")},\n **kwargs,\n )\n spec: dict[str, Any] = get_headers(\n hx_headers=HtmxHeaderType(\n location=LocationType(\n path=str(self.headers.get(\"Location\")),\n source=source,\n event=event,\n target=target,\n swap=swap,\n values=values,\n hx_headers=hx_headers,\n )\n )\n )\n del self.headers[\"Location\"]\n self.headers.update(spec)\n\n\n@dataclass\nclass HTMXTemplate(ResponseContainer[TemplateResponse]):\n \"\"\"HTMX template wrapper\"\"\"\n\n name: str\n \"\"\"Path-like name for the template to be rendered, e.g. \"index.html\".\"\"\"\n context: dict[str, Any] = field(default_factory=dict)\n \"\"\"A dictionary of key/value pairs to be passed to the temple engine's render method.\n\n Defaults to None.\n \"\"\"\n background: BackgroundTask | BackgroundTasks | None = field(default=None)\n \"\"\"A :class:`BackgroundTask <.background_tasks.BackgroundTask>` instance or\n :class:`BackgroundTasks <.background_tasks.BackgroundTasks>` to execute after the response is finished. Defaults to\n ``None``.\n \"\"\"\n headers: dict[str, Any] = field(default_factory=dict)\n \"\"\"A string/string dictionary of response headers.Header keys are insensitive. 
Defaults to ``None``.\"\"\"\n cookies: list[Cookie] = field(default_factory=list)\n \"\"\"A list of :class:`Cookies <.datastructures.Cookie>` to be set under the response ``Set-Cookie`` header. Defaults\n to ``None``.\n \"\"\"\n media_type: MediaType | str | None = field(default=None)\n \"\"\"If defined, overrides the media type configured in the route decorator.\"\"\"\n encoding: str = field(default=\"utf-8\")\n \"\"\"The encoding to be used for the response headers.\"\"\"\n push_url: PushUrlType | None = field(default=None)\n \"\"\"Either a string value specifying a URL to push to browser history or ``False`` to prevent HTMX client from\n pushing a url to browser history.\"\"\"\n re_swap: ReSwapMethod | None = field(default=None)\n \"\"\"Method value to instruct HTMX which swapping method to use.\"\"\"\n re_target: str | None = field(default=None)\n \"\"\"Value for 'id of target element' to apply changes to.\"\"\"\n trigger_event: str | None = field(default=None)\n \"\"\"Event name to trigger.\"\"\"\n params: dict[str, Any] | None = field(default=None)\n \"\"\"Dictionary of parameters if any required with trigger event parameter.\"\"\"\n after: EventAfterType | None = field(default=None)\n \"\"\"Changes to apply after ``receive``, ``settle`` or ``swap`` event.\"\"\"\n\n def to_response(\n self,\n headers: dict[str, Any],\n media_type: MediaType | str,\n status_code: int,\n app: Litestar,\n request: Request,\n ) -> TemplateResponse:\n \"\"\"Add HTMX headers and return a :class:`TemplateResponse <.response.TemplateResponse>`.\"\"\"\n\n event: TriggerEventType | None = None\n if self.trigger_event:\n event = TriggerEventType(name=str(self.trigger_event), params=self.params, after=self.after)\n\n hx_headers: dict[str, Any] = get_headers(\n hx_headers=HtmxHeaderType(\n push_url=self.push_url, re_swap=self.re_swap, re_target=self.re_target, trigger_event=event\n )\n )\n\n template = Template(\n name=self.name,\n background=self.background,\n encoding=self.encoding,\n )\n\n return template.to_response(\n headers=hx_headers, media_type=media_type, app=app, status_code=status_code, request=request\n )\n", "path": "litestar/contrib/htmx/response.py"}]} | 3,248 | 151 |
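The golden diff above threads `context=self.context` through to the underlying `Template`, whereas the `to_response()` shown earlier built it from `name`, `background`, and `encoding` only, so a handler's context never reached the template engine. A minimal sketch of a handler that relies on that forwarding — the template name, context keys, and bare-function shape are illustrative assumptions, not taken from the record:

```python
from litestar.contrib.htmx.response import HTMXTemplate

def get_index() -> HTMXTemplate:
    # With the fix, "greeting" is forwarded via Template(context=...);
    # before it, HTMXTemplate dropped the context it was given.
    return HTMXTemplate(
        name="index.html",               # assumed template name
        context={"greeting": "hello"},   # forwarded only after the fix
        push_url="/index",
    )
```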
gh_patches_debug_26291 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3117 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The requirement of superuser postgresql access is problematic
## Problem
Mathesar needs a Postgres superuser to function correctly, from the docs at https://docs.mathesar.org/installation/build-from-source/
## Proposed solution
The mathesar user should not require superuser access.
## Additional context
Superuser is a global permission, meaning that a user with superuser rights can access (and modify) not just the mathesar database but *all* databases of the RDBMS. Considering that many production systems have a single RDBMS hosting many applications, this is a major problem, since the mathesar user would have no access boundaries on that RDBMS. The mathesar user's access can be unlimited within the mathesar database, but it *must* be bounded outside of it.
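To make that boundary concrete, here is a minimal sketch of provisioning a non-superuser role confined to a single database, reusing the SQLAlchemy pattern from `db/install.py` below. The role name, password, admin connection URL, and exact privilege set are illustrative assumptions, not taken from the Mathesar docs:

```python
from sqlalchemy import create_engine, text

# Assumed admin connection; in practice, whatever credentials the operator
# already uses for provisioning.
admin_engine = create_engine("postgresql://postgres:postgres@localhost/postgres")

with admin_engine.connect() as conn:
    conn.execution_options(isolation_level="AUTOCOMMIT")
    # A plain login role: no SUPERUSER, CREATEDB, or CREATEROLE.
    conn.execute(text(
        "CREATE ROLE mathesar_app LOGIN PASSWORD 'change-me' "
        "NOSUPERUSER NOCREATEDB NOCREATEROLE"
    ))
    # Give the role its own database to own outright.
    conn.execute(text("CREATE DATABASE mathesar OWNER mathesar_app"))
    # Optionally restrict who else may connect to the new database.
    conn.execute(text("REVOKE CONNECT ON DATABASE mathesar FROM PUBLIC"))
```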
</issue>
<code>
[start of db/install.py]
1 from sqlalchemy import text
2 from sqlalchemy.exc import OperationalError
3
4 from db import engine
5 from db.sql import install as sql_install
6 from db.types import install as types_install
7
8
9 def install_mathesar(
10 database_name, username, password, hostname, port, skip_confirm
11 ):
12 """Create database and install Mathesar on it."""
13 user_db_engine = engine.create_future_engine(
14 username, password, hostname, database_name, port,
15 connect_args={"connect_timeout": 10}
16 )
17 try:
18 user_db_engine.connect()
19 print(f"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...")
20 sql_install.install(user_db_engine)
21 types_install.install_mathesar_on_database(user_db_engine)
22 user_db_engine.dispose()
23 except OperationalError:
24 database_created = _create_database(
25 database_name=database_name,
26 hostname=hostname,
27 username=username,
28 password=password,
29 port=port,
30 skip_confirm=skip_confirm
31 )
32 if database_created:
33 print(f"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...")
34 sql_install.install(user_db_engine)
35 types_install.install_mathesar_on_database(user_db_engine)
36 user_db_engine.dispose()
37 else:
38 print(f"Skipping installing on DB with key {database_name}.")
39
40
41 def _create_database(database_name, hostname, username, password, port, skip_confirm=True):
42 if skip_confirm is True:
43 create_database = "y"
44 else:
45 create_database = input(
46 f"Create a new Database called {database_name}? (y/n) > "
47 )
48 if create_database.lower() in ["y", "yes"]:
49 # We need to connect to an existing database inorder to create a new Database.
50 # So we use the default Database `postgres` that comes with postgres.
51 # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)
52 root_database = "postgres"
53 root_db_engine = engine.create_future_engine(
54 username, password, hostname, root_database, port,
55 connect_args={"connect_timeout": 10}
56 )
57 with root_db_engine.connect() as conn:
58 conn.execution_options(isolation_level="AUTOCOMMIT")
59 conn.execute(text(f'CREATE DATABASE "{database_name}"'))
60 root_db_engine.dispose()
61 print(f"Created DB is {database_name}.")
62 return True
63 else:
64 print(f"Database {database_name} not created!")
65 return False
66
[end of db/install.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/db/install.py b/db/install.py
--- a/db/install.py
+++ b/db/install.py
@@ -1,5 +1,6 @@
+from psycopg.errors import InsufficientPrivilege
from sqlalchemy import text
-from sqlalchemy.exc import OperationalError
+from sqlalchemy.exc import OperationalError, ProgrammingError
from db import engine
from db.sql import install as sql_install
@@ -54,12 +55,20 @@
username, password, hostname, root_database, port,
connect_args={"connect_timeout": 10}
)
- with root_db_engine.connect() as conn:
- conn.execution_options(isolation_level="AUTOCOMMIT")
- conn.execute(text(f'CREATE DATABASE "{database_name}"'))
- root_db_engine.dispose()
- print(f"Created DB is {database_name}.")
- return True
+ try:
+ with root_db_engine.connect() as conn:
+ conn.execution_options(isolation_level="AUTOCOMMIT")
+ conn.execute(text(f'CREATE DATABASE "{database_name}"'))
+ root_db_engine.dispose()
+ print(f"Created DB is {database_name}.")
+ return True
+ except ProgrammingError as e:
+ if isinstance(e.orig, InsufficientPrivilege):
+ print(f"Database {database_name} could not be created due to Insufficient Privilege")
+ return False
+ except Exception:
+ print(f"Database {database_name} could not be created!")
+ return False
else:
print(f"Database {database_name} not created!")
return False
| {"golden_diff": "diff --git a/db/install.py b/db/install.py\n--- a/db/install.py\n+++ b/db/install.py\n@@ -1,5 +1,6 @@\n+from psycopg.errors import InsufficientPrivilege\n from sqlalchemy import text\n-from sqlalchemy.exc import OperationalError\n+from sqlalchemy.exc import OperationalError, ProgrammingError\n \n from db import engine\n from db.sql import install as sql_install\n@@ -54,12 +55,20 @@\n username, password, hostname, root_database, port,\n connect_args={\"connect_timeout\": 10}\n )\n- with root_db_engine.connect() as conn:\n- conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n- conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n- root_db_engine.dispose()\n- print(f\"Created DB is {database_name}.\")\n- return True\n+ try:\n+ with root_db_engine.connect() as conn:\n+ conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n+ conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n+ root_db_engine.dispose()\n+ print(f\"Created DB is {database_name}.\")\n+ return True\n+ except ProgrammingError as e:\n+ if isinstance(e.orig, InsufficientPrivilege):\n+ print(f\"Database {database_name} could not be created due to Insufficient Privilege\")\n+ return False\n+ except Exception:\n+ print(f\"Database {database_name} could not be created!\")\n+ return False\n else:\n print(f\"Database {database_name} not created!\")\n return False\n", "issue": "The requirement of superuser postgresql access is problematic\n## Problem\r\nMathesar needs a Postgres superuser to function correctly, from the docs at https://docs.mathesar.org/installation/build-from-source/ \r\n\r\n## Proposed solution\r\nThe mathesar user should not require superuser access. \r\n\r\n## Additional context\r\nThe superuser is a global permission meaning that a user that has superuser permission will be able to access (and do stuff) not on the mathesar but *all* the databases of the RDBMS. Considering that many production systems have a single RDBMS hosting many application this is a major problem since the mathsar user won't have any access boundaries on the same RDBMS. 
The mathesar user access can be unlimited but *must* be bounded without the mathesar database.\n", "before_files": [{"content": "from sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom db import engine\nfrom db.sql import install as sql_install\nfrom db.types import install as types_install\n\n\ndef install_mathesar(\n database_name, username, password, hostname, port, skip_confirm\n):\n \"\"\"Create database and install Mathesar on it.\"\"\"\n user_db_engine = engine.create_future_engine(\n username, password, hostname, database_name, port,\n connect_args={\"connect_timeout\": 10}\n )\n try:\n user_db_engine.connect()\n print(f\"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...\")\n sql_install.install(user_db_engine)\n types_install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n except OperationalError:\n database_created = _create_database(\n database_name=database_name,\n hostname=hostname,\n username=username,\n password=password,\n port=port,\n skip_confirm=skip_confirm\n )\n if database_created:\n print(f\"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...\")\n sql_install.install(user_db_engine)\n types_install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n else:\n print(f\"Skipping installing on DB with key {database_name}.\")\n\n\ndef _create_database(database_name, hostname, username, password, port, skip_confirm=True):\n if skip_confirm is True:\n create_database = \"y\"\n else:\n create_database = input(\n f\"Create a new Database called {database_name}? (y/n) > \"\n )\n if create_database.lower() in [\"y\", \"yes\"]:\n # We need to connect to an existing database inorder to create a new Database.\n # So we use the default Database `postgres` that comes with postgres.\n # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)\n root_database = \"postgres\"\n root_db_engine = engine.create_future_engine(\n username, password, hostname, root_database, port,\n connect_args={\"connect_timeout\": 10}\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n root_db_engine.dispose()\n print(f\"Created DB is {database_name}.\")\n return True\n else:\n print(f\"Database {database_name} not created!\")\n return False\n", "path": "db/install.py"}]} | 1,346 | 342 |
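The golden diff above for `db/install.py` distinguishes an `InsufficientPrivilege` failure from other errors instead of letting the traceback escape; the same pattern in isolation looks roughly like this (the connection URL and role are assumptions, and the psycopg 3 driver is assumed so that `e.orig` is a `psycopg.errors` instance):

```python
from psycopg.errors import InsufficientPrivilege
from sqlalchemy import create_engine, text
from sqlalchemy.exc import ProgrammingError

# Assumed non-superuser connection using the psycopg 3 driver.
engine = create_engine("postgresql+psycopg://mathesar_app:change-me@localhost/postgres")

try:
    with engine.connect() as conn:
        conn.execution_options(isolation_level="AUTOCOMMIT")
        conn.execute(text('CREATE DATABASE "mathesar"'))
except ProgrammingError as e:
    if isinstance(e.orig, InsufficientPrivilege):
        print("This role lacks CREATEDB; create the database with admin credentials instead.")
    else:
        raise
```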
gh_patches_debug_24580 | rasdani/github-patches | git_diff | qtile__qtile-3261 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Systray widget crash
2022-01-27 09:48:40,366 ERROR libqtile bar.py:_configure_widget():L324 Systray widget crashed during _configure with error: ConfigError('Only one Systray can be used.')
[config.txt](https://github.com/qtile/qtile/files/7948239/config.txt)
</issue>
<code>
[start of libqtile/widget/systray.py]
1 # Copyright (c) 2010 Aldo Cortesi
2 # Copyright (c) 2010-2011 dequis
3 # Copyright (c) 2010, 2012 roger
4 # Copyright (c) 2011 Mounier Florian
5 # Copyright (c) 2011-2012, 2014 Tycho Andersen
6 # Copyright (c) 2012 dmpayton
7 # Copyright (c) 2012-2013 Craig Barnes
8 # Copyright (c) 2013 hbc
9 # Copyright (c) 2013 Tao Sauvage
10 # Copyright (c) 2014 Sean Vig
11 #
12 # Permission is hereby granted, free of charge, to any person obtaining a copy
13 # of this software and associated documentation files (the "Software"), to deal
14 # in the Software without restriction, including without limitation the rights
15 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 # copies of the Software, and to permit persons to whom the Software is
17 # furnished to do so, subject to the following conditions:
18 #
19 # The above copyright notice and this permission notice shall be included in
20 # all copies or substantial portions of the Software.
21 #
22 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
28 # SOFTWARE.
29 from typing import TYPE_CHECKING
30
31 import xcffib
32 from xcffib.xproto import ClientMessageData, ClientMessageEvent, EventMask, SetMode
33
34 from libqtile import bar
35 from libqtile.backend.x11 import window
36 from libqtile.confreader import ConfigError
37 from libqtile.widget import base
38
39 if TYPE_CHECKING:
40 from typing import List, Optional
41
42 XEMBED_PROTOCOL_VERSION = 0
43
44
45 class Icon(window._Window):
46 _window_mask = EventMask.StructureNotify | EventMask.PropertyChange | EventMask.Exposure
47
48 def __init__(self, win, qtile, systray):
49 window._Window.__init__(self, win, qtile)
50 self.systray = systray
51 # win.get_name() may return None when apps provide a temporary window before the icon window
52 # we need something in self.name in order to sort icons so we use the window's WID.
53 self.name = win.get_name() or str(win.wid)
54 self.update_size()
55
56 def __eq__(self, other):
57 if not isinstance(other, Icon):
58 return False
59
60 return self.window.wid == other.window.wid
61
62 def update_size(self):
63 icon_size = self.systray.icon_size
64 self.update_hints()
65
66 width = self.hints.get("min_width", icon_size)
67 height = self.hints.get("min_height", icon_size)
68
69 width = max(width, icon_size)
70 height = max(height, icon_size)
71
72 if height > icon_size:
73 width = width * icon_size // height
74 height = icon_size
75
76 self.width = width
77 self.height = height
78 return False
79
80 def handle_PropertyNotify(self, e): # noqa: N802
81 name = self.qtile.core.conn.atoms.get_name(e.atom)
82 if name == "_XEMBED_INFO":
83 info = self.window.get_property("_XEMBED_INFO", unpack=int)
84 if info and info[1]:
85 self.systray.bar.draw()
86
87 return False
88
89 def handle_DestroyNotify(self, event): # noqa: N802
90 wid = event.window
91 icon = self.qtile.windows_map.pop(wid)
92 self.systray.tray_icons.remove(icon)
93 self.systray.bar.draw()
94 return False
95
96 handle_UnmapNotify = handle_DestroyNotify # noqa: N815
97
98
99 class Systray(window._Window, base._Widget):
100 """
101 A widget that manages system tray.
102
103 Only one Systray widget is allowed. Adding additional Systray
104 widgets will result in a ConfigError.
105
106 .. note::
107 Icons will not render correctly where the bar/widget is
108 drawn with a semi-transparent background. Instead, icons
109 will be drawn with a transparent background.
110
111 If using this widget it is therefore recommended to use
112 a fully opaque background colour or a fully transparent
113 one.
114 """
115
116 _instances = 0
117
118 _window_mask = EventMask.StructureNotify | EventMask.Exposure
119
120 orientations = base.ORIENTATION_BOTH
121
122 defaults = [
123 ("icon_size", 20, "Icon width"),
124 ("padding", 5, "Padding between icons"),
125 ]
126
127 def __init__(self, **config):
128 base._Widget.__init__(self, bar.CALCULATED, **config)
129 self.add_defaults(Systray.defaults)
130 self.tray_icons = []
131 self.screen = 0
132 self._name = config.get("name", "systray")
133 self._wm_class: Optional[List[str]] = None
134
135 def calculate_length(self):
136 if self.bar.horizontal:
137 length = sum(i.width for i in self.tray_icons)
138 else:
139 length = sum(i.height for i in self.tray_icons)
140 length += self.padding * len(self.tray_icons)
141 return length
142
143 def _configure(self, qtile, bar):
144 base._Widget._configure(self, qtile, bar)
145
146 if Systray._instances > 0:
147 raise ConfigError("Only one Systray can be used.")
148
149 if self.configured:
150 return
151
152 self.conn = conn = qtile.core.conn
153 win = conn.create_window(-1, -1, 1, 1)
154 window._Window.__init__(self, window.XWindow(conn, win.wid), qtile)
155 qtile.windows_map[win.wid] = self
156
157 # window._Window.__init__ overwrites the widget name so we need to restore it
158 self.name = self._name
159
160 # Even when we have multiple "Screen"s, we are setting up as the system
161 # tray on a particular X display, that is the screen we need to
162 # reference in the atom
163 if qtile.current_screen:
164 self.screen = qtile.current_screen.index
165 self.bar = bar
166 atoms = conn.atoms
167
168 # We need tray to tell icons which visual to use.
169 # This needs to be the same as the bar/widget.
170 # This mainly benefits transparent bars.
171 conn.conn.core.ChangeProperty(
172 xcffib.xproto.PropMode.Replace,
173 win.wid,
174 atoms["_NET_SYSTEM_TRAY_VISUAL"],
175 xcffib.xproto.Atom.VISUALID,
176 32,
177 1,
178 [self.drawer._visual.visual_id],
179 )
180
181 conn.conn.core.SetSelectionOwner(
182 win.wid, atoms["_NET_SYSTEM_TRAY_S{:d}".format(self.screen)], xcffib.CurrentTime
183 )
184 data = [
185 xcffib.CurrentTime,
186 atoms["_NET_SYSTEM_TRAY_S{:d}".format(self.screen)],
187 win.wid,
188 0,
189 0,
190 ]
191 union = ClientMessageData.synthetic(data, "I" * 5)
192 event = ClientMessageEvent.synthetic(
193 format=32, window=qtile.core._root.wid, type=atoms["MANAGER"], data=union
194 )
195 qtile.core._root.send_event(event, mask=EventMask.StructureNotify)
196
197 Systray._instances += 1
198
199 def create_mirror(self):
200 """
201 Systray cannot be mirrored as we do not use a Drawer object to render icons.
202
203 Return itself so that, when the bar tries to configure it again, a ConfigError
204 is raised.
205 """
206 return self
207
208 def handle_ClientMessage(self, event): # noqa: N802
209 atoms = self.conn.atoms
210
211 opcode = event.type
212 data = event.data.data32
213 message = data[1]
214 wid = data[2]
215
216 parent = self.bar.window.window
217
218 if opcode == atoms["_NET_SYSTEM_TRAY_OPCODE"] and message == 0:
219 w = window.XWindow(self.conn, wid)
220 icon = Icon(w, self.qtile, self)
221 if icon not in self.tray_icons:
222 self.tray_icons.append(icon)
223 self.tray_icons.sort(key=lambda icon: icon.name)
224 self.qtile.windows_map[wid] = icon
225
226 self.conn.conn.core.ChangeSaveSet(SetMode.Insert, wid)
227 self.conn.conn.core.ReparentWindow(wid, parent.wid, 0, 0)
228 self.conn.conn.flush()
229
230 info = icon.window.get_property("_XEMBED_INFO", unpack=int)
231
232 if not info:
233 self.bar.draw()
234 return False
235
236 if info[1]:
237 self.bar.draw()
238
239 return False
240
241 def draw(self):
242 offset = self.padding
243 self.drawer.clear(self.background or self.bar.background)
244 self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.length)
245 for pos, icon in enumerate(self.tray_icons):
246 icon.window.set_attribute(backpixmap=self.drawer.pixmap)
247 if self.bar.horizontal:
248 xoffset = self.offsetx + offset
249 yoffset = self.bar.height // 2 - self.icon_size // 2 + self.offsety
250 step = icon.width
251 else:
252 xoffset = self.bar.width // 2 - self.icon_size // 2 + self.offsetx
253 yoffset = self.offsety + offset
254 step = icon.height
255
256 icon.place(xoffset, yoffset, icon.width, self.icon_size, 0, None)
257 if icon.hidden:
258 icon.unhide()
259 data = [
260 self.conn.atoms["_XEMBED_EMBEDDED_NOTIFY"],
261 xcffib.xproto.Time.CurrentTime,
262 0,
263 self.bar.window.wid,
264 XEMBED_PROTOCOL_VERSION,
265 ]
266 u = xcffib.xproto.ClientMessageData.synthetic(data, "I" * 5)
267 event = xcffib.xproto.ClientMessageEvent.synthetic(
268 format=32, window=icon.wid, type=self.conn.atoms["_XEMBED"], data=u
269 )
270 self.window.send_event(event)
271
272 offset += step + self.padding
273
274 def finalize(self):
275 base._Widget.finalize(self)
276 atoms = self.conn.atoms
277 self.conn.conn.core.SetSelectionOwner(
278 0,
279 atoms["_NET_SYSTEM_TRAY_S{:d}".format(self.screen)],
280 xcffib.CurrentTime,
281 )
282 self.hide()
283
284 root = self.qtile.core._root.wid
285 for icon in self.tray_icons:
286 self.conn.conn.core.ReparentWindow(icon.window.wid, root, 0, 0)
287 self.conn.conn.flush()
288
289 del self.qtile.windows_map[self.wid]
290 self.conn.conn.core.DestroyWindow(self.wid)
291
292 Systray._instances -= 1
293
[end of libqtile/widget/systray.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/widget/systray.py b/libqtile/widget/systray.py
--- a/libqtile/widget/systray.py
+++ b/libqtile/widget/systray.py
@@ -143,12 +143,12 @@
def _configure(self, qtile, bar):
base._Widget._configure(self, qtile, bar)
- if Systray._instances > 0:
- raise ConfigError("Only one Systray can be used.")
-
if self.configured:
return
+ if Systray._instances > 0:
+ raise ConfigError("Only one Systray can be used.")
+
self.conn = conn = qtile.core.conn
win = conn.create_window(-1, -1, 1, 1)
window._Window.__init__(self, window.XWindow(conn, win.wid), qtile)
@@ -200,10 +200,10 @@
"""
Systray cannot be mirrored as we do not use a Drawer object to render icons.
- Return itself so that, when the bar tries to configure it again, a ConfigError
- is raised.
+ Return new, unconfigured instance so that, when the bar tries to configure it
+ again, a ConfigError is raised.
"""
- return self
+ return Systray()
def handle_ClientMessage(self, event): # noqa: N802
atoms = self.conn.atoms
| {"golden_diff": "diff --git a/libqtile/widget/systray.py b/libqtile/widget/systray.py\n--- a/libqtile/widget/systray.py\n+++ b/libqtile/widget/systray.py\n@@ -143,12 +143,12 @@\n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n \n- if Systray._instances > 0:\n- raise ConfigError(\"Only one Systray can be used.\")\n-\n if self.configured:\n return\n \n+ if Systray._instances > 0:\n+ raise ConfigError(\"Only one Systray can be used.\")\n+\n self.conn = conn = qtile.core.conn\n win = conn.create_window(-1, -1, 1, 1)\n window._Window.__init__(self, window.XWindow(conn, win.wid), qtile)\n@@ -200,10 +200,10 @@\n \"\"\"\n Systray cannot be mirrored as we do not use a Drawer object to render icons.\n \n- Return itself so that, when the bar tries to configure it again, a ConfigError\n- is raised.\n+ Return new, unconfigured instance so that, when the bar tries to configure it\n+ again, a ConfigError is raised.\n \"\"\"\n- return self\n+ return Systray()\n \n def handle_ClientMessage(self, event): # noqa: N802\n atoms = self.conn.atoms\n", "issue": "Systray widget crash\n2022-01-27 09:48:40,366 ERROR libqtile bar.py:_configure_widget():L324 Systray widget crashed during _configure with error: ConfigError('Only one Systray can be used.')\r\n[config.txt](https://github.com/qtile/qtile/files/7948239/config.txt)\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) 2010 Aldo Cortesi\n# Copyright (c) 2010-2011 dequis\n# Copyright (c) 2010, 2012 roger\n# Copyright (c) 2011 Mounier Florian\n# Copyright (c) 2011-2012, 2014 Tycho Andersen\n# Copyright (c) 2012 dmpayton\n# Copyright (c) 2012-2013 Craig Barnes\n# Copyright (c) 2013 hbc\n# Copyright (c) 2013 Tao Sauvage\n# Copyright (c) 2014 Sean Vig\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom typing import TYPE_CHECKING\n\nimport xcffib\nfrom xcffib.xproto import ClientMessageData, ClientMessageEvent, EventMask, SetMode\n\nfrom libqtile import bar\nfrom libqtile.backend.x11 import window\nfrom libqtile.confreader import ConfigError\nfrom libqtile.widget import base\n\nif TYPE_CHECKING:\n from typing import List, Optional\n\nXEMBED_PROTOCOL_VERSION = 0\n\n\nclass Icon(window._Window):\n _window_mask = EventMask.StructureNotify | EventMask.PropertyChange | EventMask.Exposure\n\n def __init__(self, win, qtile, systray):\n window._Window.__init__(self, win, qtile)\n self.systray = systray\n # win.get_name() may return None when apps provide a temporary window before the icon window\n # we need something in self.name in order to sort icons so we use the window's WID.\n self.name = win.get_name() or str(win.wid)\n self.update_size()\n\n def __eq__(self, other):\n if not isinstance(other, Icon):\n return False\n\n return self.window.wid == other.window.wid\n\n def update_size(self):\n icon_size = self.systray.icon_size\n self.update_hints()\n\n width = self.hints.get(\"min_width\", icon_size)\n height = self.hints.get(\"min_height\", icon_size)\n\n width = max(width, icon_size)\n height = max(height, icon_size)\n\n if height > icon_size:\n width = width * icon_size // height\n height = icon_size\n\n self.width = width\n self.height = height\n return False\n\n def handle_PropertyNotify(self, e): # noqa: N802\n name = self.qtile.core.conn.atoms.get_name(e.atom)\n if name == \"_XEMBED_INFO\":\n info = self.window.get_property(\"_XEMBED_INFO\", unpack=int)\n if info and info[1]:\n self.systray.bar.draw()\n\n return False\n\n def handle_DestroyNotify(self, event): # noqa: N802\n wid = event.window\n icon = self.qtile.windows_map.pop(wid)\n self.systray.tray_icons.remove(icon)\n self.systray.bar.draw()\n return False\n\n handle_UnmapNotify = handle_DestroyNotify # noqa: N815\n\n\nclass Systray(window._Window, base._Widget):\n \"\"\"\n A widget that manages system tray.\n\n Only one Systray widget is allowed. Adding additional Systray\n widgets will result in a ConfigError.\n\n .. note::\n Icons will not render correctly where the bar/widget is\n drawn with a semi-transparent background. 
Instead, icons\n will be drawn with a transparent background.\n\n If using this widget it is therefore recommended to use\n a fully opaque background colour or a fully transparent\n one.\n \"\"\"\n\n _instances = 0\n\n _window_mask = EventMask.StructureNotify | EventMask.Exposure\n\n orientations = base.ORIENTATION_BOTH\n\n defaults = [\n (\"icon_size\", 20, \"Icon width\"),\n (\"padding\", 5, \"Padding between icons\"),\n ]\n\n def __init__(self, **config):\n base._Widget.__init__(self, bar.CALCULATED, **config)\n self.add_defaults(Systray.defaults)\n self.tray_icons = []\n self.screen = 0\n self._name = config.get(\"name\", \"systray\")\n self._wm_class: Optional[List[str]] = None\n\n def calculate_length(self):\n if self.bar.horizontal:\n length = sum(i.width for i in self.tray_icons)\n else:\n length = sum(i.height for i in self.tray_icons)\n length += self.padding * len(self.tray_icons)\n return length\n\n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n\n if Systray._instances > 0:\n raise ConfigError(\"Only one Systray can be used.\")\n\n if self.configured:\n return\n\n self.conn = conn = qtile.core.conn\n win = conn.create_window(-1, -1, 1, 1)\n window._Window.__init__(self, window.XWindow(conn, win.wid), qtile)\n qtile.windows_map[win.wid] = self\n\n # window._Window.__init__ overwrites the widget name so we need to restore it\n self.name = self._name\n\n # Even when we have multiple \"Screen\"s, we are setting up as the system\n # tray on a particular X display, that is the screen we need to\n # reference in the atom\n if qtile.current_screen:\n self.screen = qtile.current_screen.index\n self.bar = bar\n atoms = conn.atoms\n\n # We need tray to tell icons which visual to use.\n # This needs to be the same as the bar/widget.\n # This mainly benefits transparent bars.\n conn.conn.core.ChangeProperty(\n xcffib.xproto.PropMode.Replace,\n win.wid,\n atoms[\"_NET_SYSTEM_TRAY_VISUAL\"],\n xcffib.xproto.Atom.VISUALID,\n 32,\n 1,\n [self.drawer._visual.visual_id],\n )\n\n conn.conn.core.SetSelectionOwner(\n win.wid, atoms[\"_NET_SYSTEM_TRAY_S{:d}\".format(self.screen)], xcffib.CurrentTime\n )\n data = [\n xcffib.CurrentTime,\n atoms[\"_NET_SYSTEM_TRAY_S{:d}\".format(self.screen)],\n win.wid,\n 0,\n 0,\n ]\n union = ClientMessageData.synthetic(data, \"I\" * 5)\n event = ClientMessageEvent.synthetic(\n format=32, window=qtile.core._root.wid, type=atoms[\"MANAGER\"], data=union\n )\n qtile.core._root.send_event(event, mask=EventMask.StructureNotify)\n\n Systray._instances += 1\n\n def create_mirror(self):\n \"\"\"\n Systray cannot be mirrored as we do not use a Drawer object to render icons.\n\n Return itself so that, when the bar tries to configure it again, a ConfigError\n is raised.\n \"\"\"\n return self\n\n def handle_ClientMessage(self, event): # noqa: N802\n atoms = self.conn.atoms\n\n opcode = event.type\n data = event.data.data32\n message = data[1]\n wid = data[2]\n\n parent = self.bar.window.window\n\n if opcode == atoms[\"_NET_SYSTEM_TRAY_OPCODE\"] and message == 0:\n w = window.XWindow(self.conn, wid)\n icon = Icon(w, self.qtile, self)\n if icon not in self.tray_icons:\n self.tray_icons.append(icon)\n self.tray_icons.sort(key=lambda icon: icon.name)\n self.qtile.windows_map[wid] = icon\n\n self.conn.conn.core.ChangeSaveSet(SetMode.Insert, wid)\n self.conn.conn.core.ReparentWindow(wid, parent.wid, 0, 0)\n self.conn.conn.flush()\n\n info = icon.window.get_property(\"_XEMBED_INFO\", unpack=int)\n\n if not info:\n self.bar.draw()\n return False\n\n if 
info[1]:\n self.bar.draw()\n\n return False\n\n def draw(self):\n offset = self.padding\n self.drawer.clear(self.background or self.bar.background)\n self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.length)\n for pos, icon in enumerate(self.tray_icons):\n icon.window.set_attribute(backpixmap=self.drawer.pixmap)\n if self.bar.horizontal:\n xoffset = self.offsetx + offset\n yoffset = self.bar.height // 2 - self.icon_size // 2 + self.offsety\n step = icon.width\n else:\n xoffset = self.bar.width // 2 - self.icon_size // 2 + self.offsetx\n yoffset = self.offsety + offset\n step = icon.height\n\n icon.place(xoffset, yoffset, icon.width, self.icon_size, 0, None)\n if icon.hidden:\n icon.unhide()\n data = [\n self.conn.atoms[\"_XEMBED_EMBEDDED_NOTIFY\"],\n xcffib.xproto.Time.CurrentTime,\n 0,\n self.bar.window.wid,\n XEMBED_PROTOCOL_VERSION,\n ]\n u = xcffib.xproto.ClientMessageData.synthetic(data, \"I\" * 5)\n event = xcffib.xproto.ClientMessageEvent.synthetic(\n format=32, window=icon.wid, type=self.conn.atoms[\"_XEMBED\"], data=u\n )\n self.window.send_event(event)\n\n offset += step + self.padding\n\n def finalize(self):\n base._Widget.finalize(self)\n atoms = self.conn.atoms\n self.conn.conn.core.SetSelectionOwner(\n 0,\n atoms[\"_NET_SYSTEM_TRAY_S{:d}\".format(self.screen)],\n xcffib.CurrentTime,\n )\n self.hide()\n\n root = self.qtile.core._root.wid\n for icon in self.tray_icons:\n self.conn.conn.core.ReparentWindow(icon.window.wid, root, 0, 0)\n self.conn.conn.flush()\n\n del self.qtile.windows_map[self.wid]\n self.conn.conn.core.DestroyWindow(self.wid)\n\n Systray._instances -= 1\n", "path": "libqtile/widget/systray.py"}]} | 3,893 | 337 |
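The golden diff above fixes this crash by checking `self.configured` before the instance counter, so that when the bar tries to configure the same Systray again only a genuinely second instance raises `ConfigError` (and `create_mirror` now hands back a fresh, unconfigured instance for that check to catch). Stripped of the X11 details, the ordering looks like this — a sketch with a generic exception, not the real widget:

```python
class SingleInstanceWidget:
    _instances = 0

    def __init__(self) -> None:
        self.configured = False

    def _configure(self) -> None:
        if self.configured:
            # Re-configuring the one existing instance is fine.
            return
        if SingleInstanceWidget._instances > 0:
            # Only a genuinely second instance is a configuration error.
            raise RuntimeError("Only one instance can be used.")
        SingleInstanceWidget._instances += 1
        self.configured = True
```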
gh_patches_debug_32051 | rasdani/github-patches | git_diff | apache__airflow-18224 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
POST /api/v1/users fails with exception
### Apache Airflow version
main (development)
### Operating System
From Astronomer’s QA team
### Versions of Apache Airflow Providers
_No response_
### Deployment
Astronomer
### Deployment details
_No response_
### What happened
When adding a new user, the following exception is emitted:
```
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.9/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/decorator.py", line 48, in wrapper
response = function(request)
File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/uri_parsing.py", line 144, in wrapper
response = function(request)
File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/validation.py", line 184, in wrapper
response = function(request)
File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/response.py", line 103, in wrapper
response = function(request)
File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/parameter.py", line 121, in wrapper
return function(**kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/api_connexion/security.py", line 47, in decorated
return func(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/api_connexion/endpoints/user_endpoint.py", line 105, in post_user
user.roles.extend(roles_to_add)
AttributeError: 'bool' object has no attribute 'roles'
```
The immediate cause of this exception is that F.A.B. returns `False` when it fails to add a new user. The problem, however, is _why_ exactly it failed. This is the payload used:
```json
{
"username": "username6",
"password": "password1",
"email": "[email protected]",
"first_name": "user2",
"last_name": "test1",
"roles":[{"name":"Admin"},{"name":"Viewer"}]
}
```
This went through validation, therefore we know
1. The POST-ing user has permission to create a new user.
2. The format is correct (including the nested roles).
3. There is not already an existing `username6` in the database.
4. All listed roles exist.
(All these are already covered by unit tests.)
Further complicating the issue, F.A.B.'s security manager swallows the exception when this happens and only logs it on the server, and we're having trouble locating that log line. It's quite difficult to diagnose further, so I'm posting this hoping someone has better luck reproducing it.
I will submit a fix to correct the immediate issue, making the API emit 500 with something like “Failed to create user for unknown reason” to make the failure _slightly_ less confusing.
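For illustration, a minimal sketch of the guard being described. It follows the handler shown below and treats `Unknown` from `airflow.api_connexion.exceptions` as the 500-style error (as the diff at the end of this record does); whether to raise or return it, and the exact message, are assumptions:

```python
from airflow.api_connexion.exceptions import Unknown

# Inside post_user(), after the requested roles have been resolved:
user = security_manager.add_user(role=default_role, **data)
if not user:
    # F.A.B. returns False on failure instead of raising, so fail loudly here
    # rather than crashing on `False.roles` a line later.
    raise Unknown(detail=f"Failed to create user `{data['username']}` for an unknown reason.")
if roles_to_add:
    user.roles.extend(roles_to_add)
    security_manager.update_user(user)
```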
### What you expected to happen
_No response_
### How to reproduce
_No response_
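No reproduction steps were given, but exercising the endpoint with the payload from above would look roughly like this (host, port, and basic-auth credentials are assumptions about a local deployment):

```python
import requests

payload = {
    "username": "username6",
    "password": "password1",
    "email": "[email protected]",
    "first_name": "user2",
    "last_name": "test1",
    "roles": [{"name": "Admin"}, {"name": "Viewer"}],
}
resp = requests.post(
    "http://localhost:8080/api/v1/users",  # assumed local webserver address
    json=payload,
    auth=("admin", "admin"),               # assumed basic-auth credentials
)
print(resp.status_code, resp.text)
```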
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
</issue>
<code>
[start of airflow/api_connexion/endpoints/user_endpoint.py]
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 from flask import current_app, request
18 from flask_appbuilder.security.sqla.models import User
19 from marshmallow import ValidationError
20 from sqlalchemy import func
21 from werkzeug.security import generate_password_hash
22
23 from airflow.api_connexion import security
24 from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound
25 from airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters
26 from airflow.api_connexion.schemas.user_schema import (
27 UserCollection,
28 user_collection_item_schema,
29 user_collection_schema,
30 user_schema,
31 )
32 from airflow.security import permissions
33
34
35 @security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_USER)])
36 def get_user(username):
37 """Get a user"""
38 ab_security_manager = current_app.appbuilder.sm
39 user = ab_security_manager.find_user(username=username)
40 if not user:
41 raise NotFound(title="User not found", detail=f"The User with username `{username}` was not found")
42 return user_collection_item_schema.dump(user)
43
44
45 @security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_USER)])
46 @format_parameters({'limit': check_limit})
47 def get_users(limit, order_by='id', offset=None):
48 """Get users"""
49 appbuilder = current_app.appbuilder
50 session = appbuilder.get_session
51 total_entries = session.query(func.count(User.id)).scalar()
52 to_replace = {"user_id": "id"}
53 allowed_filter_attrs = [
54 "user_id",
55 'id',
56 "first_name",
57 "last_name",
58 "user_name",
59 "email",
60 "is_active",
61 "role",
62 ]
63 query = session.query(User)
64 query = apply_sorting(query, order_by, to_replace, allowed_filter_attrs)
65 users = query.offset(offset).limit(limit).all()
66
67 return user_collection_schema.dump(UserCollection(users=users, total_entries=total_entries))
68
69
70 @security.requires_access([(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_USER)])
71 def post_user():
72 """Create a new user"""
73 try:
74 data = user_schema.load(request.json)
75 except ValidationError as e:
76 raise BadRequest(detail=str(e.messages))
77
78 security_manager = current_app.appbuilder.sm
79
80 user = security_manager.find_user(username=data["username"])
81 if user is not None:
82 detail = f"Username `{user.username}` already exists. Use PATCH to update."
83 raise AlreadyExists(detail=detail)
84
85 roles_to_add = []
86 missing_role_names = []
87 for role_data in data.pop("roles", ()):
88 role_name = role_data["name"]
89 role = security_manager.find_role(role_name)
90 if role is None:
91 missing_role_names.append(role_name)
92 else:
93 roles_to_add.append(role)
94 if missing_role_names:
95 detail = f"Unknown roles: {', '.join(repr(n) for n in missing_role_names)}"
96 raise BadRequest(detail=detail)
97
98 if roles_to_add:
99 default_role = roles_to_add.pop()
100 else: # No roles provided, use the F.A.B's default registered user role.
101 default_role = security_manager.find_role(security_manager.auth_user_registration_role)
102
103 user = security_manager.add_user(role=default_role, **data)
104 if roles_to_add:
105 user.roles.extend(roles_to_add)
106 security_manager.update_user(user)
107 return user_schema.dump(user)
108
109
110 @security.requires_access([(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_USER)])
111 def patch_user(username, update_mask=None):
112 """Update a role"""
113 try:
114 data = user_schema.load(request.json)
115 except ValidationError as e:
116 raise BadRequest(detail=str(e.messages))
117
118 security_manager = current_app.appbuilder.sm
119
120 user = security_manager.find_user(username=username)
121 if user is None:
122 detail = f"The User with username `{username}` was not found"
123 raise NotFound(title="User not found", detail=detail)
124
125 # Get fields to update. 'username' is always excluded (and it's an error to
126     # include it in update_mask).
127 if update_mask is not None:
128 masked_data = {}
129 missing_mask_names = []
130 for field in update_mask:
131 field = field.strip()
132 try:
133 masked_data[field] = data[field]
134 except KeyError:
135 missing_mask_names.append(field)
136 if missing_mask_names:
137 detail = f"Unknown update masks: {', '.join(repr(n) for n in missing_mask_names)}"
138 raise BadRequest(detail=detail)
139 if "username" in masked_data:
140 raise BadRequest("Cannot update fields: 'username'")
141 data = masked_data
142 else:
143 data.pop("username", None)
144
145 if "roles" in data:
146 roles_to_update = []
147 missing_role_names = []
148 for role_data in data.pop("roles", ()):
149 role_name = role_data["name"]
150 role = security_manager.find_role(role_name)
151 if role is None:
152 missing_role_names.append(role_name)
153 else:
154 roles_to_update.append(role)
155 if missing_role_names:
156 detail = f"Unknown roles: {', '.join(repr(n) for n in missing_role_names)}"
157 raise BadRequest(detail=detail)
158 else:
159 roles_to_update = None # Don't change existing value.
160
161 if "password" in data:
162 user.password = generate_password_hash(data.pop("password"))
163 if roles_to_update is not None:
164 user.roles = roles_to_update
165 for key, value in data.items():
166 setattr(user, key, value)
167 security_manager.update_user(user)
168
169 return user_schema.dump(user)
170
171
172 @security.requires_access([(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_USER)])
173 def delete_user(username):
174 """Delete a user"""
175 security_manager = current_app.appbuilder.sm
176
177 user = security_manager.find_user(username=username)
178 if user is None:
179 detail = f"The User with username `{username}` was not found"
180 raise NotFound(title="User not found", detail=detail)
181
182 user.roles = [] # Clear foreign keys on this user first.
183 security_manager.get_session.delete(user)
184 security_manager.get_session.commit()
185
[end of airflow/api_connexion/endpoints/user_endpoint.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/api_connexion/endpoints/user_endpoint.py b/airflow/api_connexion/endpoints/user_endpoint.py
--- a/airflow/api_connexion/endpoints/user_endpoint.py
+++ b/airflow/api_connexion/endpoints/user_endpoint.py
@@ -21,7 +21,7 @@
from werkzeug.security import generate_password_hash
from airflow.api_connexion import security
-from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound
+from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound, Unknown
from airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters
from airflow.api_connexion.schemas.user_schema import (
UserCollection,
@@ -76,10 +76,14 @@
raise BadRequest(detail=str(e.messages))
security_manager = current_app.appbuilder.sm
+ username = data["username"]
+ email = data["email"]
- user = security_manager.find_user(username=data["username"])
- if user is not None:
- detail = f"Username `{user.username}` already exists. Use PATCH to update."
+ if security_manager.find_user(username=username):
+ detail = f"Username `{username}` already exists. Use PATCH to update."
+ raise AlreadyExists(detail=detail)
+ if security_manager.find_user(email=email):
+ detail = f"The email `{email}` is already taken."
raise AlreadyExists(detail=detail)
roles_to_add = []
@@ -101,6 +105,10 @@
default_role = security_manager.find_role(security_manager.auth_user_registration_role)
user = security_manager.add_user(role=default_role, **data)
+ if not user:
+ detail = f"Failed to add user `{username}`."
+ return Unknown(detail=detail)
+
if roles_to_add:
user.roles.extend(roles_to_add)
security_manager.update_user(user)
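For illustration, the duplicate-username/email check that this patch adds to `post_user` can be exercised in isolation. The `FakeSecurityManager` and `AlreadyExists` classes below are stand-ins invented for the sketch, not Flask-AppBuilder or Airflow APIs:
```python
# Stand-alone sketch of the uniqueness checks added to post_user() above.
# FakeSecurityManager and AlreadyExists are illustrative stand-ins only.

class AlreadyExists(Exception):
    pass


class FakeSecurityManager:
    """Minimal stand-in exposing find_user(username=...) / find_user(email=...)."""

    def __init__(self, users):
        self._users = users  # list of dicts with "username" and "email" keys

    def find_user(self, username=None, email=None):
        for user in self._users:
            if username is not None and user["username"] == username:
                return user
            if email is not None and user["email"] == email:
                return user
        return None


def check_unique(security_manager, data):
    """Raise AlreadyExists when the username or the email is already taken."""
    username = data["username"]
    email = data["email"]
    if security_manager.find_user(username=username):
        raise AlreadyExists(f"Username `{username}` already exists. Use PATCH to update.")
    if security_manager.find_user(email=email):
        raise AlreadyExists(f"The email `{email}` is already taken.")


if __name__ == "__main__":
    sm = FakeSecurityManager([{"username": "username6", "email": "[email protected]"}])
    try:
        check_unique(sm, {"username": "new_user", "email": "[email protected]"})
    except AlreadyExists as exc:
        print(exc)  # -> The email `[email protected]` is already taken.
```
Checking both fields before calling `add_user` is what turns the silent `False` return (and the later `AttributeError`) into an explicit 409-style error.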
| {"golden_diff": "diff --git a/airflow/api_connexion/endpoints/user_endpoint.py b/airflow/api_connexion/endpoints/user_endpoint.py\n--- a/airflow/api_connexion/endpoints/user_endpoint.py\n+++ b/airflow/api_connexion/endpoints/user_endpoint.py\n@@ -21,7 +21,7 @@\n from werkzeug.security import generate_password_hash\n \n from airflow.api_connexion import security\n-from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound\n+from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound, Unknown\n from airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters\n from airflow.api_connexion.schemas.user_schema import (\n UserCollection,\n@@ -76,10 +76,14 @@\n raise BadRequest(detail=str(e.messages))\n \n security_manager = current_app.appbuilder.sm\n+ username = data[\"username\"]\n+ email = data[\"email\"]\n \n- user = security_manager.find_user(username=data[\"username\"])\n- if user is not None:\n- detail = f\"Username `{user.username}` already exists. Use PATCH to update.\"\n+ if security_manager.find_user(username=username):\n+ detail = f\"Username `{username}` already exists. Use PATCH to update.\"\n+ raise AlreadyExists(detail=detail)\n+ if security_manager.find_user(email=email):\n+ detail = f\"The email `{email}` is already taken.\"\n raise AlreadyExists(detail=detail)\n \n roles_to_add = []\n@@ -101,6 +105,10 @@\n default_role = security_manager.find_role(security_manager.auth_user_registration_role)\n \n user = security_manager.add_user(role=default_role, **data)\n+ if not user:\n+ detail = f\"Failed to add user `{username}`.\"\n+ return Unknown(detail=detail)\n+\n if roles_to_add:\n user.roles.extend(roles_to_add)\n security_manager.update_user(user)\n", "issue": "POST /api/v1/users fails with exception\n### Apache Airflow version\n\nmain (development)\n\n### Operating System\n\nFrom Astronomer\u2019s QA team\n\n### Versions of Apache Airflow Providers\n\n_No response_\n\n### Deployment\n\nAstronomer\n\n### Deployment details\n\n_No response_\n\n### What happened\n\nWhen adding a new user, The following exception is emitted:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.9/site-packages/flask/app.py\", line 2447, in wsgi_app\r\n response = self.full_dispatch_request()\r\n File \"/usr/local/lib/python3.9/site-packages/flask/app.py\", line 1952, in full_dispatch_request\r\n rv = self.handle_user_exception(e)\r\n File \"/usr/local/lib/python3.9/site-packages/flask/app.py\", line 1821, in handle_user_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"/usr/local/lib/python3.9/site-packages/flask/_compat.py\", line 39, in reraise\r\n raise value\r\n File \"/usr/local/lib/python3.9/site-packages/flask/app.py\", line 1950, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/usr/local/lib/python3.9/site-packages/flask/app.py\", line 1936, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/decorator.py\", line 48, in wrapper\r\n response = function(request)\r\n File \"/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/uri_parsing.py\", line 144, in wrapper\r\n response = function(request)\r\n File \"/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/validation.py\", line 184, in wrapper\r\n response = function(request)\r\n File 
\"/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/response.py\", line 103, in wrapper\r\n response = function(request)\r\n File \"/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/parameter.py\", line 121, in wrapper\r\n return function(**kwargs)\r\n File \"/usr/local/lib/python3.9/site-packages/airflow/api_connexion/security.py\", line 47, in decorated\r\n return func(*args, **kwargs)\r\n File \"/usr/local/lib/python3.9/site-packages/airflow/api_connexion/endpoints/user_endpoint.py\", line 105, in post_user\r\n user.roles.extend(roles_to_add)\r\nAttributeError: 'bool' object has no attribute 'roles'\r\n```\r\n\r\nThe immediate cause to this exception is F.A.B. returns `False` when it fails to add a new user. The problem, however, is _why_ excactly it failed. This is the payload used:\r\n\r\n```json\r\n{\r\n \"username\": \"username6\",\r\n \"password\": \"password1\",\r\n \"email\": \"[email protected]\",\r\n \"first_name\": \"user2\",\r\n \"last_name\": \"test1\",\r\n \"roles\":[{\"name\":\"Admin\"},{\"name\":\"Viewer\"}]\r\n}\r\n```\r\n\r\nThis went through validation, therefore we know\r\n\r\n1. The POST-ing user has permission to create a new user.\r\n2. The format is correct (including the nested roles).\r\n3. There is not already an existing `username6` in the database.\r\n4. All listed roles exist.\r\n\r\n(All these are already covered by unit tests.)\r\n\r\nFurther complicating the issue is F.A.B.\u2019s security manager swallows an exception when this happens, and only logs the exception to the server. And we\u2019re having trouble locating that line of log. It\u2019s quite difficult to diagnose further, so I\u2019m posting this hoping someone has better luck reproducing this.\r\n\r\nI will submit a fix to correct the immediate issue, making the API emit 500 with something like \u201cFailed to create user for unknown reason\u201d to make the failure _slightly_ less confusing.\n\n### What you expected to happen\n\n_No response_\n\n### How to reproduce\n\n_No response_\n\n### Anything else\n\n_No response_\n\n### Are you willing to submit PR?\n\n- [X] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom flask import current_app, request\nfrom flask_appbuilder.security.sqla.models import User\nfrom marshmallow import ValidationError\nfrom sqlalchemy import func\nfrom werkzeug.security import generate_password_hash\n\nfrom airflow.api_connexion import security\nfrom airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound\nfrom airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters\nfrom airflow.api_connexion.schemas.user_schema import (\n UserCollection,\n user_collection_item_schema,\n user_collection_schema,\n user_schema,\n)\nfrom airflow.security import permissions\n\n\[email protected]_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_USER)])\ndef get_user(username):\n \"\"\"Get a user\"\"\"\n ab_security_manager = current_app.appbuilder.sm\n user = ab_security_manager.find_user(username=username)\n if not user:\n raise NotFound(title=\"User not found\", detail=f\"The User with username `{username}` was not found\")\n return user_collection_item_schema.dump(user)\n\n\[email protected]_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_USER)])\n@format_parameters({'limit': check_limit})\ndef get_users(limit, order_by='id', offset=None):\n \"\"\"Get users\"\"\"\n appbuilder = current_app.appbuilder\n session = appbuilder.get_session\n total_entries = session.query(func.count(User.id)).scalar()\n to_replace = {\"user_id\": \"id\"}\n allowed_filter_attrs = [\n \"user_id\",\n 'id',\n \"first_name\",\n \"last_name\",\n \"user_name\",\n \"email\",\n \"is_active\",\n \"role\",\n ]\n query = session.query(User)\n query = apply_sorting(query, order_by, to_replace, allowed_filter_attrs)\n users = query.offset(offset).limit(limit).all()\n\n return user_collection_schema.dump(UserCollection(users=users, total_entries=total_entries))\n\n\[email protected]_access([(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_USER)])\ndef post_user():\n \"\"\"Create a new user\"\"\"\n try:\n data = user_schema.load(request.json)\n except ValidationError as e:\n raise BadRequest(detail=str(e.messages))\n\n security_manager = current_app.appbuilder.sm\n\n user = security_manager.find_user(username=data[\"username\"])\n if user is not None:\n detail = f\"Username `{user.username}` already exists. 
Use PATCH to update.\"\n raise AlreadyExists(detail=detail)\n\n roles_to_add = []\n missing_role_names = []\n for role_data in data.pop(\"roles\", ()):\n role_name = role_data[\"name\"]\n role = security_manager.find_role(role_name)\n if role is None:\n missing_role_names.append(role_name)\n else:\n roles_to_add.append(role)\n if missing_role_names:\n detail = f\"Unknown roles: {', '.join(repr(n) for n in missing_role_names)}\"\n raise BadRequest(detail=detail)\n\n if roles_to_add:\n default_role = roles_to_add.pop()\n else: # No roles provided, use the F.A.B's default registered user role.\n default_role = security_manager.find_role(security_manager.auth_user_registration_role)\n\n user = security_manager.add_user(role=default_role, **data)\n if roles_to_add:\n user.roles.extend(roles_to_add)\n security_manager.update_user(user)\n return user_schema.dump(user)\n\n\[email protected]_access([(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_USER)])\ndef patch_user(username, update_mask=None):\n \"\"\"Update a role\"\"\"\n try:\n data = user_schema.load(request.json)\n except ValidationError as e:\n raise BadRequest(detail=str(e.messages))\n\n security_manager = current_app.appbuilder.sm\n\n user = security_manager.find_user(username=username)\n if user is None:\n detail = f\"The User with username `{username}` was not found\"\n raise NotFound(title=\"User not found\", detail=detail)\n\n # Get fields to update. 'username' is always excluded (and it's an error to\n # include it in update_maek).\n if update_mask is not None:\n masked_data = {}\n missing_mask_names = []\n for field in update_mask:\n field = field.strip()\n try:\n masked_data[field] = data[field]\n except KeyError:\n missing_mask_names.append(field)\n if missing_mask_names:\n detail = f\"Unknown update masks: {', '.join(repr(n) for n in missing_mask_names)}\"\n raise BadRequest(detail=detail)\n if \"username\" in masked_data:\n raise BadRequest(\"Cannot update fields: 'username'\")\n data = masked_data\n else:\n data.pop(\"username\", None)\n\n if \"roles\" in data:\n roles_to_update = []\n missing_role_names = []\n for role_data in data.pop(\"roles\", ()):\n role_name = role_data[\"name\"]\n role = security_manager.find_role(role_name)\n if role is None:\n missing_role_names.append(role_name)\n else:\n roles_to_update.append(role)\n if missing_role_names:\n detail = f\"Unknown roles: {', '.join(repr(n) for n in missing_role_names)}\"\n raise BadRequest(detail=detail)\n else:\n roles_to_update = None # Don't change existing value.\n\n if \"password\" in data:\n user.password = generate_password_hash(data.pop(\"password\"))\n if roles_to_update is not None:\n user.roles = roles_to_update\n for key, value in data.items():\n setattr(user, key, value)\n security_manager.update_user(user)\n\n return user_schema.dump(user)\n\n\[email protected]_access([(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_USER)])\ndef delete_user(username):\n \"\"\"Delete a user\"\"\"\n security_manager = current_app.appbuilder.sm\n\n user = security_manager.find_user(username=username)\n if user is None:\n detail = f\"The User with username `{username}` was not found\"\n raise NotFound(title=\"User not found\", detail=detail)\n\n user.roles = [] # Clear foreign keys on this user first.\n security_manager.get_session.delete(user)\n security_manager.get_session.commit()\n", "path": "airflow/api_connexion/endpoints/user_endpoint.py"}]} | 3,427 | 411 |
gh_patches_debug_30331 | rasdani/github-patches | git_diff | e-valuation__EvaP-424 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UserProfile missing when User is not created during import
Users which are not created by an enrollment data import but by logging in for the first time (using Kerberos authentication, so they don't need to have a local account first) don't have a UserProfile. This leads to undefined behavior.
</issue>
<code>
[start of evap/evaluation/views.py]
1 from django.contrib import messages
2 from django.contrib.auth import login as auth_login
3 from django.shortcuts import redirect, render_to_response
4 from django.template import RequestContext
5 from django.utils.translation import ugettext as _
6
7 from evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm
8 from evap.evaluation.models import UserProfile, FaqSection, EmailTemplate
9
10
11 def index(request):
12 """Main entry page into EvaP providing all the login options available. THe username/password
13 login is thought to be used for internal users, e.g. by connecting to a LDAP directory.
14 The login key mechanism is meant to be used to include external participants, e.g. visiting
15 students or visiting contributors.
16 """
17
18 # parse the form data into the respective form
19 submit_type = request.POST.get("submit_type", "no_submit")
20 new_key_form = NewKeyForm(request.POST if submit_type == "new_key" else None)
21 login_key_form = LoginKeyForm(request.POST if submit_type == "login_key" else None)
22 login_username_form = LoginUsernameForm(request, request.POST if submit_type == "login_username" else None)
23
24 # process form data
25 if request.method == 'POST':
26 if new_key_form.is_valid():
27 # user wants a new login key
28 profile = new_key_form.get_profile()
29 profile.generate_login_key()
30 profile.save()
31
32 EmailTemplate.get_login_key_template().send_to_user(new_key_form.get_user())
33
34 messages.success(request, _(u"Successfully sent email with new login key."))
35 elif login_key_form.is_valid():
36 # user would like to login with a login key and passed key test
37 auth_login(request, login_key_form.get_user())
38 elif login_username_form.is_valid():
39 # user would like to login with username and password and passed password test
40 auth_login(request, login_username_form.get_user())
41
42 # clean up our test cookie
43 if request.session.test_cookie_worked():
44 request.session.delete_test_cookie()
45
46 # if not logged in by now, render form
47 if not request.user.is_active:
48 # set test cookie to verify whether they work in the next step
49 request.session.set_test_cookie()
50
51 return render_to_response("index.html", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request))
52 else:
53 # check for redirect variable
54 redirect_to = request.GET.get("next", None)
55 if redirect_to is not None:
56 if redirect_to.startswith("/fsr/"):
57 if request.user.is_staff:
58 return redirect(redirect_to)
59 elif redirect_to.startswith("/contributor/"):
60 if UserProfile.get_for_user(request.user).is_contributor:
61 return redirect(redirect_to)
62 else:
63 return redirect(redirect_to)
64
65 # redirect user to appropriate start page
66 if request.user.is_staff:
67 return redirect('evap.fsr.views.index')
68 elif UserProfile.get_for_user(request.user).is_editor_or_delegate:
69 return redirect('evap.contributor.views.index')
70 else:
71 return redirect('evap.student.views.index')
72
73
74 def faq(request):
75 return render_to_response("faq.html", dict(sections=FaqSection.objects.all()), context_instance=RequestContext(request))
76
[end of evap/evaluation/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py
--- a/evap/evaluation/views.py
+++ b/evap/evaluation/views.py
@@ -50,6 +50,8 @@
return render_to_response("index.html", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request))
else:
+ userprofile, _ = UserProfile.objects.get_or_create(user=request.user)
+
# check for redirect variable
redirect_to = request.GET.get("next", None)
if redirect_to is not None:
@@ -57,7 +59,7 @@
if request.user.is_staff:
return redirect(redirect_to)
elif redirect_to.startswith("/contributor/"):
- if UserProfile.get_for_user(request.user).is_contributor:
+ if userprofile.is_contributor:
return redirect(redirect_to)
else:
return redirect(redirect_to)
@@ -65,7 +67,7 @@
# redirect user to appropriate start page
if request.user.is_staff:
return redirect('evap.fsr.views.index')
- elif UserProfile.get_for_user(request.user).is_editor_or_delegate:
+ elif userprofile.is_editor_or_delegate:
return redirect('evap.contributor.views.index')
else:
return redirect('evap.student.views.index')
| {"golden_diff": "diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py\n--- a/evap/evaluation/views.py\n+++ b/evap/evaluation/views.py\n@@ -50,6 +50,8 @@\n \n return render_to_response(\"index.html\", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request))\n else:\n+ userprofile, _ = UserProfile.objects.get_or_create(user=request.user)\n+\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n@@ -57,7 +59,7 @@\n if request.user.is_staff:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/contributor/\"):\n- if UserProfile.get_for_user(request.user).is_contributor:\n+ if userprofile.is_contributor:\n return redirect(redirect_to)\n else:\n return redirect(redirect_to)\n@@ -65,7 +67,7 @@\n # redirect user to appropriate start page\n if request.user.is_staff:\n return redirect('evap.fsr.views.index')\n- elif UserProfile.get_for_user(request.user).is_editor_or_delegate:\n+ elif userprofile.is_editor_or_delegate:\n return redirect('evap.contributor.views.index')\n else:\n return redirect('evap.student.views.index')\n", "issue": "UserProfile missing when User is not created during import\nUsers which are not created by an enrollment data import but by logging in for the first time (using Kerberos authentication, so they don't need to have a local account first) don't have a UserProfile. This leads to undefined behavior.\n\nUserProfile missing when User is not created during import\nUsers which are not created by an enrollment data import but by logging in for the first time (using Kerberos authentication, so they don't need to have a local account first) don't have a UserProfile. This leads to undefined behavior.\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.contrib.auth import login as auth_login\nfrom django.shortcuts import redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm\nfrom evap.evaluation.models import UserProfile, FaqSection, EmailTemplate\n\n\ndef index(request):\n \"\"\"Main entry page into EvaP providing all the login options available. THe username/password\n login is thought to be used for internal users, e.g. by connecting to a LDAP directory.\n The login key mechanism is meant to be used to include external participants, e.g. 
visiting\n students or visiting contributors.\n \"\"\"\n\n # parse the form data into the respective form\n submit_type = request.POST.get(\"submit_type\", \"no_submit\")\n new_key_form = NewKeyForm(request.POST if submit_type == \"new_key\" else None)\n login_key_form = LoginKeyForm(request.POST if submit_type == \"login_key\" else None)\n login_username_form = LoginUsernameForm(request, request.POST if submit_type == \"login_username\" else None)\n\n # process form data\n if request.method == 'POST':\n if new_key_form.is_valid():\n # user wants a new login key\n profile = new_key_form.get_profile()\n profile.generate_login_key()\n profile.save()\n\n EmailTemplate.get_login_key_template().send_to_user(new_key_form.get_user())\n\n messages.success(request, _(u\"Successfully sent email with new login key.\"))\n elif login_key_form.is_valid():\n # user would like to login with a login key and passed key test\n auth_login(request, login_key_form.get_user())\n elif login_username_form.is_valid():\n # user would like to login with username and password and passed password test\n auth_login(request, login_username_form.get_user())\n\n # clean up our test cookie\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n\n # if not logged in by now, render form\n if not request.user.is_active:\n # set test cookie to verify whether they work in the next step\n request.session.set_test_cookie()\n\n return render_to_response(\"index.html\", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request))\n else:\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n if redirect_to.startswith(\"/fsr/\"):\n if request.user.is_staff:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/contributor/\"):\n if UserProfile.get_for_user(request.user).is_contributor:\n return redirect(redirect_to)\n else:\n return redirect(redirect_to)\n\n # redirect user to appropriate start page\n if request.user.is_staff:\n return redirect('evap.fsr.views.index')\n elif UserProfile.get_for_user(request.user).is_editor_or_delegate:\n return redirect('evap.contributor.views.index')\n else:\n return redirect('evap.student.views.index')\n\n\ndef faq(request):\n return render_to_response(\"faq.html\", dict(sections=FaqSection.objects.all()), context_instance=RequestContext(request))\n", "path": "evap/evaluation/views.py"}]} | 1,498 | 304 |
gh_patches_debug_21308 | rasdani/github-patches | git_diff | python-gitlab__python-gitlab-601 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
gitlab --help requires a config file
## Description of the problem, including code/CLI snippet
`gitlab --help` reads its config file even when displaying help
## Expected Behavior
Print out the help text.
## Actual Behavior
Exits with error if no configuration file is found.
## Specifications
- python-gitlab version: 1.5.0
</issue>
<code>
[start of gitlab/cli.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]>
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU Lesser General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU Lesser General Public License for more details.
15 #
16 # You should have received a copy of the GNU Lesser General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18
19 from __future__ import print_function
20 import argparse
21 import functools
22 import importlib
23 import re
24 import sys
25
26 import gitlab.config
27
28 camel_re = re.compile('(.)([A-Z])')
29
30 # custom_actions = {
31 # cls: {
32 # action: (mandatory_args, optional_args, in_obj),
33 # },
34 # }
35 custom_actions = {}
36
37
38 def register_custom_action(cls_names, mandatory=tuple(), optional=tuple()):
39 def wrap(f):
40 @functools.wraps(f)
41 def wrapped_f(*args, **kwargs):
42 return f(*args, **kwargs)
43
44 # in_obj defines whether the method belongs to the obj or the manager
45 in_obj = True
46 classes = cls_names
47 if type(cls_names) != tuple:
48 classes = (cls_names, )
49
50 for cls_name in classes:
51 final_name = cls_name
52 if cls_name.endswith('Manager'):
53 final_name = cls_name.replace('Manager', '')
54 in_obj = False
55 if final_name not in custom_actions:
56 custom_actions[final_name] = {}
57
58 action = f.__name__.replace('_', '-')
59 custom_actions[final_name][action] = (mandatory, optional, in_obj)
60
61 return wrapped_f
62 return wrap
63
64
65 def die(msg, e=None):
66 if e:
67 msg = "%s (%s)" % (msg, e)
68 sys.stderr.write(msg + "\n")
69 sys.exit(1)
70
71
72 def what_to_cls(what):
73 return "".join([s.capitalize() for s in what.split("-")])
74
75
76 def cls_to_what(cls):
77 return camel_re.sub(r'\1-\2', cls.__name__).lower()
78
79
80 def _get_base_parser(add_help=True):
81 parser = argparse.ArgumentParser(
82 add_help=add_help,
83 description="GitLab API Command Line Interface")
84 parser.add_argument("--version", help="Display the version.",
85 action="store_true")
86 parser.add_argument("-v", "--verbose", "--fancy",
87 help="Verbose mode (legacy format only)",
88 action="store_true")
89 parser.add_argument("-d", "--debug",
90 help="Debug mode (display HTTP requests)",
91 action="store_true")
92 parser.add_argument("-c", "--config-file", action='append',
93 help=("Configuration file to use. Can be used "
94 "multiple times."))
95 parser.add_argument("-g", "--gitlab",
96 help=("Which configuration section should "
97 "be used. If not defined, the default selection "
98 "will be used."),
99 required=False)
100 parser.add_argument("-o", "--output",
101 help=("Output format (v4 only): json|legacy|yaml"),
102 required=False,
103 choices=['json', 'legacy', 'yaml'],
104 default="legacy")
105 parser.add_argument("-f", "--fields",
106 help=("Fields to display in the output (comma "
107 "separated). Not used with legacy output"),
108 required=False)
109
110 return parser
111
112
113 def _get_parser(cli_module):
114 parser = _get_base_parser()
115 return cli_module.extend_parser(parser)
116
117
118 def _parse_value(v):
119 if isinstance(v, str) and v.startswith('@'):
120 # If the user-provided value starts with @, we try to read the file
121 # path provided after @ as the real value. Exit on any error.
122 try:
123 with open(v[1:]) as fl:
124 return fl.read()
125 except Exception as e:
126 sys.stderr.write("%s\n" % e)
127 sys.exit(1)
128
129 return v
130
131
132 def main():
133 if "--version" in sys.argv:
134 print(gitlab.__version__)
135 exit(0)
136
137 parser = _get_base_parser(add_help=False)
138 # This first parsing step is used to find the gitlab config to use, and
139 # load the propermodule (v3 or v4) accordingly. At that point we don't have
140 # any subparser setup
141 (options, args) = parser.parse_known_args(sys.argv)
142
143 config = gitlab.config.GitlabConfigParser(options.gitlab,
144 options.config_file)
145 cli_module = importlib.import_module('gitlab.v%s.cli' % config.api_version)
146
147 # Now we build the entire set of subcommands and do the complete parsing
148 parser = _get_parser(cli_module)
149 args = parser.parse_args(sys.argv[1:])
150
151 config_files = args.config_file
152 gitlab_id = args.gitlab
153 verbose = args.verbose
154 output = args.output
155 fields = []
156 if args.fields:
157 fields = [x.strip() for x in args.fields.split(',')]
158 debug = args.debug
159 action = args.action
160 what = args.what
161
162 args = args.__dict__
163 # Remove CLI behavior-related args
164 for item in ('gitlab', 'config_file', 'verbose', 'debug', 'what', 'action',
165 'version', 'output'):
166 args.pop(item)
167 args = {k: _parse_value(v) for k, v in args.items() if v is not None}
168
169 try:
170 gl = gitlab.Gitlab.from_config(gitlab_id, config_files)
171 if gl.private_token or gl.oauth_token:
172 gl.auth()
173 except Exception as e:
174 die(str(e))
175
176 if debug:
177 gl.enable_debug()
178
179 cli_module.run(gl, what, action, args, verbose, output, fields)
180
181 sys.exit(0)
182
[end of gitlab/cli.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gitlab/cli.py b/gitlab/cli.py
--- a/gitlab/cli.py
+++ b/gitlab/cli.py
@@ -98,7 +98,7 @@
"will be used."),
required=False)
parser.add_argument("-o", "--output",
- help=("Output format (v4 only): json|legacy|yaml"),
+ help="Output format (v4 only): json|legacy|yaml",
required=False,
choices=['json', 'legacy', 'yaml'],
default="legacy")
@@ -135,6 +135,10 @@
exit(0)
parser = _get_base_parser(add_help=False)
+ if "--help" in sys.argv or "-h" in sys.argv:
+ parser.print_help()
+ exit(0)
+
# This first parsing step is used to find the gitlab config to use, and
# load the propermodule (v3 or v4) accordingly. At that point we don't have
# any subparser setup
| {"golden_diff": "diff --git a/gitlab/cli.py b/gitlab/cli.py\n--- a/gitlab/cli.py\n+++ b/gitlab/cli.py\n@@ -98,7 +98,7 @@\n \"will be used.\"),\n required=False)\n parser.add_argument(\"-o\", \"--output\",\n- help=(\"Output format (v4 only): json|legacy|yaml\"),\n+ help=\"Output format (v4 only): json|legacy|yaml\",\n required=False,\n choices=['json', 'legacy', 'yaml'],\n default=\"legacy\")\n@@ -135,6 +135,10 @@\n exit(0)\n \n parser = _get_base_parser(add_help=False)\n+ if \"--help\" in sys.argv or \"-h\" in sys.argv:\n+ parser.print_help()\n+ exit(0)\n+\n # This first parsing step is used to find the gitlab config to use, and\n # load the propermodule (v3 or v4) accordingly. At that point we don't have\n # any subparser setup\n", "issue": "gitlab --help requires a config file\n## Description of the problem, including code/CLI snippet\r\n\r\n`gitlab --help` reads its config file even when displaying help\r\n\r\n## Expected Behavior\r\n\r\nPrint out the help text.\r\n\r\n## Actual Behavior\r\n\r\nExits with error if no configuration file is found.\r\n\r\n## Specifications\r\n\r\n - python-gitlab version: 1.5.0\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import print_function\nimport argparse\nimport functools\nimport importlib\nimport re\nimport sys\n\nimport gitlab.config\n\ncamel_re = re.compile('(.)([A-Z])')\n\n# custom_actions = {\n# cls: {\n# action: (mandatory_args, optional_args, in_obj),\n# },\n# }\ncustom_actions = {}\n\n\ndef register_custom_action(cls_names, mandatory=tuple(), optional=tuple()):\n def wrap(f):\n @functools.wraps(f)\n def wrapped_f(*args, **kwargs):\n return f(*args, **kwargs)\n\n # in_obj defines whether the method belongs to the obj or the manager\n in_obj = True\n classes = cls_names\n if type(cls_names) != tuple:\n classes = (cls_names, )\n\n for cls_name in classes:\n final_name = cls_name\n if cls_name.endswith('Manager'):\n final_name = cls_name.replace('Manager', '')\n in_obj = False\n if final_name not in custom_actions:\n custom_actions[final_name] = {}\n\n action = f.__name__.replace('_', '-')\n custom_actions[final_name][action] = (mandatory, optional, in_obj)\n\n return wrapped_f\n return wrap\n\n\ndef die(msg, e=None):\n if e:\n msg = \"%s (%s)\" % (msg, e)\n sys.stderr.write(msg + \"\\n\")\n sys.exit(1)\n\n\ndef what_to_cls(what):\n return \"\".join([s.capitalize() for s in what.split(\"-\")])\n\n\ndef cls_to_what(cls):\n return camel_re.sub(r'\\1-\\2', cls.__name__).lower()\n\n\ndef _get_base_parser(add_help=True):\n parser = argparse.ArgumentParser(\n add_help=add_help,\n description=\"GitLab API Command Line Interface\")\n parser.add_argument(\"--version\", help=\"Display the version.\",\n action=\"store_true\")\n parser.add_argument(\"-v\", \"--verbose\", \"--fancy\",\n help=\"Verbose mode (legacy format only)\",\n action=\"store_true\")\n parser.add_argument(\"-d\", \"--debug\",\n help=\"Debug mode (display HTTP requests)\",\n action=\"store_true\")\n parser.add_argument(\"-c\", \"--config-file\", action='append',\n help=(\"Configuration file to use. Can be used \"\n \"multiple times.\"))\n parser.add_argument(\"-g\", \"--gitlab\",\n help=(\"Which configuration section should \"\n \"be used. If not defined, the default selection \"\n \"will be used.\"),\n required=False)\n parser.add_argument(\"-o\", \"--output\",\n help=(\"Output format (v4 only): json|legacy|yaml\"),\n required=False,\n choices=['json', 'legacy', 'yaml'],\n default=\"legacy\")\n parser.add_argument(\"-f\", \"--fields\",\n help=(\"Fields to display in the output (comma \"\n \"separated). Not used with legacy output\"),\n required=False)\n\n return parser\n\n\ndef _get_parser(cli_module):\n parser = _get_base_parser()\n return cli_module.extend_parser(parser)\n\n\ndef _parse_value(v):\n if isinstance(v, str) and v.startswith('@'):\n # If the user-provided value starts with @, we try to read the file\n # path provided after @ as the real value. Exit on any error.\n try:\n with open(v[1:]) as fl:\n return fl.read()\n except Exception as e:\n sys.stderr.write(\"%s\\n\" % e)\n sys.exit(1)\n\n return v\n\n\ndef main():\n if \"--version\" in sys.argv:\n print(gitlab.__version__)\n exit(0)\n\n parser = _get_base_parser(add_help=False)\n # This first parsing step is used to find the gitlab config to use, and\n # load the propermodule (v3 or v4) accordingly. 
At that point we don't have\n # any subparser setup\n (options, args) = parser.parse_known_args(sys.argv)\n\n config = gitlab.config.GitlabConfigParser(options.gitlab,\n options.config_file)\n cli_module = importlib.import_module('gitlab.v%s.cli' % config.api_version)\n\n # Now we build the entire set of subcommands and do the complete parsing\n parser = _get_parser(cli_module)\n args = parser.parse_args(sys.argv[1:])\n\n config_files = args.config_file\n gitlab_id = args.gitlab\n verbose = args.verbose\n output = args.output\n fields = []\n if args.fields:\n fields = [x.strip() for x in args.fields.split(',')]\n debug = args.debug\n action = args.action\n what = args.what\n\n args = args.__dict__\n # Remove CLI behavior-related args\n for item in ('gitlab', 'config_file', 'verbose', 'debug', 'what', 'action',\n 'version', 'output'):\n args.pop(item)\n args = {k: _parse_value(v) for k, v in args.items() if v is not None}\n\n try:\n gl = gitlab.Gitlab.from_config(gitlab_id, config_files)\n if gl.private_token or gl.oauth_token:\n gl.auth()\n except Exception as e:\n die(str(e))\n\n if debug:\n gl.enable_debug()\n\n cli_module.run(gl, what, action, args, verbose, output, fields)\n\n sys.exit(0)\n", "path": "gitlab/cli.py"}]} | 2,420 | 225 |
gh_patches_debug_19870 | rasdani/github-patches | git_diff | Parsl__parsl-389 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
parsl installed from pip tries to determine its version using git
Every time I run parsl I get:
```
kacperk@dxl1: /dpool/kacperk/arxiv $ python scraper_parsl.py
fatal: Not a git repository: '/home/kacperk/.local/lib/python3.6/site-packages/.git'
```
and in logs:
```
2018-07-15 12:54:06 parsl.utils:24 [ERROR] Unable to determine code state
Traceback (most recent call last):
File "/home/kacperk/.local/lib/python3.6/site-packages/parsl/utils.py", line 19, in get_version
head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
File "/home/kacperk/miniconda3/lib/python3.6/subprocess.py", line 336, in check_output
**kwargs).stdout
File "/home/kacperk/miniconda3/lib/python3.6/subprocess.py", line 418, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['git', 'rev-parse', '--short', 'HEAD']' returned non-zero exit status 128.
```
</issue>
<code>
[start of parsl/utils.py]
1 import logging
2 import os
3 import shlex
4 import subprocess
5 import threading
6 import time
7 from contextlib import contextmanager
8 from functools import wraps
9
10 import parsl
11 from parsl.version import VERSION
12
13 logger = logging.getLogger(__name__)
14
15
16 def get_version():
17 version = parsl.__version__
18 work_tree = os.path.dirname(os.path.dirname(__file__))
19 git_dir = os.path.join(work_tree, '.git')
20 env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}
21 try:
22 cmd = shlex.split('git rev-parse --short HEAD')
23 head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
24 diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)
25 status = 'dirty' if diff else 'clean'
26 version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)
27 except Exception as e:
28 pass
29
30 return version
31
32
33 def get_all_checkpoints(rundir="runinfo"):
34 """Finds the checkpoints from all last runs.
35
36 Note that checkpoints are incremental, and this helper will not find
37 previous checkpoints from earlier than the most recent run. It probably
38 should be made to do so.
39
40 Kwargs:
41 - rundir(str) : Path to the runinfo directory
42
43 Returns:
44 - a list suitable for the checkpointFiles parameter of DataFlowKernel
45 constructor
46
47 """
48
49 if(not(os.path.isdir(rundir))):
50 return []
51
52 dirs = sorted(os.listdir(rundir))
53
54 checkpoints = []
55
56 for runid in dirs:
57
58 checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))
59
60 if(os.path.isdir(checkpoint)):
61 checkpoints.append(checkpoint)
62
63 return checkpoints
64
65
66 def get_last_checkpoint(rundir="runinfo"):
67 """Finds the checkpoint from the last run, if one exists.
68
69 Note that checkpoints are incremental, and this helper will not find
70 previous checkpoints from earlier than the most recent run. It probably
71 should be made to do so.
72
73 Kwargs:
74 - rundir(str) : Path to the runinfo directory
75
76 Returns:
77 - a list suitable for checkpointFiles parameter of DataFlowKernel
78 constructor, with 0 or 1 elements
79
80 """
81
82 if(not(os.path.isdir(rundir))):
83 return []
84
85 dirs = sorted(os.listdir(rundir))
86
87 if(len(dirs) == 0):
88 return []
89
90 last_runid = dirs[-1]
91 last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))
92
93 if(not(os.path.isdir(last_checkpoint))):
94 return []
95
96 return [last_checkpoint]
97
98
99 def timeout(seconds=None):
100 def decorator(func, *args, **kwargs):
101 @wraps(func)
102 def wrapper(*args, **kwargs):
103 t = threading.Thread(target=func, args=args, kwargs=kwargs)
104 t.start()
105 result = t.join(seconds)
106 if t.is_alive():
107 raise RuntimeError('timed out in {}'.format(func))
108 return result
109 return wrapper
110 return decorator
111
112
113 @contextmanager
114 def wait_for_file(path, seconds=10):
115 for i in range(0, int(seconds * 100)):
116 time.sleep(seconds / 100.)
117 if os.path.exists(path):
118 break
119 yield
120
121
122 @contextmanager
123 def time_limited_open(path, mode, seconds=1):
124 wait_for_file(path, seconds)
125
126 f = open(path, mode)
127 yield f
128 f.close()
129
[end of parsl/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsl/utils.py b/parsl/utils.py
--- a/parsl/utils.py
+++ b/parsl/utils.py
@@ -17,15 +17,16 @@
version = parsl.__version__
work_tree = os.path.dirname(os.path.dirname(__file__))
git_dir = os.path.join(work_tree, '.git')
- env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}
- try:
- cmd = shlex.split('git rev-parse --short HEAD')
- head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
- diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)
- status = 'dirty' if diff else 'clean'
- version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)
- except Exception as e:
- pass
+ if os.path.exists(git_dir):
+ env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}
+ try:
+ cmd = shlex.split('git rev-parse --short HEAD')
+ head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
+ diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)
+ status = 'dirty' if diff else 'clean'
+ version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)
+ except Exception as e:
+ pass
return version
| {"golden_diff": "diff --git a/parsl/utils.py b/parsl/utils.py\n--- a/parsl/utils.py\n+++ b/parsl/utils.py\n@@ -17,15 +17,16 @@\n version = parsl.__version__\n work_tree = os.path.dirname(os.path.dirname(__file__))\n git_dir = os.path.join(work_tree, '.git')\n- env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}\n- try:\n- cmd = shlex.split('git rev-parse --short HEAD')\n- head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\n- diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)\n- status = 'dirty' if diff else 'clean'\n- version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n- except Exception as e:\n- pass\n+ if os.path.exists(git_dir):\n+ env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}\n+ try:\n+ cmd = shlex.split('git rev-parse --short HEAD')\n+ head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\n+ diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)\n+ status = 'dirty' if diff else 'clean'\n+ version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n+ except Exception as e:\n+ pass\n \n return version\n", "issue": "parsl installed from pip tries to determine its version using git\nEvery time I run parsl I get:\r\n\r\n```\r\nkacperk@dxl1: /dpool/kacperk/arxiv $ python scraper_parsl.py\r\nfatal: Not a git repository: '/home/kacperk/.local/lib/python3.6/site-packages/.git'\r\n```\r\n\r\nand in logs:\r\n\r\n```\r\n2018-07-15 12:54:06 parsl.utils:24 [ERROR] Unable to determine code state\r\nTraceback (most recent call last):\r\n File \"/home/kacperk/.local/lib/python3.6/site-packages/parsl/utils.py\", line 19, in get_version\r\n head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\r\n File \"/home/kacperk/miniconda3/lib/python3.6/subprocess.py\", line 336, in check_output\r\n **kwargs).stdout\r\n File \"/home/kacperk/miniconda3/lib/python3.6/subprocess.py\", line 418, in run\r\n output=stdout, stderr=stderr)\r\nsubprocess.CalledProcessError: Command '['git', 'rev-parse', '--short', 'HEAD']' returned non-zero exit status 128.\r\n```\n", "before_files": [{"content": "import logging\nimport os\nimport shlex\nimport subprocess\nimport threading\nimport time\nfrom contextlib import contextmanager\nfrom functools import wraps\n\nimport parsl\nfrom parsl.version import VERSION\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_version():\n version = parsl.__version__\n work_tree = os.path.dirname(os.path.dirname(__file__))\n git_dir = os.path.join(work_tree, '.git')\n env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}\n try:\n cmd = shlex.split('git rev-parse --short HEAD')\n head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\n diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)\n status = 'dirty' if diff else 'clean'\n version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n except Exception as e:\n pass\n\n return version\n\n\ndef get_all_checkpoints(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoints from all last runs.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. 
It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for the checkpointFiles parameter of DataFlowKernel\n constructor\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n checkpoints = []\n\n for runid in dirs:\n\n checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))\n\n if(os.path.isdir(checkpoint)):\n checkpoints.append(checkpoint)\n\n return checkpoints\n\n\ndef get_last_checkpoint(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoint from the last run, if one exists.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for checkpointFiles parameter of DataFlowKernel\n constructor, with 0 or 1 elements\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n if(len(dirs) == 0):\n return []\n\n last_runid = dirs[-1]\n last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))\n\n if(not(os.path.isdir(last_checkpoint))):\n return []\n\n return [last_checkpoint]\n\n\ndef timeout(seconds=None):\n def decorator(func, *args, **kwargs):\n @wraps(func)\n def wrapper(*args, **kwargs):\n t = threading.Thread(target=func, args=args, kwargs=kwargs)\n t.start()\n result = t.join(seconds)\n if t.is_alive():\n raise RuntimeError('timed out in {}'.format(func))\n return result\n return wrapper\n return decorator\n\n\n@contextmanager\ndef wait_for_file(path, seconds=10):\n for i in range(0, int(seconds * 100)):\n time.sleep(seconds / 100.)\n if os.path.exists(path):\n break\n yield\n\n\n@contextmanager\ndef time_limited_open(path, mode, seconds=1):\n wait_for_file(path, seconds)\n\n f = open(path, mode)\n yield f\n f.close()\n", "path": "parsl/utils.py"}]} | 1,890 | 342 |
gh_patches_debug_13403 | rasdani/github-patches | git_diff | chainer__chainer-267 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Maybe there is a bug in chainer.optimizer.weight_decay(decay).
In the [optimizer.py](https://github.com/pfnet/chainer/blob/30807b4f9f40177f592099663b86a6bb3eb1f867/chainer/optimizer.py) file, `weight_decay` is implemented as following:
``` python
def weight_decay(self, decay):
for p, g, _ in self.tuples:
g -= decay * p
```
However, the gradient is decayed, not the weight. So if we update the weights with `param -= lr * grad`, the decayed gradient becomes smaller and the param (weight) ends up larger, which is not what we want.
As far as I know the regularized gradient descent update is `p = p * (1-decay) - lr * grad`, so I think the weight should be decayed, not the gradient.
</issue>
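For illustration, the relationship between decaying the weight and folding the penalty into the gradient can be checked numerically. The sketch below uses plain NumPy (not Chainer) and shows that `p - lr*(g + decay*p)` equals `p*(1 - lr*decay) - lr*g`, i.e. L2 regularization can be applied through the gradient as long as the decay term is added rather than subtracted:
```python
# Numerical check (NumPy, not Chainer): applying weight decay through the
# gradient with the correct sign matches decaying the parameter directly.
import numpy as np

rng = np.random.default_rng(0)
p = rng.normal(size=5)      # parameter
g = rng.normal(size=5)      # gradient of the data loss
lr, decay = 0.1, 0.01

# (a) decay the weight directly, then take the SGD step
direct = p * (1 - lr * decay) - lr * g

# (b) fold the penalty into the gradient (g + decay * p), then take the step
via_grad = p - lr * (g + decay * p)

assert np.allclose(direct, via_grad)

# The subtraction in the quoted snippet (`g -= decay * p`) shifts every update
# by +2*lr*decay*p, i.e. it pushes each weight away from zero instead of
# shrinking it.
wrong = p - lr * (g - decay * p)
assert np.allclose(wrong - direct, 2 * lr * decay * p)
print("direct and via-gradient updates agree")
```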
<code>
[start of chainer/optimizer.py]
1 import math
2
3 import numpy
4
5 from chainer import cuda
6
7
8 # TODO(delta2323): Make it public function and move it to common directory.
9
10
11 def _sqnorm(x):
12 if isinstance(x, cuda.GPUArray):
13 with cuda.using_device(x):
14 return float(cuda.gpuarray.dot(x, x).get())
15 x = x.ravel()
16 return float(x.dot(x))
17
18
19 class Optimizer(object):
20
21 """Base class of all numerical optimizers.
22
23 Optimizer is set up with references to parameters and gradients, and
24 then on every call of :meth:`update`, it updates parameters based on
25 corresponding gradients. Optimizer implementations must override
26 :meth:`update_one` method, which updates one parameter array using the
27 corresponding gradient array.
28
29 Optimizer can optionally use state for each parameter/gradient pair. It is
30 initialized by :meth:`init_state` method at set up.
31
32 Attributes:
33 t (int): Number of update steps. It can be used in :meth:`update_one`
34 implementation, where :attr:`t` is incremented beforehand.
35
36 """
37
38 def setup(self, params_grads):
39 """Prepares states for all given parameter/gradient pairs.
40
41 Args:
42 params_grads: Tuple (pair) of two tuples. The first element is a
43 tuple of parameter arrays, and the second is a tuple of
44 corresponding gradient arrays.
45 Return value of :meth:`FunctionSet.collect_parameters` method
46 can be used.
47
48 """
49 self.t = 0
50 self.tuples = []
51 for p, g in zip(*params_grads):
52 with cuda.using_device(p):
53 state = self.init_state(p, g)
54 self.tuples.append((p, g, state))
55
56 def init_state(self, param, grad):
57 """Returns the initial state for given parameter and gradient.
58
59 Default implementation delegates the procedure to
60 :meth:`init_state_cpu` or :meth:`init_state_gpu` depending on the type
61 of ``param``.
62
63 Args:
64 param: Parameter array.
65 grad: Gradient array corresponding to ``param``.
66
67 Returns:
68 Initial state value.
69
70 .. warning::
71
72 Note that, on every call of :meth:`update_one`, the state value
73 is passed by value and then the method updates its content, so
74 the state must be a reference. Especiallly, one cannot use a
75 value of built-in numeric type. If the state is one scalar
76 value, it is recommended to use scalar array, i.e.
77 :class:`~numpy.ndarray` with shape ``()``.
78
79 """
80 if isinstance(param, cuda.GPUArray):
81 return self.init_state_gpu(param, grad)
82 return self.init_state_cpu(param, grad)
83
84 def init_state_cpu(self, param, grad):
85 """Returns the initial state for given parameter and gradient on GPU.
86
87 Args:
88 param (~numpy.ndarray): Parameter array.
89 grad (~numpy.ndarray): Gradient array.
90
91 Returns:
92 Initial state value.
93
94 .. seealso:: :meth:`init_state`, :meth:`init_state_gpu`
95
96 """
97 return None
98
99 def init_state_gpu(self, param, grad):
100 """Returns the initial state for given parameter and gradient on CPU.
101
102 Args:
103 param (~pycuda.gpuarray.GPUArray): Parameter array.
104 grad (~pycuda.gpuarray.GPUArray): Gradient array.
105
106 Returns:
107 Initial state value.
108
109 .. seealso:: :meth:`init_state`, :meth:`init_state_gpu`
110
111 """
112 return None
113
114 def zero_grads(self):
115 """Fills all gradient arrays by zeros.
116
117 This method should be call before backprop takes place, since
118 gradients are accumulated on backprop.
119
120 """
121 for _, g, _ in self.tuples:
122 if isinstance(g, cuda.GPUArray):
123 with cuda.using_device(g):
124 g.fill(0)
125 else:
126 g.fill(0)
127
128 def compute_grads_norm(self):
129 """Computes the norm of whole gradients.
130
131 Returns:
132 float: L2 norm of whole gradients, i.e. square root of sum of
133 square of all gradient elements.
134
135 .. warning::
136
137 This method returns a CPU-computed value, which means that this
138 method synchronizes between CPU and GPU if at least one of the
139 gradients reside on the GPU.
140
141 """
142 # TODO(beam2d): Make it asynchronous to CPU when gradients exist on GPU
143 sqnorm = 0
144 for _, g, _ in self.tuples:
145 sqnorm += _sqnorm(g)
146 return math.sqrt(sqnorm)
147
148 def clip_grads(self, maxnorm):
149 """Clips the norm of whole gradients up to given threshold.
150
151 Args:
152 maxnorm (float): Threshold of gradient L2 norm.
153
154 .. seealso::
155
156 :meth:`compute_grads_norm`
157 It uses this method to compute the gradient norm to be clipped.
158
159 """
160 norm = self.compute_grads_norm()
161 if norm > maxnorm:
162 ratio = maxnorm / norm
163 for _, g, _ in self.tuples:
164 with cuda.using_device(g):
165 g *= ratio
166
167 def weight_decay(self, decay):
168 """Applies weight decay to the parameter/gradient pairs.
169
170 Args:
171 decay (float): Coefficient of weight decay
172
173 """
174 for p, g, _ in self.tuples:
175 if isinstance(p, cuda.GPUArray):
176 with cuda.using_device(p):
177 cuda.elementwise('float* g, const float* p, float decay',
178 'g[i] -= decay * p[i]',
179 'weight_decay')(g, p, decay)
180 else:
181 g -= decay * p
182
183 def accumulate_grads(self, grads):
184 """Accumulates gradients from other source.
185
186 This method just adds given gradient arrays to gradients that this
187 optimizer holds. It is typically used in data-parallel optimization,
188 where gradients for different shards are computed in parallel and
189 aggregated by this method. This method correctly treats multiple GPU
190 devices.
191
192 Args:
193 grads (Iterable): Iterable of gradient arrays to be accumulated.
194
195 """
196 for (_, g_dst, _), g_src in zip(self.tuples, grads):
197 if isinstance(g_dst, numpy.ndarray):
198 g_dst += cuda.to_cpu(g_src)
199 continue
200
201 with cuda.using_device(g_dst):
202 if (isinstance(g_src, cuda.GPUArray) and
203 g_dst.gpudata.device != g_src.gpudata.device):
204 g_dst += cuda.copy(g_src, out_device=g_src.gpudata.device)
205 else:
206 g_dst += cuda.to_gpu(g_src)
207
208 def update(self):
209 """Updates all parameters and states using corresponding gradients.
210
211 This method iteratively calls :meth:`update_one` for each parameter/
212 gradient/state tuple. Beforehand, :attr:`t` attribute is incremented.
213
214 """
215 self.t += 1
216 for p, g, s in self.tuples:
217 with cuda.using_device(p):
218 self.update_one(p, g, s)
219
220 def update_one(self, param, grad, state):
221 """Updates a parameter array and its state using given gradient.
222
223 The default implementation delegates the procedure to
224 :meth:`update_one_cpu` or :meth:`update_one_gpu` depending on the type
225 of the parameter array. Optimizer implmentation must override these
226 type-specific methods or this :meth:`update_one` method directly.
227
228 Args:
229 param: Parameter array.
230 grad: Gradient array.
231 state: State value.
232
233 .. seealso:: :meth:`update_one_cpu`, :meth:`update_one_gpu`
234
235 """
236 if isinstance(param, cuda.GPUArray):
237 self.update_one_gpu(param, grad, state)
238 else:
239 self.update_one_cpu(param, grad, state)
240
241 def update_one_cpu(self, param, grad, state):
242 """Updates a parameter array and its state using given gradient on CPU.
243
244 Args:
245 param (~numpy.ndarray): Parameter array.
246 grad (~numpy.ndarray): Gradient array.
247 state: State value.
248
249 .. seealso:: :meth:`update_one`, :meth:`update_one_gpu`
250
251 """
252 raise NotImplementedError()
253
254 def update_one_gpu(self, param, grad, state):
255 """Updates a parameter array and its state using given gradient on GPU.
256
257 Args:
258 param (~pycuda.gpuarray.GPUArray): Parameter array.
259 grad (~pycuda.gpuarray.GPUArray): Gradient array.
260 state: State value.
261
262 .. seealso:: :meth:`update_one`, :meth:`update_one_cpu`
263
264 """
265 raise NotImplementedError()
266
[end of chainer/optimizer.py]
</code>
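As a reading aid for the listing above, here is a rough sketch of how a concrete optimizer might plug into this base class. Plain SGD, the array values, and the assumption that the CPU path of this old Chainer 1.x API tolerates plain NumPy arrays under `cuda.using_device` are all illustrative guesses, not part of the original code:

```python
import numpy as np
from chainer.optimizer import Optimizer

class SimpleSGD(Optimizer):
    def __init__(self, lr=0.01):
        self.lr = lr

    def update_one_cpu(self, param, grad, state):
        # in-place update, following the CPU/GPU split documented above
        param -= self.lr * grad

# a single parameter/gradient pair, in the (params, grads) pair-of-tuples
# layout that setup() expects
params = (np.zeros(3, dtype=np.float32),)
grads = (np.ones(3, dtype=np.float32),)

opt = SimpleSGD(lr=0.1)
opt.setup((params, grads))
opt.update()          # increments t and dispatches to update_one_cpu
print(params[0])      # expected: [-0.1, -0.1, -0.1]
```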
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/optimizer.py b/chainer/optimizer.py
--- a/chainer/optimizer.py
+++ b/chainer/optimizer.py
@@ -175,10 +175,10 @@
if isinstance(p, cuda.GPUArray):
with cuda.using_device(p):
cuda.elementwise('float* g, const float* p, float decay',
- 'g[i] -= decay * p[i]',
+ 'g[i] += decay * p[i]',
'weight_decay')(g, p, decay)
else:
- g -= decay * p
+ g += decay * p
def accumulate_grads(self, grads):
"""Accumulates gradients from other source.
| {"golden_diff": "diff --git a/chainer/optimizer.py b/chainer/optimizer.py\n--- a/chainer/optimizer.py\n+++ b/chainer/optimizer.py\n@@ -175,10 +175,10 @@\n if isinstance(p, cuda.GPUArray):\n with cuda.using_device(p):\n cuda.elementwise('float* g, const float* p, float decay',\n- 'g[i] -= decay * p[i]',\n+ 'g[i] += decay * p[i]',\n 'weight_decay')(g, p, decay)\n else:\n- g -= decay * p\n+ g += decay * p\n \n def accumulate_grads(self, grads):\n \"\"\"Accumulates gradients from other source.\n", "issue": "Maybe there is a bug in chainer.optimizer.weight_decay(decay).\nIn the [optimizer.py](https://github.com/pfnet/chainer/blob/30807b4f9f40177f592099663b86a6bb3eb1f867/chainer/optimizer.py) file, `weight_decay` is implemented as following:\n\n``` python\n def weight_decay(self, decay):\n for p, g, _ in self.tuples:\n g -= decay * p\n```\n\nHowever, the gradient is decayed, not the weight. So if we update the weights `param -= lr * grad`, because the grad becomes smaller, the param (weight) becomes larger, which is not we want. \n\nAs far as I know the regularized gradient decent is `p = p * (1-decay) - lr * grad`, so I think the weight should be decayed not the gradient.\n\n", "before_files": [{"content": "import math\n\nimport numpy\n\nfrom chainer import cuda\n\n\n# TODO(delta2323): Make it public function and move it to common directory.\n\n\ndef _sqnorm(x):\n if isinstance(x, cuda.GPUArray):\n with cuda.using_device(x):\n return float(cuda.gpuarray.dot(x, x).get())\n x = x.ravel()\n return float(x.dot(x))\n\n\nclass Optimizer(object):\n\n \"\"\"Base class of all numerical optimizers.\n\n Optimizer is set up with references to parameters and gradients, and\n then on every call of :meth:`update`, it updates parameters based on\n corresponding gradients. Optimizer implementations must override\n :meth:`update_one` method, which updates one parameter array using the\n corresponding gradient array.\n\n Optimizer can optionally use state for each parameter/gradient pair. It is\n initialized by :meth:`init_state` method at set up.\n\n Attributes:\n t (int): Number of update steps. It can be used in :meth:`update_one`\n implementation, where :attr:`t` is incremented beforehand.\n\n \"\"\"\n\n def setup(self, params_grads):\n \"\"\"Prepares states for all given parameter/gradient pairs.\n\n Args:\n params_grads: Tuple (pair) of two tuples. The first element is a\n tuple of parameter arrays, and the second is a tuple of\n corresponding gradient arrays.\n Return value of :meth:`FunctionSet.collect_parameters` method\n can be used.\n\n \"\"\"\n self.t = 0\n self.tuples = []\n for p, g in zip(*params_grads):\n with cuda.using_device(p):\n state = self.init_state(p, g)\n self.tuples.append((p, g, state))\n\n def init_state(self, param, grad):\n \"\"\"Returns the initial state for given parameter and gradient.\n\n Default implementation delegates the procedure to\n :meth:`init_state_cpu` or :meth:`init_state_gpu` depending on the type\n of ``param``.\n\n Args:\n param: Parameter array.\n grad: Gradient array corresponding to ``param``.\n\n Returns:\n Initial state value.\n\n .. warning::\n\n Note that, on every call of :meth:`update_one`, the state value\n is passed by value and then the method updates its content, so\n the state must be a reference. Especiallly, one cannot use a\n value of built-in numeric type. 
If the state is one scalar\n value, it is recommended to use scalar array, i.e.\n :class:`~numpy.ndarray` with shape ``()``.\n\n \"\"\"\n if isinstance(param, cuda.GPUArray):\n return self.init_state_gpu(param, grad)\n return self.init_state_cpu(param, grad)\n\n def init_state_cpu(self, param, grad):\n \"\"\"Returns the initial state for given parameter and gradient on GPU.\n\n Args:\n param (~numpy.ndarray): Parameter array.\n grad (~numpy.ndarray): Gradient array.\n\n Returns:\n Initial state value.\n\n .. seealso:: :meth:`init_state`, :meth:`init_state_gpu`\n\n \"\"\"\n return None\n\n def init_state_gpu(self, param, grad):\n \"\"\"Returns the initial state for given parameter and gradient on CPU.\n\n Args:\n param (~pycuda.gpuarray.GPUArray): Parameter array.\n grad (~pycuda.gpuarray.GPUArray): Gradient array.\n\n Returns:\n Initial state value.\n\n .. seealso:: :meth:`init_state`, :meth:`init_state_gpu`\n\n \"\"\"\n return None\n\n def zero_grads(self):\n \"\"\"Fills all gradient arrays by zeros.\n\n This method should be call before backprop takes place, since\n gradients are accumulated on backprop.\n\n \"\"\"\n for _, g, _ in self.tuples:\n if isinstance(g, cuda.GPUArray):\n with cuda.using_device(g):\n g.fill(0)\n else:\n g.fill(0)\n\n def compute_grads_norm(self):\n \"\"\"Computes the norm of whole gradients.\n\n Returns:\n float: L2 norm of whole gradients, i.e. square root of sum of\n square of all gradient elements.\n\n .. warning::\n\n This method returns a CPU-computed value, which means that this\n method synchronizes between CPU and GPU if at least one of the\n gradients reside on the GPU.\n\n \"\"\"\n # TODO(beam2d): Make it asynchronous to CPU when gradients exist on GPU\n sqnorm = 0\n for _, g, _ in self.tuples:\n sqnorm += _sqnorm(g)\n return math.sqrt(sqnorm)\n\n def clip_grads(self, maxnorm):\n \"\"\"Clips the norm of whole gradients up to given threshold.\n\n Args:\n maxnorm (float): Threshold of gradient L2 norm.\n\n .. seealso::\n\n :meth:`compute_grads_norm`\n It uses this method to compute the gradient norm to be clipped.\n\n \"\"\"\n norm = self.compute_grads_norm()\n if norm > maxnorm:\n ratio = maxnorm / norm\n for _, g, _ in self.tuples:\n with cuda.using_device(g):\n g *= ratio\n\n def weight_decay(self, decay):\n \"\"\"Applies weight decay to the parameter/gradient pairs.\n\n Args:\n decay (float): Coefficient of weight decay\n\n \"\"\"\n for p, g, _ in self.tuples:\n if isinstance(p, cuda.GPUArray):\n with cuda.using_device(p):\n cuda.elementwise('float* g, const float* p, float decay',\n 'g[i] -= decay * p[i]',\n 'weight_decay')(g, p, decay)\n else:\n g -= decay * p\n\n def accumulate_grads(self, grads):\n \"\"\"Accumulates gradients from other source.\n\n This method just adds given gradient arrays to gradients that this\n optimizer holds. It is typically used in data-parallel optimization,\n where gradients for different shards are computed in parallel and\n aggregated by this method. 
This method correctly treats multiple GPU\n devices.\n\n Args:\n grads (Iterable): Iterable of gradient arrays to be accumulated.\n\n \"\"\"\n for (_, g_dst, _), g_src in zip(self.tuples, grads):\n if isinstance(g_dst, numpy.ndarray):\n g_dst += cuda.to_cpu(g_src)\n continue\n\n with cuda.using_device(g_dst):\n if (isinstance(g_src, cuda.GPUArray) and\n g_dst.gpudata.device != g_src.gpudata.device):\n g_dst += cuda.copy(g_src, out_device=g_src.gpudata.device)\n else:\n g_dst += cuda.to_gpu(g_src)\n\n def update(self):\n \"\"\"Updates all parameters and states using corresponding gradients.\n\n This method iteratively calls :meth:`update_one` for each parameter/\n gradient/state tuple. Beforehand, :attr:`t` attribute is incremented.\n\n \"\"\"\n self.t += 1\n for p, g, s in self.tuples:\n with cuda.using_device(p):\n self.update_one(p, g, s)\n\n def update_one(self, param, grad, state):\n \"\"\"Updates a parameter array and its state using given gradient.\n\n The default implementation delegates the procedure to\n :meth:`update_one_cpu` or :meth:`update_one_gpu` depending on the type\n of the parameter array. Optimizer implmentation must override these\n type-specific methods or this :meth:`update_one` method directly.\n\n Args:\n param: Parameter array.\n grad: Gradient array.\n state: State value.\n\n .. seealso:: :meth:`update_one_cpu`, :meth:`update_one_gpu`\n\n \"\"\"\n if isinstance(param, cuda.GPUArray):\n self.update_one_gpu(param, grad, state)\n else:\n self.update_one_cpu(param, grad, state)\n\n def update_one_cpu(self, param, grad, state):\n \"\"\"Updates a parameter array and its state using given gradient on CPU.\n\n Args:\n param (~numpy.ndarray): Parameter array.\n grad (~numpy.ndarray): Gradient array.\n state: State value.\n\n .. seealso:: :meth:`update_one`, :meth:`update_one_gpu`\n\n \"\"\"\n raise NotImplementedError()\n\n def update_one_gpu(self, param, grad, state):\n \"\"\"Updates a parameter array and its state using given gradient on GPU.\n\n Args:\n param (~pycuda.gpuarray.GPUArray): Parameter array.\n grad (~pycuda.gpuarray.GPUArray): Gradient array.\n state: State value.\n\n .. seealso:: :meth:`update_one`, :meth:`update_one_cpu`\n\n \"\"\"\n raise NotImplementedError()\n", "path": "chainer/optimizer.py"}]} | 3,338 | 156 |
gh_patches_debug_28583 | rasdani/github-patches | git_diff | pytorch__ignite-48 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Provide basic update + inference functions
See #37 for discussion on this. We should provide something for simple supervised training
cc @jasonkriss
</issue>
<code>
[start of ignite/trainer/trainer.py]
1 from __future__ import division
2
3 import logging
4 import time
5 from collections import Iterable
6
7 from enum import Enum
8 from ignite.trainer.history import History
9
10 __all__ = ["TrainingEvents", "Trainer"]
11
12
13 class TrainingEvents(Enum):
14 EPOCH_STARTED = "epoch_started"
15 EPOCH_COMPLETED = "epoch_completed"
16 TRAINING_EPOCH_STARTED = "training_epoch_started"
17 TRAINING_EPOCH_COMPLETED = "training_epoch_completed"
18 VALIDATION_STARTING = "validation_starting"
19 VALIDATION_COMPLETED = "validation_completed"
20 TRAINING_STARTED = "training_started"
21 TRAINING_COMPLETED = "training_completed"
22 TRAINING_ITERATION_STARTED = "training_iteration_started"
23 TRAINING_ITERATION_COMPLETED = "training_iteration_completed"
24 VALIDATION_ITERATION_STARTED = "validation_iteration_started"
25 VALIDATION_ITERATION_COMPLETED = "validation_iteration_completed"
26 EXCEPTION_RAISED = "exception_raised"
27
28
29 def _to_hours_mins_secs(time_taken):
30 mins, secs = divmod(time_taken, 60)
31 hours, mins = divmod(mins, 60)
32 return hours, mins, secs
33
34
35 class Trainer(object):
36 """
37 Generic trainer class.
38
39 Training update and validation functions receive batches of data and return values which will
40 be stored in the `training_history` and `validation_history`. The trainer defines multiple
41 events in `TrainingEvents` for which the user can attach event handlers to. The events get
42 passed the trainer, so they can access the training/validation history
43
44
45 Parameters
46 ----------
47 training_update_function : callable
48 Update function receiving the current training batch in each iteration
49
50 validation_inference_function : callable
51 Function receiving data and performing a feed forward without update
52 """
53
54 def __init__(self, training_update_function, validation_inference_function=None):
55
56 self._logger = self._get_logger()
57 self._training_update_function = training_update_function
58 self._validation_inference_function = validation_inference_function
59 self._event_handlers = {}
60
61 self.training_history = History()
62 self.validation_history = History()
63 self.current_iteration = 0
64 self.current_validation_iteration = 0
65 self.current_epoch = 0
66 self.max_epochs = 0
67 self.should_terminate = False
68
69 def _get_logger(self):
70 logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
71 logger.addHandler(logging.NullHandler())
72 return logger
73
74 def add_event_handler(self, event_name, handler, *args, **kwargs):
75 """
76 Add an event handler to be executed when the specified event is fired
77
78 Parameters
79 ----------
80 event_name: enum
81 event from ignite.trainer.TrainingEvents to attach the
82 handler to
83 handler: Callable
84 the callable event handler that should be invoked
85 args:
86 optional args to be passed to `handler`
87 kwargs:
88 optional keyword args to be passed to `handler`
89
90 Returns
91 -------
92 None
93 """
94 if event_name not in TrainingEvents.__members__.values():
95 self._logger.error("attempt to add event handler to non-existent event %s ",
96 event_name)
97 raise ValueError("Event {} not a valid training event".format(event_name))
98
99 if event_name not in self._event_handlers.keys():
100 self._event_handlers[event_name] = []
101
102 self._event_handlers[event_name].append((handler, args, kwargs))
103 self._logger.debug("added handler for event % ", event_name)
104
105 def _fire_event(self, event_name):
106 if event_name in self._event_handlers.keys():
107 self._logger.debug("firing handlers for event %s ", event_name)
108 for func, args, kwargs in self._event_handlers[event_name]:
109 func(self, *args, **kwargs)
110
111 def _train_one_epoch(self, training_data):
112 self._fire_event(TrainingEvents.TRAINING_EPOCH_STARTED)
113 start_time = time.time()
114
115 self.epoch_losses = []
116 for _, batch in enumerate(training_data, 1):
117 self._fire_event(TrainingEvents.TRAINING_ITERATION_STARTED)
118
119 training_step_result = self._training_update_function(batch)
120 if training_step_result is not None:
121 self.training_history.append(training_step_result)
122
123 self.current_iteration += 1
124
125 self._fire_event(TrainingEvents.TRAINING_ITERATION_COMPLETED)
126 if self.should_terminate:
127 return
128
129 time_taken = time.time() - start_time
130 hours, mins, secs = _to_hours_mins_secs(time_taken)
131 self._logger.info("Epoch[%s] Complete. Time taken: %02d:%02d:%02d", self.current_epoch, hours,
132 mins, secs)
133
134 self._fire_event(TrainingEvents.TRAINING_EPOCH_COMPLETED)
135
136 def validate(self, validation_data):
137 """ Evaluates the validation set"""
138 if self._validation_inference_function is None:
139 raise ValueError("Trainer must have a validation_inference_function in order to validate")
140
141 self.current_validation_iteration = 0
142 self._fire_event(TrainingEvents.VALIDATION_STARTING)
143 start_time = time.time()
144
145 for _, batch in enumerate(validation_data, 1):
146 self._fire_event(TrainingEvents.VALIDATION_ITERATION_STARTED)
147 validation_step_result = self._validation_inference_function(batch)
148 if validation_step_result is not None:
149 self.validation_history.append(validation_step_result)
150
151 self.current_validation_iteration += 1
152 self._fire_event(TrainingEvents.VALIDATION_ITERATION_COMPLETED)
153 if self.should_terminate:
154 break
155
156 time_taken = time.time() - start_time
157 hours, mins, secs = _to_hours_mins_secs(time_taken)
158 self._logger.info("Validation Complete. Time taken: %02d:%02d:%02d", hours, mins, secs)
159
160 self._fire_event(TrainingEvents.VALIDATION_COMPLETED)
161
162 def terminate(self):
163 """
164 Sends terminate signal to trainer, so that training terminates after the current iteration
165 """
166 self._logger.info("Terminate signaled to trainer. " +
167 "Training will stop after current iteration is finished")
168 self.should_terminate = True
169
170 def run(self, training_data, max_epochs=1):
171 """
172 Train the model, evaluate the validation set and update best parameters if the validation loss
173 improves.
174 In the event that the validation set is not run (or doesn't exist), the training loss is used
175 to update the best parameters.
176
177 Parameters
178 ----------
179 training_data : Iterable
180 Collection of training batches allowing repeated iteration (e.g., list or DataLoader)
181 max_epochs: int, optional
182 max epochs to train for [default=1]
183
184 Returns
185 -------
186 None
187 """
188
189 try:
190 self._logger.info("Training starting with max_epochs={}".format(max_epochs))
191
192 self.max_epochs = max_epochs
193
194 start_time = time.time()
195
196 self._fire_event(TrainingEvents.TRAINING_STARTED)
197 while self.current_epoch < max_epochs and not self.should_terminate:
198 self._fire_event(TrainingEvents.EPOCH_STARTED)
199 self._train_one_epoch(training_data)
200 if self.should_terminate:
201 break
202
203 self._fire_event(TrainingEvents.EPOCH_COMPLETED)
204 self.current_epoch += 1
205
206 self._fire_event(TrainingEvents.TRAINING_COMPLETED)
207 time_taken = time.time() - start_time
208 mins, secs = divmod(time_taken, 60)
209 hours, mins = divmod(mins, 60)
210 self._logger.info("Training complete. Time taken %02d:%02d:%02d" % (hours, mins, secs))
211 except BaseException as e:
212 self._logger.error("Training is terminating due to exception: %s", str(e))
213 self._fire_event(TrainingEvents.EXCEPTION_RAISED)
214 raise e
215
[end of ignite/trainer/trainer.py]
</code>
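To make the event-handler API in the listing above concrete, a small hedged sketch; the update function, the toy batches, and the handler are stand-ins invented for this example rather than anything from the repository:

```python
from ignite.trainer.trainer import Trainer, TrainingEvents

def dummy_update(batch):
    # pretend "loss": just a number derived from the batch
    return sum(batch) / len(batch)

def log_epoch(trainer):
    print("epoch", trainer.current_epoch,
          "iterations so far", trainer.current_iteration)

trainer = Trainer(dummy_update)
trainer.add_event_handler(TrainingEvents.EPOCH_COMPLETED, log_epoch)
trainer.run([[1, 2], [3, 4]], max_epochs=2)
```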
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ignite/trainer/trainer.py b/ignite/trainer/trainer.py
--- a/ignite/trainer/trainer.py
+++ b/ignite/trainer/trainer.py
@@ -5,9 +5,10 @@
from collections import Iterable
from enum import Enum
+from torch.autograd import Variable
from ignite.trainer.history import History
-__all__ = ["TrainingEvents", "Trainer"]
+__all__ = ["TrainingEvents", "Trainer", "create_supervised"]
class TrainingEvents(Enum):
@@ -212,3 +213,41 @@
self._logger.error("Training is terminating due to exception: %s", str(e))
self._fire_event(TrainingEvents.EXCEPTION_RAISED)
raise e
+
+
+def create_supervised(model, optimizer, loss_fn, cuda=False):
+ """
+ Factory function for creating a trainer for supervised models
+
+ Args:
+ model (torch.nn.Module): the model to train
+ optimizer (torch.optim.Optimizer): the optimizer to use
+ loss_fn (torch.nn loss function): the loss function to use
+ cuda (bool, optional): whether or not to transfer batch to GPU (default: False)
+
+ Returns:
+ Trainer: a trainer instance with supervised update and inference functions
+ """
+ def _prepare_batch(batch, volatile=False):
+ x, y = batch
+ if cuda:
+ x, y = x.cuda(), y.cuda()
+ return Variable(x, volatile=volatile), Variable(y, volatile=volatile)
+
+ def _update(batch):
+ model.train()
+ optimizer.zero_grad()
+ x, y = _prepare_batch(batch)
+ y_pred = model(x)
+ loss = loss_fn(y_pred, y)
+ loss.backward()
+ optimizer.step()
+ return loss.data.cpu()[0]
+
+ def _inference(batch):
+ model.eval()
+ x, y = _prepare_batch(batch, volatile=True)
+ y_pred = model(x)
+ return y_pred.data.cpu(), y.data.cpu()
+
+ return Trainer(_update, _inference)
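A possible usage of the factory added by this patch. The model, loss, and loader below are assumptions for illustration, and the sketch relies on the same pre-0.4, Variable-era PyTorch API that the patch itself uses:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
from ignite.trainer.trainer import create_supervised  # after applying the patch

model = nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# toy dataset: 32 random samples with binary labels
data = TensorDataset(torch.randn(32, 10), (torch.rand(32) > 0.5).long())
loader = DataLoader(data, batch_size=8)

trainer = create_supervised(model, optimizer, F.cross_entropy, cuda=False)
trainer.run(loader, max_epochs=2)
```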
| {"golden_diff": "diff --git a/ignite/trainer/trainer.py b/ignite/trainer/trainer.py\n--- a/ignite/trainer/trainer.py\n+++ b/ignite/trainer/trainer.py\n@@ -5,9 +5,10 @@\n from collections import Iterable\n \n from enum import Enum\n+from torch.autograd import Variable\n from ignite.trainer.history import History\n \n-__all__ = [\"TrainingEvents\", \"Trainer\"]\n+__all__ = [\"TrainingEvents\", \"Trainer\", \"create_supervised\"]\n \n \n class TrainingEvents(Enum):\n@@ -212,3 +213,41 @@\n self._logger.error(\"Training is terminating due to exception: %s\", str(e))\n self._fire_event(TrainingEvents.EXCEPTION_RAISED)\n raise e\n+\n+\n+def create_supervised(model, optimizer, loss_fn, cuda=False):\n+ \"\"\"\n+ Factory function for creating a trainer for supervised models\n+\n+ Args:\n+ model (torch.nn.Module): the model to train\n+ optimizer (torch.optim.Optimizer): the optimizer to use\n+ loss_fn (torch.nn loss function): the loss function to use\n+ cuda (bool, optional): whether or not to transfer batch to GPU (default: False)\n+\n+ Returns:\n+ Trainer: a trainer instance with supervised update and inference functions\n+ \"\"\"\n+ def _prepare_batch(batch, volatile=False):\n+ x, y = batch\n+ if cuda:\n+ x, y = x.cuda(), y.cuda()\n+ return Variable(x, volatile=volatile), Variable(y, volatile=volatile)\n+\n+ def _update(batch):\n+ model.train()\n+ optimizer.zero_grad()\n+ x, y = _prepare_batch(batch)\n+ y_pred = model(x)\n+ loss = loss_fn(y_pred, y)\n+ loss.backward()\n+ optimizer.step()\n+ return loss.data.cpu()[0]\n+\n+ def _inference(batch):\n+ model.eval()\n+ x, y = _prepare_batch(batch, volatile=True)\n+ y_pred = model(x)\n+ return y_pred.data.cpu(), y.data.cpu()\n+\n+ return Trainer(_update, _inference)\n", "issue": "Provide basic update + inference functions\nSee #37 for discussion on this. We should provide something for simple supervised training\r\n\r\ncc @jasonkriss\n", "before_files": [{"content": "from __future__ import division\n\nimport logging\nimport time\nfrom collections import Iterable\n\nfrom enum import Enum\nfrom ignite.trainer.history import History\n\n__all__ = [\"TrainingEvents\", \"Trainer\"]\n\n\nclass TrainingEvents(Enum):\n EPOCH_STARTED = \"epoch_started\"\n EPOCH_COMPLETED = \"epoch_completed\"\n TRAINING_EPOCH_STARTED = \"training_epoch_started\"\n TRAINING_EPOCH_COMPLETED = \"training_epoch_completed\"\n VALIDATION_STARTING = \"validation_starting\"\n VALIDATION_COMPLETED = \"validation_completed\"\n TRAINING_STARTED = \"training_started\"\n TRAINING_COMPLETED = \"training_completed\"\n TRAINING_ITERATION_STARTED = \"training_iteration_started\"\n TRAINING_ITERATION_COMPLETED = \"training_iteration_completed\"\n VALIDATION_ITERATION_STARTED = \"validation_iteration_started\"\n VALIDATION_ITERATION_COMPLETED = \"validation_iteration_completed\"\n EXCEPTION_RAISED = \"exception_raised\"\n\n\ndef _to_hours_mins_secs(time_taken):\n mins, secs = divmod(time_taken, 60)\n hours, mins = divmod(mins, 60)\n return hours, mins, secs\n\n\nclass Trainer(object):\n \"\"\"\n Generic trainer class.\n\n Training update and validation functions receive batches of data and return values which will\n be stored in the `training_history` and `validation_history`. The trainer defines multiple\n events in `TrainingEvents` for which the user can attach event handlers to. 
The events get\n passed the trainer, so they can access the training/validation history\n\n\n Parameters\n ----------\n training_update_function : callable\n Update function receiving the current training batch in each iteration\n\n validation_inference_function : callable\n Function receiving data and performing a feed forward without update\n \"\"\"\n\n def __init__(self, training_update_function, validation_inference_function=None):\n\n self._logger = self._get_logger()\n self._training_update_function = training_update_function\n self._validation_inference_function = validation_inference_function\n self._event_handlers = {}\n\n self.training_history = History()\n self.validation_history = History()\n self.current_iteration = 0\n self.current_validation_iteration = 0\n self.current_epoch = 0\n self.max_epochs = 0\n self.should_terminate = False\n\n def _get_logger(self):\n logger = logging.getLogger(__name__ + \".\" + self.__class__.__name__)\n logger.addHandler(logging.NullHandler())\n return logger\n\n def add_event_handler(self, event_name, handler, *args, **kwargs):\n \"\"\"\n Add an event handler to be executed when the specified event is fired\n\n Parameters\n ----------\n event_name: enum\n event from ignite.trainer.TrainingEvents to attach the\n handler to\n handler: Callable\n the callable event handler that should be invoked\n args:\n optional args to be passed to `handler`\n kwargs:\n optional keyword args to be passed to `handler`\n\n Returns\n -------\n None\n \"\"\"\n if event_name not in TrainingEvents.__members__.values():\n self._logger.error(\"attempt to add event handler to non-existent event %s \",\n event_name)\n raise ValueError(\"Event {} not a valid training event\".format(event_name))\n\n if event_name not in self._event_handlers.keys():\n self._event_handlers[event_name] = []\n\n self._event_handlers[event_name].append((handler, args, kwargs))\n self._logger.debug(\"added handler for event % \", event_name)\n\n def _fire_event(self, event_name):\n if event_name in self._event_handlers.keys():\n self._logger.debug(\"firing handlers for event %s \", event_name)\n for func, args, kwargs in self._event_handlers[event_name]:\n func(self, *args, **kwargs)\n\n def _train_one_epoch(self, training_data):\n self._fire_event(TrainingEvents.TRAINING_EPOCH_STARTED)\n start_time = time.time()\n\n self.epoch_losses = []\n for _, batch in enumerate(training_data, 1):\n self._fire_event(TrainingEvents.TRAINING_ITERATION_STARTED)\n\n training_step_result = self._training_update_function(batch)\n if training_step_result is not None:\n self.training_history.append(training_step_result)\n\n self.current_iteration += 1\n\n self._fire_event(TrainingEvents.TRAINING_ITERATION_COMPLETED)\n if self.should_terminate:\n return\n\n time_taken = time.time() - start_time\n hours, mins, secs = _to_hours_mins_secs(time_taken)\n self._logger.info(\"Epoch[%s] Complete. 
Time taken: %02d:%02d:%02d\", self.current_epoch, hours,\n mins, secs)\n\n self._fire_event(TrainingEvents.TRAINING_EPOCH_COMPLETED)\n\n def validate(self, validation_data):\n \"\"\" Evaluates the validation set\"\"\"\n if self._validation_inference_function is None:\n raise ValueError(\"Trainer must have a validation_inference_function in order to validate\")\n\n self.current_validation_iteration = 0\n self._fire_event(TrainingEvents.VALIDATION_STARTING)\n start_time = time.time()\n\n for _, batch in enumerate(validation_data, 1):\n self._fire_event(TrainingEvents.VALIDATION_ITERATION_STARTED)\n validation_step_result = self._validation_inference_function(batch)\n if validation_step_result is not None:\n self.validation_history.append(validation_step_result)\n\n self.current_validation_iteration += 1\n self._fire_event(TrainingEvents.VALIDATION_ITERATION_COMPLETED)\n if self.should_terminate:\n break\n\n time_taken = time.time() - start_time\n hours, mins, secs = _to_hours_mins_secs(time_taken)\n self._logger.info(\"Validation Complete. Time taken: %02d:%02d:%02d\", hours, mins, secs)\n\n self._fire_event(TrainingEvents.VALIDATION_COMPLETED)\n\n def terminate(self):\n \"\"\"\n Sends terminate signal to trainer, so that training terminates after the current iteration\n \"\"\"\n self._logger.info(\"Terminate signaled to trainer. \" +\n \"Training will stop after current iteration is finished\")\n self.should_terminate = True\n\n def run(self, training_data, max_epochs=1):\n \"\"\"\n Train the model, evaluate the validation set and update best parameters if the validation loss\n improves.\n In the event that the validation set is not run (or doesn't exist), the training loss is used\n to update the best parameters.\n\n Parameters\n ----------\n training_data : Iterable\n Collection of training batches allowing repeated iteration (e.g., list or DataLoader)\n max_epochs: int, optional\n max epochs to train for [default=1]\n\n Returns\n -------\n None\n \"\"\"\n\n try:\n self._logger.info(\"Training starting with max_epochs={}\".format(max_epochs))\n\n self.max_epochs = max_epochs\n\n start_time = time.time()\n\n self._fire_event(TrainingEvents.TRAINING_STARTED)\n while self.current_epoch < max_epochs and not self.should_terminate:\n self._fire_event(TrainingEvents.EPOCH_STARTED)\n self._train_one_epoch(training_data)\n if self.should_terminate:\n break\n\n self._fire_event(TrainingEvents.EPOCH_COMPLETED)\n self.current_epoch += 1\n\n self._fire_event(TrainingEvents.TRAINING_COMPLETED)\n time_taken = time.time() - start_time\n mins, secs = divmod(time_taken, 60)\n hours, mins = divmod(mins, 60)\n self._logger.info(\"Training complete. Time taken %02d:%02d:%02d\" % (hours, mins, secs))\n except BaseException as e:\n self._logger.error(\"Training is terminating due to exception: %s\", str(e))\n self._fire_event(TrainingEvents.EXCEPTION_RAISED)\n raise e\n", "path": "ignite/trainer/trainer.py"}]} | 2,809 | 470 |
gh_patches_debug_8615 | rasdani/github-patches | git_diff | secdev__scapy-373 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
unknown.version in egg.info and in the banner
Hi,
I'm porting 2.3.3 to OpenBSD. I had two issues:
1. After the install with setuptools, the generated .egg-info is called 'lib/python2.7/site-packages/scapy-unknown.version-py2.7.egg-info'.
I patched setup.py to hardcode the version, and then it worked:
```
--- setup.py.orig Tue Oct 18 10:44:43 2016
+++ setup.py Mon Oct 31 17:19:45 2016
@@ -47,7 +47,7 @@ if os.name == "nt":
setup(
name='scapy',
- version=__import__('scapy').VERSION,
+ version='2.3.3',
packages=[
'scapy',
'scapy/arch',
```
I now have lib/python2.7/site-packages/scapy-2.3.3-py2.7.egg-info
2. Running scapy, the banner says "Welcome to Scapy (unknown.version)" even with the setup.py change. I went through scapy-2.3.3/scapy/main.py but didn't find anything suspicious.
</issue>
<code>
[start of scapy/__init__.py]
1 ## This file is part of Scapy
2 ## See http://www.secdev.org/projects/scapy for more informations
3 ## Copyright (C) Philippe Biondi <[email protected]>
4 ## This program is published under a GPLv2 license
5
6 """
7 Scapy: create, send, sniff, dissect and manipulate network packets.
8
9 Usable either from an interactive console or as a Python library.
10 http://www.secdev.org/projects/scapy
11 """
12
13 import os
14 import re
15 import subprocess
16
17
18 _SCAPY_PKG_DIR = os.path.dirname(__file__)
19
20 def _version_from_git_describe():
21 """
22 Read the version from ``git describe``. It returns the latest tag with an
23 optional suffix if the current directory is not exactly on the tag.
24
25 Example::
26
27 $ git describe --always
28 v2.3.2-346-g164a52c075c8
29
30 The tag prefix (``v``) and the git commit sha1 (``-g164a52c075c8``) are
31 removed if present.
32
33 If the current directory is not exactly on the tag, a ``.devN`` suffix is
34 appended where N is the number of commits made after the last tag.
35
36 Example::
37
38 >>> _version_from_git_describe()
39 '2.3.2.dev346'
40 """
41 p = subprocess.Popen(['git', 'describe', '--always'], cwd=_SCAPY_PKG_DIR,
42 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
43
44 out, err = p.communicate()
45
46 if p.returncode == 0:
47 tag = out.strip()
48 match = re.match(r'^v?(.+?)-(\d+)-g[a-f0-9]+$', tag)
49 if match:
50 # remove the 'v' prefix and add a '.devN' suffix
51 return '%s.dev%s' % (match.group(1), match.group(2))
52 else:
53 # just remove the 'v' prefix
54 return re.sub(r'^v', '', tag)
55 else:
56 raise subprocess.CalledProcessError(p.returncode, err)
57
58 def _version():
59 version_file = os.path.join(_SCAPY_PKG_DIR, 'VERSION')
60 try:
61 tag = _version_from_git_describe()
62 # successfully read the tag from git, write it in VERSION for
63 # installation and/or archive generation.
64 with open(version_file, 'w') as f:
65 f.write(tag)
66 return tag
67 except:
68 # failed to read the tag from git, try to read it from a VERSION file
69 try:
70 with open(version_file, 'r') as f:
71 tag = f.read()
72 return tag
73 except:
74 return 'unknown.version'
75
76 VERSION = _version()
77
78 if __name__ == "__main__":
79 from scapy.main import interact
80 interact()
81
[end of scapy/__init__.py]
</code>
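To illustrate what `_version_from_git_describe` in the listing above extracts, here is a small standalone sketch using the same regular expression; the sample tags are made up for this example:

```python
import re

def parse_describe(tag):
    match = re.match(r'^v?(.+?)-(\d+)-g[a-f0-9]+$', tag)
    if match:
        # strip the 'v' prefix and add a '.devN' suffix
        return '%s.dev%s' % (match.group(1), match.group(2))
    # exactly on a tag: just strip the 'v' prefix
    return re.sub(r'^v', '', tag)

print(parse_describe('v2.3.2-346-g164a52c075c8'))  # -> 2.3.2.dev346
print(parse_describe('v2.3.3'))                    # -> 2.3.3
```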
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scapy/__init__.py b/scapy/__init__.py
--- a/scapy/__init__.py
+++ b/scapy/__init__.py
@@ -71,7 +71,17 @@
tag = f.read()
return tag
except:
- return 'unknown.version'
+ # Rely on git archive "export-subst" git attribute.
+ # See 'man gitattributes' for more details.
+ git_archive_id = '$Format:%h %d$'
+ sha1 = git_archive_id.strip().split()[0]
+ match = re.search(r'tag:(\S+)', git_archive_id)
+ if match:
+ return match.group(1)
+ elif sha1:
+ return sha1
+ else:
+ return 'unknown.version'
VERSION = _version()
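A sketch of how the fallback added in this patch behaves once `git archive` has substituted the format string. The substituted value below is only a stand-in; the real expansion of `%h %d` will differ, and without substitution (or without a matching `tag:` fragment) the code falls back to the abbreviated commit hash or to 'unknown.version':

```python
import re

# Stand-in for what the '$Format:%h %d$' placeholder might contain after
# 'git archive' substitution (requires an export-subst gitattribute).
git_archive_id = '164a52c tag:v2.3.3'

sha1 = git_archive_id.strip().split()[0]
match = re.search(r'tag:(\S+)', git_archive_id)
if match:
    print(match.group(1))   # 'v2.3.3' for this stand-in value
elif sha1:
    print(sha1)             # falls back to the abbreviated commit hash
else:
    print('unknown.version')
```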
| {"golden_diff": "diff --git a/scapy/__init__.py b/scapy/__init__.py\n--- a/scapy/__init__.py\n+++ b/scapy/__init__.py\n@@ -71,7 +71,17 @@\n tag = f.read()\n return tag\n except:\n- return 'unknown.version'\n+ # Rely on git archive \"export-subst\" git attribute.\n+ # See 'man gitattributes' for more details.\n+ git_archive_id = '$Format:%h %d$'\n+ sha1 = git_archive_id.strip().split()[0]\n+ match = re.search(r'tag:(\\S+)', git_archive_id)\n+ if match:\n+ return match.group(1)\n+ elif sha1:\n+ return sha1\n+ else:\n+ return 'unknown.version'\n \n VERSION = _version()\n", "issue": "unknown.version in egg.info and in the banner\nHi,\r\n\r\nI'm porting 2.3.3 to OpenBSD. I had two issues:\r\n1. after the install with setuptools, the .egg-info generated is called 'lib/python2.7/site-packages/scapy-unknown.version-py2.7.egg-info'\r\n\r\nI patched setup.py to hardcode the version then it worked\r\n```\r\n--- setup.py.orig Tue Oct 18 10:44:43 2016\r\n+++ setup.py Mon Oct 31 17:19:45 2016\r\n@@ -47,7 +47,7 @@ if os.name == \"nt\":\r\n \r\n setup(\r\n name='scapy',\r\n- version=__import__('scapy').VERSION,\r\n+ version='2.3.3',\r\n packages=[\r\n 'scapy',\r\n 'scapy/arch',\r\n\r\n```\r\nI now have lib/python2.7/site-packages/scapy-2.3.3-py2.7.egg-info\r\n\r\n2. running scapy it says \"Welcome to Scapy (unknown.version)\" even with the setup.py change. I went through scapy-2.3.3/scapy/main.py but I didn't find anything suspicious.\n", "before_files": [{"content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## This program is published under a GPLv2 license\n\n\"\"\"\nScapy: create, send, sniff, dissect and manipulate network packets.\n\nUsable either from an interactive console or as a Python library.\nhttp://www.secdev.org/projects/scapy\n\"\"\"\n\nimport os\nimport re\nimport subprocess\n\n\n_SCAPY_PKG_DIR = os.path.dirname(__file__)\n\ndef _version_from_git_describe():\n \"\"\"\n Read the version from ``git describe``. 
It returns the latest tag with an\n optional suffix if the current directory is not exactly on the tag.\n\n Example::\n\n $ git describe --always\n v2.3.2-346-g164a52c075c8\n\n The tag prefix (``v``) and the git commit sha1 (``-g164a52c075c8``) are\n removed if present.\n\n If the current directory is not exactly on the tag, a ``.devN`` suffix is\n appended where N is the number of commits made after the last tag.\n\n Example::\n\n >>> _version_from_git_describe()\n '2.3.2.dev346'\n \"\"\"\n p = subprocess.Popen(['git', 'describe', '--always'], cwd=_SCAPY_PKG_DIR,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n out, err = p.communicate()\n\n if p.returncode == 0:\n tag = out.strip()\n match = re.match(r'^v?(.+?)-(\\d+)-g[a-f0-9]+$', tag)\n if match:\n # remove the 'v' prefix and add a '.devN' suffix\n return '%s.dev%s' % (match.group(1), match.group(2))\n else:\n # just remove the 'v' prefix\n return re.sub(r'^v', '', tag)\n else:\n raise subprocess.CalledProcessError(p.returncode, err)\n\ndef _version():\n version_file = os.path.join(_SCAPY_PKG_DIR, 'VERSION')\n try:\n tag = _version_from_git_describe()\n # successfully read the tag from git, write it in VERSION for\n # installation and/or archive generation.\n with open(version_file, 'w') as f:\n f.write(tag)\n return tag\n except:\n # failed to read the tag from git, try to read it from a VERSION file\n try:\n with open(version_file, 'r') as f:\n tag = f.read()\n return tag\n except:\n return 'unknown.version'\n\nVERSION = _version()\n\nif __name__ == \"__main__\":\n from scapy.main import interact\n interact()\n", "path": "scapy/__init__.py"}]} | 1,590 | 186 |
gh_patches_debug_24772 | rasdani/github-patches | git_diff | Flexget__Flexget-548 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[add] jinja split: Adds split into jinja filters
Sorry about this, I found the solution
</issue>
<code>
[start of flexget/plugins/input/trakt_emit.py]
1 from __future__ import unicode_literals, division, absolute_import
2 import hashlib
3 import logging
4 from urlparse import urljoin
5
6 from requests import RequestException
7
8 from flexget import plugin
9 from flexget.entry import Entry
10 from flexget.event import event
11 from flexget.utils import json
12 from flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url
13
14 log = logging.getLogger('trakt_emit')
15
16
17 class TraktEmit(object):
18 """
19 Creates an entry for the latest or the next item in your watched or collected
20 episodes in your trakt account.
21
22 Syntax:
23
24 trakt_emit:
25 username: <value>
26 position: <last|next>
27 context: <collect|collected|watch|watched>
28 list: <value>
29
30 Options username, password and api_key are required.
31
32 """
33
34 schema = {
35 'type': 'object',
36 'properties': {
37 'username': {'type': 'string'},
38 'password': {'type': 'string'},
39 'position': {'type': 'string', 'enum': ['last', 'next'], 'default': 'next'},
40 'context': {'type': 'string', 'enum': ['watched', 'collected'], 'default': 'watched'},
41 'list': {'type': 'string'}
42 },
43 'required': ['username'],
44 'additionalProperties': False
45 }
46
47 def on_task_input(self, task, config):
48 session = get_session(config['username'], config.get('password'))
49 listed_series = {}
50 if config.get('list'):
51 url = urljoin(API_URL, 'users/%s/' % config['username'])
52 if config['list'] in ['collection', 'watchlist', 'watched']:
53 url = urljoin(url, '%s/shows' % config['list'])
54 else:
55 url = urljoin(url, 'lists/%s/items' % make_list_slug(config['list']))
56 try:
57 data = session.get(url).json()
58 except RequestException as e:
59 raise plugin.PluginError('Unable to get trakt list `%s`: %s' % (config['list'], e))
60 if not data:
61 log.warning('The list "%s" is empty.' % config['list'])
62 return
63 for item in data:
64 if item['show'] is not None:
65 if not item['show']['title']:
66 # Seems we can get entries with a blank show title sometimes
67 log.warning('Found trakt list show with no series name.')
68 continue
69 trakt_id = item['show']['ids']['trakt']
70 listed_series[trakt_id] = {
71 'series_name': item['show']['title'],
72 'trakt_id': trakt_id,
73 'tvdb_id': item['show']['ids']['tvdb']}
74 context = config['context']
75 if context == 'collected':
76 context = 'collection'
77 entries = []
78 for trakt_id, fields in listed_series.iteritems():
79 url = get_api_url('shows', trakt_id, 'progress', context)
80 try:
81 data = session.get(url).json()
82 except RequestException as e:
83 raise plugin.PluginError('TODO: error message')
84 if config['position'] == 'next' and data.get('next_episode'):
85 # If the next episode is already in the trakt database, we'll get it here
86 eps = data['next_episode']['season']
87 epn = data['next_episode']['number']
88 else:
89 # If we need last ep, or next_episode was not provided, search for last ep
90 for seas in reversed(data['seasons']):
91 # Find the first season with collected/watched episodes
92 if seas['completed'] > 0:
93 eps = seas['number']
94 # Pick the highest collected/watched episode
95 epn = max(item['number'] for item in seas['episodes'] if item['completed'])
96 # If we are in next episode mode, we have to increment this number
97 if config['position'] == 'next':
98 if seas['completed'] >= seas['aired']:
99 # TODO: next_episode doesn't count unaired episodes right now, this will skip to next
100 # season too early when there are episodes left to air this season.
101 eps += 1
102 epn = 1
103 else:
104 epn += 1
105 break
106 if eps and epn:
107 entry = self.make_entry(fields, eps, epn)
108 entries.append(entry)
109 return entries
110
111 def make_entry(self, fields, season, episode):
112 entry = Entry()
113 entry.update(fields)
114 entry['series_season'] = season
115 entry['series_episode'] = episode
116 entry['series_id_type'] = 'ep'
117 entry['series_id'] = 'S%02dE%02d' % (season, episode)
118 entry['title'] = entry['series_name'] + ' ' + entry['series_id']
119 entry['url'] = 'http://trakt.tv/shows/%s/seasons/%s/episodes/%s' % (fields['trakt_id'], season, episode)
120 return entry
121
122
123 @event('plugin.register')
124 def register_plugin():
125 plugin.register(TraktEmit, 'trakt_emit', api_ver=2)
126
[end of flexget/plugins/input/trakt_emit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flexget/plugins/input/trakt_emit.py b/flexget/plugins/input/trakt_emit.py
--- a/flexget/plugins/input/trakt_emit.py
+++ b/flexget/plugins/input/trakt_emit.py
@@ -1,5 +1,4 @@
from __future__ import unicode_literals, division, absolute_import
-import hashlib
import logging
from urlparse import urljoin
@@ -8,7 +7,6 @@
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
-from flexget.utils import json
from flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url
log = logging.getLogger('trakt_emit')
@@ -103,6 +101,12 @@
else:
epn += 1
break
+ else:
+ if config['position'] == 'next':
+ eps = epn = 1
+ else:
+ # There were no watched/collected episodes, nothing to emit in 'last' mode
+ continue
if eps and epn:
entry = self.make_entry(fields, eps, epn)
entries.append(entry)
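The `else:` added here is Python's for-else clause: it runs only when the loop finishes without hitting `break`, i.e. when no season had any watched or collected episodes. A tiny self-contained sketch of that construct, with made-up data unrelated to the Trakt payload:

```python
def next_position(seasons):
    eps = epn = None
    for seas in reversed(seasons):
        if seas['completed'] > 0:
            eps, epn = seas['number'], seas['completed']
            break
    else:
        # runs only if the loop never hit `break`
        eps = epn = 1
    return eps, epn

print(next_position([{'number': 1, 'completed': 0}]))  # (1, 1)
print(next_position([{'number': 3, 'completed': 2}]))  # (3, 2)
```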
| {"golden_diff": "diff --git a/flexget/plugins/input/trakt_emit.py b/flexget/plugins/input/trakt_emit.py\n--- a/flexget/plugins/input/trakt_emit.py\n+++ b/flexget/plugins/input/trakt_emit.py\n@@ -1,5 +1,4 @@\n from __future__ import unicode_literals, division, absolute_import\n-import hashlib\n import logging\n from urlparse import urljoin\n \n@@ -8,7 +7,6 @@\n from flexget import plugin\n from flexget.entry import Entry\n from flexget.event import event\n-from flexget.utils import json\n from flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url\n \n log = logging.getLogger('trakt_emit')\n@@ -103,6 +101,12 @@\n else:\n epn += 1\n break\n+ else:\n+ if config['position'] == 'next':\n+ eps = epn = 1\n+ else:\n+ # There were no watched/collected episodes, nothing to emit in 'last' mode\n+ continue\n if eps and epn:\n entry = self.make_entry(fields, eps, epn)\n entries.append(entry)\n", "issue": "[add] jinja split: Adds split into jinja filters\nSorry about this, I found the solution\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nimport hashlib\nimport logging\nfrom urlparse import urljoin\n\nfrom requests import RequestException\n\nfrom flexget import plugin\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.utils import json\nfrom flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url\n\nlog = logging.getLogger('trakt_emit')\n\n\nclass TraktEmit(object):\n \"\"\"\n Creates an entry for the latest or the next item in your watched or collected\n episodes in your trakt account.\n\n Syntax:\n\n trakt_emit:\n username: <value>\n position: <last|next>\n context: <collect|collected|watch|watched>\n list: <value>\n\n Options username, password and api_key are required.\n\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'position': {'type': 'string', 'enum': ['last', 'next'], 'default': 'next'},\n 'context': {'type': 'string', 'enum': ['watched', 'collected'], 'default': 'watched'},\n 'list': {'type': 'string'}\n },\n 'required': ['username'],\n 'additionalProperties': False\n }\n\n def on_task_input(self, task, config):\n session = get_session(config['username'], config.get('password'))\n listed_series = {}\n if config.get('list'):\n url = urljoin(API_URL, 'users/%s/' % config['username'])\n if config['list'] in ['collection', 'watchlist', 'watched']:\n url = urljoin(url, '%s/shows' % config['list'])\n else:\n url = urljoin(url, 'lists/%s/items' % make_list_slug(config['list']))\n try:\n data = session.get(url).json()\n except RequestException as e:\n raise plugin.PluginError('Unable to get trakt list `%s`: %s' % (config['list'], e))\n if not data:\n log.warning('The list \"%s\" is empty.' 
% config['list'])\n return\n for item in data:\n if item['show'] is not None:\n if not item['show']['title']:\n # Seems we can get entries with a blank show title sometimes\n log.warning('Found trakt list show with no series name.')\n continue\n trakt_id = item['show']['ids']['trakt']\n listed_series[trakt_id] = {\n 'series_name': item['show']['title'],\n 'trakt_id': trakt_id,\n 'tvdb_id': item['show']['ids']['tvdb']}\n context = config['context']\n if context == 'collected':\n context = 'collection'\n entries = []\n for trakt_id, fields in listed_series.iteritems():\n url = get_api_url('shows', trakt_id, 'progress', context)\n try:\n data = session.get(url).json()\n except RequestException as e:\n raise plugin.PluginError('TODO: error message')\n if config['position'] == 'next' and data.get('next_episode'):\n # If the next episode is already in the trakt database, we'll get it here\n eps = data['next_episode']['season']\n epn = data['next_episode']['number']\n else:\n # If we need last ep, or next_episode was not provided, search for last ep\n for seas in reversed(data['seasons']):\n # Find the first season with collected/watched episodes\n if seas['completed'] > 0:\n eps = seas['number']\n # Pick the highest collected/watched episode\n epn = max(item['number'] for item in seas['episodes'] if item['completed'])\n # If we are in next episode mode, we have to increment this number\n if config['position'] == 'next':\n if seas['completed'] >= seas['aired']:\n # TODO: next_episode doesn't count unaired episodes right now, this will skip to next\n # season too early when there are episodes left to air this season.\n eps += 1\n epn = 1\n else:\n epn += 1\n break\n if eps and epn:\n entry = self.make_entry(fields, eps, epn)\n entries.append(entry)\n return entries\n\n def make_entry(self, fields, season, episode):\n entry = Entry()\n entry.update(fields)\n entry['series_season'] = season\n entry['series_episode'] = episode\n entry['series_id_type'] = 'ep'\n entry['series_id'] = 'S%02dE%02d' % (season, episode)\n entry['title'] = entry['series_name'] + ' ' + entry['series_id']\n entry['url'] = 'http://trakt.tv/shows/%s/seasons/%s/episodes/%s' % (fields['trakt_id'], season, episode)\n return entry\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(TraktEmit, 'trakt_emit', api_ver=2)\n", "path": "flexget/plugins/input/trakt_emit.py"}]} | 1,976 | 257 |
gh_patches_debug_918 | rasdani/github-patches | git_diff | vas3k__vas3k.club-260 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The check_PR action is broken for new pull requests
This is where things went wrong, after a couple of changes to requirements and the Dockerfiles: https://github.com/vas3k/vas3k.club/blob/master/.github/workflows/CI.yml
Because of this, all new pull requests come up red and can only be merged by a stern admin hand. We should rethink this CI somehow. Does anyone have ideas?
Essentially, what matters to me is the linters and that Docker comes up successfully with the new code. Nothing else for now.
</issue>
<code>
[start of utils/images.py]
1 import io
2 import logging
3 import os
4 from urllib.parse import urlparse
5
6 import requests
7 from PIL import Image
8 from django.conf import settings
9
10 log = logging.getLogger(__name__)
11
12
13 def upload_image_bytes(
14 filename, data, resize=(192, 192), convert_to=None, quality=None
15 ):
16 if not data:
17 return None
18
19 if resize:
20 try:
21 image = Image.open(data)
22 except Exception as ex:
23 log.warning(f"Bad image data: {ex}")
24 return None
25
26 image.thumbnail(resize)
27 saved_image = io.BytesIO()
28 saved_image.name = filename
29
30 try:
31 image.save(saved_image)
32 except OSError:
33 log.warning(f"Error saving image data: {ex}")
34 return None
35
36 data = saved_image.getvalue()
37
38 upload_params = {
39 "code": settings.MEDIA_UPLOAD_CODE
40 }
41
42 if convert_to:
43 upload_params["convert_to"] = convert_to
44
45 if quality:
46 upload_params["quality"] = quality
47
48 try:
49 uploaded = requests.post(
50 url=settings.MEDIA_UPLOAD_URL,
51 params=upload_params,
52 files={"media": (filename, data)},
53 )
54 except requests.exceptions.RequestException as ex:
55 log.error(f"Image upload error: {ex}")
56 return None
57
58 if 200 <= uploaded.status_code <= 299:
59 try:
60 response_data = uploaded.json()
61 except Exception as ex:
62 log.error(f"Image upload error: {ex} ({uploaded.content})")
63 return None
64
65 return response_data["uploaded"][0]
66
67 return None
68
69
70 def upload_image_from_url(url, resize=(192, 192), convert_to="jpg", quality=90):
71 if settings.DEBUG or not settings.MEDIA_UPLOAD_URL or not settings.MEDIA_UPLOAD_CODE:
72 return url
73
74 if not url:
75 return None
76
77 image_name = os.path.basename(urlparse(url).path)
78 if "." not in image_name:
79 image_name += ".jpg"
80
81 try:
82 image_data = io.BytesIO(requests.get(url).content)
83 except requests.exceptions.RequestException:
84 return None
85
86 return upload_image_bytes(image_name, image_data, resize=resize, convert_to=convert_to, quality=quality)
87
[end of utils/images.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/utils/images.py b/utils/images.py
--- a/utils/images.py
+++ b/utils/images.py
@@ -29,7 +29,7 @@
try:
image.save(saved_image)
- except OSError:
+ except OSError as ex:
log.warning(f"Error saving image data: {ex}")
return None
| {"golden_diff": "diff --git a/utils/images.py b/utils/images.py\n--- a/utils/images.py\n+++ b/utils/images.py\n@@ -29,7 +29,7 @@\n \n try:\n image.save(saved_image)\n- except OSError:\n+ except OSError as ex:\n log.warning(f\"Error saving image data: {ex}\")\n return None\n", "issue": "\u0421\u043b\u043e\u043c\u0430\u043b\u0441\u044f check_PR \u044d\u043a\u0448\u043d \u043d\u0430 \u043d\u043e\u0432\u044b\u0435 \u043f\u0443\u043b\u043b\u0440\u0435\u043a\u0432\u0435\u0441\u0442\u044b\n\u0412\u043e\u0442 \u0437\u0434\u0435\u0441\u044c \u0432\u0441\u0435 \u043f\u043e\u0448\u043b\u043e \u043d\u0435 \u0442\u0430\u043a \u043f\u043e\u0441\u043b\u0435 \u043f\u0430\u0440\u044b \u0438\u0437\u043c\u0435\u043d\u0435\u043d\u0438\u0439 \u0432 requirements \u0438 \u0434\u043e\u043a\u0435\u0440\u0444\u0430\u0439\u043b\u0430\u0445: https://github.com/vas3k/vas3k.club/blob/master/.github/workflows/CI.yml\r\n\r\n\u0418\u0437-\u0437\u0430 \u044d\u0442\u043e\u0433\u043e \u0432\u0441\u0435 \u043d\u043e\u0432\u044b\u0435 \u043f\u0443\u043b\u043b\u0440\u0435\u043a\u0432\u0435\u0441\u0442\u044b \u043a\u0440\u0430\u0441\u043d\u0435\u043d\u044c\u043a\u0438\u0435 \u0438 \u043c\u0435\u0440\u0436\u0438\u0442\u044c \u0438\u0445 \u043f\u0440\u0438\u0445\u043e\u0434\u0438\u0442\u0441\u044f \u0442\u043e\u043b\u044c\u043a\u043e \u0441\u0443\u0440\u043e\u0432\u043e\u0439 \u0430\u0434\u043c\u0438\u043d\u0441\u043a\u043e\u0439 \u0440\u0443\u043a\u043e\u0439. \u041d\u0430\u0434\u043e \u0431\u044b \u043f\u0435\u0440\u0435\u043e\u0441\u043c\u044b\u0441\u043b\u0438\u0442\u044c \u044d\u0442\u043e\u0442 CI \u043a\u0430\u043a-\u043d\u0438\u0431\u0443\u0434\u044c. \u0423 \u043a\u043e\u0433\u043e \u0435\u0441\u0442\u044c \u0438\u0434\u0435\u0438?\r\n\r\n\u041f\u043e \u0441\u0443\u0442\u0438 \u043c\u043d\u0435 \u0432\u0430\u0436\u043d\u044b \u043b\u0438\u043d\u0442\u0435\u0440\u044b \u0438 \u0447\u0442\u043e\u0431\u044b \u0434\u043e\u043a\u0435\u0440 \u0441 \u043d\u043e\u0432\u044b\u043c \u043a\u043e\u0434\u043e\u043c \u0443\u0441\u043f\u0435\u0448\u043d\u043e \u043f\u043e\u0434\u043d\u0438\u043c\u0430\u043b\u0441\u044f. 
\u041e\u0441\u0442\u0430\u043b\u044c\u043d\u043e\u0433\u043e \u043f\u043e\u043a\u0430 \u043d\u0435\u0442.\n", "before_files": [{"content": "import io\nimport logging\nimport os\nfrom urllib.parse import urlparse\n\nimport requests\nfrom PIL import Image\nfrom django.conf import settings\n\nlog = logging.getLogger(__name__)\n\n\ndef upload_image_bytes(\n filename, data, resize=(192, 192), convert_to=None, quality=None\n):\n if not data:\n return None\n\n if resize:\n try:\n image = Image.open(data)\n except Exception as ex:\n log.warning(f\"Bad image data: {ex}\")\n return None\n\n image.thumbnail(resize)\n saved_image = io.BytesIO()\n saved_image.name = filename\n\n try:\n image.save(saved_image)\n except OSError:\n log.warning(f\"Error saving image data: {ex}\")\n return None\n\n data = saved_image.getvalue()\n\n upload_params = {\n \"code\": settings.MEDIA_UPLOAD_CODE\n }\n\n if convert_to:\n upload_params[\"convert_to\"] = convert_to\n\n if quality:\n upload_params[\"quality\"] = quality\n\n try:\n uploaded = requests.post(\n url=settings.MEDIA_UPLOAD_URL,\n params=upload_params,\n files={\"media\": (filename, data)},\n )\n except requests.exceptions.RequestException as ex:\n log.error(f\"Image upload error: {ex}\")\n return None\n\n if 200 <= uploaded.status_code <= 299:\n try:\n response_data = uploaded.json()\n except Exception as ex:\n log.error(f\"Image upload error: {ex} ({uploaded.content})\")\n return None\n\n return response_data[\"uploaded\"][0]\n\n return None\n\n\ndef upload_image_from_url(url, resize=(192, 192), convert_to=\"jpg\", quality=90):\n if settings.DEBUG or not settings.MEDIA_UPLOAD_URL or not settings.MEDIA_UPLOAD_CODE:\n return url\n\n if not url:\n return None\n\n image_name = os.path.basename(urlparse(url).path)\n if \".\" not in image_name:\n image_name += \".jpg\"\n\n try:\n image_data = io.BytesIO(requests.get(url).content)\n except requests.exceptions.RequestException:\n return None\n\n return upload_image_bytes(image_name, image_data, resize=resize, convert_to=convert_to, quality=quality)\n", "path": "utils/images.py"}]} | 1,349 | 75 |
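The patch for this instance binds the caught exception with `as ex`; without it, the `ex` referenced in the log call is an undefined name, which is exactly the kind of error a lint step in CI flags (flake8 reports undefined names). A standalone sketch of the pitfall (toy code, not repository code):

```python
import logging

logging.basicConfig(level=logging.WARNING)
log = logging.getLogger("sketch")


def broken(path):
    try:
        raise OSError("disk full")
    except OSError:  # exception not bound ...
        log.warning(f"Error saving image data: {ex}")  # ... so `ex` raises NameError here
        return None


def fixed(path):
    try:
        raise OSError("disk full")
    except OSError as ex:  # bind the exception explicitly
        log.warning(f"Error saving image data: {ex}")
        return None


fixed("image.png")  # logs: Error saving image data: disk full
try:
    broken("image.png")
except NameError as err:
    print(f"broken() masked the real error with: {err}")
```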
gh_patches_debug_3052 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-1486 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The type of `n_gram` is mislabeled as bool, which should be int type.
## 🐛 Bug
In Translation Task:
The type of `n_gram` is mislabeled as bool, which should be int type.
### To Reproduce
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
```
flash translation from_hf_datasets --help
```
The error raised:
```
translation: error: Configuration check failed :: Parser key "model.n_gram": Expected a <class 'bool'> but got "4"
```
</issue>
<code>
[start of flash/text/seq2seq/translation/model.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, Dict, Optional, Union
15
16 from torchmetrics import BLEUScore
17
18 from flash.core.utilities.imports import _TM_GREATER_EQUAL_0_7_0
19 from flash.core.utilities.types import LOSS_FN_TYPE, LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE
20 from flash.text.seq2seq.core.model import Seq2SeqTask
21
22
23 class TranslationTask(Seq2SeqTask):
24 """The ``TranslationTask`` is a :class:`~flash.Task` for Seq2Seq text translation. For more details, see
25 :ref:`translation`.
26
27 You can change the backbone to any translation model from `HuggingFace/transformers
28 <https://huggingface.co/models?filter=pytorch&pipeline_tag=translation>`__ using the ``backbone`` argument.
29
30 Args:
31 backbone: backbone model to use for the task.
32 max_source_length: The maximum length to pad / truncate input sequences to.
33 max_target_length: The maximum length to pad / truncate target sequences to.
34 padding: The type of padding to apply. One of: "longest" or ``True``, "max_length", "do_not_pad" or
35 ``False``.
36 loss_fn: Loss function for training.
37 optimizer: Optimizer to use for training.
38 lr_scheduler: The LR scheduler to use during training.
39 metrics: Metrics to compute for training and evaluation. Defauls to calculating the BLEU metric.
40 Changing this argument currently has no effect.
41 learning_rate: Learning rate to use for training, defaults to `1e-5`
42 num_beams: Number of beams to use in validation when generating predictions. Defaults to `4`
43 n_gram: Maximum n_grams to use in metric calculation. Defaults to `4`
44 smooth: Apply smoothing in BLEU calculation. Defaults to `True`
45 enable_ort: Enable Torch ONNX Runtime Optimization: https://onnxruntime.ai/docs/#onnx-runtime-for-training
46 """
47
48 def __init__(
49 self,
50 backbone: str = "t5-small",
51 tokenizer_kwargs: Optional[Dict[str, Any]] = None,
52 max_source_length: int = 128,
53 max_target_length: int = 128,
54 padding: Union[str, bool] = "max_length",
55 loss_fn: LOSS_FN_TYPE = None,
56 optimizer: OPTIMIZER_TYPE = "Adam",
57 lr_scheduler: LR_SCHEDULER_TYPE = None,
58 metrics: METRICS_TYPE = None,
59 learning_rate: Optional[float] = None,
60 num_beams: Optional[int] = 4,
61 n_gram: bool = 4,
62 smooth: bool = True,
63 enable_ort: bool = False,
64 ):
65 self.save_hyperparameters()
66 super().__init__(
67 backbone=backbone,
68 tokenizer_kwargs=tokenizer_kwargs,
69 max_source_length=max_source_length,
70 max_target_length=max_target_length,
71 padding=padding,
72 loss_fn=loss_fn,
73 optimizer=optimizer,
74 lr_scheduler=lr_scheduler,
75 metrics=metrics,
76 learning_rate=learning_rate,
77 num_beams=num_beams,
78 enable_ort=enable_ort,
79 )
80 self.bleu = BLEUScore(
81 n_gram=n_gram,
82 smooth=smooth,
83 )
84
85 @property
86 def task(self) -> str:
87 return "translation"
88
89 def compute_metrics(self, generated_tokens, batch, prefix):
90 reference_corpus = self.decode(batch["labels"])
91 # wrap targets in list as score expects a list of potential references
92 reference_corpus = [[reference] for reference in reference_corpus]
93
94 translate_corpus = self.decode(generated_tokens)
95 translate_corpus = [line for line in translate_corpus]
96
97 if _TM_GREATER_EQUAL_0_7_0:
98 result = self.bleu(translate_corpus, reference_corpus)
99 else:
100 result = self.bleu(reference_corpus, translate_corpus)
101 self.log(f"{prefix}_bleu_score", result, on_step=False, on_epoch=True, prog_bar=True)
102
[end of flash/text/seq2seq/translation/model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flash/text/seq2seq/translation/model.py b/flash/text/seq2seq/translation/model.py
--- a/flash/text/seq2seq/translation/model.py
+++ b/flash/text/seq2seq/translation/model.py
@@ -58,7 +58,7 @@
metrics: METRICS_TYPE = None,
learning_rate: Optional[float] = None,
num_beams: Optional[int] = 4,
- n_gram: bool = 4,
+ n_gram: int = 4,
smooth: bool = True,
enable_ort: bool = False,
):
| {"golden_diff": "diff --git a/flash/text/seq2seq/translation/model.py b/flash/text/seq2seq/translation/model.py\n--- a/flash/text/seq2seq/translation/model.py\n+++ b/flash/text/seq2seq/translation/model.py\n@@ -58,7 +58,7 @@\n metrics: METRICS_TYPE = None,\n learning_rate: Optional[float] = None,\n num_beams: Optional[int] = 4,\n- n_gram: bool = 4,\n+ n_gram: int = 4,\n smooth: bool = True,\n enable_ort: bool = False,\n ):\n", "issue": "The type of `n_gram` is mislabeled as bool, which should be int type.\n## \ud83d\udc1b Bug\r\n\r\nIn Translation Task:\r\nThe type of `n_gram` is mislabeled as bool, which should be int type.\r\n\r\n### To Reproduce\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n```\r\nflash translation from_hf_datasets --help\r\n```\r\nThe error raised:\r\n```\r\ntranslation: error: Configuration check failed :: Parser key \"model.n_gram\": Expected a <class 'bool'> but got \"4\"\r\n```\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional, Union\n\nfrom torchmetrics import BLEUScore\n\nfrom flash.core.utilities.imports import _TM_GREATER_EQUAL_0_7_0\nfrom flash.core.utilities.types import LOSS_FN_TYPE, LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE\nfrom flash.text.seq2seq.core.model import Seq2SeqTask\n\n\nclass TranslationTask(Seq2SeqTask):\n \"\"\"The ``TranslationTask`` is a :class:`~flash.Task` for Seq2Seq text translation. For more details, see\n :ref:`translation`.\n\n You can change the backbone to any translation model from `HuggingFace/transformers\n <https://huggingface.co/models?filter=pytorch&pipeline_tag=translation>`__ using the ``backbone`` argument.\n\n Args:\n backbone: backbone model to use for the task.\n max_source_length: The maximum length to pad / truncate input sequences to.\n max_target_length: The maximum length to pad / truncate target sequences to.\n padding: The type of padding to apply. One of: \"longest\" or ``True``, \"max_length\", \"do_not_pad\" or\n ``False``.\n loss_fn: Loss function for training.\n optimizer: Optimizer to use for training.\n lr_scheduler: The LR scheduler to use during training.\n metrics: Metrics to compute for training and evaluation. Defauls to calculating the BLEU metric.\n Changing this argument currently has no effect.\n learning_rate: Learning rate to use for training, defaults to `1e-5`\n num_beams: Number of beams to use in validation when generating predictions. Defaults to `4`\n n_gram: Maximum n_grams to use in metric calculation. Defaults to `4`\n smooth: Apply smoothing in BLEU calculation. 
Defaults to `True`\n enable_ort: Enable Torch ONNX Runtime Optimization: https://onnxruntime.ai/docs/#onnx-runtime-for-training\n \"\"\"\n\n def __init__(\n self,\n backbone: str = \"t5-small\",\n tokenizer_kwargs: Optional[Dict[str, Any]] = None,\n max_source_length: int = 128,\n max_target_length: int = 128,\n padding: Union[str, bool] = \"max_length\",\n loss_fn: LOSS_FN_TYPE = None,\n optimizer: OPTIMIZER_TYPE = \"Adam\",\n lr_scheduler: LR_SCHEDULER_TYPE = None,\n metrics: METRICS_TYPE = None,\n learning_rate: Optional[float] = None,\n num_beams: Optional[int] = 4,\n n_gram: bool = 4,\n smooth: bool = True,\n enable_ort: bool = False,\n ):\n self.save_hyperparameters()\n super().__init__(\n backbone=backbone,\n tokenizer_kwargs=tokenizer_kwargs,\n max_source_length=max_source_length,\n max_target_length=max_target_length,\n padding=padding,\n loss_fn=loss_fn,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n metrics=metrics,\n learning_rate=learning_rate,\n num_beams=num_beams,\n enable_ort=enable_ort,\n )\n self.bleu = BLEUScore(\n n_gram=n_gram,\n smooth=smooth,\n )\n\n @property\n def task(self) -> str:\n return \"translation\"\n\n def compute_metrics(self, generated_tokens, batch, prefix):\n reference_corpus = self.decode(batch[\"labels\"])\n # wrap targets in list as score expects a list of potential references\n reference_corpus = [[reference] for reference in reference_corpus]\n\n translate_corpus = self.decode(generated_tokens)\n translate_corpus = [line for line in translate_corpus]\n\n if _TM_GREATER_EQUAL_0_7_0:\n result = self.bleu(translate_corpus, reference_corpus)\n else:\n result = self.bleu(reference_corpus, translate_corpus)\n self.log(f\"{prefix}_bleu_score\", result, on_step=False, on_epoch=True, prog_bar=True)\n", "path": "flash/text/seq2seq/translation/model.py"}]} | 1,862 | 139 |
gh_patches_debug_28402 | rasdani/github-patches | git_diff | dask__distributed-416 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Distributed.joblib code fails to affect sklearn
The comments in [this stackoverflow answer](http://stackoverflow.com/questions/38601026/easy-way-to-use-parallel-options-of-scikit-learn-functions-on-hpc/38814491#38814491) raise concerns about the effectiveness of `distributed.joblib` to parallelize vanilla sklearn code. It appears that sklearn ships with its own version of Joblib, which the plugin registration in `distributed.joblib` does not affect.
It would be good to test sklearn functionality and, if necessary, add plugin registration to `sklearn.externals.joblib` in the same way we do to normal `joblib`.
</issue>
<code>
[start of distributed/joblib.py]
1 from __future__ import print_function, division, absolute_import
2
3 from joblib._parallel_backends import ParallelBackendBase, AutoBatchingMixin
4 from joblib.parallel import register_parallel_backend
5 from tornado import gen
6
7 from .executor import Executor, _wait
8
9
10 class DistributedBackend(ParallelBackendBase, AutoBatchingMixin):
11 MIN_IDEAL_BATCH_DURATION = 0.2
12 MAX_IDEAL_BATCH_DURATION = 1.0
13
14 def __init__(self, scheduler_host='127.0.0.1:8786', loop=None):
15 self.executor = Executor(scheduler_host, loop=loop)
16 self.futures = set()
17
18 def configure(self, n_jobs=1, parallel=None, **backend_args):
19 return self.effective_n_jobs(n_jobs)
20
21 def effective_n_jobs(self, n_jobs=1):
22 return sum(self.executor.ncores().values())
23
24 def apply_async(self, func, *args, **kwargs):
25 callback = kwargs.pop('callback', None)
26 kwargs['pure'] = False
27 future = self.executor.submit(func, *args, **kwargs)
28 self.futures.add(future)
29
30 @gen.coroutine
31 def callback_wrapper():
32 result = yield _wait([future])
33 self.futures.remove(future)
34 callback(result) # gets called in separate thread
35
36 self.executor.loop.add_callback(callback_wrapper)
37
38 future.get = future.result # monkey patch to achieve AsyncResult API
39 return future
40
41 def abort_everything(self, ensure_ready=True):
42 # Tell the executor to cancel any task submitted via this instance
43 # as joblib.Parallel will never access those results.
44 self.executor.cancel(self.futures)
45 self.futures.clear()
46
47
48 register_parallel_backend('distributed', DistributedBackend)
49
[end of distributed/joblib.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/distributed/joblib.py b/distributed/joblib.py
--- a/distributed/joblib.py
+++ b/distributed/joblib.py
@@ -1,10 +1,36 @@
from __future__ import print_function, division, absolute_import
-from joblib._parallel_backends import ParallelBackendBase, AutoBatchingMixin
-from joblib.parallel import register_parallel_backend
+from distutils.version import LooseVersion
+
from tornado import gen
from .executor import Executor, _wait
+from .utils import ignoring
+
+
+# A user could have installed joblib, sklearn, both, or neither. Further, only
+# joblib >= 0.10.0 supports backends, so we also need to check for that. This
+# bit of logic is to ensure that we create and register the backend for all
+# viable installations of joblib.
+joblib = sk_joblib = None
+with ignoring(ImportError):
+ import joblib
+ if LooseVersion(joblib.__version__) < '0.10.0':
+ joblib = None
+with ignoring(ImportError):
+ import sklearn.externals.joblib as sk_joblib
+ if LooseVersion(sk_joblib.__version__) < '0.10.0':
+ sk_joblib = None
+
+if joblib:
+ from joblib._parallel_backends import (ParallelBackendBase,
+ AutoBatchingMixin)
+elif sk_joblib:
+ from sklearn.externals.joblib._parallel_backends import (
+ ParallelBackendBase, AutoBatchingMixin)
+else:
+ raise RuntimeError("Joblib backend requires either `joblib` >= '0.10.0' "
+ " or `sklearn` > '0.17.1'. Please install or upgrade")
class DistributedBackend(ParallelBackendBase, AutoBatchingMixin):
@@ -45,4 +71,8 @@
self.futures.clear()
-register_parallel_backend('distributed', DistributedBackend)
+# Register the backend with any available versions of joblib
+if joblib:
+ joblib.register_parallel_backend('distributed', DistributedBackend)
+if sk_joblib:
+ sk_joblib.register_parallel_backend('distributed', DistributedBackend)
| {"golden_diff": "diff --git a/distributed/joblib.py b/distributed/joblib.py\n--- a/distributed/joblib.py\n+++ b/distributed/joblib.py\n@@ -1,10 +1,36 @@\n from __future__ import print_function, division, absolute_import\n \n-from joblib._parallel_backends import ParallelBackendBase, AutoBatchingMixin\n-from joblib.parallel import register_parallel_backend\n+from distutils.version import LooseVersion\n+\n from tornado import gen\n \n from .executor import Executor, _wait\n+from .utils import ignoring\n+\n+\n+# A user could have installed joblib, sklearn, both, or neither. Further, only\n+# joblib >= 0.10.0 supports backends, so we also need to check for that. This\n+# bit of logic is to ensure that we create and register the backend for all\n+# viable installations of joblib.\n+joblib = sk_joblib = None\n+with ignoring(ImportError):\n+ import joblib\n+ if LooseVersion(joblib.__version__) < '0.10.0':\n+ joblib = None\n+with ignoring(ImportError):\n+ import sklearn.externals.joblib as sk_joblib\n+ if LooseVersion(sk_joblib.__version__) < '0.10.0':\n+ sk_joblib = None\n+\n+if joblib:\n+ from joblib._parallel_backends import (ParallelBackendBase,\n+ AutoBatchingMixin)\n+elif sk_joblib:\n+ from sklearn.externals.joblib._parallel_backends import (\n+ ParallelBackendBase, AutoBatchingMixin)\n+else:\n+ raise RuntimeError(\"Joblib backend requires either `joblib` >= '0.10.0' \"\n+ \" or `sklearn` > '0.17.1'. Please install or upgrade\")\n \n \n class DistributedBackend(ParallelBackendBase, AutoBatchingMixin):\n@@ -45,4 +71,8 @@\n self.futures.clear()\n \n \n-register_parallel_backend('distributed', DistributedBackend)\n+# Register the backend with any available versions of joblib\n+if joblib:\n+ joblib.register_parallel_backend('distributed', DistributedBackend)\n+if sk_joblib:\n+ sk_joblib.register_parallel_backend('distributed', DistributedBackend)\n", "issue": "Distributed.joblib code fails to affect sklearn\nThe comments in [this stackoverflow answer](http://stackoverflow.com/questions/38601026/easy-way-to-use-parallel-options-of-scikit-learn-functions-on-hpc/38814491#38814491) raise concerns about the effectiveness of `distributed.joblib` to parallelize vanilla sklearn code. 
It appears that sklearn ships with its own version of Joblib, which the plugin registration in `distributed.joblib` does not affect.\n\nIt would be good to test sklearn functionality and, if necessary, add plugin registration to `sklearn.externals.joblib` in the same way we do to normal `joblib`.\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nfrom joblib._parallel_backends import ParallelBackendBase, AutoBatchingMixin\nfrom joblib.parallel import register_parallel_backend\nfrom tornado import gen\n\nfrom .executor import Executor, _wait\n\n\nclass DistributedBackend(ParallelBackendBase, AutoBatchingMixin):\n MIN_IDEAL_BATCH_DURATION = 0.2\n MAX_IDEAL_BATCH_DURATION = 1.0\n\n def __init__(self, scheduler_host='127.0.0.1:8786', loop=None):\n self.executor = Executor(scheduler_host, loop=loop)\n self.futures = set()\n\n def configure(self, n_jobs=1, parallel=None, **backend_args):\n return self.effective_n_jobs(n_jobs)\n\n def effective_n_jobs(self, n_jobs=1):\n return sum(self.executor.ncores().values())\n\n def apply_async(self, func, *args, **kwargs):\n callback = kwargs.pop('callback', None)\n kwargs['pure'] = False\n future = self.executor.submit(func, *args, **kwargs)\n self.futures.add(future)\n\n @gen.coroutine\n def callback_wrapper():\n result = yield _wait([future])\n self.futures.remove(future)\n callback(result) # gets called in separate thread\n\n self.executor.loop.add_callback(callback_wrapper)\n\n future.get = future.result # monkey patch to achieve AsyncResult API\n return future\n\n def abort_everything(self, ensure_ready=True):\n # Tell the executor to cancel any task submitted via this instance\n # as joblib.Parallel will never access those results.\n self.executor.cancel(self.futures)\n self.futures.clear()\n\n\nregister_parallel_backend('distributed', DistributedBackend)\n", "path": "distributed/joblib.py"}]} | 1,162 | 483 |
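For the dual-registration fix in this instance, a rough usage sketch of how plain scikit-learn code would opt in once both copies of joblib know about the backend. The scheduler address, dataset, and estimator are placeholders; a running Dask scheduler with workers is assumed, as is an sklearn recent enough to vendor joblib >= 0.10.

```python
import distributed.joblib  # noqa: F401 -- importing registers the 'distributed' backend

# Use sklearn's *vendored* joblib: that is the copy GridSearchCV calls into,
# which is the whole point of registering the backend there as well.
from sklearn.externals.joblib import parallel_backend
from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

digits = load_digits()
param_grid = {"C": [0.1, 1.0, 10.0], "gamma": [0.001, 0.01]}

with parallel_backend("distributed", scheduler_host="127.0.0.1:8786"):
    search = GridSearchCV(SVC(), param_grid, n_jobs=-1)
    search.fit(digits.data, digits.target)

print(search.best_params_)
```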
gh_patches_debug_60682 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-2076 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Login page: change spacing on left panel
The spacing in the left panel is odd. Change to something like the below:

Note, this will stay in backlog for now as we may want to revise this page to align with the Frog design.
</issue>
<code>
[start of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
1 hdx_version = 'v0.5.13'
2
[end of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
@@ -1 +1 @@
-hdx_version = 'v0.5.13'
+hdx_version = 'v0.5.15'
| {"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version = 'v0.5.13'\n+hdx_version = 'v0.5.15'\n", "issue": "Login page: change spacing on left panel \nThe spacing in the left panel is odd. Change to something like the below: \n\n\n\nNote, this will stay in backlog for now as we may want to revise this page to align with the Frog design.\n\n", "before_files": [{"content": "hdx_version = 'v0.5.13'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]} | 682 | 108 |
gh_patches_debug_10301 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1157 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update /availability API to pull from new registry
### Issue Description
The [current API](https://github.com/cisagov/getgov/blob/784cc0f618e056c262512d688e8e4316dd25c9e4/src/api/views.py#L14) consists of a second-hand pull of canonical data that the .gov program [publishes to GitHub](https://github.com/cisagov/dotgov-data/blob/main/current-full.csv). Change this implementation so that queries poll the new registry/Whois/RDAP.
### Acceptance Criteria
- [x] The checkDomain method is used to check that a domain is in the registry or not
and
- [x] The check is used at /availability
- [x] Tests are implemented and/or updated
- [ ] In the new domain application flow, where a user is checking if a domain is available for use, it should now use this /availability endpoint (if not already) and tests should be updated to check that /availability and epp is being triggered on this page as well.
### Additional Context (optional)
Once done, the backend work of #476 and frontend work of #561 can be completed.
_Consider add-on to defend the endpoint from bot spam._
### Implementation Notes
Epp has a check command that can be run with just the name of a given domain. This should be used as the method for checking the desired domain at this endpoint.
### Issue Links
Blocking #476 and #561
Blocked by #1028
</issue>
<code>
[start of src/api/views.py]
1 """Internal API views"""
2 from django.apps import apps
3 from django.views.decorators.http import require_http_methods
4 from django.http import JsonResponse
5
6 from django.contrib.auth.decorators import login_required
7
8 import requests
9
10 from cachetools.func import ttl_cache
11
12
13 DOMAIN_FILE_URL = (
14 "https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv"
15 )
16
17
18 DOMAIN_API_MESSAGES = {
19 "required": "Enter the .gov domain you want. Don’t include “www” or “.gov.”"
20 " For example, if you want www.city.gov, you would enter “city”"
21 " (without the quotes).",
22 "extra_dots": "Enter the .gov domain you want without any periods.",
23 "unavailable": "That domain isn’t available. Try entering another one."
24 " Contact us if you need help coming up with a domain.",
25 "invalid": "Enter a domain using only letters,"
26 " numbers, or hyphens (though we don't recommend using hyphens).",
27 "success": "That domain is available!",
28 }
29
30
31 # this file doesn't change that often, nor is it that big, so cache the result
32 # in memory for ten minutes
33 @ttl_cache(ttl=600)
34 def _domains():
35 """Return a list of the current .gov domains.
36
37 Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain,
38 lowercase everything and return the list.
39 """
40 DraftDomain = apps.get_model("registrar.DraftDomain")
41 # 5 second timeout
42 file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text
43 domains = set()
44 # skip the first line
45 for line in file_contents.splitlines()[1:]:
46 # get the domain before the first comma
47 domain = line.split(",", 1)[0]
48 # sanity-check the string we got from the file here
49 if DraftDomain.string_could_be_domain(domain):
50 # lowercase everything when we put it in domains
51 domains.add(domain.lower())
52 return domains
53
54
55 def in_domains(domain):
56 """Return true if the given domain is in the domains list.
57
58 The given domain is lowercased to match against the domains list. If the
59 given domain doesn't end with .gov, ".gov" is added when looking for
60 a match.
61 """
62 domain = domain.lower()
63 if domain.endswith(".gov"):
64 return domain.lower() in _domains()
65 else:
66 # domain search string doesn't end with .gov, add it on here
67 return (domain + ".gov") in _domains()
68
69
70 @require_http_methods(["GET"])
71 @login_required
72 def available(request, domain=""):
73 """Is a given domain available or not.
74
75 Response is a JSON dictionary with the key "available" and value true or
76 false.
77 """
78 DraftDomain = apps.get_model("registrar.DraftDomain")
79 # validate that the given domain could be a domain name and fail early if
80 # not.
81 if not (
82 DraftDomain.string_could_be_domain(domain)
83 or DraftDomain.string_could_be_domain(domain + ".gov")
84 ):
85 return JsonResponse(
86 {"available": False, "message": DOMAIN_API_MESSAGES["invalid"]}
87 )
88 # a domain is available if it is NOT in the list of current domains
89 if in_domains(domain):
90 return JsonResponse(
91 {"available": False, "message": DOMAIN_API_MESSAGES["unavailable"]}
92 )
93 else:
94 return JsonResponse(
95 {"available": True, "message": DOMAIN_API_MESSAGES["success"]}
96 )
97
[end of src/api/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/api/views.py b/src/api/views.py
--- a/src/api/views.py
+++ b/src/api/views.py
@@ -59,12 +59,12 @@
given domain doesn't end with .gov, ".gov" is added when looking for
a match.
"""
- domain = domain.lower()
+ Domain = apps.get_model("registrar.Domain")
if domain.endswith(".gov"):
- return domain.lower() in _domains()
+ return Domain.available(domain)
else:
# domain search string doesn't end with .gov, add it on here
- return (domain + ".gov") in _domains()
+ return Domain.available(domain + ".gov")
@require_http_methods(["GET"])
| {"golden_diff": "diff --git a/src/api/views.py b/src/api/views.py\n--- a/src/api/views.py\n+++ b/src/api/views.py\n@@ -59,12 +59,12 @@\n given domain doesn't end with .gov, \".gov\" is added when looking for\n a match.\n \"\"\"\n- domain = domain.lower()\n+ Domain = apps.get_model(\"registrar.Domain\")\n if domain.endswith(\".gov\"):\n- return domain.lower() in _domains()\n+ return Domain.available(domain)\n else:\n # domain search string doesn't end with .gov, add it on here\n- return (domain + \".gov\") in _domains()\n+ return Domain.available(domain + \".gov\")\n \n \n @require_http_methods([\"GET\"])\n", "issue": "Update /availability API to pull from new registry\n### Issue Description\r\n\r\nThe [current API](https://github.com/cisagov/getgov/blob/784cc0f618e056c262512d688e8e4316dd25c9e4/src/api/views.py#L14) consists of a second-hand pull of canonical data that the .gov program [publishes to GitHub](https://github.com/cisagov/dotgov-data/blob/main/current-full.csv). Change this implementation so that queries poll the new registry/Whois/RDAP.\r\n\r\n### Acceptance Criteria\r\n\r\n- [x] The checkDomain method is used to check that a domain is in the registry or not\r\n\r\nand\r\n\r\n- [x] The check is used at /availability \r\n- [x] Tests are implemented and/or updated\r\n- [ ] In the new domain application flow, where a user is checking if a domain is available for use, it should now use this /availability endpoint (if not already) and tests should be updated to check that /availability and epp is being triggered on this page as well. \r\n\r\n### Additional Context (optional)\r\n\r\nOnce done, the backend work of #476 and frontend work of #561 can be completed.\r\n\r\n_Consider add-on to defend the endpoint from bot spam._\r\n\r\n### Implementation Notes\r\n\r\nEpp has a check command that can be run with just the name of a given domain. This should be used as the method for checking the desired domain at this endpoint.\r\n\r\n### Issue Links\r\n\r\nBlocking #476 and #561\r\nBlocked by #1028 \n", "before_files": [{"content": "\"\"\"Internal API views\"\"\"\nfrom django.apps import apps\nfrom django.views.decorators.http import require_http_methods\nfrom django.http import JsonResponse\n\nfrom django.contrib.auth.decorators import login_required\n\nimport requests\n\nfrom cachetools.func import ttl_cache\n\n\nDOMAIN_FILE_URL = (\n \"https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv\"\n)\n\n\nDOMAIN_API_MESSAGES = {\n \"required\": \"Enter the .gov domain you want. Don\u2019t include \u201cwww\u201d or \u201c.gov.\u201d\"\n \" For example, if you want www.city.gov, you would enter \u201ccity\u201d\"\n \" (without the quotes).\",\n \"extra_dots\": \"Enter the .gov domain you want without any periods.\",\n \"unavailable\": \"That domain isn\u2019t available. 
Try entering another one.\"\n \" Contact us if you need help coming up with a domain.\",\n \"invalid\": \"Enter a domain using only letters,\"\n \" numbers, or hyphens (though we don't recommend using hyphens).\",\n \"success\": \"That domain is available!\",\n}\n\n\n# this file doesn't change that often, nor is it that big, so cache the result\n# in memory for ten minutes\n@ttl_cache(ttl=600)\ndef _domains():\n \"\"\"Return a list of the current .gov domains.\n\n Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain,\n lowercase everything and return the list.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # 5 second timeout\n file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text\n domains = set()\n # skip the first line\n for line in file_contents.splitlines()[1:]:\n # get the domain before the first comma\n domain = line.split(\",\", 1)[0]\n # sanity-check the string we got from the file here\n if DraftDomain.string_could_be_domain(domain):\n # lowercase everything when we put it in domains\n domains.add(domain.lower())\n return domains\n\n\ndef in_domains(domain):\n \"\"\"Return true if the given domain is in the domains list.\n\n The given domain is lowercased to match against the domains list. If the\n given domain doesn't end with .gov, \".gov\" is added when looking for\n a match.\n \"\"\"\n domain = domain.lower()\n if domain.endswith(\".gov\"):\n return domain.lower() in _domains()\n else:\n # domain search string doesn't end with .gov, add it on here\n return (domain + \".gov\") in _domains()\n\n\n@require_http_methods([\"GET\"])\n@login_required\ndef available(request, domain=\"\"):\n \"\"\"Is a given domain available or not.\n\n Response is a JSON dictionary with the key \"available\" and value true or\n false.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # validate that the given domain could be a domain name and fail early if\n # not.\n if not (\n DraftDomain.string_could_be_domain(domain)\n or DraftDomain.string_could_be_domain(domain + \".gov\")\n ):\n return JsonResponse(\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"invalid\"]}\n )\n # a domain is available if it is NOT in the list of current domains\n if in_domains(domain):\n return JsonResponse(\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"unavailable\"]}\n )\n else:\n return JsonResponse(\n {\"available\": True, \"message\": DOMAIN_API_MESSAGES[\"success\"]}\n )\n", "path": "src/api/views.py"}]} | 1,835 | 161 |
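A hedged test sketch for the change in this instance: once `in_domains` delegates to `Domain.available`, the registry call can be stubbed instead of fetching the published CSV. The patch target assumes the model class lives at `registrar.models.Domain` (matching `apps.get_model("registrar.Domain")` in the view); adjust if the project lays it out differently.

```python
from unittest.mock import patch

from django.test import SimpleTestCase

from api.views import in_domains


class InDomainsTest(SimpleTestCase):
    @patch("registrar.models.Domain.available", return_value=False)
    def test_bare_name_gets_gov_suffix(self, mock_available):
        # in_domains should append ".gov" before asking the registry
        result = in_domains("igorville")
        mock_available.assert_called_once_with("igorville.gov")
        self.assertFalse(result)

    @patch("registrar.models.Domain.available", return_value=True)
    def test_full_domain_is_passed_through(self, mock_available):
        result = in_domains("igorville.gov")
        mock_available.assert_called_once_with("igorville.gov")
        self.assertTrue(result)
```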
gh_patches_debug_27706 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-416 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix capabilities bug
At the moment the button "Create Page" disappears for every role except the superuser.

Fix capabilities bug
At the moment the button "Create Page" disappears for every role except the superuser.

</issue>
<code>
[start of src/cms/views/pages/page_view.py]
1 """
2
3 Returns:
4 [type]: [description]
5 """
6 import logging
7
8 from django.contrib import messages
9 from django.contrib.auth.decorators import login_required
10 from django.contrib.auth.mixins import PermissionRequiredMixin
11 from django.core.exceptions import PermissionDenied
12 from django.shortcuts import render, redirect
13 from django.utils.decorators import method_decorator
14 from django.utils.translation import ugettext as _
15 from django.views.generic import TemplateView
16
17 from ...constants import status
18 from ...decorators import region_permission_required
19 from ...forms.pages import PageForm, PageTranslationForm
20 from ...models import Page, PageTranslation, Region, Language
21
22 logger = logging.getLogger(__name__)
23
24
25 @method_decorator(login_required, name='dispatch')
26 @method_decorator(region_permission_required, name='dispatch')
27 class PageView(PermissionRequiredMixin, TemplateView):
28 permission_required = 'cms.view_pages'
29 raise_exception = True
30
31 template_name = 'pages/page_form.html'
32 base_context = {
33 'current_menu_item': 'pages',
34 'PUBLIC': status.PUBLIC
35 }
36
37 def get(self, request, *args, **kwargs):
38
39 region = Region.objects.get(slug=kwargs.get('region_slug'))
40
41 language = Language.objects.get(code=kwargs.get('language_code'))
42
43 # get page and translation objects if they exist
44 page = Page.objects.filter(id=kwargs.get('page_id')).first()
45 page_translation = PageTranslation.objects.filter(
46 page=page,
47 language=language,
48 ).first()
49
50 # Make form disabled if user has no permission to edit the page
51 if not request.user.has_perm('cms.edit_page', page):
52 disabled = True
53 messages.warning(request, _("You don't have the permission to edit this page."))
54 elif page and page.archived:
55 disabled = True
56 messages.warning(request, _("You cannot edit this page because it is archived."))
57 else:
58 disabled = False
59
60 page_form = PageForm(
61 instance=page,
62 region=region,
63 language=language,
64 disabled=disabled
65 )
66 page_translation_form = PageTranslationForm(
67 instance=page_translation,
68 disabled=disabled
69 )
70
71 return render(request, self.template_name, {
72 **self.base_context,
73 'page_form': page_form,
74 'page_translation_form': page_translation_form,
75 'page': page,
76 'language': language,
77 # Languages for tab view
78 'languages': region.languages if page else [language],
79 })
80
81 # pylint: disable=too-many-branches,unused-argument
82 def post(self, request, *args, **kwargs):
83
84 region = Region.objects.get(slug=kwargs.get('region_slug'))
85 language = Language.objects.get(code=kwargs.get('language_code'))
86
87 page_instance = Page.objects.filter(id=kwargs.get('page_id')).first()
88 page_translation_instance = PageTranslation.objects.filter(
89 page=page_instance,
90 language=language,
91 ).first()
92
93 if not request.user.has_perm('cms.edit_page', page_instance):
94 raise PermissionDenied
95
96 page_form = PageForm(
97 request.POST,
98 instance=page_instance,
99 region=region,
100 language=language,
101 )
102 page_translation_form = PageTranslationForm(
103 request.POST,
104 instance=page_translation_instance,
105 region=region,
106 language=language,
107 )
108
109 if page_translation_form.data.get('public') and 'public' in page_translation_form.changed_data:
110 if not request.user.has_perm('cms.publish_page', page_instance):
111 raise PermissionDenied
112
113 # TODO: error handling
114 if not page_form.is_valid() or not page_translation_form.is_valid():
115 messages.error(request, _('Errors have occurred.'))
116 return render(request, self.template_name, {
117 **self.base_context,
118 'page_form': page_form,
119 'page_translation_form': page_translation_form,
120 'page': page_instance,
121 'language': language,
122 # Languages for tab view
123 'languages': region.languages if page_instance else [language],
124 })
125
126 if not page_form.has_changed() and not page_translation_form.has_changed():
127 messages.info(request, _('No changes detected.'))
128 return render(request, self.template_name, {
129 **self.base_context,
130 'page_form': page_form,
131 'page_translation_form': page_translation_form,
132 'page': page_instance,
133 'language': language,
134 # Languages for tab view
135 'languages': region.languages if page_instance else [language],
136 })
137
138 page = page_form.save()
139 page_translation = page_translation_form.save(
140 page=page,
141 user=request.user,
142 )
143
144 published = page_translation.status == status.PUBLIC
145 if not page_instance:
146 if published:
147 messages.success(request, _('Page was successfully created and published.'))
148 else:
149 messages.success(request, _('Page was successfully created.'))
150 elif not page_translation_instance:
151 if published:
152 messages.success(request, _('Translation was successfully created and published.'))
153 else:
154 messages.success(request, _('Translation was successfully created.'))
155 else:
156 if published:
157 messages.success(request, _('Translation was successfully published.'))
158 else:
159 messages.success(request, _('Translation was successfully saved.'))
160
161 return redirect('edit_page', **{
162 'page_id': page.id,
163 'region_slug': region.slug,
164 'language_code': language.code,
165 })
166
[end of src/cms/views/pages/page_view.py]
[start of src/cms/rules.py]
1 from rules import add_perm, predicate
2
3
4 # Predicates
5
6 @predicate
7 def is_page_editor(user, page):
8 if not page:
9 return False
10 return user in page.editors.all()
11
12 @predicate
13 def is_page_publisher(user, page):
14 if not page:
15 return False
16 return user in page.publishers.all()
17
18 @predicate
19 # pylint: disable=unused-argument
20 def can_edit_all_pages(user, page):
21 return user.has_perm('edit_pages')
22
23 @predicate
24 # pylint: disable=unused-argument
25 def can_publish_all_pages(user, page):
26 return user.has_perm('publish_pages')
27
28
29 # Permissions
30
31 add_perm('cms.edit_page', can_edit_all_pages | is_page_editor | can_publish_all_pages | is_page_publisher)
32 add_perm('cms.publish_page', can_publish_all_pages | is_page_publisher)
33
[end of src/cms/rules.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cms/rules.py b/src/cms/rules.py
--- a/src/cms/rules.py
+++ b/src/cms/rules.py
@@ -18,12 +18,12 @@
@predicate
# pylint: disable=unused-argument
def can_edit_all_pages(user, page):
- return user.has_perm('edit_pages')
+ return user.has_perm('cms.edit_pages')
@predicate
# pylint: disable=unused-argument
def can_publish_all_pages(user, page):
- return user.has_perm('publish_pages')
+ return user.has_perm('cms.publish_pages')
# Permissions
diff --git a/src/cms/views/pages/page_view.py b/src/cms/views/pages/page_view.py
--- a/src/cms/views/pages/page_view.py
+++ b/src/cms/views/pages/page_view.py
@@ -48,14 +48,17 @@
).first()
# Make form disabled if user has no permission to edit the page
- if not request.user.has_perm('cms.edit_page', page):
- disabled = True
- messages.warning(request, _("You don't have the permission to edit this page."))
- elif page and page.archived:
- disabled = True
- messages.warning(request, _("You cannot edit this page because it is archived."))
+ disabled = False
+ if page:
+ if page.archived:
+ disabled = True
+ messages.warning(request, _("You cannot edit this page because it is archived."))
+ elif not request.user.has_perm('cms.edit_page', page):
+ disabled = True
+ messages.warning(request, _("You don't have the permission to edit this page, but you can propose changes and submit them for review instead."))
else:
- disabled = False
+ if not request.user.has_perm('cms.edit_pages'):
+ raise PermissionDenied
page_form = PageForm(
instance=page,
| {"golden_diff": "diff --git a/src/cms/rules.py b/src/cms/rules.py\n--- a/src/cms/rules.py\n+++ b/src/cms/rules.py\n@@ -18,12 +18,12 @@\n @predicate\n # pylint: disable=unused-argument\n def can_edit_all_pages(user, page):\n- return user.has_perm('edit_pages')\n+ return user.has_perm('cms.edit_pages')\n \n @predicate\n # pylint: disable=unused-argument\n def can_publish_all_pages(user, page):\n- return user.has_perm('publish_pages')\n+ return user.has_perm('cms.publish_pages')\n \n \n # Permissions\ndiff --git a/src/cms/views/pages/page_view.py b/src/cms/views/pages/page_view.py\n--- a/src/cms/views/pages/page_view.py\n+++ b/src/cms/views/pages/page_view.py\n@@ -48,14 +48,17 @@\n ).first()\n \n # Make form disabled if user has no permission to edit the page\n- if not request.user.has_perm('cms.edit_page', page):\n- disabled = True\n- messages.warning(request, _(\"You don't have the permission to edit this page.\"))\n- elif page and page.archived:\n- disabled = True\n- messages.warning(request, _(\"You cannot edit this page because it is archived.\"))\n+ disabled = False\n+ if page:\n+ if page.archived:\n+ disabled = True\n+ messages.warning(request, _(\"You cannot edit this page because it is archived.\"))\n+ elif not request.user.has_perm('cms.edit_page', page):\n+ disabled = True\n+ messages.warning(request, _(\"You don't have the permission to edit this page, but you can propose changes and submit them for review instead.\"))\n else:\n- disabled = False\n+ if not request.user.has_perm('cms.edit_pages'):\n+ raise PermissionDenied\n \n page_form = PageForm(\n instance=page,\n", "issue": "Fix capabilities bug\nAt the moment the button \"Create Page\" disapears for every role except the superuser.\r\n\r\n\nFix capabilities bug\nAt the moment the button \"Create Page\" disapears for every role except the superuser.\r\n\r\n\n", "before_files": [{"content": "\"\"\"\n\nReturns:\n [type]: [description]\n\"\"\"\nimport logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\nfrom ...constants import status\nfrom ...decorators import region_permission_required\nfrom ...forms.pages import PageForm, PageTranslationForm\nfrom ...models import Page, PageTranslation, Region, Language\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(login_required, name='dispatch')\n@method_decorator(region_permission_required, name='dispatch')\nclass PageView(PermissionRequiredMixin, TemplateView):\n permission_required = 'cms.view_pages'\n raise_exception = True\n\n template_name = 'pages/page_form.html'\n base_context = {\n 'current_menu_item': 'pages',\n 'PUBLIC': status.PUBLIC\n }\n\n def get(self, request, *args, **kwargs):\n\n region = Region.objects.get(slug=kwargs.get('region_slug'))\n\n language = Language.objects.get(code=kwargs.get('language_code'))\n\n # get page and translation objects if they exist\n page = Page.objects.filter(id=kwargs.get('page_id')).first()\n page_translation = PageTranslation.objects.filter(\n page=page,\n language=language,\n ).first()\n\n # Make form disabled if user has no permission to edit the page\n if not request.user.has_perm('cms.edit_page', page):\n disabled = True\n messages.warning(request, 
_(\"You don't have the permission to edit this page.\"))\n elif page and page.archived:\n disabled = True\n messages.warning(request, _(\"You cannot edit this page because it is archived.\"))\n else:\n disabled = False\n\n page_form = PageForm(\n instance=page,\n region=region,\n language=language,\n disabled=disabled\n )\n page_translation_form = PageTranslationForm(\n instance=page_translation,\n disabled=disabled\n )\n\n return render(request, self.template_name, {\n **self.base_context,\n 'page_form': page_form,\n 'page_translation_form': page_translation_form,\n 'page': page,\n 'language': language,\n # Languages for tab view\n 'languages': region.languages if page else [language],\n })\n\n # pylint: disable=too-many-branches,unused-argument\n def post(self, request, *args, **kwargs):\n\n region = Region.objects.get(slug=kwargs.get('region_slug'))\n language = Language.objects.get(code=kwargs.get('language_code'))\n\n page_instance = Page.objects.filter(id=kwargs.get('page_id')).first()\n page_translation_instance = PageTranslation.objects.filter(\n page=page_instance,\n language=language,\n ).first()\n\n if not request.user.has_perm('cms.edit_page', page_instance):\n raise PermissionDenied\n\n page_form = PageForm(\n request.POST,\n instance=page_instance,\n region=region,\n language=language,\n )\n page_translation_form = PageTranslationForm(\n request.POST,\n instance=page_translation_instance,\n region=region,\n language=language,\n )\n\n if page_translation_form.data.get('public') and 'public' in page_translation_form.changed_data:\n if not request.user.has_perm('cms.publish_page', page_instance):\n raise PermissionDenied\n\n # TODO: error handling\n if not page_form.is_valid() or not page_translation_form.is_valid():\n messages.error(request, _('Errors have occurred.'))\n return render(request, self.template_name, {\n **self.base_context,\n 'page_form': page_form,\n 'page_translation_form': page_translation_form,\n 'page': page_instance,\n 'language': language,\n # Languages for tab view\n 'languages': region.languages if page_instance else [language],\n })\n\n if not page_form.has_changed() and not page_translation_form.has_changed():\n messages.info(request, _('No changes detected.'))\n return render(request, self.template_name, {\n **self.base_context,\n 'page_form': page_form,\n 'page_translation_form': page_translation_form,\n 'page': page_instance,\n 'language': language,\n # Languages for tab view\n 'languages': region.languages if page_instance else [language],\n })\n\n page = page_form.save()\n page_translation = page_translation_form.save(\n page=page,\n user=request.user,\n )\n\n published = page_translation.status == status.PUBLIC\n if not page_instance:\n if published:\n messages.success(request, _('Page was successfully created and published.'))\n else:\n messages.success(request, _('Page was successfully created.'))\n elif not page_translation_instance:\n if published:\n messages.success(request, _('Translation was successfully created and published.'))\n else:\n messages.success(request, _('Translation was successfully created.'))\n else:\n if published:\n messages.success(request, _('Translation was successfully published.'))\n else:\n messages.success(request, _('Translation was successfully saved.'))\n\n return redirect('edit_page', **{\n 'page_id': page.id,\n 'region_slug': region.slug,\n 'language_code': language.code,\n })\n", "path": "src/cms/views/pages/page_view.py"}, {"content": "from rules import add_perm, predicate\n\n\n# Predicates\n\n@predicate\ndef 
is_page_editor(user, page):\n if not page:\n return False\n return user in page.editors.all()\n\n@predicate\ndef is_page_publisher(user, page):\n if not page:\n return False\n return user in page.publishers.all()\n\n@predicate\n# pylint: disable=unused-argument\ndef can_edit_all_pages(user, page):\n return user.has_perm('edit_pages')\n\n@predicate\n# pylint: disable=unused-argument\ndef can_publish_all_pages(user, page):\n return user.has_perm('publish_pages')\n\n\n# Permissions\n\nadd_perm('cms.edit_page', can_edit_all_pages | is_page_editor | can_publish_all_pages | is_page_publisher)\nadd_perm('cms.publish_page', can_publish_all_pages | is_page_publisher)\n", "path": "src/cms/rules.py"}]} | 2,485 | 412 |
gh_patches_debug_24069 | rasdani/github-patches | git_diff | Qiskit__qiskit-2700 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Timeslots should have informative representation
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. -->
### What is the expected enhancement?
Currently `Interval`, `Timeslot` and `TimeslotCollection` have the standard Python object representation. This should be replaced with an informative implementation of `__repr__`. As a `TimeslotCollection` contains `Timeslot`s and `Timeslot`s contain `Interval`s, the hierarchical nature of these data structures should be used to derive the representations.
### Example
A possible representation might look like:
Interval:
`(t0, tf)` e.g. `(0, 10)`
Timeslot: an interval on a channel
`(channel, repr(interval))`: `(DriveChannel(0), (0, 10))`
TimeslotCollection: a dictionary of timeslots, i.e. it groups together intervals on channels.
`{channel1: [repr(interval1_c1), repr(interval2_c1), ...], channel2: [repr(interval1_c2), repr(interval2_c2), ...]}`
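For illustration, a minimal sketch of the idea using throwaway stand-in classes (not the real pulse classes; exact formatting is of course open to discussion):

```python
# Stand-in classes used only to demonstrate the requested repr formats.
class Interval:
    def __init__(self, begin, end):
        self.begin, self.end = begin, end

    def __repr__(self):
        return "Interval({}, {})".format(self.begin, self.end)


class Timeslot:
    def __init__(self, interval, channel):
        self.interval, self.channel = interval, channel

    def __repr__(self):
        # channel first, then the interval rendered as a (begin, end) pair
        return "Timeslot({}, ({}, {}))".format(
            self.channel, self.interval.begin, self.interval.end)


print(Interval(0, 10))                               # Interval(0, 10)
print(Timeslot(Interval(0, 10), "DriveChannel(0)"))  # Timeslot(DriveChannel(0), (0, 10))
```

A `TimeslotCollection.__repr__` would then just render its channel-to-intervals table as a dict of such `(begin, end)` pairs.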
</issue>
<code>
[start of qiskit/pulse/timeslots.py]
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2017, 2019.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 """
16 Timeslots for channels.
17 """
18 from collections import defaultdict
19 import itertools
20 from typing import List, Tuple
21
22 from .channels import Channel
23 from .exceptions import PulseError
24
25
26 # pylint: disable=missing-return-doc
27
28
29 class Interval:
30 """Time interval."""
31
32 def __init__(self, begin: int, end: int):
33 """Create an interval = (begin, end))
34
35 Args:
36 begin: begin time of this interval
37 end: end time of this interval
38
39 Raises:
40 PulseError: when invalid time or duration is specified
41 """
42 if begin < 0:
43 raise PulseError("Cannot create Interval with negative begin time")
44 if end < 0:
45 raise PulseError("Cannot create Interval with negative end time")
46 self._begin = begin
47 self._end = end
48
49 @property
50 def begin(self):
51 """Begin time of this interval."""
52 return self._begin
53
54 @property
55 def end(self):
56 """End time of this interval."""
57 return self._end
58
59 @property
60 def duration(self):
61 """Duration of this interval."""
62 return self._end - self._begin
63
64 def has_overlap(self, interval: 'Interval') -> bool:
65 """Check if self has overlap with `interval`.
66
67 Args:
68 interval: interval to be examined
69
70 Returns:
71 bool: True if self has overlap with `interval` otherwise False
72 """
73 if self.begin < interval.end and interval.begin < self.end:
74 return True
75 return False
76
77 def shift(self, time: int) -> 'Interval':
78 """Return a new interval shifted by `time` from self
79
80 Args:
81 time: time to be shifted
82
83 Returns:
84 Interval: interval shifted by `time`
85 """
86 return Interval(self._begin + time, self._end + time)
87
88 def __eq__(self, other):
89 """Two intervals are the same if they have the same begin and end.
90
91 Args:
92 other (Interval): other Interval
93
94 Returns:
95 bool: are self and other equal.
96 """
97 if self._begin == other._begin and self._end == other._end:
98 return True
99 return False
100
101
102 class Timeslot:
103 """Named tuple of (Interval, Channel)."""
104
105 def __init__(self, interval: Interval, channel: Channel):
106 self._interval = interval
107 self._channel = channel
108
109 @property
110 def interval(self):
111 """Interval of this time slot."""
112 return self._interval
113
114 @property
115 def channel(self):
116 """Channel of this time slot."""
117 return self._channel
118
119 def shift(self, time: int) -> 'Timeslot':
120 """Return a new Timeslot shifted by `time`.
121
122 Args:
123 time: time to be shifted
124 """
125 return Timeslot(self.interval.shift(time), self.channel)
126
127 def __eq__(self, other) -> bool:
128 """Two time-slots are the same if they have the same interval and channel.
129
130 Args:
131 other (Timeslot): other Timeslot
132 """
133 if self.interval == other.interval and self.channel == other.channel:
134 return True
135 return False
136
137
138 class TimeslotCollection:
139 """Collection of `Timeslot`s."""
140
141 def __init__(self, *timeslots: List[Timeslot]):
142 """Create a new time-slot collection.
143
144 Args:
145 *timeslots: list of time slots
146 Raises:
147 PulseError: when overlapped time slots are specified
148 """
149 self._table = defaultdict(list)
150
151 for slot in timeslots:
152 for interval in self._table[slot.channel]:
153 if slot.interval.has_overlap(interval):
154 raise PulseError("Cannot create TimeslotCollection from overlapped timeslots")
155 self._table[slot.channel].append(slot.interval)
156
157 self._timeslots = tuple(timeslots)
158
159 @property
160 def timeslots(self) -> Tuple[Timeslot]:
161 """`Timeslot`s in collection."""
162 return self._timeslots
163
164 @property
165 def channels(self) -> Tuple[Timeslot]:
166 """Channels within the timeslot collection."""
167 return tuple(self._table.keys())
168
169 @property
170 def start_time(self) -> int:
171 """Return earliest start time in this collection."""
172 return self.ch_start_time(*self.channels)
173
174 @property
175 def stop_time(self) -> int:
176 """Return maximum time of timeslots over all channels."""
177 return self.ch_stop_time(*self.channels)
178
179 @property
180 def duration(self) -> int:
181 """Return maximum duration of timeslots over all channels."""
182 return self.stop_time
183
184 def ch_start_time(self, *channels: List[Channel]) -> int:
185 """Return earliest start time in this collection.
186
187 Args:
188 *channels: Channels over which to obtain start_time.
189 """
190 intervals = list(itertools.chain(*(self._table[chan] for chan in channels
191 if chan in self._table)))
192 if intervals:
193 return min(interval.begin for interval in intervals)
194 return 0
195
196 def ch_stop_time(self, *channels: List[Channel]) -> int:
197 """Return maximum time of timeslots over all channels.
198
199 Args:
200 *channels: Channels over which to obtain stop time.
201 """
202 intervals = list(itertools.chain(*(self._table[chan] for chan in channels
203 if chan in self._table)))
204 if intervals:
205 return max(interval.end for interval in intervals)
206 return 0
207
208 def ch_duration(self, *channels: List[Channel]) -> int:
209 """Return maximum duration of timeslots over all channels.
210
211 Args:
212 *channels: Channels over which to obtain the duration.
213 """
214 return self.ch_stop_time(*channels)
215
216 def is_mergeable_with(self, timeslots: 'TimeslotCollection') -> bool:
217 """Return if self is mergeable with `timeslots`.
218
219 Args:
220 timeslots: TimeslotCollection to be checked
221 """
222 for slot in timeslots.timeslots:
223 if slot.channel in self.channels:
224 for interval in self._table[slot.channel]:
225 if slot.interval.has_overlap(interval):
226 return False
227 return True
228
229 def merged(self, timeslots: 'TimeslotCollection') -> 'TimeslotCollection':
230 """Return a new TimeslotCollection merged with a specified `timeslots`
231
232 Args:
233 timeslots: TimeslotCollection to be merged
234 """
235 slots = [Timeslot(slot.interval, slot.channel) for slot in self.timeslots]
236 slots.extend([Timeslot(slot.interval, slot.channel) for slot in timeslots.timeslots])
237 return TimeslotCollection(*slots)
238
239 def shift(self, time: int) -> 'TimeslotCollection':
240 """Return a new TimeslotCollection shifted by `time`.
241
242 Args:
243 time: time to be shifted by
244 """
245 slots = [Timeslot(slot.interval.shift(time), slot.channel) for slot in self.timeslots]
246 return TimeslotCollection(*slots)
247
248 def __eq__(self, other) -> bool:
249 """Two time-slot collections are the same if they have the same time-slots.
250
251 Args:
252 other (TimeslotCollection): other TimeslotCollection
253 """
254 if self.timeslots == other.timeslots:
255 return True
256 return False
257
[end of qiskit/pulse/timeslots.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qiskit/pulse/timeslots.py b/qiskit/pulse/timeslots.py
--- a/qiskit/pulse/timeslots.py
+++ b/qiskit/pulse/timeslots.py
@@ -98,6 +98,10 @@
return True
return False
+ def __repr__(self):
+ """Return a readable representation of Interval Object"""
+ return "{}({}, {})".format(self.__class__.__name__, self.begin, self.end)
+
class Timeslot:
"""Named tuple of (Interval, Channel)."""
@@ -134,6 +138,12 @@
return True
return False
+ def __repr__(self):
+ """Return a readable representation of Timeslot Object"""
+ return "{}({}, {})".format(self.__class__.__name__,
+ self.channel,
+ (self.interval.begin, self.interval.end))
+
class TimeslotCollection:
"""Collection of `Timeslot`s."""
@@ -254,3 +264,10 @@
if self.timeslots == other.timeslots:
return True
return False
+
+ def __repr__(self):
+ """Return a readable representation of TimeslotCollection Object"""
+ rep = dict()
+ for key, val in self._table.items():
+ rep[key] = [(interval.begin, interval.end) for interval in val]
+ return self.__class__.__name__ + str(rep)
| {"golden_diff": "diff --git a/qiskit/pulse/timeslots.py b/qiskit/pulse/timeslots.py\n--- a/qiskit/pulse/timeslots.py\n+++ b/qiskit/pulse/timeslots.py\n@@ -98,6 +98,10 @@\n return True\n return False\n \n+ def __repr__(self):\n+ \"\"\"Return a readable representation of Interval Object\"\"\"\n+ return \"{}({}, {})\".format(self.__class__.__name__, self.begin, self.end)\n+\n \n class Timeslot:\n \"\"\"Named tuple of (Interval, Channel).\"\"\"\n@@ -134,6 +138,12 @@\n return True\n return False\n \n+ def __repr__(self):\n+ \"\"\"Return a readable representation of Timeslot Object\"\"\"\n+ return \"{}({}, {})\".format(self.__class__.__name__,\n+ self.channel,\n+ (self.interval.begin, self.interval.end))\n+\n \n class TimeslotCollection:\n \"\"\"Collection of `Timeslot`s.\"\"\"\n@@ -254,3 +264,10 @@\n if self.timeslots == other.timeslots:\n return True\n return False\n+\n+ def __repr__(self):\n+ \"\"\"Return a readable representation of TimeslotCollection Object\"\"\"\n+ rep = dict()\n+ for key, val in self._table.items():\n+ rep[key] = [(interval.begin, interval.end) for interval in val]\n+ return self.__class__.__name__ + str(rep)\n", "issue": "Timeslots should have informative representation\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\nCurrently `Interval`, `Timeslot` and `TimeslotCollection` have the standard python object representation. This should be replaced with an informative implementation of `__repr__`. As `TimeslotCollection` contains `Timeslot`s and `Timeslot`s contain `Interval`s the hierarchal nature of these data structure should be used to derive representations.\r\n\r\n### Example\r\nA possible representation might look like\r\n\r\nInterval:\r\n`(t0, tf)` eg. `(0, 10)`\r\n\r\nTimeslot: Is an interval on a channel\r\n`(channel, repr(interval))`: `(DriveChannel(0), (0, 10))`\r\n\r\nTimeslotCollection: Is a dictionary of timeslots. Ie. it groups together intervals on channels.\r\n\r\n`{channel1: [repr(interval1_c1), repr(interval2_c1), ...]), channel2: [repr(interval1_c2), repr(interval2_c2), ...]}`\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nTimeslots for channels.\n\"\"\"\nfrom collections import defaultdict\nimport itertools\nfrom typing import List, Tuple\n\nfrom .channels import Channel\nfrom .exceptions import PulseError\n\n\n# pylint: disable=missing-return-doc\n\n\nclass Interval:\n \"\"\"Time interval.\"\"\"\n\n def __init__(self, begin: int, end: int):\n \"\"\"Create an interval = (begin, end))\n\n Args:\n begin: begin time of this interval\n end: end time of this interval\n\n Raises:\n PulseError: when invalid time or duration is specified\n \"\"\"\n if begin < 0:\n raise PulseError(\"Cannot create Interval with negative begin time\")\n if end < 0:\n raise PulseError(\"Cannot create Interval with negative end time\")\n self._begin = begin\n self._end = end\n\n @property\n def begin(self):\n \"\"\"Begin time of this interval.\"\"\"\n return self._begin\n\n @property\n def end(self):\n \"\"\"End time of this interval.\"\"\"\n return self._end\n\n @property\n def duration(self):\n \"\"\"Duration of this interval.\"\"\"\n return self._end - self._begin\n\n def has_overlap(self, interval: 'Interval') -> bool:\n \"\"\"Check if self has overlap with `interval`.\n\n Args:\n interval: interval to be examined\n\n Returns:\n bool: True if self has overlap with `interval` otherwise False\n \"\"\"\n if self.begin < interval.end and interval.begin < self.end:\n return True\n return False\n\n def shift(self, time: int) -> 'Interval':\n \"\"\"Return a new interval shifted by `time` from self\n\n Args:\n time: time to be shifted\n\n Returns:\n Interval: interval shifted by `time`\n \"\"\"\n return Interval(self._begin + time, self._end + time)\n\n def __eq__(self, other):\n \"\"\"Two intervals are the same if they have the same begin and end.\n\n Args:\n other (Interval): other Interval\n\n Returns:\n bool: are self and other equal.\n \"\"\"\n if self._begin == other._begin and self._end == other._end:\n return True\n return False\n\n\nclass Timeslot:\n \"\"\"Named tuple of (Interval, Channel).\"\"\"\n\n def __init__(self, interval: Interval, channel: Channel):\n self._interval = interval\n self._channel = channel\n\n @property\n def interval(self):\n \"\"\"Interval of this time slot.\"\"\"\n return self._interval\n\n @property\n def channel(self):\n \"\"\"Channel of this time slot.\"\"\"\n return self._channel\n\n def shift(self, time: int) -> 'Timeslot':\n \"\"\"Return a new Timeslot shifted by `time`.\n\n Args:\n time: time to be shifted\n \"\"\"\n return Timeslot(self.interval.shift(time), self.channel)\n\n def __eq__(self, other) -> bool:\n \"\"\"Two time-slots are the same if they have the same interval and channel.\n\n Args:\n other (Timeslot): other Timeslot\n \"\"\"\n if self.interval == other.interval and self.channel == other.channel:\n return True\n return False\n\n\nclass TimeslotCollection:\n \"\"\"Collection of `Timeslot`s.\"\"\"\n\n def __init__(self, *timeslots: List[Timeslot]):\n \"\"\"Create a new time-slot collection.\n\n Args:\n *timeslots: list of time slots\n Raises:\n PulseError: when overlapped time slots are specified\n \"\"\"\n self._table = defaultdict(list)\n\n for slot in timeslots:\n for interval in self._table[slot.channel]:\n if 
slot.interval.has_overlap(interval):\n raise PulseError(\"Cannot create TimeslotCollection from overlapped timeslots\")\n self._table[slot.channel].append(slot.interval)\n\n self._timeslots = tuple(timeslots)\n\n @property\n def timeslots(self) -> Tuple[Timeslot]:\n \"\"\"`Timeslot`s in collection.\"\"\"\n return self._timeslots\n\n @property\n def channels(self) -> Tuple[Timeslot]:\n \"\"\"Channels within the timeslot collection.\"\"\"\n return tuple(self._table.keys())\n\n @property\n def start_time(self) -> int:\n \"\"\"Return earliest start time in this collection.\"\"\"\n return self.ch_start_time(*self.channels)\n\n @property\n def stop_time(self) -> int:\n \"\"\"Return maximum time of timeslots over all channels.\"\"\"\n return self.ch_stop_time(*self.channels)\n\n @property\n def duration(self) -> int:\n \"\"\"Return maximum duration of timeslots over all channels.\"\"\"\n return self.stop_time\n\n def ch_start_time(self, *channels: List[Channel]) -> int:\n \"\"\"Return earliest start time in this collection.\n\n Args:\n *channels: Channels over which to obtain start_time.\n \"\"\"\n intervals = list(itertools.chain(*(self._table[chan] for chan in channels\n if chan in self._table)))\n if intervals:\n return min(interval.begin for interval in intervals)\n return 0\n\n def ch_stop_time(self, *channels: List[Channel]) -> int:\n \"\"\"Return maximum time of timeslots over all channels.\n\n Args:\n *channels: Channels over which to obtain stop time.\n \"\"\"\n intervals = list(itertools.chain(*(self._table[chan] for chan in channels\n if chan in self._table)))\n if intervals:\n return max(interval.end for interval in intervals)\n return 0\n\n def ch_duration(self, *channels: List[Channel]) -> int:\n \"\"\"Return maximum duration of timeslots over all channels.\n\n Args:\n *channels: Channels over which to obtain the duration.\n \"\"\"\n return self.ch_stop_time(*channels)\n\n def is_mergeable_with(self, timeslots: 'TimeslotCollection') -> bool:\n \"\"\"Return if self is mergeable with `timeslots`.\n\n Args:\n timeslots: TimeslotCollection to be checked\n \"\"\"\n for slot in timeslots.timeslots:\n if slot.channel in self.channels:\n for interval in self._table[slot.channel]:\n if slot.interval.has_overlap(interval):\n return False\n return True\n\n def merged(self, timeslots: 'TimeslotCollection') -> 'TimeslotCollection':\n \"\"\"Return a new TimeslotCollection merged with a specified `timeslots`\n\n Args:\n timeslots: TimeslotCollection to be merged\n \"\"\"\n slots = [Timeslot(slot.interval, slot.channel) for slot in self.timeslots]\n slots.extend([Timeslot(slot.interval, slot.channel) for slot in timeslots.timeslots])\n return TimeslotCollection(*slots)\n\n def shift(self, time: int) -> 'TimeslotCollection':\n \"\"\"Return a new TimeslotCollection shifted by `time`.\n\n Args:\n time: time to be shifted by\n \"\"\"\n slots = [Timeslot(slot.interval.shift(time), slot.channel) for slot in self.timeslots]\n return TimeslotCollection(*slots)\n\n def __eq__(self, other) -> bool:\n \"\"\"Two time-slot collections are the same if they have the same time-slots.\n\n Args:\n other (TimeslotCollection): other TimeslotCollection\n \"\"\"\n if self.timeslots == other.timeslots:\n return True\n return False\n", "path": "qiskit/pulse/timeslots.py"}]} | 3,174 | 318 |
gh_patches_debug_10105 | rasdani/github-patches | git_diff | pulp__pulpcore-2566 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implementation of does_batch in DeclarativeContent is wrong
**Describe the bug**
`does_batch` is supposed to decide whether content can be waited on or must be evaluated as soon as possible. It currently produces `False` unnecessarily often, leading to performance issues.
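A rough sketch of the intended check, with illustrative parameter names (the real property reads `self._resolved` and `self._future`, see the code below):

```python
# Content may keep waiting in a batch when it is already resolved, or when
# nothing is awaiting it yet (no future attached). The current implementation
# ("not resolved and no future") instead returns False for already-resolved
# content, which is the unnecessary-False case described above.
def does_batch(resolved: bool, has_future: bool) -> bool:
    return resolved or not has_future
```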
**Additional context**
This is suspected to be at least one of the reasons for: https://github.com/pulp/pulp_deb/issues/466
</issue>
<code>
[start of pulpcore/plugin/stages/models.py]
1 from gettext import gettext as _
2
3 import asyncio
4
5 from pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS
6 from pulpcore.plugin.models import Artifact
7
8
9 class DeclarativeArtifact:
10 """
11 Relates an :class:`~pulpcore.plugin.models.Artifact`, how to download it, and its
12 `relative_path` used later during publishing.
13
14 This is used by the Stages API stages to determine if an
15 :class:`~pulpcore.plugin.models.Artifact` is already present and ensure Pulp can download it in
16 the future. The `artifact` can be either saved or unsaved. If unsaved, the `artifact` attributes
17 may be incomplete because not all digest information can be computed until the
18 :class:`~pulpcore.plugin.models.Artifact` is downloaded.
19
20 Attributes:
21 artifact (:class:`~pulpcore.plugin.models.Artifact`): An
22 :class:`~pulpcore.plugin.models.Artifact` either saved or unsaved. If unsaved, it
23 may have partial digest information attached to it.
24 url (str): the url to fetch the :class:`~pulpcore.plugin.models.Artifact` from.
25 relative_path (str): the relative_path this :class:`~pulpcore.plugin.models.Artifact`
26 should be published at for any Publication.
27 remote (:class:`~pulpcore.plugin.models.Remote`): The remote used to fetch this
28 :class:`~pulpcore.plugin.models.Artifact`.
29 extra_data (dict): A dictionary available for additional data to be stored in.
30 deferred_download (bool): Whether this artifact should be downloaded and saved
31 in the artifact stages. Defaults to `False`. See :ref:`on-demand-support`.
32
33 Raises:
34 ValueError: If `artifact`, `url`, or `relative_path` are not specified. If `remote` is not
35 specified and `artifact` doesn't have a file.
36 """
37
38 __slots__ = ("artifact", "url", "relative_path", "remote", "extra_data", "deferred_download")
39
40 def __init__(
41 self,
42 artifact=None,
43 url=None,
44 relative_path=None,
45 remote=None,
46 extra_data=None,
47 deferred_download=False,
48 ):
49 if not url:
50 raise ValueError(_("DeclarativeArtifact must have a 'url'"))
51 if not relative_path:
52 raise ValueError(_("DeclarativeArtifact must have a 'relative_path'"))
53 if not artifact:
54 raise ValueError(_("DeclarativeArtifact must have a 'artifact'"))
55 if not remote and not artifact.file:
56 raise ValueError(
57 _(
58 "DeclarativeArtifact must have a 'remote' if the Artifact doesn't "
59 "have a file backing it."
60 )
61 )
62 self.artifact = artifact
63 self.url = url
64 self.relative_path = relative_path
65 self.remote = remote
66 self.extra_data = extra_data or {}
67 self.deferred_download = deferred_download
68
69 async def download(self):
70 """
71 Download content and update the associated Artifact.
72
73 Returns:
74 Returns the :class:`~pulpcore.plugin.download.DownloadResult` of the Artifact.
75 """
76 expected_digests = {}
77 validation_kwargs = {}
78 for digest_name in ALL_KNOWN_CONTENT_CHECKSUMS:
79 digest_value = getattr(self.artifact, digest_name)
80 if digest_value:
81 expected_digests[digest_name] = digest_value
82 if expected_digests:
83 validation_kwargs["expected_digests"] = expected_digests
84 if self.artifact.size:
85 expected_size = self.artifact.size
86 validation_kwargs["expected_size"] = expected_size
87 downloader = self.remote.get_downloader(url=self.url, **validation_kwargs)
88 # Custom downloaders may need extra information to complete the request.
89 download_result = await downloader.run(extra_data=self.extra_data)
90 self.artifact = Artifact(**download_result.artifact_attributes, file=download_result.path)
91 return download_result
92
93
94 class DeclarativeContent:
95 """
96 Relates a Content unit and zero or more :class:`~pulpcore.plugin.stages.DeclarativeArtifact`
97 objects.
98
99 This is used by the Stages API stages to determine if a Content unit is already present and
100 ensure all of its associated :class:`~pulpcore.plugin.stages.DeclarativeArtifact` objects are
101 related correctly. The `content` can be either saved or unsaved depending on where in the Stages
102 API pipeline this is used.
103
104 Attributes:
105 content (subclass of :class:`~pulpcore.plugin.models.Content`): A Content unit, possibly
106 unsaved
107 d_artifacts (list): A list of zero or more
108 :class:`~pulpcore.plugin.stages.DeclarativeArtifact` objects associated with `content`.
109 extra_data (dict): A dictionary available for additional data to be stored in.
110
111 Raises:
112 ValueError: If `content` is not specified.
113 """
114
115 __slots__ = (
116 "content",
117 "d_artifacts",
118 "extra_data",
119 "_future",
120 "_thaw_queue_event",
121 "_resolved",
122 )
123
124 def __init__(self, content=None, d_artifacts=None, extra_data=None):
125 if not content:
126 raise ValueError(_("DeclarativeContent must have a 'content'"))
127 self.content = content
128 self.d_artifacts = d_artifacts or []
129 self.extra_data = extra_data or {}
130 self._future = None
131 self._thaw_queue_event = None
132 self._resolved = False
133
134 @property
135 def does_batch(self):
136 """Whether this content is being awaited on and must therefore not wait forever in batches.
137 When overwritten in subclasses, a `True` value must never be turned into `False`.
138 """
139 return not self._resolved and self._future is None
140
141 async def resolution(self):
142 """Coroutine that waits for the content to be saved to database.
143 Returns the content unit."""
144 if self._resolved:
145 # Already resolved ~> shortcut
146 return self.content
147 if self._future is None:
148 # We do not yet have a future
149 self._future = asyncio.get_event_loop().create_future()
150 if self._thaw_queue_event:
151 # We have a future now but are still stuck in a queue
152 self._thaw_queue_event.set()
153 # Now we wait
154 return await self._future
155
156 def resolve(self):
157 """Resolve this content unit and notify any waiting tasks."""
158 self._resolved = True
159 if self._future:
160 self._future.set_result(self.content)
161 self._future = None
162
163 def __str__(self):
164 return str(self.content.__class__.__name__)
165
[end of pulpcore/plugin/stages/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/plugin/stages/models.py b/pulpcore/plugin/stages/models.py
--- a/pulpcore/plugin/stages/models.py
+++ b/pulpcore/plugin/stages/models.py
@@ -136,7 +136,7 @@
"""Whether this content is being awaited on and must therefore not wait forever in batches.
When overwritten in subclasses, a `True` value must never be turned into `False`.
"""
- return not self._resolved and self._future is None
+ return self._resolved or self._future is None
async def resolution(self):
"""Coroutine that waits for the content to be saved to database.
| {"golden_diff": "diff --git a/pulpcore/plugin/stages/models.py b/pulpcore/plugin/stages/models.py\n--- a/pulpcore/plugin/stages/models.py\n+++ b/pulpcore/plugin/stages/models.py\n@@ -136,7 +136,7 @@\n \"\"\"Whether this content is being awaited on and must therefore not wait forever in batches.\n When overwritten in subclasses, a `True` value must never be turned into `False`.\n \"\"\"\n- return not self._resolved and self._future is None\n+ return self._resolved or self._future is None\n \n async def resolution(self):\n \"\"\"Coroutine that waits for the content to be saved to database.\n", "issue": "Implementation of does_batch in DeclarativeContent is wrong\n**Describe the bug**\r\n`does_batch` is supposed to decide, whether content can be waited on, or must be evaluated asap. It produces `False` unnecessarily often, leading to performance issues.\r\n\r\n**Additional context**\r\nThis is suspected to at least one of the reasons for: https://github.com/pulp/pulp_deb/issues/466\n", "before_files": [{"content": "from gettext import gettext as _\n\nimport asyncio\n\nfrom pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS\nfrom pulpcore.plugin.models import Artifact\n\n\nclass DeclarativeArtifact:\n \"\"\"\n Relates an :class:`~pulpcore.plugin.models.Artifact`, how to download it, and its\n `relative_path` used later during publishing.\n\n This is used by the Stages API stages to determine if an\n :class:`~pulpcore.plugin.models.Artifact` is already present and ensure Pulp can download it in\n the future. The `artifact` can be either saved or unsaved. If unsaved, the `artifact` attributes\n may be incomplete because not all digest information can be computed until the\n :class:`~pulpcore.plugin.models.Artifact` is downloaded.\n\n Attributes:\n artifact (:class:`~pulpcore.plugin.models.Artifact`): An\n :class:`~pulpcore.plugin.models.Artifact` either saved or unsaved. If unsaved, it\n may have partial digest information attached to it.\n url (str): the url to fetch the :class:`~pulpcore.plugin.models.Artifact` from.\n relative_path (str): the relative_path this :class:`~pulpcore.plugin.models.Artifact`\n should be published at for any Publication.\n remote (:class:`~pulpcore.plugin.models.Remote`): The remote used to fetch this\n :class:`~pulpcore.plugin.models.Artifact`.\n extra_data (dict): A dictionary available for additional data to be stored in.\n deferred_download (bool): Whether this artifact should be downloaded and saved\n in the artifact stages. Defaults to `False`. See :ref:`on-demand-support`.\n\n Raises:\n ValueError: If `artifact`, `url`, or `relative_path` are not specified. 
If `remote` is not\n specified and `artifact` doesn't have a file.\n \"\"\"\n\n __slots__ = (\"artifact\", \"url\", \"relative_path\", \"remote\", \"extra_data\", \"deferred_download\")\n\n def __init__(\n self,\n artifact=None,\n url=None,\n relative_path=None,\n remote=None,\n extra_data=None,\n deferred_download=False,\n ):\n if not url:\n raise ValueError(_(\"DeclarativeArtifact must have a 'url'\"))\n if not relative_path:\n raise ValueError(_(\"DeclarativeArtifact must have a 'relative_path'\"))\n if not artifact:\n raise ValueError(_(\"DeclarativeArtifact must have a 'artifact'\"))\n if not remote and not artifact.file:\n raise ValueError(\n _(\n \"DeclarativeArtifact must have a 'remote' if the Artifact doesn't \"\n \"have a file backing it.\"\n )\n )\n self.artifact = artifact\n self.url = url\n self.relative_path = relative_path\n self.remote = remote\n self.extra_data = extra_data or {}\n self.deferred_download = deferred_download\n\n async def download(self):\n \"\"\"\n Download content and update the associated Artifact.\n\n Returns:\n Returns the :class:`~pulpcore.plugin.download.DownloadResult` of the Artifact.\n \"\"\"\n expected_digests = {}\n validation_kwargs = {}\n for digest_name in ALL_KNOWN_CONTENT_CHECKSUMS:\n digest_value = getattr(self.artifact, digest_name)\n if digest_value:\n expected_digests[digest_name] = digest_value\n if expected_digests:\n validation_kwargs[\"expected_digests\"] = expected_digests\n if self.artifact.size:\n expected_size = self.artifact.size\n validation_kwargs[\"expected_size\"] = expected_size\n downloader = self.remote.get_downloader(url=self.url, **validation_kwargs)\n # Custom downloaders may need extra information to complete the request.\n download_result = await downloader.run(extra_data=self.extra_data)\n self.artifact = Artifact(**download_result.artifact_attributes, file=download_result.path)\n return download_result\n\n\nclass DeclarativeContent:\n \"\"\"\n Relates a Content unit and zero or more :class:`~pulpcore.plugin.stages.DeclarativeArtifact`\n objects.\n\n This is used by the Stages API stages to determine if a Content unit is already present and\n ensure all of its associated :class:`~pulpcore.plugin.stages.DeclarativeArtifact` objects are\n related correctly. 
The `content` can be either saved or unsaved depending on where in the Stages\n API pipeline this is used.\n\n Attributes:\n content (subclass of :class:`~pulpcore.plugin.models.Content`): A Content unit, possibly\n unsaved\n d_artifacts (list): A list of zero or more\n :class:`~pulpcore.plugin.stages.DeclarativeArtifact` objects associated with `content`.\n extra_data (dict): A dictionary available for additional data to be stored in.\n\n Raises:\n ValueError: If `content` is not specified.\n \"\"\"\n\n __slots__ = (\n \"content\",\n \"d_artifacts\",\n \"extra_data\",\n \"_future\",\n \"_thaw_queue_event\",\n \"_resolved\",\n )\n\n def __init__(self, content=None, d_artifacts=None, extra_data=None):\n if not content:\n raise ValueError(_(\"DeclarativeContent must have a 'content'\"))\n self.content = content\n self.d_artifacts = d_artifacts or []\n self.extra_data = extra_data or {}\n self._future = None\n self._thaw_queue_event = None\n self._resolved = False\n\n @property\n def does_batch(self):\n \"\"\"Whether this content is being awaited on and must therefore not wait forever in batches.\n When overwritten in subclasses, a `True` value must never be turned into `False`.\n \"\"\"\n return not self._resolved and self._future is None\n\n async def resolution(self):\n \"\"\"Coroutine that waits for the content to be saved to database.\n Returns the content unit.\"\"\"\n if self._resolved:\n # Already resolved ~> shortcut\n return self.content\n if self._future is None:\n # We do not yet have a future\n self._future = asyncio.get_event_loop().create_future()\n if self._thaw_queue_event:\n # We have a future now but are still stuck in a queue\n self._thaw_queue_event.set()\n # Now we wait\n return await self._future\n\n def resolve(self):\n \"\"\"Resolve this content unit and notify any waiting tasks.\"\"\"\n self._resolved = True\n if self._future:\n self._future.set_result(self.content)\n self._future = None\n\n def __str__(self):\n return str(self.content.__class__.__name__)\n", "path": "pulpcore/plugin/stages/models.py"}]} | 2,435 | 144 |
gh_patches_debug_7477 | rasdani/github-patches | git_diff | zigpy__zha-device-handlers-680 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Device Support Request] LIVARNO LUX/LIDL LED Panel 60x60 (Tuya TS0502A)
[LIDL Service website](https://www.lidl-service.com/cps/rde/xchg/SID-3771F4F2-8A18D468/lsp/hs.xsl/product.html?id=5027306530&title=Smart+LED+Light+Panel&count=1)
**Describe the solution you'd like**
- [x] power control
- [x] brightness control
- [x] CCT control
- [x] remove color control

Maybe this is because the board is also color-capable.
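For reference, the existing quirk below already hides the RGB wheel for CCT-only devices by pinning the color capabilities attribute; a stripped-down sketch of that mechanism (the class name here is made up):

```python
from zigpy.quirks import CustomCluster
from zigpy.zcl.clusters.lighting import Color


class CCTOnlyColorCluster(CustomCluster, Color):
    """Expose color temperature only, hiding the RGB color wheel."""

    # 0x400A is the color capabilities attribute; 16 means color-temperature
    # only, matching LidlCCTColorCluster in the quirk further down.
    _CONSTANT_ATTRIBUTES = {0x400A: 16}
```

Supporting this particular panel should then mostly be a matter of adding its Tuya manufacturer ID to the quirk's `MODELS_INFO`.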

**Device signature - this can be acquired by removing the device from ZHA and pairing it again from the add devices screen. Be sure to add the entire content of the log panel after pairing the device to a code block below this line.**
[Device signature and pairing log at my dev VM](https://pastebin.com/ifAkAXaF)
**Additional context**
https://zigbee.blakadder.com/Tuya_TS0502A.html
Touchlink resettable
</issue>
<code>
[start of zhaquirks/lidl/cct.py]
1 """Quirk for LIDL CCT bulb."""
2 from zigpy.profiles import zha
3 from zigpy.quirks import CustomCluster, CustomDevice
4 from zigpy.zcl.clusters.general import (
5 Basic,
6 GreenPowerProxy,
7 Groups,
8 Identify,
9 LevelControl,
10 OnOff,
11 Ota,
12 Scenes,
13 Time,
14 )
15 from zigpy.zcl.clusters.lighting import Color
16 from zigpy.zcl.clusters.lightlink import LightLink
17
18 from zhaquirks.const import (
19 DEVICE_TYPE,
20 ENDPOINTS,
21 INPUT_CLUSTERS,
22 MODELS_INFO,
23 OUTPUT_CLUSTERS,
24 PROFILE_ID,
25 )
26
27
28 class LidlCCTColorCluster(CustomCluster, Color):
29 """Lidl CCT Lighting custom cluster."""
30
31 # Remove RGB color wheel for CCT Lighting: only expose color temperature
32 # LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)
33 _CONSTANT_ATTRIBUTES = {0x400A: 16}
34
35
36 class CCTLight(CustomDevice):
37 """Lidl CCT Lighting device."""
38
39 signature = {
40 MODELS_INFO: [("_TZ3000_49qchf10", "TS0502A"), ("_TZ3000_oborybow", "TS0502A")],
41 ENDPOINTS: {
42 1: {
43 # <SimpleDescriptor endpoint=1 profile=260 device_type=268
44 # device_version=1
45 # input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]
46 # output_clusters=[10, 25]
47 PROFILE_ID: zha.PROFILE_ID,
48 DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,
49 INPUT_CLUSTERS: [
50 Basic.cluster_id,
51 Identify.cluster_id,
52 Groups.cluster_id,
53 Scenes.cluster_id,
54 OnOff.cluster_id,
55 LevelControl.cluster_id,
56 Color.cluster_id,
57 LightLink.cluster_id,
58 ],
59 OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
60 },
61 242: {
62 # <SimpleDescriptor endpoint=242 profile=41440 device_type=97
63 # device_version=0
64 # input_clusters=[]
65 # output_clusters=[33]
66 PROFILE_ID: 41440,
67 DEVICE_TYPE: 97,
68 INPUT_CLUSTERS: [],
69 OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
70 },
71 },
72 }
73
74 replacement = {
75 ENDPOINTS: {
76 1: {
77 PROFILE_ID: zha.PROFILE_ID,
78 DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,
79 INPUT_CLUSTERS: [
80 Basic.cluster_id,
81 Identify.cluster_id,
82 Groups.cluster_id,
83 Scenes.cluster_id,
84 OnOff.cluster_id,
85 LevelControl.cluster_id,
86 LidlCCTColorCluster,
87 LightLink.cluster_id,
88 ],
89 OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
90 },
91 242: {
92 PROFILE_ID: 41440,
93 DEVICE_TYPE: 97,
94 INPUT_CLUSTERS: [],
95 OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
96 },
97 }
98 }
99
[end of zhaquirks/lidl/cct.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zhaquirks/lidl/cct.py b/zhaquirks/lidl/cct.py
--- a/zhaquirks/lidl/cct.py
+++ b/zhaquirks/lidl/cct.py
@@ -37,7 +37,12 @@
"""Lidl CCT Lighting device."""
signature = {
- MODELS_INFO: [("_TZ3000_49qchf10", "TS0502A"), ("_TZ3000_oborybow", "TS0502A")],
+ MODELS_INFO: [
+ ("_TZ3000_49qchf10", "TS0502A"),
+ ("_TZ3000_oborybow", "TS0502A"),
+ ("_TZ3000_9evm3otq", "TS0502A"),
+ ("_TZ3000_rylaozuc", "TS0502A"),
+ ],
ENDPOINTS: {
1: {
# <SimpleDescriptor endpoint=1 profile=260 device_type=268
| {"golden_diff": "diff --git a/zhaquirks/lidl/cct.py b/zhaquirks/lidl/cct.py\n--- a/zhaquirks/lidl/cct.py\n+++ b/zhaquirks/lidl/cct.py\n@@ -37,7 +37,12 @@\n \"\"\"Lidl CCT Lighting device.\"\"\"\n \n signature = {\n- MODELS_INFO: [(\"_TZ3000_49qchf10\", \"TS0502A\"), (\"_TZ3000_oborybow\", \"TS0502A\")],\n+ MODELS_INFO: [\n+ (\"_TZ3000_49qchf10\", \"TS0502A\"),\n+ (\"_TZ3000_oborybow\", \"TS0502A\"),\n+ (\"_TZ3000_9evm3otq\", \"TS0502A\"),\n+ (\"_TZ3000_rylaozuc\", \"TS0502A\"),\n+ ],\n ENDPOINTS: {\n 1: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=268\n", "issue": "[Device Support Request] LIVARNO LUX/LIDL Led Panel 60x60 (Tuya TS0502A)\n[LIDL Service website](https://www.lidl-service.com/cps/rde/xchg/SID-3771F4F2-8A18D468/lsp/hs.xsl/product.html?id=5027306530&title=Smart+LED+Light+Panel&count=1)\r\n\r\n**Describe the solution you'd like**\r\n- [x] power control\r\n- [x] brightness control\r\n- [x] CCT control\r\n- [x] remove color control\r\n\r\n\r\n\r\nMaybee because the board is also color capable\r\n\r\n\r\n\r\n**Device signature - this can be acquired by removing the device from ZHA and pairing it again from the add devices screen. Be sure to add the entire content of the log panel after pairing the device to a code block below this line.**\r\n[Device signature and pairing log at my dev VM](https://pastebin.com/ifAkAXaF)\r\n\r\n\r\n**Additional context**\r\nhttps://zigbee.blakadder.com/Tuya_TS0502A.html\r\nTouchlink resetable\n", "before_files": [{"content": "\"\"\"Quirk for LIDL CCT bulb.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nfrom zigpy.zcl.clusters.general import (\n Basic,\n GreenPowerProxy,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n Scenes,\n Time,\n)\nfrom zigpy.zcl.clusters.lighting import Color\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\n\nclass LidlCCTColorCluster(CustomCluster, Color):\n \"\"\"Lidl CCT Lighting custom cluster.\"\"\"\n\n # Remove RGB color wheel for CCT Lighting: only expose color temperature\n # LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)\n _CONSTANT_ATTRIBUTES = {0x400A: 16}\n\n\nclass CCTLight(CustomDevice):\n \"\"\"Lidl CCT Lighting device.\"\"\"\n\n signature = {\n MODELS_INFO: [(\"_TZ3000_49qchf10\", \"TS0502A\"), (\"_TZ3000_oborybow\", \"TS0502A\")],\n ENDPOINTS: {\n 1: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=268\n # device_version=1\n # input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]\n # output_clusters=[10, 25]\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Color.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n # <SimpleDescriptor endpoint=242 profile=41440 device_type=97\n # device_version=0\n # input_clusters=[]\n # output_clusters=[33]\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n 
LevelControl.cluster_id,\n LidlCCTColorCluster,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n }\n }\n", "path": "zhaquirks/lidl/cct.py"}]} | 1,843 | 260 |
gh_patches_debug_30584 | rasdani/github-patches | git_diff | opendatacube__datacube-core-1469 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python 3.8/3.9 doesn't support `entry_points` with either `group` or `select`
As title.
https://github.com/opendatacube/datacube-core/blob/5fed4a688c85c198c5d25f2570b0c92ef91dfc04/datacube/drivers/driver_cache.py#L50
Ref: https://docs.python.org/3/library/importlib.metadata.html
>Compatibility Note: The “selectable” entry points were introduced in importlib_metadata 3.6 and Python 3.10. Prior to those changes, entry_points accepted no parameters and always returned a dictionary of entry points, keyed by group. For compatibility, if no parameters are passed to entry_points, a SelectableGroups object is returned, implementing that dict interface. In the future, calling entry_points with no parameters will return an EntryPoints object. Users should rely on the selection interface to retrieve entry points by group.
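One common workaround is to prefer the `importlib_metadata` backport, which has supported the selectable `entry_points(group=..., name=...)` interface since 3.6, and fall back to the stdlib module when the backport is not installed (i.e. on Python 3.10+, where it is no longer needed); this is essentially what the accepted patch further down does. A minimal sketch (the group name is one of the plugin groups declared in `setup.py` below):

```python
# Prefer the importlib_metadata backport on Python < 3.10; the stdlib module
# only gained the selectable entry_points(group=..., name=...) API in 3.10.
try:
    from importlib_metadata import entry_points
except ModuleNotFoundError:
    from importlib.metadata import entry_points

for ep in entry_points(group="datacube.plugins.io.read"):
    print(ep.name, ep.value)
```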
</issue>
<code>
[start of datacube/drivers/driver_cache.py]
1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information
2 #
3 # Copyright (c) 2015-2023 ODC Contributors
4 # SPDX-License-Identifier: Apache-2.0
5 import logging
6 from typing import Dict, Any, Tuple, Iterable
7
8 _LOG = logging.getLogger(__name__)
9
10
11 def load_drivers(group: str) -> Dict[str, Any]:
12 """
13 Load available drivers for a given group name.
14
15 Gracefully handles:
16
17 - Driver module not able to be imported
18 - Driver init function throwing an exception or returning None
19
20 By having driver entry_points pointing to a function, we defer loading the driver
21 module or running any code until required.
22
23 :param group: Name of the entry point group e.g. "datacube.plugins.io.read"
24
25 :returns: Dictionary String -> Driver Object
26 """
27
28 def safe_load(ep):
29 # pylint: disable=broad-except,bare-except
30 try:
31 driver_init = ep.load()
32 except Exception as e:
33 _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)
34 _LOG.warning('Error was: %s', repr(e))
35 return None
36
37 try:
38 driver = driver_init()
39 except Exception:
40 _LOG.warning('Exception during driver init, driver name: %s::%s', group, ep.name)
41 return None
42
43 if driver is None:
44 _LOG.warning('Driver init returned None, driver name: %s::%s', group, ep.name)
45
46 return driver
47
48 def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:
49 from importlib.metadata import entry_points
50 for ep in entry_points(group=group):
51 driver = safe_load(ep)
52 if driver is not None:
53 yield (ep.name, driver)
54
55 return dict((name, driver) for name, driver in resolve_all(group))
56
[end of datacube/drivers/driver_cache.py]
[start of setup.py]
1 #!/usr/bin/env python
2
3 from setuptools import setup, find_packages
4
5 tests_require = [
6 'hypothesis',
7 'pycodestyle',
8 'pylint',
9 'pytest',
10 'pytest-cov',
11 'pytest-timeout',
12 'pytest-httpserver',
13 'moto',
14 ]
15 doc_require = [
16 'Sphinx',
17 'sphinx_autodoc_typehints', # Propagate mypy info into docs
18 'sphinx-click',
19 'recommonmark',
20 'autodocsumm',
21 'beautifulsoup4',
22 'nbsphinx',
23 'pydata-sphinx-theme==0.9.0',
24 ]
25
26 extras_require = {
27 'performance': ['ciso8601', 'bottleneck'],
28 'distributed': ['distributed', 'dask[distributed]'],
29 'doc': doc_require,
30 's3': ['boto3', 'botocore'],
31 'test': tests_require,
32 'cf': ['compliance-checker>=4.0.0'],
33 }
34
35 extras_require['dev'] = sorted(set(sum([extras_require[k] for k in [
36 'test',
37 'doc',
38 'performance',
39 's3',
40 'distributed',
41 ]], [])))
42
43 # An 'all' option, following ipython naming conventions.
44 extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
45
46 extra_plugins = dict(read=[], write=[], index=[])
47
48 setup(
49 name='datacube',
50 python_requires='>=3.8.0',
51
52 url='https://github.com/opendatacube/datacube-core',
53 author='Open Data Cube',
54 maintainer='Open Data Cube',
55 maintainer_email='',
56 description='An analysis environment for satellite and other earth observation data',
57 long_description=open('README.rst').read(),
58 long_description_content_type='text/x-rst',
59 license='Apache License 2.0',
60 classifiers=[
61 "Development Status :: 4 - Beta",
62 "Intended Audience :: Developers",
63 "Intended Audience :: Science/Research",
64 "License :: OSI Approved :: Apache Software License",
65 "Natural Language :: English",
66 "Operating System :: MacOS :: MacOS X",
67 "Operating System :: POSIX",
68 "Operating System :: POSIX :: BSD",
69 "Operating System :: POSIX :: Linux",
70 "Operating System :: Microsoft :: Windows",
71 "Programming Language :: Python",
72 "Programming Language :: Python :: 3",
73 "Programming Language :: Python :: 3.8",
74 "Programming Language :: Python :: 3.9",
75 "Programming Language :: Python :: 3.10",
76 "Topic :: Scientific/Engineering :: GIS",
77 "Topic :: Scientific/Engineering :: Information Analysis",
78 ],
79
80 packages=find_packages(
81 exclude=('tests', 'tests.*',
82 'integration_tests', 'integration_tests.*')
83 ),
84 package_data={
85 '': ['*.yaml', '*/*.yaml'],
86 'datacube': ['py.typed'],
87 },
88 scripts=[],
89 install_requires=[
90 'affine',
91 'attrs>=18.1',
92 'pyproj>=2.5',
93 'shapely>=2.0',
94 'cachetools',
95 'click>=5.0',
96 'cloudpickle>=0.4',
97 'dask[array]',
98 'distributed',
99 'jsonschema',
100 'netcdf4',
101 'numpy',
102 'psycopg2',
103 'lark',
104 'pandas',
105 'python-dateutil',
106 'pyyaml',
107 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1
108 'ruamel.yaml',
109 'sqlalchemy>=1.4,<2.0', # GeoAlchemy2 requires >=1.4. SqlAlchemy2 *may* work but has not been tested yet.
110 'GeoAlchemy2',
111 'toolz',
112 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost
113 'packaging',
114 'deprecat',
115 ],
116 extras_require=extras_require,
117 tests_require=tests_require,
118
119 entry_points={
120 'console_scripts': [
121 'datacube = datacube.scripts.cli_app:cli',
122 'datacube-search = datacube.scripts.search_tool:cli',
123 'datacube-worker = datacube.execution.worker:main',
124 ],
125 'datacube.plugins.io.read': [
126 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',
127 *extra_plugins['read'],
128 ],
129 'datacube.plugins.io.write': [
130 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',
131 *extra_plugins['write'],
132 ],
133 'datacube.plugins.index': [
134 'default = datacube.index.postgres.index:index_driver_init',
135 'null = datacube.index.null.index:index_driver_init',
136 'memory = datacube.index.memory.index:index_driver_init',
137 'postgis = datacube.index.postgis.index:index_driver_init',
138 *extra_plugins['index'],
139 ],
140 },
141 )
142
[end of setup.py]
[start of docs/click_utils.py]
1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information
2 #
3 # Copyright (c) 2015-2023 ODC Contributors
4 # SPDX-License-Identifier: Apache-2.0
5 from importlib.metadata import entry_points
6 from docutils.nodes import literal_block, section, title, make_id
7 from sphinx.domains import Domain
8 from docutils.parsers.rst import Directive
9 import importlib
10
11 import click
12
13
14 class ClickHelpDirective(Directive):
15 has_content = True
16 required_arguments = 1
17
18 def run(self):
19 root_cmd = self.arguments[0]
20
21 env = self.state.document.settings.env
22
23 group = find_script_callable_from_env(root_cmd, env)
24
25 return [generate_help_text(group, [root_cmd])]
26
27
28 def find_script_callable_from_env(name, env):
29 commands = env.config.click_utils_commands
30
31 module, function_name = commands[name].split(':')
32 module = importlib.import_module(module)
33 return getattr(module, function_name)
34
35
36 def find_script_callable(name):
37 return list(entry_points(
38 group='console_scripts', name=name))[0].load()
39
40
41 def generate_help_text(command, prefix):
42 ctx = click.Context(command)
43 help_opts = command.get_help_option(ctx).opts
44 full_cmd = ' '.join(prefix)
45 block = section(None,
46 title(None, full_cmd),
47 ids=[make_id(full_cmd)], names=[full_cmd])
48 if help_opts:
49 h = "$ {} {}\n".format(full_cmd, help_opts[0]) + command.get_help(ctx)
50 block.append(literal_block(None, h, language='console'))
51
52 if isinstance(command, click.core.MultiCommand):
53 for c in command.list_commands(ctx):
54 c = command.resolve_command(ctx, [c])[1]
55 block.append(generate_help_text(c, prefix+[c.name]))
56
57 return block
58
59
60 def make_block(command, opt, content):
61 h = "$ {} {}\n".format(command, opt) + content
62 return section(None,
63 title(None, command),
64 literal_block(None, h, language='console'),
65 ids=[make_id(command)], names=[command])
66
67
68 class DatacubeDomain(Domain):
69 name = 'datacube'
70 label = 'Data Cube'
71 directives = {
72 'click-help': ClickHelpDirective,
73 }
74
75
76 def setup(app):
77 app.add_config_value('click_utils_commands', {}, 'html')
78
79 app.add_domain(DatacubeDomain)
80 return {
81 'parallel_read_safe': False,
82 'parallel_write_safe': False,
83 }
84
[end of docs/click_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/datacube/drivers/driver_cache.py b/datacube/drivers/driver_cache.py
--- a/datacube/drivers/driver_cache.py
+++ b/datacube/drivers/driver_cache.py
@@ -46,7 +46,10 @@
return driver
def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:
- from importlib.metadata import entry_points
+ try:
+ from importlib_metadata import entry_points
+ except ModuleNotFoundError:
+ from importlib.metadata import entry_points
for ep in entry_points(group=group):
driver = safe_load(ep)
if driver is not None:
diff --git a/docs/click_utils.py b/docs/click_utils.py
--- a/docs/click_utils.py
+++ b/docs/click_utils.py
@@ -2,7 +2,6 @@
#
# Copyright (c) 2015-2023 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
-from importlib.metadata import entry_points
from docutils.nodes import literal_block, section, title, make_id
from sphinx.domains import Domain
from docutils.parsers.rst import Directive
@@ -34,6 +33,10 @@
def find_script_callable(name):
+ try:
+ from importlib_metadata import entry_points
+ except ModuleNotFoundError:
+ from importlib.metadata import entry_points
return list(entry_points(
group='console_scripts', name=name))[0].load()
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -112,6 +112,7 @@
'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost
'packaging',
'deprecat',
+ 'importlib_metadata>3.5;python_version<"3.10"',
],
extras_require=extras_require,
tests_require=tests_require,
| {"golden_diff": "diff --git a/datacube/drivers/driver_cache.py b/datacube/drivers/driver_cache.py\n--- a/datacube/drivers/driver_cache.py\n+++ b/datacube/drivers/driver_cache.py\n@@ -46,7 +46,10 @@\n return driver\n \n def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:\n- from importlib.metadata import entry_points\n+ try:\n+ from importlib_metadata import entry_points\n+ except ModuleNotFoundError:\n+ from importlib.metadata import entry_points\n for ep in entry_points(group=group):\n driver = safe_load(ep)\n if driver is not None:\ndiff --git a/docs/click_utils.py b/docs/click_utils.py\n--- a/docs/click_utils.py\n+++ b/docs/click_utils.py\n@@ -2,7 +2,6 @@\n #\n # Copyright (c) 2015-2023 ODC Contributors\n # SPDX-License-Identifier: Apache-2.0\n-from importlib.metadata import entry_points\n from docutils.nodes import literal_block, section, title, make_id\n from sphinx.domains import Domain\n from docutils.parsers.rst import Directive\n@@ -34,6 +33,10 @@\n \n \n def find_script_callable(name):\n+ try:\n+ from importlib_metadata import entry_points\n+ except ModuleNotFoundError:\n+ from importlib.metadata import entry_points\n return list(entry_points(\n group='console_scripts', name=name))[0].load()\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -112,6 +112,7 @@\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n 'packaging',\n 'deprecat',\n+ 'importlib_metadata>3.5;python_version<\"3.10\"',\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n", "issue": "python 3.8/3.9 doesn't support entry_points with either group or select\nAs title.\r\n\r\nhttps://github.com/opendatacube/datacube-core/blob/5fed4a688c85c198c5d25f2570b0c92ef91dfc04/datacube/drivers/driver_cache.py#L50\r\n\r\nRef: https://docs.python.org/3/library/importlib.metadata.html\r\n>Compatibility Note: The \u201cselectable\u201d entry points were introduced in importlib_metadata 3.6 and Python 3.10. Prior to those changes, entry_points accepted no parameters and always returned a dictionary of entry points, keyed by group. For compatibility, if no parameters are passed to entry_points, a SelectableGroups object is returned, implementing that dict interface. In the future, calling entry_points with no parameters will return an EntryPoints object. Users should rely on the selection interface to retrieve entry points by group.\n", "before_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2023 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\nimport logging\nfrom typing import Dict, Any, Tuple, Iterable\n\n_LOG = logging.getLogger(__name__)\n\n\ndef load_drivers(group: str) -> Dict[str, Any]:\n \"\"\"\n Load available drivers for a given group name.\n\n Gracefully handles:\n\n - Driver module not able to be imported\n - Driver init function throwing an exception or returning None\n\n By having driver entry_points pointing to a function, we defer loading the driver\n module or running any code until required.\n\n :param group: Name of the entry point group e.g. 
\"datacube.plugins.io.read\"\n\n :returns: Dictionary String -> Driver Object\n \"\"\"\n\n def safe_load(ep):\n # pylint: disable=broad-except,bare-except\n try:\n driver_init = ep.load()\n except Exception as e:\n _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)\n _LOG.warning('Error was: %s', repr(e))\n return None\n\n try:\n driver = driver_init()\n except Exception:\n _LOG.warning('Exception during driver init, driver name: %s::%s', group, ep.name)\n return None\n\n if driver is None:\n _LOG.warning('Driver init returned None, driver name: %s::%s', group, ep.name)\n\n return driver\n\n def resolve_all(group: str) -> Iterable[Tuple[str, Any]]:\n from importlib.metadata import entry_points\n for ep in entry_points(group=group):\n driver = safe_load(ep)\n if driver is not None:\n yield (ep.name, driver)\n\n return dict((name, driver) for name, driver in resolve_all(group))\n", "path": "datacube/drivers/driver_cache.py"}, {"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'hypothesis',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\ndoc_require = [\n 'Sphinx',\n 'sphinx_autodoc_typehints', # Propagate mypy info into docs\n 'sphinx-click',\n 'recommonmark',\n 'autodocsumm',\n 'beautifulsoup4',\n 'nbsphinx',\n 'pydata-sphinx-theme==0.9.0',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': doc_require,\n 's3': ['boto3', 'botocore'],\n 'test': tests_require,\n 'cf': ['compliance-checker>=4.0.0'],\n}\n\nextras_require['dev'] = sorted(set(sum([extras_require[k] for k in [\n 'test',\n 'doc',\n 'performance',\n 's3',\n 'distributed',\n]], [])))\n\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.8.0',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n 'datacube': ['py.typed'],\n },\n scripts=[],\n install_requires=[\n 'affine',\n 'attrs>=18.1',\n 'pyproj>=2.5',\n 'shapely>=2.0',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 'jsonschema',\n 'netcdf4',\n 
'numpy',\n 'psycopg2',\n 'lark',\n 'pandas',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1\n 'ruamel.yaml',\n 'sqlalchemy>=1.4,<2.0', # GeoAlchemy2 requires >=1.4. SqlAlchemy2 *may* work but has not been tested yet.\n 'GeoAlchemy2',\n 'toolz',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n 'packaging',\n 'deprecat',\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-worker = datacube.execution.worker:main',\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.postgres.index:index_driver_init',\n 'null = datacube.index.null.index:index_driver_init',\n 'memory = datacube.index.memory.index:index_driver_init',\n 'postgis = datacube.index.postgis.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n", "path": "setup.py"}, {"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2023 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\nfrom importlib.metadata import entry_points\nfrom docutils.nodes import literal_block, section, title, make_id\nfrom sphinx.domains import Domain\nfrom docutils.parsers.rst import Directive\nimport importlib\n\nimport click\n\n\nclass ClickHelpDirective(Directive):\n has_content = True\n required_arguments = 1\n\n def run(self):\n root_cmd = self.arguments[0]\n\n env = self.state.document.settings.env\n\n group = find_script_callable_from_env(root_cmd, env)\n\n return [generate_help_text(group, [root_cmd])]\n\n\ndef find_script_callable_from_env(name, env):\n commands = env.config.click_utils_commands\n\n module, function_name = commands[name].split(':')\n module = importlib.import_module(module)\n return getattr(module, function_name)\n\n\ndef find_script_callable(name):\n return list(entry_points(\n group='console_scripts', name=name))[0].load()\n\n\ndef generate_help_text(command, prefix):\n ctx = click.Context(command)\n help_opts = command.get_help_option(ctx).opts\n full_cmd = ' '.join(prefix)\n block = section(None,\n title(None, full_cmd),\n ids=[make_id(full_cmd)], names=[full_cmd])\n if help_opts:\n h = \"$ {} {}\\n\".format(full_cmd, help_opts[0]) + command.get_help(ctx)\n block.append(literal_block(None, h, language='console'))\n\n if isinstance(command, click.core.MultiCommand):\n for c in command.list_commands(ctx):\n c = command.resolve_command(ctx, [c])[1]\n block.append(generate_help_text(c, prefix+[c.name]))\n\n return block\n\n\ndef make_block(command, opt, content):\n h = \"$ {} {}\\n\".format(command, opt) + content\n return section(None,\n title(None, command),\n literal_block(None, h, language='console'),\n ids=[make_id(command)], names=[command])\n\n\nclass DatacubeDomain(Domain):\n name = 'datacube'\n label = 'Data Cube'\n directives = {\n 'click-help': ClickHelpDirective,\n }\n\n\ndef setup(app):\n app.add_config_value('click_utils_commands', {}, 'html')\n\n app.add_domain(DatacubeDomain)\n return {\n 'parallel_read_safe': False,\n 'parallel_write_safe': False,\n }\n", "path": "docs/click_utils.py"}]} | 3,434 | 427 |
gh_patches_debug_57081 | rasdani/github-patches | git_diff | SeldonIO__MLServer-945 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MLServer is incompatible with latest release of FastAPI
MLServer is incompatible with the [latest release of FastAPI](https://github.com/tiangolo/fastapi/releases/tag/0.89.0), and installing any version of MLServer will result in the following error. A temporary workaround was added in this [pull request](https://github.com/SeldonIO/MLServer/pull/934); however, I think this needs a more in-depth root-cause analysis.
```
2023-01-09 02:11:59,296 [mlserver] INFO - Using asyncio event-loop policy: uvloop
2023-01-09 02:11:59,301 [mlserver] WARNING - Model name 'node-1' is different than model's folder name '25-mlserver-example-single'.
Traceback (most recent call last):
File "/home/cc/miniconda3/envs/central-1/bin/mlserver", line 8, in <module>
sys.exit(main())
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py", line 79, in main
root()
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 1130, in __call__
return self.main(*args, **kwargs)
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 1055, in main
rv = self.invoke(ctx)
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 1657, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 760, in invoke
return __callback(*args, **kwargs)
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py", line 20, in wrapper
return asyncio.run(f(*args, **kwargs))
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "uvloop/loop.pyx", line 1517, in uvloop.loop.Loop.run_until_complete
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py", line 43, in start
server = MLServer(settings)
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/server.py", line 71, in __init__
self._rest_server = RESTServer(
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/rest/server.py", line 26, in __init__
self._app = create_app(
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/rest/app.py", line 43, in create_app
APIRoute(
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/fastapi/routing.py", line 400, in __init__
self.response_field = create_response_field(
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/fastapi/utils.py", line 90, in create_response_field
raise fastapi.exceptions.FastAPIError(
fastapi.exceptions.FastAPIError: Invalid args for response field! Hint: check that <class 'starlette.responses.Response'> is a valid pydantic field type
```
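For what it's worth, the failing call in the traceback can be exercised directly; the snippet below is only an illustrative probe (the field name is arbitrary and this is not MLServer code), assuming the root cause is that FastAPI 0.89.0 ends up asking pydantic to build a response field from a bare Starlette `Response`:

```python
# Illustrative probe of the call that fails in the traceback above -- not MLServer code.
from fastapi.utils import create_response_field  # the helper shown in the traceback
from starlette.responses import Response

# On fastapi 0.89.0, APIRoute.__init__ ends up making this call for MLServer's
# endpoints; a bare Starlette Response is not something pydantic can turn into
# a field, hence the FastAPIError above.
create_response_field(name="response_live", type_=Response)  # raises FastAPIError
```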
</issue>
<code>
[start of setup.py]
1 import os
2
3 from typing import Dict
4 from setuptools import setup, find_packages
5
6 ROOT_PATH = os.path.dirname(__file__)
7 PKG_NAME = "mlserver"
8 PKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)
9
10
11 def _load_version() -> str:
12 version = ""
13 version_path = os.path.join(PKG_PATH, "version.py")
14 with open(version_path) as fp:
15 version_module: Dict[str, str] = {}
16 exec(fp.read(), version_module)
17 version = version_module["__version__"]
18
19 return version
20
21
22 def _load_description() -> str:
23 readme_path = os.path.join(ROOT_PATH, "README.md")
24 with open(readme_path) as fp:
25 return fp.read()
26
27
28 env_marker_cpython = (
29 "sys_platform != 'win32'"
30 " and (sys_platform != 'cygwin'"
31 " and platform_python_implementation != 'PyPy')"
32 )
33
34 setup(
35 name=PKG_NAME,
36 version=_load_version(),
37 url="https://github.com/SeldonIO/MLServer.git",
38 author="Seldon Technologies Ltd.",
39 author_email="[email protected]",
40 description="ML server",
41 packages=find_packages(exclude=["tests", "tests.*"]),
42 install_requires=[
43 "click",
44 "fastapi<=0.88.0",
45 "python-dotenv",
46 "grpcio",
47 "importlib-metadata;python_version<'3.8'",
48 "numpy",
49 "pandas",
50 "protobuf",
51 "uvicorn",
52 "starlette_exporter",
53 "py-grpc-prometheus",
54 "uvloop;" + env_marker_cpython,
55 "aiokafka",
56 "tritonclient[http]>=2.24",
57 "aiofiles",
58 "orjson",
59 ],
60 entry_points={"console_scripts": ["mlserver=mlserver.cli:main"]},
61 long_description=_load_description(),
62 long_description_content_type="text/markdown",
63 license="Apache 2.0",
64 )
65
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -41,7 +41,8 @@
packages=find_packages(exclude=["tests", "tests.*"]),
install_requires=[
"click",
- "fastapi<=0.88.0",
+ # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861
+ "fastapi<=0.89.1, !=0.89.0",
"python-dotenv",
"grpcio",
"importlib-metadata;python_version<'3.8'",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,7 +41,8 @@\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n- \"fastapi<=0.88.0\",\n+ # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861\n+ \"fastapi<=0.89.1, !=0.89.0\",\n \"python-dotenv\",\n \"grpcio\",\n \"importlib-metadata;python_version<'3.8'\",\n", "issue": "MLServer is incompatible with latest release of FastAPI\nMLServer is incompatible with [latest release of FastAPI](https://github.com/tiangolo/fastapi/releases/tag/0.89.0), and installing any version of MLServer will result in the following error, temp workaround added in this [pull request](https://github.com/SeldonIO/MLServer/pull/934) however, I think this needs a more in-depth root-cause analysis.\r\n```\r\n2023-01-09 02:11:59,296 [mlserver] INFO - Using asyncio event-loop policy: uvloop\r\n2023-01-09 02:11:59,301 [mlserver] WARNING - Model name 'node-1' is different than model's folder name '25-mlserver-example-single'.\r\nTraceback (most recent call last):\r\n File \"/home/cc/miniconda3/envs/central-1/bin/mlserver\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py\", line 79, in main\r\n root()\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 1130, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 1055, in main\r\n rv = self.invoke(ctx)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 1657, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 1404, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 760, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py\", line 20, in wrapper\r\n return asyncio.run(f(*args, **kwargs))\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/asyncio/runners.py\", line 44, in run\r\n return loop.run_until_complete(main)\r\n File \"uvloop/loop.pyx\", line 1517, in uvloop.loop.Loop.run_until_complete\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py\", line 43, in start\r\n server = MLServer(settings)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/server.py\", line 71, in __init__\r\n self._rest_server = RESTServer(\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/rest/server.py\", line 26, in __init__\r\n self._app = create_app(\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/rest/app.py\", line 43, in create_app\r\n APIRoute(\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/fastapi/routing.py\", line 400, in __init__\r\n self.response_field = create_response_field(\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/fastapi/utils.py\", line 90, in create_response_field\r\n raise fastapi.exceptions.FastAPIError(\r\nfastapi.exceptions.FastAPIError: Invalid args for response field! 
Hint: check that <class 'starlette.responses.Response'> is a valid pydantic field type\r\n```\n", "before_files": [{"content": "import os\n\nfrom typing import Dict\nfrom setuptools import setup, find_packages\n\nROOT_PATH = os.path.dirname(__file__)\nPKG_NAME = \"mlserver\"\nPKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)\n\n\ndef _load_version() -> str:\n version = \"\"\n version_path = os.path.join(PKG_PATH, \"version.py\")\n with open(version_path) as fp:\n version_module: Dict[str, str] = {}\n exec(fp.read(), version_module)\n version = version_module[\"__version__\"]\n\n return version\n\n\ndef _load_description() -> str:\n readme_path = os.path.join(ROOT_PATH, \"README.md\")\n with open(readme_path) as fp:\n return fp.read()\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nsetup(\n name=PKG_NAME,\n version=_load_version(),\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n description=\"ML server\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n \"fastapi<=0.88.0\",\n \"python-dotenv\",\n \"grpcio\",\n \"importlib-metadata;python_version<'3.8'\",\n \"numpy\",\n \"pandas\",\n \"protobuf\",\n \"uvicorn\",\n \"starlette_exporter\",\n \"py-grpc-prometheus\",\n \"uvloop;\" + env_marker_cpython,\n \"aiokafka\",\n \"tritonclient[http]>=2.24\",\n \"aiofiles\",\n \"orjson\",\n ],\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n long_description=_load_description(),\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n)\n", "path": "setup.py"}]} | 2,010 | 138 |
gh_patches_debug_19706 | rasdani/github-patches | git_diff | buildbot__buildbot-2332 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scheduled weekly dependency update for week 28
## Updates
Here's a list of all the updates bundled in this pull request. I've added some links to make it easier for you to find all the information you need.
<table align="center">
<tr>
<td><b>boto</b></td>
<td align="center">2.47.0</td>
<td align="center">»</td>
<td align="center">2.48.0</td>
<td>
<a href="https://pypi.python.org/pypi/boto">PyPI</a> | <a href="https://pyup.io/changelogs/boto/">Changelog</a> | <a href="https://github.com/boto/boto/">Repo</a>
</td>
<tr>
<td><b>botocore</b></td>
<td align="center">1.5.78</td>
<td align="center">»</td>
<td align="center">1.5.80</td>
<td>
<a href="https://pypi.python.org/pypi/botocore">PyPI</a> | <a href="https://pyup.io/changelogs/botocore/">Changelog</a> | <a href="https://github.com/boto/botocore">Repo</a>
</td>
<tr>
<td><b>Pillow</b></td>
<td align="center">4.2.0</td>
<td align="center">»</td>
<td align="center">4.2.1</td>
<td>
<a href="https://pypi.python.org/pypi/pillow">PyPI</a> | <a href="https://pyup.io/changelogs/pillow/">Changelog</a> | <a href="https://python-pillow.org">Homepage</a>
</td>
<tr>
<td><b>pycparser</b></td>
<td align="center">2.17</td>
<td align="center">»</td>
<td align="center">2.18</td>
<td>
<a href="https://pypi.python.org/pypi/pycparser">PyPI</a> | <a href="https://pyup.io/changelogs/pycparser/">Changelog</a> | <a href="https://github.com/eliben/pycparser">Repo</a>
</td>
<tr>
<td><b>buildbot-www</b></td>
<td align="center">0.9.9.post1</td>
<td align="center">»</td>
<td align="center">0.9.9.post2</td>
<td>
<a href="https://pypi.python.org/pypi/buildbot-www">PyPI</a> | <a href="http://buildbot.net/">Homepage</a>
</td>
</tr>
</table>
## Changelogs
### boto 2.47.0 -> 2.48.0
>### 2.48.0
>============
>:date: 2017/06/29
>This release addresses a few S3 related bugs as well as a bug with the recent
>endpoint heuristics feature.
>Changes
>-------
>* Fix generate_url() AttributeError when using anonymous connections (:issue:`3734`, :sha:`83481807`)
>* Use RegionInfo by default with heuristics (:issue:`3737`, :sha:`0a9b1140`)
>* Allow specifying s3 host from boto config file. (:issue:`3738`, :sha:`dcfc7512`)
### botocore 1.5.78 -> 1.5.80
>### 1.5.80
>======
>* api-change:``kinesis``: Update kinesis client to latest version
>* api-change:``kms``: Update kms client to latest version
>* api-change:``ds``: Update ds client to latest version
>* api-change:``ssm``: Update ssm client to latest version
>### 1.5.79
>======
>* api-change:``cloudwatch``: Update cloudwatch client to latest version
>* api-change:``route53``: Update route53 client to latest version
### Pillow 4.2.0 -> 4.2.1
>### 4.2.1
>------------------
>- CI: Fix version specification and test on CI for PyPy/Windows 2608
> [wiredfool]
### pycparser 2.17 -> 2.18
>### 2.18
> - PR 161 & 184: Update bundled PLY version to 3.10
> - PR 158: Add support for the __int128 type.
> - PR 169: Handle more tricky TYPEID in declarators.
> - PR 178: Add columns to the coord of each node
That's it for now!
Happy merging! 🤖
</issue>
<code>
[start of master/buildbot/data/forceschedulers.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 from twisted.internet import defer
17
18 from buildbot.data import base
19 from buildbot.data import types
20 from buildbot.schedulers import forcesched
21 from buildbot.www.rest import JSONRPC_CODES
22 from buildbot.www.rest import BadJsonRpc2
23
24
25 def forceScheduler2Data(sched):
26 ret = dict(all_fields=[],
27 name=unicode(sched.name),
28 button_name=unicode(sched.buttonName),
29 label=unicode(sched.label),
30 builder_names=map(unicode, sched.builderNames))
31 ret["all_fields"] = [field.getSpec() for field in sched.all_fields]
32 return ret
33
34
35 class ForceSchedulerEndpoint(base.Endpoint):
36
37 isCollection = False
38 pathPatterns = """
39 /forceschedulers/i:schedulername
40 """
41
42 def findForceScheduler(self, schedulername):
43 # eventually this may be db backed. This is why the API is async
44 for sched in self.master.allSchedulers():
45 if sched.name == schedulername and isinstance(sched, forcesched.ForceScheduler):
46 return defer.succeed(sched)
47
48 @defer.inlineCallbacks
49 def get(self, resultSpec, kwargs):
50 sched = yield self.findForceScheduler(kwargs['schedulername'])
51 if sched is not None:
52 defer.returnValue(forceScheduler2Data(sched))
53
54 @defer.inlineCallbacks
55 def control(self, action, args, kwargs):
56 if action == "force":
57 sched = yield self.findForceScheduler(kwargs['schedulername'])
58 if "owner" not in args:
59 args['owner'] = "user"
60 try:
61 res = yield sched.force(**args)
62 defer.returnValue(res)
63 except forcesched.CollectedValidationError as e:
64 raise BadJsonRpc2(e.errors, JSONRPC_CODES["invalid_params"])
65 defer.returnValue(None)
66
67
68 class ForceSchedulersEndpoint(base.Endpoint):
69
70 isCollection = True
71 pathPatterns = """
72 /forceschedulers
73 /builders/:builderid/forceschedulers
74 """
75 rootLinkName = 'forceschedulers'
76
77 @defer.inlineCallbacks
78 def get(self, resultSpec, kwargs):
79 l = []
80 builderid = kwargs.get('builderid', None)
81 if builderid is not None:
82 bdict = yield self.master.db.builders.getBuilder(builderid)
83 for sched in self.master.allSchedulers():
84 if isinstance(sched, forcesched.ForceScheduler):
85 if builderid is not None and bdict['name'] not in sched.builderNames:
86 continue
87 l.append(forceScheduler2Data(sched))
88 defer.returnValue(l)
89
90
91 class ForceScheduler(base.ResourceType):
92
93 name = "forcescheduler"
94 plural = "forceschedulers"
95 endpoints = [ForceSchedulerEndpoint, ForceSchedulersEndpoint]
96 keyFields = []
97
98 class EntityType(types.Entity):
99 name = types.Identifier(20)
100 button_name = types.String()
101 label = types.String()
102 builder_names = types.List(of=types.Identifier(20))
103 all_fields = types.List(of=types.JsonObject())
104 entityType = EntityType(name)
105
[end of master/buildbot/data/forceschedulers.py]
[start of master/buildbot/data/builders.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 from twisted.internet import defer
17
18 from buildbot.data import base
19 from buildbot.data import types
20
21
22 class BuilderEndpoint(base.Endpoint):
23
24 isCollection = False
25 pathPatterns = """
26 /builders/n:builderid
27 /masters/n:masterid/builders/n:builderid
28 """
29
30 @defer.inlineCallbacks
31 def get(self, resultSpec, kwargs):
32 builderid = kwargs['builderid']
33 bdict = yield self.master.db.builders.getBuilder(builderid)
34 if not bdict:
35 defer.returnValue(None)
36 return
37 if 'masterid' in kwargs:
38 if kwargs['masterid'] not in bdict['masterids']:
39 defer.returnValue(None)
40 return
41 defer.returnValue(
42 dict(builderid=builderid,
43 name=bdict['name'],
44 masterids=bdict['masterids'],
45 description=bdict['description'],
46 tags=bdict['tags']))
47
48
49 class BuildersEndpoint(base.Endpoint):
50
51 isCollection = True
52 rootLinkName = 'builders'
53 pathPatterns = """
54 /builders
55 /masters/n:masterid/builders
56 """
57
58 @defer.inlineCallbacks
59 def get(self, resultSpec, kwargs):
60 bdicts = yield self.master.db.builders.getBuilders(
61 masterid=kwargs.get('masterid', None))
62 defer.returnValue([
63 dict(builderid=bd['id'],
64 name=bd['name'],
65 masterids=bd['masterids'],
66 description=bd['description'],
67 tags=bd['tags'])
68 for bd in bdicts])
69
70
71 class Builder(base.ResourceType):
72
73 name = "builder"
74 plural = "builders"
75 endpoints = [BuilderEndpoint, BuildersEndpoint]
76 keyFields = ['builderid']
77
78 class EntityType(types.Entity):
79 builderid = types.Integer()
80 name = types.Identifier(20)
81 masterids = types.List(of=types.Integer())
82 description = types.NoneOk(types.String())
83 tags = types.List(of=types.String())
84 entityType = EntityType(name)
85
86 def __init__(self, master):
87 base.ResourceType.__init__(self, master)
88
89 @base.updateMethod
90 def findBuilderId(self, name):
91 return self.master.db.builders.findBuilderId(name)
92
93 @base.updateMethod
94 def updateBuilderInfo(self, builderid, description, tags):
95 return self.master.db.builders.updateBuilderInfo(builderid, description, tags)
96
97 @base.updateMethod
98 @defer.inlineCallbacks
99 def updateBuilderList(self, masterid, builderNames):
100 # get the "current" list of builders for this master, so we know what
101 # changes to make. Race conditions here aren't a great worry, as this
102 # is the only master inserting or deleting these records.
103 builders = yield self.master.db.builders.getBuilders(masterid=masterid)
104
105 # figure out what to remove and remove it
106 builderNames_set = set(builderNames)
107 for bldr in builders:
108 if bldr['name'] not in builderNames_set:
109 builderid = bldr['id']
110 yield self.master.db.builders.removeBuilderMaster(
111 masterid=masterid, builderid=builderid)
112 self.master.mq.produce(('builders', str(builderid), 'stopped'),
113 dict(builderid=builderid, masterid=masterid,
114 name=bldr['name']))
115 else:
116 builderNames_set.remove(bldr['name'])
117
118 # now whatever's left in builderNames_set is new
119 for name in builderNames_set:
120 builderid = yield self.master.db.builders.findBuilderId(name)
121 yield self.master.db.builders.addBuilderMaster(
122 masterid=masterid, builderid=builderid)
123 self.master.mq.produce(('builders', str(builderid), 'started'),
124 dict(builderid=builderid, masterid=masterid, name=name))
125
126 @defer.inlineCallbacks
127 def _masterDeactivated(self, masterid):
128 # called from the masters rtype to indicate that the given master is
129 # deactivated
130 yield self.updateBuilderList(masterid, [])
131
[end of master/buildbot/data/builders.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/master/buildbot/data/builders.py b/master/buildbot/data/builders.py
--- a/master/buildbot/data/builders.py
+++ b/master/buildbot/data/builders.py
@@ -77,7 +77,7 @@
class EntityType(types.Entity):
builderid = types.Integer()
- name = types.Identifier(20)
+ name = types.Identifier(50)
masterids = types.List(of=types.Integer())
description = types.NoneOk(types.String())
tags = types.List(of=types.String())
diff --git a/master/buildbot/data/forceschedulers.py b/master/buildbot/data/forceschedulers.py
--- a/master/buildbot/data/forceschedulers.py
+++ b/master/buildbot/data/forceschedulers.py
@@ -96,9 +96,9 @@
keyFields = []
class EntityType(types.Entity):
- name = types.Identifier(20)
+ name = types.Identifier(50)
button_name = types.String()
label = types.String()
- builder_names = types.List(of=types.Identifier(20))
+ builder_names = types.List(of=types.Identifier(50))
all_fields = types.List(of=types.JsonObject())
entityType = EntityType(name)
| {"golden_diff": "diff --git a/master/buildbot/data/builders.py b/master/buildbot/data/builders.py\n--- a/master/buildbot/data/builders.py\n+++ b/master/buildbot/data/builders.py\n@@ -77,7 +77,7 @@\n \n class EntityType(types.Entity):\n builderid = types.Integer()\n- name = types.Identifier(20)\n+ name = types.Identifier(50)\n masterids = types.List(of=types.Integer())\n description = types.NoneOk(types.String())\n tags = types.List(of=types.String())\ndiff --git a/master/buildbot/data/forceschedulers.py b/master/buildbot/data/forceschedulers.py\n--- a/master/buildbot/data/forceschedulers.py\n+++ b/master/buildbot/data/forceschedulers.py\n@@ -96,9 +96,9 @@\n keyFields = []\n \n class EntityType(types.Entity):\n- name = types.Identifier(20)\n+ name = types.Identifier(50)\n button_name = types.String()\n label = types.String()\n- builder_names = types.List(of=types.Identifier(20))\n+ builder_names = types.List(of=types.Identifier(50))\n all_fields = types.List(of=types.JsonObject())\n entityType = EntityType(name)\n", "issue": "Scheduled weekly dependency update for week 28\n\n\n\n## Updates\nHere's a list of all the updates bundled in this pull request. I've added some links to make it easier for you to find all the information you need.\n<table align=\"center\">\n\n<tr>\n<td><b>boto</b></td>\n<td align=\"center\">2.47.0</td>\n<td align=\"center\">»</td>\n<td align=\"center\">2.48.0</td>\n<td>\n <a href=\"https://pypi.python.org/pypi/boto\">PyPI</a> | <a href=\"https://pyup.io/changelogs/boto/\">Changelog</a> | <a href=\"https://github.com/boto/boto/\">Repo</a> \n\n</td>\n\n<tr>\n<td><b>botocore</b></td>\n<td align=\"center\">1.5.78</td>\n<td align=\"center\">»</td>\n<td align=\"center\">1.5.80</td>\n<td>\n <a href=\"https://pypi.python.org/pypi/botocore\">PyPI</a> | <a href=\"https://pyup.io/changelogs/botocore/\">Changelog</a> | <a href=\"https://github.com/boto/botocore\">Repo</a> \n\n</td>\n\n<tr>\n<td><b>Pillow</b></td>\n<td align=\"center\">4.2.0</td>\n<td align=\"center\">»</td>\n<td align=\"center\">4.2.1</td>\n<td>\n <a href=\"https://pypi.python.org/pypi/pillow\">PyPI</a> | <a href=\"https://pyup.io/changelogs/pillow/\">Changelog</a> | <a href=\"https://python-pillow.org\">Homepage</a> \n\n</td>\n\n<tr>\n<td><b>pycparser</b></td>\n<td align=\"center\">2.17</td>\n<td align=\"center\">»</td>\n<td align=\"center\">2.18</td>\n<td>\n <a href=\"https://pypi.python.org/pypi/pycparser\">PyPI</a> | <a href=\"https://pyup.io/changelogs/pycparser/\">Changelog</a> | <a href=\"https://github.com/eliben/pycparser\">Repo</a> \n\n</td>\n\n<tr>\n<td><b>buildbot-www</b></td>\n<td align=\"center\">0.9.9.post1</td>\n<td align=\"center\">»</td>\n<td align=\"center\">0.9.9.post2</td>\n<td>\n <a href=\"https://pypi.python.org/pypi/buildbot-www\">PyPI</a> | <a href=\"http://buildbot.net/\">Homepage</a> \n\n</td>\n\n</tr>\n</table>\n\n\n\n## Changelogs\n\n\n### boto 2.47.0 -> 2.48.0\n\n>### 2.48.0\n\n>============\n\n>:date: 2017/06/29\n\n>This release addresses a few S3 related bugs as well as a bug with the recent\n>endpoint heuristics feature.\n\n>Changes\n>-------\n>* Fix generate_url() AttributeError when using anonymous connections (:issue:`3734`, :sha:`83481807`)\n>* Use RegionInfo by default with heuristics (:issue:`3737`, :sha:`0a9b1140`)\n>* Allow specifying s3 host from boto config file. 
(:issue:`3738`, :sha:`dcfc7512`)\n\n\n\n\n\n\n\n\n\n\n### botocore 1.5.78 -> 1.5.80\n\n>### 1.5.80\n\n>======\n\n>* api-change:``kinesis``: Update kinesis client to latest version\n>* api-change:``kms``: Update kms client to latest version\n>* api-change:``ds``: Update ds client to latest version\n>* api-change:``ssm``: Update ssm client to latest version\n\n\n\n\n>### 1.5.79\n\n>======\n\n>* api-change:``cloudwatch``: Update cloudwatch client to latest version\n>* api-change:``route53``: Update route53 client to latest version\n\n\n\n\n\n\n\n\n### Pillow 4.2.0 -> 4.2.1\n\n>### 4.2.1\n\n>------------------\n\n>- CI: Fix version specification and test on CI for PyPy/Windows 2608\n> [wiredfool]\n\n\n\n\n\n\n\n### pycparser 2.17 -> 2.18\n\n>### 2.18\n\n\n> - PR 161 & 184: Update bundled PLY version to 3.10\n> - PR 158: Add support for the __int128 type.\n> - PR 169: Handle more tricky TYPEID in declarators.\n> - PR 178: Add columns to the coord of each node\n\n\n\n\n\n\n\n\n\n\nThat's it for now!\n\nHappy merging! \ud83e\udd16\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom twisted.internet import defer\n\nfrom buildbot.data import base\nfrom buildbot.data import types\nfrom buildbot.schedulers import forcesched\nfrom buildbot.www.rest import JSONRPC_CODES\nfrom buildbot.www.rest import BadJsonRpc2\n\n\ndef forceScheduler2Data(sched):\n ret = dict(all_fields=[],\n name=unicode(sched.name),\n button_name=unicode(sched.buttonName),\n label=unicode(sched.label),\n builder_names=map(unicode, sched.builderNames))\n ret[\"all_fields\"] = [field.getSpec() for field in sched.all_fields]\n return ret\n\n\nclass ForceSchedulerEndpoint(base.Endpoint):\n\n isCollection = False\n pathPatterns = \"\"\"\n /forceschedulers/i:schedulername\n \"\"\"\n\n def findForceScheduler(self, schedulername):\n # eventually this may be db backed. 
This is why the API is async\n for sched in self.master.allSchedulers():\n if sched.name == schedulername and isinstance(sched, forcesched.ForceScheduler):\n return defer.succeed(sched)\n\n @defer.inlineCallbacks\n def get(self, resultSpec, kwargs):\n sched = yield self.findForceScheduler(kwargs['schedulername'])\n if sched is not None:\n defer.returnValue(forceScheduler2Data(sched))\n\n @defer.inlineCallbacks\n def control(self, action, args, kwargs):\n if action == \"force\":\n sched = yield self.findForceScheduler(kwargs['schedulername'])\n if \"owner\" not in args:\n args['owner'] = \"user\"\n try:\n res = yield sched.force(**args)\n defer.returnValue(res)\n except forcesched.CollectedValidationError as e:\n raise BadJsonRpc2(e.errors, JSONRPC_CODES[\"invalid_params\"])\n defer.returnValue(None)\n\n\nclass ForceSchedulersEndpoint(base.Endpoint):\n\n isCollection = True\n pathPatterns = \"\"\"\n /forceschedulers\n /builders/:builderid/forceschedulers\n \"\"\"\n rootLinkName = 'forceschedulers'\n\n @defer.inlineCallbacks\n def get(self, resultSpec, kwargs):\n l = []\n builderid = kwargs.get('builderid', None)\n if builderid is not None:\n bdict = yield self.master.db.builders.getBuilder(builderid)\n for sched in self.master.allSchedulers():\n if isinstance(sched, forcesched.ForceScheduler):\n if builderid is not None and bdict['name'] not in sched.builderNames:\n continue\n l.append(forceScheduler2Data(sched))\n defer.returnValue(l)\n\n\nclass ForceScheduler(base.ResourceType):\n\n name = \"forcescheduler\"\n plural = \"forceschedulers\"\n endpoints = [ForceSchedulerEndpoint, ForceSchedulersEndpoint]\n keyFields = []\n\n class EntityType(types.Entity):\n name = types.Identifier(20)\n button_name = types.String()\n label = types.String()\n builder_names = types.List(of=types.Identifier(20))\n all_fields = types.List(of=types.JsonObject())\n entityType = EntityType(name)\n", "path": "master/buildbot/data/forceschedulers.py"}, {"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom twisted.internet import defer\n\nfrom buildbot.data import base\nfrom buildbot.data import types\n\n\nclass BuilderEndpoint(base.Endpoint):\n\n isCollection = False\n pathPatterns = \"\"\"\n /builders/n:builderid\n /masters/n:masterid/builders/n:builderid\n \"\"\"\n\n @defer.inlineCallbacks\n def get(self, resultSpec, kwargs):\n builderid = kwargs['builderid']\n bdict = yield self.master.db.builders.getBuilder(builderid)\n if not bdict:\n defer.returnValue(None)\n return\n if 'masterid' in kwargs:\n if kwargs['masterid'] not in bdict['masterids']:\n defer.returnValue(None)\n return\n defer.returnValue(\n dict(builderid=builderid,\n name=bdict['name'],\n masterids=bdict['masterids'],\n description=bdict['description'],\n tags=bdict['tags']))\n\n\nclass BuildersEndpoint(base.Endpoint):\n\n isCollection = True\n rootLinkName = 'builders'\n pathPatterns = \"\"\"\n /builders\n /masters/n:masterid/builders\n \"\"\"\n\n @defer.inlineCallbacks\n def get(self, resultSpec, kwargs):\n bdicts = yield self.master.db.builders.getBuilders(\n masterid=kwargs.get('masterid', None))\n defer.returnValue([\n dict(builderid=bd['id'],\n name=bd['name'],\n masterids=bd['masterids'],\n description=bd['description'],\n tags=bd['tags'])\n for bd in bdicts])\n\n\nclass Builder(base.ResourceType):\n\n name = \"builder\"\n plural = \"builders\"\n endpoints = [BuilderEndpoint, BuildersEndpoint]\n keyFields = ['builderid']\n\n class EntityType(types.Entity):\n builderid = types.Integer()\n name = types.Identifier(20)\n masterids = types.List(of=types.Integer())\n description = types.NoneOk(types.String())\n tags = types.List(of=types.String())\n entityType = EntityType(name)\n\n def __init__(self, master):\n base.ResourceType.__init__(self, master)\n\n @base.updateMethod\n def findBuilderId(self, name):\n return self.master.db.builders.findBuilderId(name)\n\n @base.updateMethod\n def updateBuilderInfo(self, builderid, description, tags):\n return self.master.db.builders.updateBuilderInfo(builderid, description, tags)\n\n @base.updateMethod\n @defer.inlineCallbacks\n def updateBuilderList(self, masterid, builderNames):\n # get the \"current\" list of builders for this master, so we know what\n # changes to make. 
Race conditions here aren't a great worry, as this\n # is the only master inserting or deleting these records.\n builders = yield self.master.db.builders.getBuilders(masterid=masterid)\n\n # figure out what to remove and remove it\n builderNames_set = set(builderNames)\n for bldr in builders:\n if bldr['name'] not in builderNames_set:\n builderid = bldr['id']\n yield self.master.db.builders.removeBuilderMaster(\n masterid=masterid, builderid=builderid)\n self.master.mq.produce(('builders', str(builderid), 'stopped'),\n dict(builderid=builderid, masterid=masterid,\n name=bldr['name']))\n else:\n builderNames_set.remove(bldr['name'])\n\n # now whatever's left in builderNames_set is new\n for name in builderNames_set:\n builderid = yield self.master.db.builders.findBuilderId(name)\n yield self.master.db.builders.addBuilderMaster(\n masterid=masterid, builderid=builderid)\n self.master.mq.produce(('builders', str(builderid), 'started'),\n dict(builderid=builderid, masterid=masterid, name=name))\n\n @defer.inlineCallbacks\n def _masterDeactivated(self, masterid):\n # called from the masters rtype to indicate that the given master is\n # deactivated\n yield self.updateBuilderList(masterid, [])\n", "path": "master/buildbot/data/builders.py"}]} | 4,016 | 265 |
gh_patches_debug_39102 | rasdani/github-patches | git_diff | kedro-org__kedro-3680 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Provide better error message when you try to use a project command from outside a project
> **Note**. This ticket does not handle the question of what happens if you're in a subdirectory of the project. That is done in #1831. This ticket is just about what happens if you're outside the project altogether (e.g. the level above the project).
When a kedro project command (e.g. `kedro run`) is run from outside a project, you currently get the very uninformative message `run command is not found`. We should change this so that it gives a much clearer message that you are outside a kedro project and should `cd` into the project before executing the command.
This has been started in #1720 @noklam. You could either continue working on that PR or start a new one. Relevant modifications to make:
* since we're not interested in iterating upwards through parent directories here, the existing upwards iteration using `find_project_root` is no longer relevant. That leaves open the question of whether and how we might suggest what the correct kedro project directory is. It's definitely a bad idea to iterate _downwards_ through directory structure arbitrarily deep (could take a long time). We could go one directory deep so long as it's not too expensive an operation (maybe need to check there aren't too many folders in the current directory first? Needs some quick time profiling to see). Or just not make any suggestion as to the right `cd` command.
* if we're totally outside a kedro project, a helpful error message is great but:
* is there some better way to check if the command is a project one without overriding `get_command`, e.g. check if it's in `self.project_groups`?
  * I don't think this formatting is "on brand" yet since we haven't moved the rest of the CLI to rich yet. Let's just do it as `click.secho` for now (a rough sketch of such a message is included below). Doing rich styling for CLI messages is a separate issue for another time (e.g. we might move to https://github.com/ewels/rich-click).
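As a rough illustration of the kind of message described above (the helper name, wording and wiring are placeholders for discussion, not the final implementation):

```python
# Placeholder sketch only -- names, wording and wiring are assumptions, not the shipped fix.
import click


def _warn_outside_project() -> None:
    click.secho("Kedro project not found in this directory.", fg="yellow", bold=True)
    click.secho(
        "Project commands such as 'kedro run' only work inside a Kedro project; "
        "cd into the project directory (the one containing pyproject.toml) and retry."
    )


# e.g. invoked from KedroCLI when command lookup fails and self.project_groups is empty:
# if not self.project_groups:
#     _warn_outside_project()
```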
</issue>
<code>
[start of kedro/framework/cli/__init__.py]
1 """``kedro.framework.cli`` implements commands available from Kedro's CLI.
2 """
3
4 from .cli import main
5 from .utils import command_with_verbosity, load_entry_points
6
7 __all__ = ["main", "command_with_verbosity", "load_entry_points"]
8
[end of kedro/framework/cli/__init__.py]
[start of kedro/framework/cli/cli.py]
1 """kedro is a CLI for managing Kedro projects.
2
3 This module implements commands available from the kedro CLI.
4 """
5 from __future__ import annotations
6
7 import importlib
8 import sys
9 from collections import defaultdict
10 from pathlib import Path
11 from typing import Any, Sequence
12
13 import click
14
15 from kedro import __version__ as version
16 from kedro.framework.cli.catalog import catalog_cli
17 from kedro.framework.cli.hooks import get_cli_hook_manager
18 from kedro.framework.cli.jupyter import jupyter_cli
19 from kedro.framework.cli.micropkg import micropkg_cli
20 from kedro.framework.cli.pipeline import pipeline_cli
21 from kedro.framework.cli.project import project_group
22 from kedro.framework.cli.registry import registry_cli
23 from kedro.framework.cli.starters import create_cli
24 from kedro.framework.cli.utils import (
25 CONTEXT_SETTINGS,
26 ENTRY_POINT_GROUPS,
27 CommandCollection,
28 KedroCliError,
29 _get_entry_points,
30 load_entry_points,
31 )
32 from kedro.framework.project import LOGGING # noqa: F401
33 from kedro.framework.startup import _is_project, bootstrap_project
34
35 LOGO = rf"""
36 _ _
37 | | _____ __| |_ __ ___
38 | |/ / _ \/ _` | '__/ _ \
39 | < __/ (_| | | | (_) |
40 |_|\_\___|\__,_|_| \___/
41 v{version}
42 """
43
44
45 @click.group(context_settings=CONTEXT_SETTINGS, name="Kedro")
46 @click.version_option(version, "--version", "-V", help="Show version and exit")
47 def cli() -> None: # pragma: no cover
48 """Kedro is a CLI for creating and using Kedro projects. For more
49 information, type ``kedro info``.
50
51 """
52 pass
53
54
55 @cli.command()
56 def info() -> None:
57 """Get more information about kedro."""
58 click.secho(LOGO, fg="green")
59 click.echo(
60 "Kedro is a Python framework for\n"
61 "creating reproducible, maintainable\n"
62 "and modular data science code."
63 )
64
65 plugin_versions = {}
66 plugin_entry_points = defaultdict(set)
67 for plugin_entry_point in ENTRY_POINT_GROUPS:
68 for entry_point in _get_entry_points(plugin_entry_point):
69 module_name = entry_point.module.split(".")[0]
70 plugin_versions[module_name] = entry_point.dist.version
71 plugin_entry_points[module_name].add(plugin_entry_point)
72
73 click.echo()
74 if plugin_versions:
75 click.echo("Installed plugins:")
76 for plugin_name, plugin_version in sorted(plugin_versions.items()):
77 entrypoints_str = ",".join(sorted(plugin_entry_points[plugin_name]))
78 click.echo(
79 f"{plugin_name}: {plugin_version} (entry points:{entrypoints_str})"
80 )
81 else:
82 click.echo("No plugins installed")
83
84
85 def _init_plugins() -> None:
86 init_hooks = load_entry_points("init")
87 for init_hook in init_hooks:
88 init_hook()
89
90
91 class KedroCLI(CommandCollection):
92 """A CommandCollection class to encapsulate the KedroCLI command
93 loading.
94 """
95
96 def __init__(self, project_path: Path):
97 self._metadata = None # running in package mode
98 if _is_project(project_path):
99 self._metadata = bootstrap_project(project_path)
100 self._cli_hook_manager = get_cli_hook_manager()
101
102 super().__init__(
103 ("Global commands", self.global_groups),
104 ("Project specific commands", self.project_groups),
105 )
106
107 def main(
108 self,
109 args: Any | None = None,
110 prog_name: Any | None = None,
111 complete_var: Any | None = None,
112 standalone_mode: bool = True,
113 **extra: Any,
114 ) -> Any:
115 if self._metadata:
116 extra.update(obj=self._metadata)
117
118 # This is how click's internals parse sys.argv, which include the command,
119 # subcommand, arguments and options. click doesn't store this information anywhere
120 # so we have to re-do it.
121 args = sys.argv[1:] if args is None else list(args)
122 self._cli_hook_manager.hook.before_command_run(
123 project_metadata=self._metadata, command_args=args
124 )
125
126 try:
127 super().main(
128 args=args,
129 prog_name=prog_name,
130 complete_var=complete_var,
131 standalone_mode=standalone_mode,
132 **extra,
133 )
134 # click.core.main() method exits by default, we capture this and then
135 # exit as originally intended
136 except SystemExit as exc:
137 self._cli_hook_manager.hook.after_command_run(
138 project_metadata=self._metadata, command_args=args, exit_code=exc.code
139 )
140 sys.exit(exc.code)
141
142 @property
143 def global_groups(self) -> Sequence[click.MultiCommand]:
144 """Property which loads all global command groups from plugins and
145 combines them with the built-in ones (eventually overriding the
146 built-in ones if they are redefined by plugins).
147 """
148 return [cli, create_cli, *load_entry_points("global")]
149
150 @property
151 def project_groups(self) -> Sequence[click.MultiCommand]:
152 """Property which loads all project command groups from the
153 project and the plugins, then combines them with the built-in ones.
154 Built-in commands can be overridden by plugins, which can be
155 overridden by a custom project cli.py.
156 See https://kedro.readthedocs.io/en/stable/extend_kedro/common_use_cases.html#use-case-3-how-to-add-or-modify-cli-commands
157 on how to add this.
158 """
159 if not self._metadata:
160 return []
161
162 built_in = [
163 catalog_cli,
164 jupyter_cli,
165 pipeline_cli,
166 micropkg_cli,
167 project_group,
168 registry_cli,
169 ]
170
171 plugins = load_entry_points("project")
172
173 try:
174 project_cli = importlib.import_module(f"{self._metadata.package_name}.cli")
175 # fail gracefully if cli.py does not exist
176 except ModuleNotFoundError:
177 # return only built-in commands and commands from plugins
178 # (plugins can override built-in commands)
179 return [*built_in, *plugins]
180
181 # fail badly if cli.py exists, but has no `cli` in it
182 if not hasattr(project_cli, "cli"):
183 raise KedroCliError(
184 f"Cannot load commands from {self._metadata.package_name}.cli"
185 )
186 user_defined = project_cli.cli
187 # return built-in commands, plugin commands and user defined commands
188 # (overriding happens as follows built-in < plugins < cli.py)
189 return [*built_in, *plugins, user_defined]
190
191
192 def main() -> None: # pragma: no cover
193 """Main entry point. Look for a ``cli.py``, and, if found, add its
194 commands to `kedro`'s before invoking the CLI.
195 """
196 _init_plugins()
197 cli_collection = KedroCLI(project_path=Path.cwd())
198 cli_collection()
199
[end of kedro/framework/cli/cli.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kedro/framework/cli/__init__.py b/kedro/framework/cli/__init__.py
--- a/kedro/framework/cli/__init__.py
+++ b/kedro/framework/cli/__init__.py
@@ -1,7 +1,11 @@
"""``kedro.framework.cli`` implements commands available from Kedro's CLI.
"""
-from .cli import main
-from .utils import command_with_verbosity, load_entry_points
+# The constant need to be defined first otherwise it causes circular depdencies
+ORANGE = (255, 175, 0)
+BRIGHT_BLACK = (128, 128, 128)
+
+from .cli import main # noqa: E402
+from .utils import command_with_verbosity, load_entry_points # noqa: E402
__all__ = ["main", "command_with_verbosity", "load_entry_points"]
diff --git a/kedro/framework/cli/cli.py b/kedro/framework/cli/cli.py
--- a/kedro/framework/cli/cli.py
+++ b/kedro/framework/cli/cli.py
@@ -6,6 +6,7 @@
import importlib
import sys
+import traceback
from collections import defaultdict
from pathlib import Path
from typing import Any, Sequence
@@ -13,6 +14,7 @@
import click
from kedro import __version__ as version
+from kedro.framework.cli import BRIGHT_BLACK, ORANGE
from kedro.framework.cli.catalog import catalog_cli
from kedro.framework.cli.hooks import get_cli_hook_manager
from kedro.framework.cli.jupyter import jupyter_cli
@@ -133,10 +135,40 @@
)
# click.core.main() method exits by default, we capture this and then
# exit as originally intended
+
except SystemExit as exc:
self._cli_hook_manager.hook.after_command_run(
project_metadata=self._metadata, command_args=args, exit_code=exc.code
)
+ # When CLI is run outside of a project, project_groups are not registered
+ catch_exception = "click.exceptions.UsageError: No such command"
+ # click convert exception handles to error message
+ if catch_exception in traceback.format_exc() and not self.project_groups:
+ warn = click.style(
+ "\nKedro project not found in this directory. ",
+ fg=ORANGE,
+ bold=True,
+ )
+ result = (
+ click.style("Project specific commands such as ")
+ + click.style("'run' ", fg="cyan")
+ + "or "
+ + click.style("'jupyter' ", fg="cyan")
+ + "are only available within a project directory."
+ )
+ message = warn + result
+ hint = (
+ click.style(
+ "\nHint: Kedro is looking for a file called ", fg=BRIGHT_BLACK
+ )
+ + click.style("'pyproject.toml", fg="magenta")
+ + click.style(
+ ", is one present in your current working directory?",
+ fg=BRIGHT_BLACK,
+ )
+ )
+ click.echo(message)
+ click.echo(hint)
sys.exit(exc.code)
@property
| {"golden_diff": "diff --git a/kedro/framework/cli/__init__.py b/kedro/framework/cli/__init__.py\n--- a/kedro/framework/cli/__init__.py\n+++ b/kedro/framework/cli/__init__.py\n@@ -1,7 +1,11 @@\n \"\"\"``kedro.framework.cli`` implements commands available from Kedro's CLI.\n \"\"\"\n \n-from .cli import main\n-from .utils import command_with_verbosity, load_entry_points\n+# The constant need to be defined first otherwise it causes circular depdencies\n+ORANGE = (255, 175, 0)\n+BRIGHT_BLACK = (128, 128, 128)\n+\n+from .cli import main # noqa: E402\n+from .utils import command_with_verbosity, load_entry_points # noqa: E402\n \n __all__ = [\"main\", \"command_with_verbosity\", \"load_entry_points\"]\ndiff --git a/kedro/framework/cli/cli.py b/kedro/framework/cli/cli.py\n--- a/kedro/framework/cli/cli.py\n+++ b/kedro/framework/cli/cli.py\n@@ -6,6 +6,7 @@\n \n import importlib\n import sys\n+import traceback\n from collections import defaultdict\n from pathlib import Path\n from typing import Any, Sequence\n@@ -13,6 +14,7 @@\n import click\n \n from kedro import __version__ as version\n+from kedro.framework.cli import BRIGHT_BLACK, ORANGE\n from kedro.framework.cli.catalog import catalog_cli\n from kedro.framework.cli.hooks import get_cli_hook_manager\n from kedro.framework.cli.jupyter import jupyter_cli\n@@ -133,10 +135,40 @@\n )\n # click.core.main() method exits by default, we capture this and then\n # exit as originally intended\n+\n except SystemExit as exc:\n self._cli_hook_manager.hook.after_command_run(\n project_metadata=self._metadata, command_args=args, exit_code=exc.code\n )\n+ # When CLI is run outside of a project, project_groups are not registered\n+ catch_exception = \"click.exceptions.UsageError: No such command\"\n+ # click convert exception handles to error message\n+ if catch_exception in traceback.format_exc() and not self.project_groups:\n+ warn = click.style(\n+ \"\\nKedro project not found in this directory. \",\n+ fg=ORANGE,\n+ bold=True,\n+ )\n+ result = (\n+ click.style(\"Project specific commands such as \")\n+ + click.style(\"'run' \", fg=\"cyan\")\n+ + \"or \"\n+ + click.style(\"'jupyter' \", fg=\"cyan\")\n+ + \"are only available within a project directory.\"\n+ )\n+ message = warn + result\n+ hint = (\n+ click.style(\n+ \"\\nHint: Kedro is looking for a file called \", fg=BRIGHT_BLACK\n+ )\n+ + click.style(\"'pyproject.toml\", fg=\"magenta\")\n+ + click.style(\n+ \", is one present in your current working directory?\",\n+ fg=BRIGHT_BLACK,\n+ )\n+ )\n+ click.echo(message)\n+ click.echo(hint)\n sys.exit(exc.code)\n \n @property\n", "issue": "Provide better error message when you try to use a project command from outside a project\n> **Note**. This ticket does not handle the question of what happens if you're in a subdirectory of the project. That is done in #1831 . This ticket is just about what happens if you're outside the project altogether (e.g. the level above the project).\r\n\r\nWhen a kedro project command (e.g. `kedro run`) is run from outside a project then currently you get the very uninformative message `run command is not found`. We should change this so that it gives a much clearer message that you are outside a kedro project and should `cd` into the project before executing the command.\r\n\r\nThis has been started in #1720 @noklam . You could either continue working on that PR or start a new one. 
Relevant modifications to make:\r\n* since we're not interested in iterating upwards through parent directories here, the existing upwards iteration using `find_project_root` is no longer relevant. That leaves open the question of whether and how we might suggest what the correct kedro project directory is. It's definitely a bad idea to iterate _downwards_ through directory structure arbitrarily deep (could take a long time). We could go one directory deep so long as it's not too expensive an operation (maybe need to check there aren't too many folders in the current directory first? Needs some quick time profiling to see). Or just not make any suggestion as to the right `cd` command. \r\n* if we're totally outside a kedro project, a helpful error message is great but:\r\n * is there some better way to check if the command is a project one without overriding `get_command`, e.g. check if it's in `self.project_groups`?\r\n * I don't think this formatting is \"on brand\" yet since we haven't moved the rest of the CLI to rich yet. Let's just do it as `click.secho` for now. Doing rich styling for CLI messages is a separate issue for another time (e.g. we might move to https://github.com/ewels/rich-click)\n", "before_files": [{"content": "\"\"\"``kedro.framework.cli`` implements commands available from Kedro's CLI.\n\"\"\"\n\nfrom .cli import main\nfrom .utils import command_with_verbosity, load_entry_points\n\n__all__ = [\"main\", \"command_with_verbosity\", \"load_entry_points\"]\n", "path": "kedro/framework/cli/__init__.py"}, {"content": "\"\"\"kedro is a CLI for managing Kedro projects.\n\nThis module implements commands available from the kedro CLI.\n\"\"\"\nfrom __future__ import annotations\n\nimport importlib\nimport sys\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Any, Sequence\n\nimport click\n\nfrom kedro import __version__ as version\nfrom kedro.framework.cli.catalog import catalog_cli\nfrom kedro.framework.cli.hooks import get_cli_hook_manager\nfrom kedro.framework.cli.jupyter import jupyter_cli\nfrom kedro.framework.cli.micropkg import micropkg_cli\nfrom kedro.framework.cli.pipeline import pipeline_cli\nfrom kedro.framework.cli.project import project_group\nfrom kedro.framework.cli.registry import registry_cli\nfrom kedro.framework.cli.starters import create_cli\nfrom kedro.framework.cli.utils import (\n CONTEXT_SETTINGS,\n ENTRY_POINT_GROUPS,\n CommandCollection,\n KedroCliError,\n _get_entry_points,\n load_entry_points,\n)\nfrom kedro.framework.project import LOGGING # noqa: F401\nfrom kedro.framework.startup import _is_project, bootstrap_project\n\nLOGO = rf\"\"\"\n _ _\n| | _____ __| |_ __ ___\n| |/ / _ \\/ _` | '__/ _ \\\n| < __/ (_| | | | (_) |\n|_|\\_\\___|\\__,_|_| \\___/\nv{version}\n\"\"\"\n\n\[email protected](context_settings=CONTEXT_SETTINGS, name=\"Kedro\")\[email protected]_option(version, \"--version\", \"-V\", help=\"Show version and exit\")\ndef cli() -> None: # pragma: no cover\n \"\"\"Kedro is a CLI for creating and using Kedro projects. 
For more\n information, type ``kedro info``.\n\n \"\"\"\n pass\n\n\[email protected]()\ndef info() -> None:\n \"\"\"Get more information about kedro.\"\"\"\n click.secho(LOGO, fg=\"green\")\n click.echo(\n \"Kedro is a Python framework for\\n\"\n \"creating reproducible, maintainable\\n\"\n \"and modular data science code.\"\n )\n\n plugin_versions = {}\n plugin_entry_points = defaultdict(set)\n for plugin_entry_point in ENTRY_POINT_GROUPS:\n for entry_point in _get_entry_points(plugin_entry_point):\n module_name = entry_point.module.split(\".\")[0]\n plugin_versions[module_name] = entry_point.dist.version\n plugin_entry_points[module_name].add(plugin_entry_point)\n\n click.echo()\n if plugin_versions:\n click.echo(\"Installed plugins:\")\n for plugin_name, plugin_version in sorted(plugin_versions.items()):\n entrypoints_str = \",\".join(sorted(plugin_entry_points[plugin_name]))\n click.echo(\n f\"{plugin_name}: {plugin_version} (entry points:{entrypoints_str})\"\n )\n else:\n click.echo(\"No plugins installed\")\n\n\ndef _init_plugins() -> None:\n init_hooks = load_entry_points(\"init\")\n for init_hook in init_hooks:\n init_hook()\n\n\nclass KedroCLI(CommandCollection):\n \"\"\"A CommandCollection class to encapsulate the KedroCLI command\n loading.\n \"\"\"\n\n def __init__(self, project_path: Path):\n self._metadata = None # running in package mode\n if _is_project(project_path):\n self._metadata = bootstrap_project(project_path)\n self._cli_hook_manager = get_cli_hook_manager()\n\n super().__init__(\n (\"Global commands\", self.global_groups),\n (\"Project specific commands\", self.project_groups),\n )\n\n def main(\n self,\n args: Any | None = None,\n prog_name: Any | None = None,\n complete_var: Any | None = None,\n standalone_mode: bool = True,\n **extra: Any,\n ) -> Any:\n if self._metadata:\n extra.update(obj=self._metadata)\n\n # This is how click's internals parse sys.argv, which include the command,\n # subcommand, arguments and options. 
click doesn't store this information anywhere\n # so we have to re-do it.\n args = sys.argv[1:] if args is None else list(args)\n self._cli_hook_manager.hook.before_command_run(\n project_metadata=self._metadata, command_args=args\n )\n\n try:\n super().main(\n args=args,\n prog_name=prog_name,\n complete_var=complete_var,\n standalone_mode=standalone_mode,\n **extra,\n )\n # click.core.main() method exits by default, we capture this and then\n # exit as originally intended\n except SystemExit as exc:\n self._cli_hook_manager.hook.after_command_run(\n project_metadata=self._metadata, command_args=args, exit_code=exc.code\n )\n sys.exit(exc.code)\n\n @property\n def global_groups(self) -> Sequence[click.MultiCommand]:\n \"\"\"Property which loads all global command groups from plugins and\n combines them with the built-in ones (eventually overriding the\n built-in ones if they are redefined by plugins).\n \"\"\"\n return [cli, create_cli, *load_entry_points(\"global\")]\n\n @property\n def project_groups(self) -> Sequence[click.MultiCommand]:\n \"\"\"Property which loads all project command groups from the\n project and the plugins, then combines them with the built-in ones.\n Built-in commands can be overridden by plugins, which can be\n overridden by a custom project cli.py.\n See https://kedro.readthedocs.io/en/stable/extend_kedro/common_use_cases.html#use-case-3-how-to-add-or-modify-cli-commands\n on how to add this.\n \"\"\"\n if not self._metadata:\n return []\n\n built_in = [\n catalog_cli,\n jupyter_cli,\n pipeline_cli,\n micropkg_cli,\n project_group,\n registry_cli,\n ]\n\n plugins = load_entry_points(\"project\")\n\n try:\n project_cli = importlib.import_module(f\"{self._metadata.package_name}.cli\")\n # fail gracefully if cli.py does not exist\n except ModuleNotFoundError:\n # return only built-in commands and commands from plugins\n # (plugins can override built-in commands)\n return [*built_in, *plugins]\n\n # fail badly if cli.py exists, but has no `cli` in it\n if not hasattr(project_cli, \"cli\"):\n raise KedroCliError(\n f\"Cannot load commands from {self._metadata.package_name}.cli\"\n )\n user_defined = project_cli.cli\n # return built-in commands, plugin commands and user defined commands\n # (overriding happens as follows built-in < plugins < cli.py)\n return [*built_in, *plugins, user_defined]\n\n\ndef main() -> None: # pragma: no cover\n \"\"\"Main entry point. Look for a ``cli.py``, and, if found, add its\n commands to `kedro`'s before invoking the CLI.\n \"\"\"\n _init_plugins()\n cli_collection = KedroCLI(project_path=Path.cwd())\n cli_collection()\n", "path": "kedro/framework/cli/cli.py"}]} | 3,066 | 713 |
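To make the Kedro record above easier to digest in isolation: the heart of the change is intercepting click's "No such command" failure and, when no project can be detected, printing a hint instead of the bare error. Below is a condensed, standalone sketch of that logic; the function name, the `is_project` flag and the shortened hint text are illustrative stand-ins rather than Kedro's actual API.

```python
import sys
import traceback

import click

ORANGE = (255, 175, 0)          # RGB values as defined in the patch
BRIGHT_BLACK = (128, 128, 128)


def invoke_with_hint(cli_group: click.Group, args, is_project: bool) -> None:
    """Run a click group; on an unknown command outside a Kedro project,
    explain the likely cause before exiting (sketch only)."""
    try:
        cli_group.main(args=args, standalone_mode=True)
    except SystemExit as exc:
        # In standalone mode click has already converted the unknown command
        # into a UsageError and exited, so the failure is spotted via the traceback.
        if "No such command" in traceback.format_exc() and not is_project:
            click.echo(click.style("\nKedro project not found in this directory.", fg=ORANGE, bold=True))
            click.echo(click.style("Hint: Kedro looks for a 'pyproject.toml' in the current working directory.", fg=BRIGHT_BLACK))
        sys.exit(exc.code)
```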
gh_patches_debug_25531 | rasdani/github-patches | git_diff | scikit-hep__awkward-2387 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ak.with_field has an overzealous check for excluding RecordArrays
### Version of Awkward Array
HEAD
### Description and code to reproduce
One should be able to add new fields to a `RecordArray` with no fields:
```python
>>> no_fields = ak.Array([{}, {}, {}, {}, {}])
>>> no_fields["new_field"] = ak.Array([1, 2, 3, 4, 5])
```
but there's an error:
```
ValueError: no tuples or records in array; cannot add a new field
This error occurred while calling
ak.Array.__setitem__(
self = <Array [{}, {}, {}, {}, {}] type='5 * {}'>
field_name = 'new_field'
field_value = <Array [1, 2, 3, 4, 5] type='5 * int64'>
)
```
This happens because
https://github.com/scikit-hep/awkward/blob/be60b5330f7f872fd01b34c44d2686eeafa9bbfc/src/awkward/operations/ak_with_field.py#L86-L87
is protecting the subsequent code from having to check for the case that `base` doesn't even have a `RecordArray`, but `base.fields == []` isn't the way to do it: you can also get that from a `RecordArray` with no fields.
Instead, that guard needs to check for the existence of a `RecordArray` some other way. Perhaps the following?
```python
try:
ak._do.recursively_apply(no_fields.layout, lambda x, **kwargs: None, allow_records=False)
except ValueError:
pass
else:
raise ValueError("no tuples or records in array; cannot add a new field") from None
```
I'm not entirely happy about repurposing an anti-guard for one function as a guard for another. Also, it should complain if any of the branches of a `UnionArray` are not `RecordArrays`.
</issue>
<code>
[start of src/awkward/operations/ak_with_field.py]
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2 __all__ = ("with_field",)
3 import copy
4
5 import awkward as ak
6 from awkward._behavior import behavior_of
7 from awkward._layout import wrap_layout
8 from awkward._nplikes.numpylike import NumpyMetadata
9 from awkward._regularize import is_non_string_like_sequence
10
11 np = NumpyMetadata.instance()
12
13
14 def with_field(array, what, where=None, *, highlevel=True, behavior=None):
15 """
16 Args:
17 array: Array-like data (anything #ak.to_layout recognizes).
18 what: Array-like data (anything #ak.to_layout recognizes) to add as a new field.
19 where (None or str or non-empy sequence of str): If None, the new field
20 has no name (can be accessed as an integer slot number in a
21 string); If str, the name of the new field. If a sequence, it is
22 interpreted as a path where to add the field in a nested record.
23 highlevel (bool): If True, return an #ak.Array; otherwise, return
24 a low-level #ak.contents.Content subclass.
25 behavior (None or dict): Custom #ak.behavior for the output array, if
26 high-level.
27
28 Returns an #ak.Array or #ak.Record (or low-level equivalent, if
29 `highlevel=False`) with a new field attached. This function does not
30 change the array in-place.
31
32 See #ak.Array.__setitem__ and #ak.Record.__setitem__ for a variant that
33 changes the high-level object in-place. (These methods internally use
34 #ak.with_field, so performance is not a factor in choosing one over the
35 other.)
36 """
37 with ak._errors.OperationErrorContext(
38 "ak.with_field",
39 {
40 "array": array,
41 "what": what,
42 "where": where,
43 "highlevel": highlevel,
44 "behavior": behavior,
45 },
46 ):
47 return _impl(array, what, where, highlevel, behavior)
48
49
50 def _impl(base, what, where, highlevel, behavior):
51 if not (
52 where is None
53 or isinstance(where, str)
54 or (
55 is_non_string_like_sequence(where)
56 and all(isinstance(x, str) for x in where)
57 )
58 ):
59 raise TypeError(
60 "New fields may only be assigned by field name(s) "
61 "or as a new integer slot by passing None for 'where'"
62 )
63
64 if is_non_string_like_sequence(where) and len(where) > 1:
65 return _impl(
66 base,
67 _impl(
68 base[where[0]],
69 what,
70 where[1:],
71 highlevel,
72 behavior,
73 ),
74 where[0],
75 highlevel,
76 behavior,
77 )
78 else:
79 # If we have an iterable here, pull out the only ti
80 if is_non_string_like_sequence(where):
81 where = where[0]
82
83 behavior = behavior_of(base, what, behavior=behavior)
84 base = ak.operations.to_layout(base, allow_record=True, allow_other=False)
85
86 if len(base.fields) == 0:
87 raise ValueError("no tuples or records in array; cannot add a new field")
88
89 what = ak.operations.to_layout(what, allow_record=True, allow_other=True)
90
91 keys = copy.copy(base.fields)
92 if where in base.fields:
93 keys.remove(where)
94
95 def action(inputs, **kwargs):
96 base, what = inputs
97 backend = base.backend
98
99 if isinstance(base, ak.contents.RecordArray):
100 if what is None:
101 what = ak.contents.IndexedOptionArray(
102 ak.index.Index64(
103 backend.index_nplike.full(len(base), -1, dtype=np.int64),
104 nplike=backend.index_nplike,
105 ),
106 ak.contents.EmptyArray(),
107 )
108 elif not isinstance(what, ak.contents.Content):
109 what = ak.contents.NumpyArray(
110 backend.nplike.repeat(what, len(base))
111 )
112 if base.is_tuple:
113 # Preserve tuple-ness
114 if where is None:
115 fields = None
116 # Otherwise the tuple becomes a record
117 else:
118 fields = [*keys, where]
119 # Records with `where=None` will create a tuple-like key
120 elif where is None:
121 fields = [*keys, str(len(keys))]
122 else:
123 fields = [*keys, where]
124 out = ak.contents.RecordArray(
125 [base[k] for k in keys] + [what],
126 fields,
127 parameters=base.parameters,
128 )
129 return (out,)
130 else:
131 return None
132
133 out = ak._broadcasting.broadcast_and_apply(
134 [base, what],
135 action,
136 behavior,
137 right_broadcast=False,
138 )
139
140 assert isinstance(out, tuple) and len(out) == 1
141
142 return wrap_layout(out[0], behavior, highlevel)
143
[end of src/awkward/operations/ak_with_field.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/awkward/operations/ak_with_field.py b/src/awkward/operations/ak_with_field.py
--- a/src/awkward/operations/ak_with_field.py
+++ b/src/awkward/operations/ak_with_field.py
@@ -82,16 +82,33 @@
behavior = behavior_of(base, what, behavior=behavior)
base = ak.operations.to_layout(base, allow_record=True, allow_other=False)
-
- if len(base.fields) == 0:
- raise ValueError("no tuples or records in array; cannot add a new field")
-
what = ak.operations.to_layout(what, allow_record=True, allow_other=True)
keys = copy.copy(base.fields)
if where in base.fields:
keys.remove(where)
+ def purelist_is_record(layout):
+ result = False
+
+ def action_is_record(input, **kwargs):
+ nonlocal result
+
+ if input.is_record:
+ result = True
+ return input
+ elif input.is_union:
+ result = all(purelist_is_record(x) for x in input.contents)
+ return input
+ else:
+ return None
+
+ ak._do.recursively_apply(layout, action_is_record, return_array=False)
+ return result
+
+ if not purelist_is_record(base):
+ raise ValueError("no tuples or records in array; cannot add a new field")
+
def action(inputs, **kwargs):
base, what = inputs
backend = base.backend
| {"golden_diff": "diff --git a/src/awkward/operations/ak_with_field.py b/src/awkward/operations/ak_with_field.py\n--- a/src/awkward/operations/ak_with_field.py\n+++ b/src/awkward/operations/ak_with_field.py\n@@ -82,16 +82,33 @@\n \n behavior = behavior_of(base, what, behavior=behavior)\n base = ak.operations.to_layout(base, allow_record=True, allow_other=False)\n-\n- if len(base.fields) == 0:\n- raise ValueError(\"no tuples or records in array; cannot add a new field\")\n-\n what = ak.operations.to_layout(what, allow_record=True, allow_other=True)\n \n keys = copy.copy(base.fields)\n if where in base.fields:\n keys.remove(where)\n \n+ def purelist_is_record(layout):\n+ result = False\n+\n+ def action_is_record(input, **kwargs):\n+ nonlocal result\n+\n+ if input.is_record:\n+ result = True\n+ return input\n+ elif input.is_union:\n+ result = all(purelist_is_record(x) for x in input.contents)\n+ return input\n+ else:\n+ return None\n+\n+ ak._do.recursively_apply(layout, action_is_record, return_array=False)\n+ return result\n+\n+ if not purelist_is_record(base):\n+ raise ValueError(\"no tuples or records in array; cannot add a new field\")\n+\n def action(inputs, **kwargs):\n base, what = inputs\n backend = base.backend\n", "issue": "ak.with_field is has an overzealous check for excluding RecordArrays\n### Version of Awkward Array\n\nHEAD\n\n### Description and code to reproduce\n\nOne should be able to add new fields to a `RecordArray` with no fields:\r\n\r\n```python\r\n>>> no_fields = ak.Array([{}, {}, {}, {}, {}])\r\n>>> no_fields[\"new_field\"] = ak.Array([1, 2, 3, 4, 5])\r\n```\r\n\r\nbut there's an error:\r\n\r\n```\r\nValueError: no tuples or records in array; cannot add a new field\r\n\r\nThis error occurred while calling\r\n\r\n ak.Array.__setitem__(\r\n self = <Array [{}, {}, {}, {}, {}] type='5 * {}'>\r\n field_name = 'new_field'\r\n field_value = <Array [1, 2, 3, 4, 5] type='5 * int64'>\r\n )\r\n```\r\n\r\nThis happens because\r\n\r\nhttps://github.com/scikit-hep/awkward/blob/be60b5330f7f872fd01b34c44d2686eeafa9bbfc/src/awkward/operations/ak_with_field.py#L86-L87\r\n\r\nis protecting the subsequent code from having to check for the case that `base` doesn't even have a `RecordArray`, but `base.fields == []` isn't the way to do it: you can also get that from a `RecordArray` with no fields.\r\n\r\nInstead, that guard needs to check for the existence of a `RecordArray` some other way. Perhaps the following?\r\n\r\n```python\r\ntry:\r\n ak._do.recursively_apply(no_fields.layout, lambda x, **kwargs: None, allow_records=False)\r\nexcept ValueError:\r\n pass\r\nelse:\r\n raise ValueError(\"no tuples or records in array; cannot add a new field\") from None\r\n```\r\n\r\nI'm not entirely happy about repurposing an anti-guard for one function as a guard for another. 
Also, it should complain if any of the branches of a `UnionArray` are not `RecordArrays`.\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n__all__ = (\"with_field\",)\nimport copy\n\nimport awkward as ak\nfrom awkward._behavior import behavior_of\nfrom awkward._layout import wrap_layout\nfrom awkward._nplikes.numpylike import NumpyMetadata\nfrom awkward._regularize import is_non_string_like_sequence\n\nnp = NumpyMetadata.instance()\n\n\ndef with_field(array, what, where=None, *, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Array-like data (anything #ak.to_layout recognizes).\n what: Array-like data (anything #ak.to_layout recognizes) to add as a new field.\n where (None or str or non-empy sequence of str): If None, the new field\n has no name (can be accessed as an integer slot number in a\n string); If str, the name of the new field. If a sequence, it is\n interpreted as a path where to add the field in a nested record.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Returns an #ak.Array or #ak.Record (or low-level equivalent, if\n `highlevel=False`) with a new field attached. This function does not\n change the array in-place.\n\n See #ak.Array.__setitem__ and #ak.Record.__setitem__ for a variant that\n changes the high-level object in-place. (These methods internally use\n #ak.with_field, so performance is not a factor in choosing one over the\n other.)\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.with_field\",\n {\n \"array\": array,\n \"what\": what,\n \"where\": where,\n \"highlevel\": highlevel,\n \"behavior\": behavior,\n },\n ):\n return _impl(array, what, where, highlevel, behavior)\n\n\ndef _impl(base, what, where, highlevel, behavior):\n if not (\n where is None\n or isinstance(where, str)\n or (\n is_non_string_like_sequence(where)\n and all(isinstance(x, str) for x in where)\n )\n ):\n raise TypeError(\n \"New fields may only be assigned by field name(s) \"\n \"or as a new integer slot by passing None for 'where'\"\n )\n\n if is_non_string_like_sequence(where) and len(where) > 1:\n return _impl(\n base,\n _impl(\n base[where[0]],\n what,\n where[1:],\n highlevel,\n behavior,\n ),\n where[0],\n highlevel,\n behavior,\n )\n else:\n # If we have an iterable here, pull out the only ti\n if is_non_string_like_sequence(where):\n where = where[0]\n\n behavior = behavior_of(base, what, behavior=behavior)\n base = ak.operations.to_layout(base, allow_record=True, allow_other=False)\n\n if len(base.fields) == 0:\n raise ValueError(\"no tuples or records in array; cannot add a new field\")\n\n what = ak.operations.to_layout(what, allow_record=True, allow_other=True)\n\n keys = copy.copy(base.fields)\n if where in base.fields:\n keys.remove(where)\n\n def action(inputs, **kwargs):\n base, what = inputs\n backend = base.backend\n\n if isinstance(base, ak.contents.RecordArray):\n if what is None:\n what = ak.contents.IndexedOptionArray(\n ak.index.Index64(\n backend.index_nplike.full(len(base), -1, dtype=np.int64),\n nplike=backend.index_nplike,\n ),\n ak.contents.EmptyArray(),\n )\n elif not isinstance(what, ak.contents.Content):\n what = ak.contents.NumpyArray(\n backend.nplike.repeat(what, len(base))\n )\n if base.is_tuple:\n # Preserve tuple-ness\n if where is None:\n fields = None\n # Otherwise the tuple becomes a record\n else:\n 
fields = [*keys, where]\n # Records with `where=None` will create a tuple-like key\n elif where is None:\n fields = [*keys, str(len(keys))]\n else:\n fields = [*keys, where]\n out = ak.contents.RecordArray(\n [base[k] for k in keys] + [what],\n fields,\n parameters=base.parameters,\n )\n return (out,)\n else:\n return None\n\n out = ak._broadcasting.broadcast_and_apply(\n [base, what],\n action,\n behavior,\n right_broadcast=False,\n )\n\n assert isinstance(out, tuple) and len(out) == 1\n\n return wrap_layout(out[0], behavior, highlevel)\n", "path": "src/awkward/operations/ak_with_field.py"}]} | 2,377 | 338 |
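The accepted fix above boils down to a recursive test: does every branch of the layout, including every branch of any UnionArray, lead to a RecordArray? The sketch below restates that idea on its own. The helper name `layout_contains_records` is invented for illustration, and like the diff it relies on the awkward-internal `ak._do.recursively_apply`, so treat it as a reading aid rather than supported public API.

```python
import awkward as ak


def layout_contains_records(layout) -> bool:
    """Sketch: True if records are reachable through every branch of `layout`."""
    found = False

    def action(node, **kwargs):
        nonlocal found
        if node.is_record:
            found = True
            return node                     # stop descending here
        if node.is_union:
            # every union branch must itself contain records
            found = all(layout_contains_records(c) for c in node.contents)
            return node
        return None                         # keep descending

    ak._do.recursively_apply(layout, action, return_array=False)
    return found


no_fields = ak.Array([{}, {}, {}, {}, {}])
assert layout_contains_records(no_fields.layout)     # empty records still count
no_fields["new_field"] = ak.Array([1, 2, 3, 4, 5])   # succeeds once the patched guard is in place
```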
gh_patches_debug_33253 | rasdani/github-patches | git_diff | angr__angr-3184 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
An error occurred while analyzing the CFG of the mips program.
An error occurs when I use angr to analyze the CFG of the mips program (net-cgi).
The error message is shown below.
> File "/home/user/PycharmProjects/pythonProject/venv/lib/python3.8/site-packages/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py", line 106, in _resolve
gp_used = self._is_gp_used_on_slice(project, b)
File "/home/user/PycharmProjects/pythonProject/venv/lib/python3.8/site-packages/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py", line 183, in _is_gp_used_on_slice
stmt = block.vex.statements[block_stmt_idx]
IndexError: list index out of range
The code is shown below.
```
import angr
proj = angr.Project("net-cgi", auto_load_libs=False)
cfg = proj.analyses.CFG()
```
The net-cgi can be found in [net-cgi.zip](https://github.com/angr/angr/files/8160732/net-cgi.zip)
</issue>
<code>
[start of angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py]
1 from typing import Dict, TYPE_CHECKING
2 import logging
3
4 import pyvex
5 import archinfo
6
7
8 from .... import options, BP_BEFORE
9 from ....blade import Blade
10 from ....annocfg import AnnotatedCFG
11 from ....exploration_techniques import Slicecutor
12
13 from .resolver import IndirectJumpResolver
14
15 if TYPE_CHECKING:
16 from angr.block import Block
17
18
19 l = logging.getLogger(name=__name__)
20
21
22 class OverwriteTmpValueCallback:
23 """
24 Overwrites temporary values during resolution
25 """
26 def __init__(self, gp_value):
27 self.gp_value = gp_value
28
29 def overwrite_tmp_value(self, state):
30 state.inspect.tmp_write_expr = state.solver.BVV(self.gp_value, state.arch.bits)
31
32
33 class MipsElfFastResolver(IndirectJumpResolver):
34 """
35 Indirect Jump Resolver for MIPs
36 """
37 def __init__(self, project):
38 super().__init__(project, timeless=True)
39
40 def filter(self, cfg, addr, func_addr, block, jumpkind):
41 if not isinstance(self.project.arch, (archinfo.ArchMIPS32, archinfo.ArchMIPS64, )):
42 return False
43 return True
44
45 def resolve(self, cfg, addr, func_addr, block, jumpkind):
46 """
47 Wrapper for _resolve that slowly increments the max_depth used by Blade for finding sources
48 until we can resolve the addr or we reach the default max_depth
49
50 :param cfg: A CFG instance.
51 :param int addr: IRSB address.
52 :param int func_addr: The function address.
53 :param pyvex.IRSB block: The IRSB.
54 :param str jumpkind: The jumpkind.
55 :return: If it was resolved and targets alongside it
56 :rtype: tuple
57 """
58 for max_level in range(2, 4):
59 resolved, resolved_targets = self._resolve(cfg, addr, func_addr, block, jumpkind, max_level=max_level)
60 if resolved:
61 return resolved, resolved_targets
62 return False, []
63
64 def _resolve(self, cfg, addr, func_addr, block, jumpkind, max_level):
65 """
66 Resolves the indirect jump in MIPS ELF binaries where all external function calls are indexed using gp.
67
68 :param cfg: A CFG instance.
69 :param int addr: IRSB address.
70 :param int func_addr: The function address.
71 :param pyvex.IRSB block: The IRSB.
72 :param str jumpkind: The jumpkind.
73 :param int max_level: maximum level for Blade to resolve when looking for sources
74 :return: If it was resolved and targets alongside it
75 :rtype: tuple
76 """
77
78 project = self.project
79
80 b = Blade(cfg.graph, addr, -1, cfg=cfg, project=project, ignore_sp=True, ignore_bp=True,
81 ignored_regs=('gp',), cross_insn_opt=False, stop_at_calls=True, max_level=max_level
82 )
83
84 sources = [n for n in b.slice.nodes() if b.slice.in_degree(n) == 0]
85 if not sources:
86 return False, []
87
88 source = sources[0]
89 source_addr = source[0]
90 annotated_cfg = AnnotatedCFG(project, None, detect_loops=False)
91 annotated_cfg.from_digraph(b.slice)
92
93 state = project.factory.blank_state(addr=source_addr, mode="fastpath",
94 remove_options=options.refs,
95 # suppress unconstrained stack reads for `gp`
96 add_options={
97 options.SYMBOL_FILL_UNCONSTRAINED_REGISTERS,
98 options.SYMBOL_FILL_UNCONSTRAINED_MEMORY,
99 options.NO_CROSS_INSN_OPT,
100 },
101 )
102 state.regs._t9 = func_addr
103 func = cfg.kb.functions.function(addr=func_addr)
104
105 # see if gp is used on this slice at all
106 gp_used = self._is_gp_used_on_slice(project, b)
107
108 gp_value = None
109 if gp_used:
110 if 'gp' not in func.info:
111 # this might a special case: gp is only used once in this function, and it can be initialized right
112 # before its use site.
113 # however, it should have been determined in CFGFast
114 # cannot determine the value of gp. quit
115 pass
116 else:
117 gp_value = func.info['gp']
118
119 if gp_value is None:
120 l.warning('Failed to determine value of register gp for function %#x.', func.addr)
121 return False, []
122
123 # Special handling for cases where `gp` is stored on the stack
124 gp_offset = project.arch.registers['gp'][0]
125 self._set_gp_load_callback(state, b, project, gp_offset, gp_value)
126 state.regs._gp = gp_value
127
128 simgr = self.project.factory.simulation_manager(state)
129 simgr.use_technique(Slicecutor(annotated_cfg, force_sat=True))
130 simgr.run()
131
132 if simgr.cut:
133 # pick the successor that is cut right after executing `addr`
134 try:
135 target_state = next(iter(cut for cut in simgr.cut if cut.history.addr == addr))
136 except StopIteration:
137 l.info("Indirect jump at %#x cannot be resolved by %s.", addr, repr(self))
138 return False, [ ]
139 target = target_state.addr
140
141 if self._is_target_valid(cfg, target) and target != func_addr:
142 l.debug("Indirect jump at %#x is resolved to target %#x.", addr, target)
143 return True, [ target ]
144
145 l.info("Indirect jump at %#x is resolved to target %#x, which seems to be invalid.", addr, target)
146 return False, [ ]
147
148 l.info("Indirect jump at %#x cannot be resolved by %s.", addr, repr(self))
149 return False, [ ]
150
151 @staticmethod
152 def _set_gp_load_callback(state, blade, project, gp_offset, gp_value):
153 tmps = {}
154 for block_addr_in_slice in set(slice_node[0] for slice_node in blade.slice.nodes()):
155 for stmt in project.factory.block(block_addr_in_slice, cross_insn_opt=False).vex.statements:
156 if isinstance(stmt, pyvex.IRStmt.WrTmp) and isinstance(stmt.data, pyvex.IRExpr.Load):
157 # Load from memory to a tmp - assuming it's loading from the stack
158 tmps[stmt.tmp] = 'stack'
159 elif isinstance(stmt, pyvex.IRStmt.Put) and stmt.offset == gp_offset:
160 if isinstance(stmt.data, pyvex.IRExpr.RdTmp):
161 tmp_offset = stmt.data.tmp # pylint:disable=cell-var-from-loop
162 if tmps.get(tmp_offset, None) == 'stack':
163 # found the load from stack
164 # we must make sure value of that temporary variable equals to the correct gp value
165 state.inspect.make_breakpoint('tmp_write', when=BP_BEFORE,
166 condition=lambda s, bbl_addr_=block_addr_in_slice,
167 tmp_offset_=tmp_offset:
168 s.scratch.bbl_addr == bbl_addr_ and s.inspect.tmp_write_num == tmp_offset_,
169 action=OverwriteTmpValueCallback(
170 gp_value).overwrite_tmp_value
171 )
172 break
173
174 @staticmethod
175 def _is_gp_used_on_slice(project, b: Blade) -> bool:
176 gp_offset = project.arch.registers['gp'][0]
177 blocks_on_slice: Dict[int, 'Block'] = { }
178 for block_addr, block_stmt_idx in b.slice.nodes():
179 if block_addr not in blocks_on_slice:
180 blocks_on_slice[block_addr] = project.factory.block(block_addr, cross_insn_opt=False)
181 block = blocks_on_slice[block_addr]
182 stmt = block.vex.statements[block_stmt_idx]
183 if isinstance(stmt, pyvex.IRStmt.WrTmp) \
184 and isinstance(stmt.data, pyvex.IRExpr.Get) \
185 and stmt.data.offset == gp_offset:
186 gp_used = True
187 break
188 else:
189 gp_used = False
190
191 return gp_used
192
[end of angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py b/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py
--- a/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py
+++ b/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py
@@ -9,7 +9,7 @@
from ....blade import Blade
from ....annocfg import AnnotatedCFG
from ....exploration_techniques import Slicecutor
-
+from ....utils.constants import DEFAULT_STATEMENT
from .resolver import IndirectJumpResolver
if TYPE_CHECKING:
@@ -61,7 +61,7 @@
return resolved, resolved_targets
return False, []
- def _resolve(self, cfg, addr, func_addr, block, jumpkind, max_level):
+ def _resolve(self, cfg, addr, func_addr, block, jumpkind, max_level): # pylint:disable=unused-argument
"""
Resolves the indirect jump in MIPS ELF binaries where all external function calls are indexed using gp.
@@ -179,12 +179,17 @@
if block_addr not in blocks_on_slice:
blocks_on_slice[block_addr] = project.factory.block(block_addr, cross_insn_opt=False)
block = blocks_on_slice[block_addr]
- stmt = block.vex.statements[block_stmt_idx]
- if isinstance(stmt, pyvex.IRStmt.WrTmp) \
- and isinstance(stmt.data, pyvex.IRExpr.Get) \
- and stmt.data.offset == gp_offset:
- gp_used = True
- break
+ if block_stmt_idx == DEFAULT_STATEMENT:
+ if isinstance(block.vex.next, pyvex.IRExpr.Get) and block.vex.next.offset == gp_offset:
+ gp_used = True
+ break
+ else:
+ stmt = block.vex.statements[block_stmt_idx]
+ if isinstance(stmt, pyvex.IRStmt.WrTmp) \
+ and isinstance(stmt.data, pyvex.IRExpr.Get) \
+ and stmt.data.offset == gp_offset:
+ gp_used = True
+ break
else:
gp_used = False
| {"golden_diff": "diff --git a/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py b/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py\n--- a/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py\n+++ b/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py\n@@ -9,7 +9,7 @@\n from ....blade import Blade\n from ....annocfg import AnnotatedCFG\n from ....exploration_techniques import Slicecutor\n-\n+from ....utils.constants import DEFAULT_STATEMENT\n from .resolver import IndirectJumpResolver\n \n if TYPE_CHECKING:\n@@ -61,7 +61,7 @@\n return resolved, resolved_targets\n return False, []\n \n- def _resolve(self, cfg, addr, func_addr, block, jumpkind, max_level):\n+ def _resolve(self, cfg, addr, func_addr, block, jumpkind, max_level): # pylint:disable=unused-argument\n \"\"\"\n Resolves the indirect jump in MIPS ELF binaries where all external function calls are indexed using gp.\n \n@@ -179,12 +179,17 @@\n if block_addr not in blocks_on_slice:\n blocks_on_slice[block_addr] = project.factory.block(block_addr, cross_insn_opt=False)\n block = blocks_on_slice[block_addr]\n- stmt = block.vex.statements[block_stmt_idx]\n- if isinstance(stmt, pyvex.IRStmt.WrTmp) \\\n- and isinstance(stmt.data, pyvex.IRExpr.Get) \\\n- and stmt.data.offset == gp_offset:\n- gp_used = True\n- break\n+ if block_stmt_idx == DEFAULT_STATEMENT:\n+ if isinstance(block.vex.next, pyvex.IRExpr.Get) and block.vex.next.offset == gp_offset:\n+ gp_used = True\n+ break\n+ else:\n+ stmt = block.vex.statements[block_stmt_idx]\n+ if isinstance(stmt, pyvex.IRStmt.WrTmp) \\\n+ and isinstance(stmt.data, pyvex.IRExpr.Get) \\\n+ and stmt.data.offset == gp_offset:\n+ gp_used = True\n+ break\n else:\n gp_used = False\n", "issue": "An error occurred while analysis the CFG of the mips program.\nAn error occurs when I use angr to analyze the CFG of the mips program (net-cgi). \r\nThe error message is shown below.\r\n\r\n> File \"/home/user/PycharmProjects/pythonProject/venv/lib/python3.8/site-packages/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py\", line 106, in _resolve\r\n gp_used = self._is_gp_used_on_slice(project, b)\r\n File \"/home/user/PycharmProjects/pythonProject/venv/lib/python3.8/site-packages/angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py\", line 183, in _is_gp_used_on_slice\r\n stmt = block.vex.statements[block_stmt_idx]\r\nIndexError: list index out of range\r\n\r\nThe code is shown below.\r\n```\r\nimport angr\r\nproj = angr.Project(net-cgi, auto_load_libs=False)\r\ncfg = proj.analyses.CFG()\r\n```\r\n\r\nThe net-cgi can be found in [net-cgi.zip](https://github.com/angr/angr/files/8160732/net-cgi.zip)\r\n\r\n\r\n\n", "before_files": [{"content": "from typing import Dict, TYPE_CHECKING\nimport logging\n\nimport pyvex\nimport archinfo\n\n\nfrom .... 
import options, BP_BEFORE\nfrom ....blade import Blade\nfrom ....annocfg import AnnotatedCFG\nfrom ....exploration_techniques import Slicecutor\n\nfrom .resolver import IndirectJumpResolver\n\nif TYPE_CHECKING:\n from angr.block import Block\n\n\nl = logging.getLogger(name=__name__)\n\n\nclass OverwriteTmpValueCallback:\n \"\"\"\n Overwrites temporary values during resolution\n \"\"\"\n def __init__(self, gp_value):\n self.gp_value = gp_value\n\n def overwrite_tmp_value(self, state):\n state.inspect.tmp_write_expr = state.solver.BVV(self.gp_value, state.arch.bits)\n\n\nclass MipsElfFastResolver(IndirectJumpResolver):\n \"\"\"\n Indirect Jump Resolver for MIPs\n \"\"\"\n def __init__(self, project):\n super().__init__(project, timeless=True)\n\n def filter(self, cfg, addr, func_addr, block, jumpkind):\n if not isinstance(self.project.arch, (archinfo.ArchMIPS32, archinfo.ArchMIPS64, )):\n return False\n return True\n\n def resolve(self, cfg, addr, func_addr, block, jumpkind):\n \"\"\"\n Wrapper for _resolve that slowly increments the max_depth used by Blade for finding sources\n until we can resolve the addr or we reach the default max_depth\n\n :param cfg: A CFG instance.\n :param int addr: IRSB address.\n :param int func_addr: The function address.\n :param pyvex.IRSB block: The IRSB.\n :param str jumpkind: The jumpkind.\n :return: If it was resolved and targets alongside it\n :rtype: tuple\n \"\"\"\n for max_level in range(2, 4):\n resolved, resolved_targets = self._resolve(cfg, addr, func_addr, block, jumpkind, max_level=max_level)\n if resolved:\n return resolved, resolved_targets\n return False, []\n\n def _resolve(self, cfg, addr, func_addr, block, jumpkind, max_level):\n \"\"\"\n Resolves the indirect jump in MIPS ELF binaries where all external function calls are indexed using gp.\n\n :param cfg: A CFG instance.\n :param int addr: IRSB address.\n :param int func_addr: The function address.\n :param pyvex.IRSB block: The IRSB.\n :param str jumpkind: The jumpkind.\n :param int max_level: maximum level for Blade to resolve when looking for sources\n :return: If it was resolved and targets alongside it\n :rtype: tuple\n \"\"\"\n\n project = self.project\n\n b = Blade(cfg.graph, addr, -1, cfg=cfg, project=project, ignore_sp=True, ignore_bp=True,\n ignored_regs=('gp',), cross_insn_opt=False, stop_at_calls=True, max_level=max_level\n )\n\n sources = [n for n in b.slice.nodes() if b.slice.in_degree(n) == 0]\n if not sources:\n return False, []\n\n source = sources[0]\n source_addr = source[0]\n annotated_cfg = AnnotatedCFG(project, None, detect_loops=False)\n annotated_cfg.from_digraph(b.slice)\n\n state = project.factory.blank_state(addr=source_addr, mode=\"fastpath\",\n remove_options=options.refs,\n # suppress unconstrained stack reads for `gp`\n add_options={\n options.SYMBOL_FILL_UNCONSTRAINED_REGISTERS,\n options.SYMBOL_FILL_UNCONSTRAINED_MEMORY,\n options.NO_CROSS_INSN_OPT,\n },\n )\n state.regs._t9 = func_addr\n func = cfg.kb.functions.function(addr=func_addr)\n\n # see if gp is used on this slice at all\n gp_used = self._is_gp_used_on_slice(project, b)\n\n gp_value = None\n if gp_used:\n if 'gp' not in func.info:\n # this might a special case: gp is only used once in this function, and it can be initialized right\n # before its use site.\n # however, it should have been determined in CFGFast\n # cannot determine the value of gp. 
quit\n pass\n else:\n gp_value = func.info['gp']\n\n if gp_value is None:\n l.warning('Failed to determine value of register gp for function %#x.', func.addr)\n return False, []\n\n # Special handling for cases where `gp` is stored on the stack\n gp_offset = project.arch.registers['gp'][0]\n self._set_gp_load_callback(state, b, project, gp_offset, gp_value)\n state.regs._gp = gp_value\n\n simgr = self.project.factory.simulation_manager(state)\n simgr.use_technique(Slicecutor(annotated_cfg, force_sat=True))\n simgr.run()\n\n if simgr.cut:\n # pick the successor that is cut right after executing `addr`\n try:\n target_state = next(iter(cut for cut in simgr.cut if cut.history.addr == addr))\n except StopIteration:\n l.info(\"Indirect jump at %#x cannot be resolved by %s.\", addr, repr(self))\n return False, [ ]\n target = target_state.addr\n\n if self._is_target_valid(cfg, target) and target != func_addr:\n l.debug(\"Indirect jump at %#x is resolved to target %#x.\", addr, target)\n return True, [ target ]\n\n l.info(\"Indirect jump at %#x is resolved to target %#x, which seems to be invalid.\", addr, target)\n return False, [ ]\n\n l.info(\"Indirect jump at %#x cannot be resolved by %s.\", addr, repr(self))\n return False, [ ]\n\n @staticmethod\n def _set_gp_load_callback(state, blade, project, gp_offset, gp_value):\n tmps = {}\n for block_addr_in_slice in set(slice_node[0] for slice_node in blade.slice.nodes()):\n for stmt in project.factory.block(block_addr_in_slice, cross_insn_opt=False).vex.statements:\n if isinstance(stmt, pyvex.IRStmt.WrTmp) and isinstance(stmt.data, pyvex.IRExpr.Load):\n # Load from memory to a tmp - assuming it's loading from the stack\n tmps[stmt.tmp] = 'stack'\n elif isinstance(stmt, pyvex.IRStmt.Put) and stmt.offset == gp_offset:\n if isinstance(stmt.data, pyvex.IRExpr.RdTmp):\n tmp_offset = stmt.data.tmp # pylint:disable=cell-var-from-loop\n if tmps.get(tmp_offset, None) == 'stack':\n # found the load from stack\n # we must make sure value of that temporary variable equals to the correct gp value\n state.inspect.make_breakpoint('tmp_write', when=BP_BEFORE,\n condition=lambda s, bbl_addr_=block_addr_in_slice,\n tmp_offset_=tmp_offset:\n s.scratch.bbl_addr == bbl_addr_ and s.inspect.tmp_write_num == tmp_offset_,\n action=OverwriteTmpValueCallback(\n gp_value).overwrite_tmp_value\n )\n break\n\n @staticmethod\n def _is_gp_used_on_slice(project, b: Blade) -> bool:\n gp_offset = project.arch.registers['gp'][0]\n blocks_on_slice: Dict[int, 'Block'] = { }\n for block_addr, block_stmt_idx in b.slice.nodes():\n if block_addr not in blocks_on_slice:\n blocks_on_slice[block_addr] = project.factory.block(block_addr, cross_insn_opt=False)\n block = blocks_on_slice[block_addr]\n stmt = block.vex.statements[block_stmt_idx]\n if isinstance(stmt, pyvex.IRStmt.WrTmp) \\\n and isinstance(stmt.data, pyvex.IRExpr.Get) \\\n and stmt.data.offset == gp_offset:\n gp_used = True\n break\n else:\n gp_used = False\n\n return gp_used\n", "path": "angr/analyses/cfg/indirect_jump_resolvers/mips_elf_fast.py"}]} | 3,051 | 510 |
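A brief note on the root cause in the angr record above: a Blade slice node is a `(block_addr, stmt_idx)` pair, and `stmt_idx` may be the sentinel `DEFAULT_STATEMENT`, which refers to the block's default exit (the IRSB's `next` expression) rather than an entry of `statements`; indexing the statement list with that sentinel produces the reported `IndexError`. The sketch below restates the patched per-node check as a standalone function; the function name and argument list are illustrative only.

```python
import pyvex
from angr.utils.constants import DEFAULT_STATEMENT


def node_reads_gp(project, block_addr: int, stmt_idx: int, gp_offset: int) -> bool:
    """Sketch of the patched check for a single slice node."""
    block = project.factory.block(block_addr, cross_insn_opt=False)
    if stmt_idx == DEFAULT_STATEMENT:
        # The node refers to the block's default exit, not a statement.
        nxt = block.vex.next
        return isinstance(nxt, pyvex.IRExpr.Get) and nxt.offset == gp_offset
    stmt = block.vex.statements[stmt_idx]
    return (
        isinstance(stmt, pyvex.IRStmt.WrTmp)
        and isinstance(stmt.data, pyvex.IRExpr.Get)
        and stmt.data.offset == gp_offset
    )
```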
gh_patches_debug_40195 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1183 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
use strategy 0 for user-provided gradients in minuit
# Description
Since we have an exact gradient, we can disable the checks Minuit does.
cc @alexander-held
</issue>
<code>
[start of src/pyhf/optimize/opt_minuit.py]
1 """Minuit Optimizer Class."""
2 from .. import default_backend, exceptions
3 from .mixins import OptimizerMixin
4 import scipy
5 import iminuit
6
7
8 class minuit_optimizer(OptimizerMixin):
9 """
10 Optimizer that uses iminuit.Minuit.migrad.
11 """
12
13 __slots__ = ['name', 'errordef', 'steps']
14
15 def __init__(self, *args, **kwargs):
16 """
17 Create MINUIT Optimizer.
18
19 .. note::
20
21 ``errordef`` should be 1.0 for a least-squares cost function and 0.5
22 for negative log-likelihood function. See page 37 of
23 http://hep.fi.infn.it/minuit.pdf. This parameter is sometimes
24 called ``UP`` in the ``MINUIT`` docs.
25
26
27 Args:
28 errordef (:obj:`float`): See minuit docs. Default is 1.0.
29 steps (:obj:`int`): Number of steps for the bounds. Default is 1000.
30 """
31 self.name = 'minuit'
32 self.errordef = kwargs.pop('errordef', 1)
33 self.steps = kwargs.pop('steps', 1000)
34 super().__init__(*args, **kwargs)
35
36 def _get_minimizer(
37 self, objective_and_grad, init_pars, init_bounds, fixed_vals=None, do_grad=False
38 ):
39
40 step_sizes = [(b[1] - b[0]) / float(self.steps) for b in init_bounds]
41 fixed_vals = fixed_vals or []
42 # Minuit wants True/False for each parameter
43 fixed_bools = [False] * len(init_pars)
44 for index, val in fixed_vals:
45 fixed_bools[index] = True
46 init_pars[index] = val
47 step_sizes[index] = 0.0
48
49 # Minuit requires jac=callable
50 if do_grad:
51 wrapped_objective = lambda pars: objective_and_grad(pars)[0]
52 jac = lambda pars: objective_and_grad(pars)[1]
53 else:
54 wrapped_objective = objective_and_grad
55 jac = None
56
57 kwargs = dict(
58 fcn=wrapped_objective,
59 grad=jac,
60 start=init_pars,
61 error=step_sizes,
62 limit=init_bounds,
63 fix=fixed_bools,
64 print_level=self.verbose,
65 errordef=self.errordef,
66 )
67 return iminuit.Minuit.from_array_func(**kwargs)
68
69 def _minimize(
70 self,
71 minimizer,
72 func,
73 x0,
74 do_grad=False,
75 bounds=None,
76 fixed_vals=None,
77 return_uncertainties=False,
78 options={},
79 ):
80
81 """
82 Same signature as :func:`scipy.optimize.minimize`.
83
84 Note: an additional `minuit` is injected into the fitresult to get the
85 underlying minimizer.
86
87 Minimizer Options:
88 maxiter (:obj:`int`): maximum number of iterations. Default is 100000.
89 return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.
90
91 Returns:
92 fitresult (scipy.optimize.OptimizeResult): the fit result
93 """
94 maxiter = options.pop('maxiter', self.maxiter)
95 return_uncertainties = options.pop('return_uncertainties', False)
96 if options:
97 raise exceptions.Unsupported(
98 f"Unsupported options were passed in: {list(options.keys())}."
99 )
100
101 minimizer.migrad(ncall=maxiter)
102 # Following lines below come from:
103 # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125
104 message = "Optimization terminated successfully."
105 if not minimizer.valid:
106 message = "Optimization failed."
107 fmin = minimizer.fmin
108 if fmin.has_reached_call_limit:
109 message += " Call limit was reached."
110 if fmin.is_above_max_edm:
111 message += " Estimated distance to minimum too large."
112
113 n = len(x0)
114 hess_inv = default_backend.ones((n, n))
115 if minimizer.valid:
116 hess_inv = minimizer.np_covariance()
117
118 unc = None
119 if return_uncertainties:
120 unc = minimizer.np_errors()
121
122 return scipy.optimize.OptimizeResult(
123 x=minimizer.np_values(),
124 unc=unc,
125 success=minimizer.valid,
126 fun=minimizer.fval,
127 hess_inv=hess_inv,
128 message=message,
129 nfev=minimizer.ncalls,
130 njev=minimizer.ngrads,
131 minuit=minimizer,
132 )
133
[end of src/pyhf/optimize/opt_minuit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pyhf/optimize/opt_minuit.py b/src/pyhf/optimize/opt_minuit.py
--- a/src/pyhf/optimize/opt_minuit.py
+++ b/src/pyhf/optimize/opt_minuit.py
@@ -10,7 +10,7 @@
Optimizer that uses iminuit.Minuit.migrad.
"""
- __slots__ = ['name', 'errordef', 'steps']
+ __slots__ = ['name', 'errordef', 'steps', 'strategy']
def __init__(self, *args, **kwargs):
"""
@@ -27,10 +27,12 @@
Args:
errordef (:obj:`float`): See minuit docs. Default is 1.0.
steps (:obj:`int`): Number of steps for the bounds. Default is 1000.
+ strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is None.
"""
self.name = 'minuit'
self.errordef = kwargs.pop('errordef', 1)
self.steps = kwargs.pop('steps', 1000)
+ self.strategy = kwargs.pop('strategy', None)
super().__init__(*args, **kwargs)
def _get_minimizer(
@@ -87,17 +89,24 @@
Minimizer Options:
maxiter (:obj:`int`): maximum number of iterations. Default is 100000.
return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.
+ strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is to configure in response to `do_grad`.
Returns:
fitresult (scipy.optimize.OptimizeResult): the fit result
"""
maxiter = options.pop('maxiter', self.maxiter)
return_uncertainties = options.pop('return_uncertainties', False)
+ # 0: Fast, user-provided gradient
+ # 1: Default, no user-provided gradient
+ strategy = options.pop(
+ 'strategy', self.strategy if self.strategy else not do_grad
+ )
if options:
raise exceptions.Unsupported(
f"Unsupported options were passed in: {list(options.keys())}."
)
+ minimizer.strategy = strategy
minimizer.migrad(ncall=maxiter)
# Following lines below come from:
# https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125
@@ -113,6 +122,8 @@
n = len(x0)
hess_inv = default_backend.ones((n, n))
if minimizer.valid:
+ # Extra call to hesse() after migrad() is always needed for good error estimates. If you pass a user-provided gradient to MINUIT, convergence is faster.
+ minimizer.hesse()
hess_inv = minimizer.np_covariance()
unc = None
| {"golden_diff": "diff --git a/src/pyhf/optimize/opt_minuit.py b/src/pyhf/optimize/opt_minuit.py\n--- a/src/pyhf/optimize/opt_minuit.py\n+++ b/src/pyhf/optimize/opt_minuit.py\n@@ -10,7 +10,7 @@\n Optimizer that uses iminuit.Minuit.migrad.\n \"\"\"\n \n- __slots__ = ['name', 'errordef', 'steps']\n+ __slots__ = ['name', 'errordef', 'steps', 'strategy']\n \n def __init__(self, *args, **kwargs):\n \"\"\"\n@@ -27,10 +27,12 @@\n Args:\n errordef (:obj:`float`): See minuit docs. Default is 1.0.\n steps (:obj:`int`): Number of steps for the bounds. Default is 1000.\n+ strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is None.\n \"\"\"\n self.name = 'minuit'\n self.errordef = kwargs.pop('errordef', 1)\n self.steps = kwargs.pop('steps', 1000)\n+ self.strategy = kwargs.pop('strategy', None)\n super().__init__(*args, **kwargs)\n \n def _get_minimizer(\n@@ -87,17 +89,24 @@\n Minimizer Options:\n maxiter (:obj:`int`): maximum number of iterations. Default is 100000.\n return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.\n+ strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is to configure in response to `do_grad`.\n \n Returns:\n fitresult (scipy.optimize.OptimizeResult): the fit result\n \"\"\"\n maxiter = options.pop('maxiter', self.maxiter)\n return_uncertainties = options.pop('return_uncertainties', False)\n+ # 0: Fast, user-provided gradient\n+ # 1: Default, no user-provided gradient\n+ strategy = options.pop(\n+ 'strategy', self.strategy if self.strategy else not do_grad\n+ )\n if options:\n raise exceptions.Unsupported(\n f\"Unsupported options were passed in: {list(options.keys())}.\"\n )\n \n+ minimizer.strategy = strategy\n minimizer.migrad(ncall=maxiter)\n # Following lines below come from:\n # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125\n@@ -113,6 +122,8 @@\n n = len(x0)\n hess_inv = default_backend.ones((n, n))\n if minimizer.valid:\n+ # Extra call to hesse() after migrad() is always needed for good error estimates. If you pass a user-provided gradient to MINUIT, convergence is faster.\n+ minimizer.hesse()\n hess_inv = minimizer.np_covariance()\n \n unc = None\n", "issue": "use strategy 0 for user-provided gradients in minuit\n# Description\r\n\r\nsince we have exact gradient we can disable the checks minuit does\r\n\r\ncc @alexander-held \n", "before_files": [{"content": "\"\"\"Minuit Optimizer Class.\"\"\"\nfrom .. import default_backend, exceptions\nfrom .mixins import OptimizerMixin\nimport scipy\nimport iminuit\n\n\nclass minuit_optimizer(OptimizerMixin):\n \"\"\"\n Optimizer that uses iminuit.Minuit.migrad.\n \"\"\"\n\n __slots__ = ['name', 'errordef', 'steps']\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create MINUIT Optimizer.\n\n .. note::\n\n ``errordef`` should be 1.0 for a least-squares cost function and 0.5\n for negative log-likelihood function. See page 37 of\n http://hep.fi.infn.it/minuit.pdf. This parameter is sometimes\n called ``UP`` in the ``MINUIT`` docs.\n\n\n Args:\n errordef (:obj:`float`): See minuit docs. Default is 1.0.\n steps (:obj:`int`): Number of steps for the bounds. 
Default is 1000.\n \"\"\"\n self.name = 'minuit'\n self.errordef = kwargs.pop('errordef', 1)\n self.steps = kwargs.pop('steps', 1000)\n super().__init__(*args, **kwargs)\n\n def _get_minimizer(\n self, objective_and_grad, init_pars, init_bounds, fixed_vals=None, do_grad=False\n ):\n\n step_sizes = [(b[1] - b[0]) / float(self.steps) for b in init_bounds]\n fixed_vals = fixed_vals or []\n # Minuit wants True/False for each parameter\n fixed_bools = [False] * len(init_pars)\n for index, val in fixed_vals:\n fixed_bools[index] = True\n init_pars[index] = val\n step_sizes[index] = 0.0\n\n # Minuit requires jac=callable\n if do_grad:\n wrapped_objective = lambda pars: objective_and_grad(pars)[0]\n jac = lambda pars: objective_and_grad(pars)[1]\n else:\n wrapped_objective = objective_and_grad\n jac = None\n\n kwargs = dict(\n fcn=wrapped_objective,\n grad=jac,\n start=init_pars,\n error=step_sizes,\n limit=init_bounds,\n fix=fixed_bools,\n print_level=self.verbose,\n errordef=self.errordef,\n )\n return iminuit.Minuit.from_array_func(**kwargs)\n\n def _minimize(\n self,\n minimizer,\n func,\n x0,\n do_grad=False,\n bounds=None,\n fixed_vals=None,\n return_uncertainties=False,\n options={},\n ):\n\n \"\"\"\n Same signature as :func:`scipy.optimize.minimize`.\n\n Note: an additional `minuit` is injected into the fitresult to get the\n underlying minimizer.\n\n Minimizer Options:\n maxiter (:obj:`int`): maximum number of iterations. Default is 100000.\n return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.\n\n Returns:\n fitresult (scipy.optimize.OptimizeResult): the fit result\n \"\"\"\n maxiter = options.pop('maxiter', self.maxiter)\n return_uncertainties = options.pop('return_uncertainties', False)\n if options:\n raise exceptions.Unsupported(\n f\"Unsupported options were passed in: {list(options.keys())}.\"\n )\n\n minimizer.migrad(ncall=maxiter)\n # Following lines below come from:\n # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125\n message = \"Optimization terminated successfully.\"\n if not minimizer.valid:\n message = \"Optimization failed.\"\n fmin = minimizer.fmin\n if fmin.has_reached_call_limit:\n message += \" Call limit was reached.\"\n if fmin.is_above_max_edm:\n message += \" Estimated distance to minimum too large.\"\n\n n = len(x0)\n hess_inv = default_backend.ones((n, n))\n if minimizer.valid:\n hess_inv = minimizer.np_covariance()\n\n unc = None\n if return_uncertainties:\n unc = minimizer.np_errors()\n\n return scipy.optimize.OptimizeResult(\n x=minimizer.np_values(),\n unc=unc,\n success=minimizer.valid,\n fun=minimizer.fval,\n hess_inv=hess_inv,\n message=message,\n nfev=minimizer.ncalls,\n njev=minimizer.ngrads,\n minuit=minimizer,\n )\n", "path": "src/pyhf/optimize/opt_minuit.py"}]} | 1,923 | 706 |
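Condensing the pyhf change above: when an exact, user-supplied gradient is available (`do_grad=True`), MINUIT's slower internal checks add little, so the patch selects strategy 0 and compensates with an explicit `hesse()` call after `migrad()` to keep the error estimates reliable. A stripped-down sketch of that flow follows; the free function is illustrative, since in the repository this logic lives inside `minuit_optimizer._minimize`.

```python
def run_minuit(minimizer, maxiter, do_grad, strategy=None):
    """Sketch: pick the MINUIT strategy from `do_grad`, then minimize."""
    # 0: fast, trusts the exact user-provided gradient
    # 1: iminuit's default when no gradient is supplied
    minimizer.strategy = strategy if strategy is not None else int(not do_grad)
    minimizer.migrad(ncall=maxiter)
    if minimizer.valid:
        # With strategy 0 an explicit hesse() call is what keeps the
        # covariance (and hence the parameter uncertainties) trustworthy.
        minimizer.hesse()
    return minimizer
```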
gh_patches_debug_26193 | rasdani/github-patches | git_diff | python-discord__site-1165 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support rescheduling of offensive messages
When a message trips the filter on the bot, it is removed after a period of time. During this period it is kept as a record in the database.
When this deletion date is reached, the bot will attempt to remove the message from Discord and remove the record from the offensive message table. We currently handle the case where the message is not found (already deleted), but if another error occurs and the message is not deleted, we still delete the database record, leaving the offending message in place on Discord.
We should allow the bot to perform a PATCH request to the deleted message endpoint to update the delete time and reschedule if something has failed (for example, a hiccup on Discord's end).
However, we must also bear in mind that permanent rescheduling could potentially leave lingering records in our database if a case is discovered where a message which cannot be deleted is rescheduled repetitively.
</issue>
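As a rough sketch of the bot-side behaviour the issue above asks for (an editor's illustration, not part of the original report): the base URL, the use of `requests`, and the three-attempt cap below are assumptions, while the `delete_date` field and the `/bot/offensive-messages/<id>` route come from the API documented in the code that follows.

```python
from datetime import datetime, timedelta, timezone

import requests  # assumed HTTP client; the real bot may use its own API wrapper

API_ROOT = "https://example.invalid/bot"  # hypothetical base URL
MAX_RESCHEDULES = 3  # assumed cap so a truly undeletable message cannot linger forever


def reschedule_deletion(message_id: int, attempts_so_far: int, delay_minutes: int = 5) -> bool:
    """Push delete_date forward via PATCH; give up after MAX_RESCHEDULES attempts."""
    if attempts_so_far >= MAX_RESCHEDULES:
        return False  # stop retrying; let the record be cleaned up instead
    new_delete_date = datetime.now(timezone.utc) + timedelta(minutes=delay_minutes)
    response = requests.patch(
        f"{API_ROOT}/offensive-messages/{message_id}",
        json={"delete_date": new_delete_date.isoformat()},
        timeout=10,
        # authentication headers omitted; the endpoint requires an API token
    )
    response.raise_for_status()
    return True
```

Capping the number of reschedules is one way to address the lingering-record concern raised at the end of the issue.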
<code>
[start of pydis_site/apps/api/viewsets/bot/offensive_message.py]
1 from rest_framework.mixins import (
2 CreateModelMixin,
3 DestroyModelMixin,
4 ListModelMixin
5 )
6 from rest_framework.viewsets import GenericViewSet
7
8 from pydis_site.apps.api.models.bot.offensive_message import OffensiveMessage
9 from pydis_site.apps.api.serializers import OffensiveMessageSerializer
10
11
12 class OffensiveMessageViewSet(
13 CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet
14 ):
15 """
16 View providing CRUD access to offensive messages.
17
18 ## Routes
19 ### GET /bot/offensive-messages
20 Returns all offensive messages in the database.
21
22 #### Response format
23 >>> [
24 ... {
25 ... 'id': '631953598091100200',
26 ... 'channel_id': '291284109232308226',
27 ... 'delete_date': '2019-11-01T21:51:15.545000Z'
28 ... },
29 ... ...
30 ... ]
31
32 #### Status codes
33 - 200: returned on success
34
35 ### POST /bot/offensive-messages
36 Create a new offensive message object.
37
38 #### Request body
39 >>> {
40 ... 'id': int,
41 ... 'channel_id': int,
42 ... 'delete_date': datetime.datetime # ISO-8601-formatted date
43 ... }
44
45 #### Status codes
46 - 201: returned on success
47 - 400: if the body format is invalid
48
49 ### DELETE /bot/offensive-messages/<id:int>
50 Delete the offensive message object with the given `id`.
51
52 #### Status codes
53 - 204: returned on success
54 - 404: if a offensive message object with the given `id` does not exist
55
56 ## Authentication
57 Requires an API token.
58 """
59
60 serializer_class = OffensiveMessageSerializer
61 queryset = OffensiveMessage.objects.all()
62
[end of pydis_site/apps/api/viewsets/bot/offensive_message.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pydis_site/apps/api/viewsets/bot/offensive_message.py b/pydis_site/apps/api/viewsets/bot/offensive_message.py
--- a/pydis_site/apps/api/viewsets/bot/offensive_message.py
+++ b/pydis_site/apps/api/viewsets/bot/offensive_message.py
@@ -1,6 +1,7 @@
from rest_framework.mixins import (
CreateModelMixin,
DestroyModelMixin,
+ UpdateModelMixin,
ListModelMixin
)
from rest_framework.viewsets import GenericViewSet
@@ -10,7 +11,7 @@
class OffensiveMessageViewSet(
- CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet
+ CreateModelMixin, ListModelMixin, UpdateModelMixin, DestroyModelMixin, GenericViewSet
):
"""
View providing CRUD access to offensive messages.
@@ -46,6 +47,16 @@
- 201: returned on success
- 400: if the body format is invalid
+ ### PATCH /bot/offensive-messages/<id:int>
+ Perform a partial update of the offensive message with the given `id`.
+ Intended to allow rescheduling the deletion date in case the bot's attempt
+ to delete the message failed due to another error than the message already
+ being deleted.
+
+ #### Status codes
+ - 200: returned on success
+ - 404: if a offensive message object with the given `id` does not exist
+
### DELETE /bot/offensive-messages/<id:int>
Delete the offensive message object with the given `id`.
| {"golden_diff": "diff --git a/pydis_site/apps/api/viewsets/bot/offensive_message.py b/pydis_site/apps/api/viewsets/bot/offensive_message.py\n--- a/pydis_site/apps/api/viewsets/bot/offensive_message.py\n+++ b/pydis_site/apps/api/viewsets/bot/offensive_message.py\n@@ -1,6 +1,7 @@\n from rest_framework.mixins import (\n CreateModelMixin,\n DestroyModelMixin,\n+ UpdateModelMixin,\n ListModelMixin\n )\n from rest_framework.viewsets import GenericViewSet\n@@ -10,7 +11,7 @@\n \n \n class OffensiveMessageViewSet(\n- CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet\n+ CreateModelMixin, ListModelMixin, UpdateModelMixin, DestroyModelMixin, GenericViewSet\n ):\n \"\"\"\n View providing CRUD access to offensive messages.\n@@ -46,6 +47,16 @@\n - 201: returned on success\n - 400: if the body format is invalid\n \n+ ### PATCH /bot/offensive-messages/<id:int>\n+ Perform a partial update of the offensive message with the given `id`.\n+ Intended to allow rescheduling the deletion date in case the bot's attempt\n+ to delete the message failed due to another error than the message already\n+ being deleted.\n+\n+ #### Status codes\n+ - 200: returned on success\n+ - 404: if a offensive message object with the given `id` does not exist\n+\n ### DELETE /bot/offensive-messages/<id:int>\n Delete the offensive message object with the given `id`.\n", "issue": "Support rescheduling of offensive messages\nWhen a message trips the filter on the bot it is removed after a period of time. During this period it is a record in the database.\r\n\r\nWhen this deletion date is reached the bot will attempt to remove the message from Discord and remove the record from the offensive message table. We currently handle for the message being not found (deleted) but if another error occurs resulting in the message not being deleted we still continue to delete the database record, causing the message to be left around.\r\n\r\nWe should allow the bot to perform a PATCH request to the deleted message endpoint to update the delete time and reschedule if something has failed (for example, a hiccup on Discord's end).\r\n\r\nHowever, we must also bear in mind that permanent rescheduling could potentially leave lingering records in our database if a case is discovered where a message which cannot be deleted is rescheduled repetitively.\n", "before_files": [{"content": "from rest_framework.mixins import (\n CreateModelMixin,\n DestroyModelMixin,\n ListModelMixin\n)\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom pydis_site.apps.api.models.bot.offensive_message import OffensiveMessage\nfrom pydis_site.apps.api.serializers import OffensiveMessageSerializer\n\n\nclass OffensiveMessageViewSet(\n CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet\n):\n \"\"\"\n View providing CRUD access to offensive messages.\n\n ## Routes\n ### GET /bot/offensive-messages\n Returns all offensive messages in the database.\n\n #### Response format\n >>> [\n ... {\n ... 'id': '631953598091100200',\n ... 'channel_id': '291284109232308226',\n ... 'delete_date': '2019-11-01T21:51:15.545000Z'\n ... },\n ... ...\n ... ]\n\n #### Status codes\n - 200: returned on success\n\n ### POST /bot/offensive-messages\n Create a new offensive message object.\n\n #### Request body\n >>> {\n ... 'id': int,\n ... 'channel_id': int,\n ... 'delete_date': datetime.datetime # ISO-8601-formatted date\n ... 
}\n\n #### Status codes\n - 201: returned on success\n - 400: if the body format is invalid\n\n ### DELETE /bot/offensive-messages/<id:int>\n Delete the offensive message object with the given `id`.\n\n #### Status codes\n - 204: returned on success\n - 404: if a offensive message object with the given `id` does not exist\n\n ## Authentication\n Requires an API token.\n \"\"\"\n\n serializer_class = OffensiveMessageSerializer\n queryset = OffensiveMessage.objects.all()\n", "path": "pydis_site/apps/api/viewsets/bot/offensive_message.py"}]} | 1,290 | 353 |
gh_patches_debug_40983 | rasdani/github-patches | git_diff | microsoft__torchgeo-644 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: missing "not" operator to set ignore_zeros in segmentation trainer
I believe there's a bug in the translation from ignore_zeros to ignore_index in the [\_\_init__ of SemanticSegmentationTask](https://github.com/microsoft/torchgeo/blob/c933694ddc9a121af9415389949aefcc84760472/torchgeo/trainers/segmentation.py#L89).
`self.ignore_zeros = None if kwargs["ignore_zeros"] else 0` should be:
`self.ignore_zeros = None if kwargs["ignore_zeros"] is None else 0` as is done [when defining the loss function](https://github.com/microsoft/torchgeo/blob/c933694ddc9a121af9415389949aefcc84760472/torchgeo/trainers/segmentation.py#L57)
`self.ignore_zeros` is later used to define the `ignore_index` of `self.train_metrics`. For the sake of clarity, I'd suggest renaming it to `self.ignore_index` directly in the \_\_init__.
Therefore, this
```
...
self.ignore_zeros = None if kwargs["ignore_zeros"] else 0
self.config_task()
self.train_metrics = MetricCollection(
[
Accuracy(
num_classes=self.hparams["num_classes"],
ignore_index=self.ignore_zeros,
...
```
would become
```
...
self.ignore_index = None if kwargs["ignore_zeros"] is None else 0
self.config_task()
self.train_metrics = MetricCollection(
[
Accuracy(
num_classes=self.hparams["num_classes"],
ignore_index=self.ignore_index,
...
```
If you'd like, I can definitely push a PR to fix this bug if the solution seems ok to all. Let me know.
</issue>
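Before the code listing, a minimal standalone check of the expression quoted in the issue may help; it is an editor's illustration that relies only on plain Python truthiness and does not import torchgeo.

```python
def current_mapping(ignore_zeros: bool):
    # Expression currently used in SemanticSegmentationTask.__init__ (line 92 of the listing below)
    return None if ignore_zeros else 0


# ignore_zeros=True is meant to ignore class 0, but yields ignore_index=None,
# so the metrics ignore nothing.
assert current_mapping(True) is None

# ignore_zeros=False is meant to ignore nothing, but yields ignore_index=0,
# so the metrics ignore class 0 -- the mapping is inverted.
assert current_mapping(False) == 0
```

The golden diff at the end of this record resolves this differently, replacing `ignore_zeros` with an `ignore_index` keyword that is passed straight through to the loss and metrics.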
<code>
[start of torchgeo/trainers/segmentation.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 """Segmentation tasks."""
5
6 from typing import Any, Dict, cast
7
8 import segmentation_models_pytorch as smp
9 import torch
10 import torch.nn as nn
11 from pytorch_lightning.core.lightning import LightningModule
12 from torch import Tensor
13 from torch.optim.lr_scheduler import ReduceLROnPlateau
14 from torch.utils.data import DataLoader
15 from torchmetrics import Accuracy, JaccardIndex, MetricCollection
16
17 from ..datasets.utils import unbind_samples
18 from ..models import FCN
19
20 # https://github.com/pytorch/pytorch/issues/60979
21 # https://github.com/pytorch/pytorch/pull/61045
22 DataLoader.__module__ = "torch.utils.data"
23
24
25 class SemanticSegmentationTask(LightningModule):
26 """LightningModule for semantic segmentation of images."""
27
28 def config_task(self) -> None:
29 """Configures the task based on kwargs parameters passed to the constructor."""
30 if self.hyperparams["segmentation_model"] == "unet":
31 self.model = smp.Unet(
32 encoder_name=self.hyperparams["encoder_name"],
33 encoder_weights=self.hyperparams["encoder_weights"],
34 in_channels=self.hyperparams["in_channels"],
35 classes=self.hyperparams["num_classes"],
36 )
37 elif self.hyperparams["segmentation_model"] == "deeplabv3+":
38 self.model = smp.DeepLabV3Plus(
39 encoder_name=self.hyperparams["encoder_name"],
40 encoder_weights=self.hyperparams["encoder_weights"],
41 in_channels=self.hyperparams["in_channels"],
42 classes=self.hyperparams["num_classes"],
43 )
44 elif self.hyperparams["segmentation_model"] == "fcn":
45 self.model = FCN(
46 in_channels=self.hyperparams["in_channels"],
47 classes=self.hyperparams["num_classes"],
48 num_filters=self.hyperparams["num_filters"],
49 )
50 else:
51 raise ValueError(
52 f"Model type '{self.hyperparams['segmentation_model']}' is not valid."
53 )
54
55 if self.hyperparams["loss"] == "ce":
56 self.loss = nn.CrossEntropyLoss(
57 ignore_index=-1000 if self.ignore_zeros is None else 0
58 )
59 elif self.hyperparams["loss"] == "jaccard":
60 self.loss = smp.losses.JaccardLoss(
61 mode="multiclass", classes=self.hyperparams["num_classes"]
62 )
63 elif self.hyperparams["loss"] == "focal":
64 self.loss = smp.losses.FocalLoss(
65 "multiclass", ignore_index=self.ignore_zeros, normalized=True
66 )
67 else:
68 raise ValueError(f"Loss type '{self.hyperparams['loss']}' is not valid.")
69
70 def __init__(self, **kwargs: Any) -> None:
71 """Initialize the LightningModule with a model and loss function.
72
73 Keyword Args:
74 segmentation_model: Name of the segmentation model type to use
75 encoder_name: Name of the encoder model backbone to use
76 encoder_weights: None or "imagenet" to use imagenet pretrained weights in
77 the encoder model
78 in_channels: Number of channels in input image
79 num_classes: Number of semantic classes to predict
80 loss: Name of the loss function
81 ignore_zeros: Whether to ignore the "0" class value in the loss and metrics
82
83 Raises:
84 ValueError: if kwargs arguments are invalid
85 """
86 super().__init__()
87
88 # Creates `self.hparams` from kwargs
89 self.save_hyperparameters() # type: ignore[operator]
90 self.hyperparams = cast(Dict[str, Any], self.hparams)
91
92 self.ignore_zeros = None if kwargs["ignore_zeros"] else 0
93
94 self.config_task()
95
96 self.train_metrics = MetricCollection(
97 [
98 Accuracy(
99 num_classes=self.hyperparams["num_classes"],
100 ignore_index=self.ignore_zeros,
101 mdmc_average="global",
102 ),
103 JaccardIndex(
104 num_classes=self.hyperparams["num_classes"],
105 ignore_index=self.ignore_zeros,
106 ),
107 ],
108 prefix="train_",
109 )
110 self.val_metrics = self.train_metrics.clone(prefix="val_")
111 self.test_metrics = self.train_metrics.clone(prefix="test_")
112
113 def forward(self, *args: Any, **kwargs: Any) -> Any:
114 """Forward pass of the model.
115
116 Args:
117 x: tensor of data to run through the model
118
119 Returns:
120 output from the model
121 """
122 return self.model(*args, **kwargs)
123
124 def training_step(self, *args: Any, **kwargs: Any) -> Tensor:
125 """Compute and return the training loss.
126
127 Args:
128 batch: the output of your DataLoader
129
130 Returns:
131 training loss
132 """
133 batch = args[0]
134 x = batch["image"]
135 y = batch["mask"]
136 y_hat = self.forward(x)
137 y_hat_hard = y_hat.argmax(dim=1)
138
139 loss = self.loss(y_hat, y)
140
141 # by default, the train step logs every `log_every_n_steps` steps where
142 # `log_every_n_steps` is a parameter to the `Trainer` object
143 self.log("train_loss", loss, on_step=True, on_epoch=False)
144 self.train_metrics(y_hat_hard, y)
145
146 return cast(Tensor, loss)
147
148 def training_epoch_end(self, outputs: Any) -> None:
149 """Logs epoch level training metrics.
150
151 Args:
152 outputs: list of items returned by training_step
153 """
154 self.log_dict(self.train_metrics.compute())
155 self.train_metrics.reset()
156
157 def validation_step(self, *args: Any, **kwargs: Any) -> None:
158 """Compute validation loss and log example predictions.
159
160 Args:
161 batch: the output of your DataLoader
162 batch_idx: the index of this batch
163 """
164 batch = args[0]
165 batch_idx = args[1]
166 x = batch["image"]
167 y = batch["mask"]
168 y_hat = self.forward(x)
169 y_hat_hard = y_hat.argmax(dim=1)
170
171 loss = self.loss(y_hat, y)
172
173 self.log("val_loss", loss, on_step=False, on_epoch=True)
174 self.val_metrics(y_hat_hard, y)
175
176 if batch_idx < 10:
177 try:
178 datamodule = self.trainer.datamodule # type: ignore[union-attr]
179 batch["prediction"] = y_hat_hard
180 for key in ["image", "mask", "prediction"]:
181 batch[key] = batch[key].cpu()
182 sample = unbind_samples(batch)[0]
183 fig = datamodule.plot(sample)
184 summary_writer = self.logger.experiment # type: ignore[union-attr]
185 summary_writer.add_figure(
186 f"image/{batch_idx}", fig, global_step=self.global_step
187 )
188 except AttributeError:
189 pass
190
191 def validation_epoch_end(self, outputs: Any) -> None:
192 """Logs epoch level validation metrics.
193
194 Args:
195 outputs: list of items returned by validation_step
196 """
197 self.log_dict(self.val_metrics.compute())
198 self.val_metrics.reset()
199
200 def test_step(self, *args: Any, **kwargs: Any) -> None:
201 """Compute test loss.
202
203 Args:
204 batch: the output of your DataLoader
205 """
206 batch = args[0]
207 x = batch["image"]
208 y = batch["mask"]
209 y_hat = self.forward(x)
210 y_hat_hard = y_hat.argmax(dim=1)
211
212 loss = self.loss(y_hat, y)
213
214 # by default, the test and validation steps only log per *epoch*
215 self.log("test_loss", loss, on_step=False, on_epoch=True)
216 self.test_metrics(y_hat_hard, y)
217
218 def test_epoch_end(self, outputs: Any) -> None:
219 """Logs epoch level test metrics.
220
221 Args:
222 outputs: list of items returned by test_step
223 """
224 self.log_dict(self.test_metrics.compute())
225 self.test_metrics.reset()
226
227 def configure_optimizers(self) -> Dict[str, Any]:
228 """Initialize the optimizer and learning rate scheduler.
229
230 Returns:
231 a "lr dict" according to the pytorch lightning documentation --
232 https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers
233 """
234 optimizer = torch.optim.Adam(
235 self.model.parameters(), lr=self.hyperparams["learning_rate"]
236 )
237 return {
238 "optimizer": optimizer,
239 "lr_scheduler": {
240 "scheduler": ReduceLROnPlateau(
241 optimizer,
242 patience=self.hyperparams["learning_rate_schedule_patience"],
243 ),
244 "monitor": "val_loss",
245 },
246 }
247
[end of torchgeo/trainers/segmentation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchgeo/trainers/segmentation.py b/torchgeo/trainers/segmentation.py
--- a/torchgeo/trainers/segmentation.py
+++ b/torchgeo/trainers/segmentation.py
@@ -3,6 +3,7 @@
"""Segmentation tasks."""
+import warnings
from typing import Any, Dict, cast
import segmentation_models_pytorch as smp
@@ -53,16 +54,15 @@
)
if self.hyperparams["loss"] == "ce":
- self.loss = nn.CrossEntropyLoss(
- ignore_index=-1000 if self.ignore_zeros is None else 0
- )
+ ignore_value = -1000 if self.ignore_index is None else self.ignore_index
+ self.loss = nn.CrossEntropyLoss(ignore_index=ignore_value)
elif self.hyperparams["loss"] == "jaccard":
self.loss = smp.losses.JaccardLoss(
mode="multiclass", classes=self.hyperparams["num_classes"]
)
elif self.hyperparams["loss"] == "focal":
self.loss = smp.losses.FocalLoss(
- "multiclass", ignore_index=self.ignore_zeros, normalized=True
+ "multiclass", ignore_index=self.ignore_index, normalized=True
)
else:
raise ValueError(f"Loss type '{self.hyperparams['loss']}' is not valid.")
@@ -78,7 +78,7 @@
in_channels: Number of channels in input image
num_classes: Number of semantic classes to predict
loss: Name of the loss function
- ignore_zeros: Whether to ignore the "0" class value in the loss and metrics
+ ignore_index: Optional integer class index to ignore in the loss and metrics
Raises:
ValueError: if kwargs arguments are invalid
@@ -89,20 +89,26 @@
self.save_hyperparameters() # type: ignore[operator]
self.hyperparams = cast(Dict[str, Any], self.hparams)
- self.ignore_zeros = None if kwargs["ignore_zeros"] else 0
-
+ if not isinstance(kwargs["ignore_index"], (int, type(None))):
+ raise ValueError("ignore_index must be an int or None")
+ if (kwargs["ignore_index"] is not None) and (kwargs["loss"] == "jaccard"):
+ warnings.warn(
+ "ignore_index has no effect on training when loss='jaccard'",
+ UserWarning,
+ )
+ self.ignore_index = kwargs["ignore_index"]
self.config_task()
self.train_metrics = MetricCollection(
[
Accuracy(
num_classes=self.hyperparams["num_classes"],
- ignore_index=self.ignore_zeros,
+ ignore_index=self.ignore_index,
mdmc_average="global",
),
JaccardIndex(
num_classes=self.hyperparams["num_classes"],
- ignore_index=self.ignore_zeros,
+ ignore_index=self.ignore_index,
),
],
prefix="train_",
| {"golden_diff": "diff --git a/torchgeo/trainers/segmentation.py b/torchgeo/trainers/segmentation.py\n--- a/torchgeo/trainers/segmentation.py\n+++ b/torchgeo/trainers/segmentation.py\n@@ -3,6 +3,7 @@\n \n \"\"\"Segmentation tasks.\"\"\"\n \n+import warnings\n from typing import Any, Dict, cast\n \n import segmentation_models_pytorch as smp\n@@ -53,16 +54,15 @@\n )\n \n if self.hyperparams[\"loss\"] == \"ce\":\n- self.loss = nn.CrossEntropyLoss(\n- ignore_index=-1000 if self.ignore_zeros is None else 0\n- )\n+ ignore_value = -1000 if self.ignore_index is None else self.ignore_index\n+ self.loss = nn.CrossEntropyLoss(ignore_index=ignore_value)\n elif self.hyperparams[\"loss\"] == \"jaccard\":\n self.loss = smp.losses.JaccardLoss(\n mode=\"multiclass\", classes=self.hyperparams[\"num_classes\"]\n )\n elif self.hyperparams[\"loss\"] == \"focal\":\n self.loss = smp.losses.FocalLoss(\n- \"multiclass\", ignore_index=self.ignore_zeros, normalized=True\n+ \"multiclass\", ignore_index=self.ignore_index, normalized=True\n )\n else:\n raise ValueError(f\"Loss type '{self.hyperparams['loss']}' is not valid.\")\n@@ -78,7 +78,7 @@\n in_channels: Number of channels in input image\n num_classes: Number of semantic classes to predict\n loss: Name of the loss function\n- ignore_zeros: Whether to ignore the \"0\" class value in the loss and metrics\n+ ignore_index: Optional integer class index to ignore in the loss and metrics\n \n Raises:\n ValueError: if kwargs arguments are invalid\n@@ -89,20 +89,26 @@\n self.save_hyperparameters() # type: ignore[operator]\n self.hyperparams = cast(Dict[str, Any], self.hparams)\n \n- self.ignore_zeros = None if kwargs[\"ignore_zeros\"] else 0\n-\n+ if not isinstance(kwargs[\"ignore_index\"], (int, type(None))):\n+ raise ValueError(\"ignore_index must be an int or None\")\n+ if (kwargs[\"ignore_index\"] is not None) and (kwargs[\"loss\"] == \"jaccard\"):\n+ warnings.warn(\n+ \"ignore_index has no effect on training when loss='jaccard'\",\n+ UserWarning,\n+ )\n+ self.ignore_index = kwargs[\"ignore_index\"]\n self.config_task()\n \n self.train_metrics = MetricCollection(\n [\n Accuracy(\n num_classes=self.hyperparams[\"num_classes\"],\n- ignore_index=self.ignore_zeros,\n+ ignore_index=self.ignore_index,\n mdmc_average=\"global\",\n ),\n JaccardIndex(\n num_classes=self.hyperparams[\"num_classes\"],\n- ignore_index=self.ignore_zeros,\n+ ignore_index=self.ignore_index,\n ),\n ],\n prefix=\"train_\",\n", "issue": "Bug: missing \"not\" operator to set ignore_zeros in segmentation trainer\nI believe there's a bug in the translation from ignore_zeros to ignore_index in the [\\_\\_init__ of SemanticSegmentationTask](https://github.com/microsoft/torchgeo/blob/c933694ddc9a121af9415389949aefcc84760472/torchgeo/trainers/segmentation.py#L89).\r\n`self.ignore_zeros = None if kwargs[\"ignore_zeros\"] else 0` should be:\r\n`self.ignore_zeros = None if kwargs[\"ignore_zeros\"] is None else 0` as is done [when define loss function](https://github.com/microsoft/torchgeo/blob/c933694ddc9a121af9415389949aefcc84760472/torchgeo/trainers/segmentation.py#L57)\r\n\r\n`self.ignore_zeros` is later used to define the \"self.ignore_index\" of the self.train_metrics. For sake of clarity, I'd suggest renaming it to `self.ignore_index` directly in the \\_\\_init__. 
\r\n\r\nTherefore, this\r\n```\r\n...\r\n self.ignore_zeros = None if kwargs[\"ignore_zeros\"] else 0\r\n\r\n self.config_task()\r\n\r\n self.train_metrics = MetricCollection(\r\n [\r\n Accuracy(\r\n num_classes=self.hparams[\"num_classes\"],\r\n ignore_index=self.ignore_zeros,\r\n...\r\n```\r\nwould become\r\n```\r\n...\r\n self.ignore_index = None if kwargs[\"ignore_zeros\"] is None else 0\r\n\r\n self.config_task()\r\n\r\n self.train_metrics = MetricCollection(\r\n [\r\n Accuracy(\r\n num_classes=self.hparams[\"num_classes\"],\r\n ignore_index=self.ignore_index,\r\n...\r\n```\r\nIf you'd like, I can definitely push a PR for to fix this bug if the solution seems ok to all. Let me know.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Segmentation tasks.\"\"\"\n\nfrom typing import Any, Dict, cast\n\nimport segmentation_models_pytorch as smp\nimport torch\nimport torch.nn as nn\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom torch import Tensor\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.utils.data import DataLoader\nfrom torchmetrics import Accuracy, JaccardIndex, MetricCollection\n\nfrom ..datasets.utils import unbind_samples\nfrom ..models import FCN\n\n# https://github.com/pytorch/pytorch/issues/60979\n# https://github.com/pytorch/pytorch/pull/61045\nDataLoader.__module__ = \"torch.utils.data\"\n\n\nclass SemanticSegmentationTask(LightningModule):\n \"\"\"LightningModule for semantic segmentation of images.\"\"\"\n\n def config_task(self) -> None:\n \"\"\"Configures the task based on kwargs parameters passed to the constructor.\"\"\"\n if self.hyperparams[\"segmentation_model\"] == \"unet\":\n self.model = smp.Unet(\n encoder_name=self.hyperparams[\"encoder_name\"],\n encoder_weights=self.hyperparams[\"encoder_weights\"],\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"segmentation_model\"] == \"deeplabv3+\":\n self.model = smp.DeepLabV3Plus(\n encoder_name=self.hyperparams[\"encoder_name\"],\n encoder_weights=self.hyperparams[\"encoder_weights\"],\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"segmentation_model\"] == \"fcn\":\n self.model = FCN(\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n num_filters=self.hyperparams[\"num_filters\"],\n )\n else:\n raise ValueError(\n f\"Model type '{self.hyperparams['segmentation_model']}' is not valid.\"\n )\n\n if self.hyperparams[\"loss\"] == \"ce\":\n self.loss = nn.CrossEntropyLoss(\n ignore_index=-1000 if self.ignore_zeros is None else 0\n )\n elif self.hyperparams[\"loss\"] == \"jaccard\":\n self.loss = smp.losses.JaccardLoss(\n mode=\"multiclass\", classes=self.hyperparams[\"num_classes\"]\n )\n elif self.hyperparams[\"loss\"] == \"focal\":\n self.loss = smp.losses.FocalLoss(\n \"multiclass\", ignore_index=self.ignore_zeros, normalized=True\n )\n else:\n raise ValueError(f\"Loss type '{self.hyperparams['loss']}' is not valid.\")\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\"Initialize the LightningModule with a model and loss function.\n\n Keyword Args:\n segmentation_model: Name of the segmentation model type to use\n encoder_name: Name of the encoder model backbone to use\n encoder_weights: None or \"imagenet\" to use imagenet pretrained weights in\n the encoder model\n in_channels: Number of 
channels in input image\n num_classes: Number of semantic classes to predict\n loss: Name of the loss function\n ignore_zeros: Whether to ignore the \"0\" class value in the loss and metrics\n\n Raises:\n ValueError: if kwargs arguments are invalid\n \"\"\"\n super().__init__()\n\n # Creates `self.hparams` from kwargs\n self.save_hyperparameters() # type: ignore[operator]\n self.hyperparams = cast(Dict[str, Any], self.hparams)\n\n self.ignore_zeros = None if kwargs[\"ignore_zeros\"] else 0\n\n self.config_task()\n\n self.train_metrics = MetricCollection(\n [\n Accuracy(\n num_classes=self.hyperparams[\"num_classes\"],\n ignore_index=self.ignore_zeros,\n mdmc_average=\"global\",\n ),\n JaccardIndex(\n num_classes=self.hyperparams[\"num_classes\"],\n ignore_index=self.ignore_zeros,\n ),\n ],\n prefix=\"train_\",\n )\n self.val_metrics = self.train_metrics.clone(prefix=\"val_\")\n self.test_metrics = self.train_metrics.clone(prefix=\"test_\")\n\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Forward pass of the model.\n\n Args:\n x: tensor of data to run through the model\n\n Returns:\n output from the model\n \"\"\"\n return self.model(*args, **kwargs)\n\n def training_step(self, *args: Any, **kwargs: Any) -> Tensor:\n \"\"\"Compute and return the training loss.\n\n Args:\n batch: the output of your DataLoader\n\n Returns:\n training loss\n \"\"\"\n batch = args[0]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self.forward(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n # by default, the train step logs every `log_every_n_steps` steps where\n # `log_every_n_steps` is a parameter to the `Trainer` object\n self.log(\"train_loss\", loss, on_step=True, on_epoch=False)\n self.train_metrics(y_hat_hard, y)\n\n return cast(Tensor, loss)\n\n def training_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level training metrics.\n\n Args:\n outputs: list of items returned by training_step\n \"\"\"\n self.log_dict(self.train_metrics.compute())\n self.train_metrics.reset()\n\n def validation_step(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Compute validation loss and log example predictions.\n\n Args:\n batch: the output of your DataLoader\n batch_idx: the index of this batch\n \"\"\"\n batch = args[0]\n batch_idx = args[1]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self.forward(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n self.log(\"val_loss\", loss, on_step=False, on_epoch=True)\n self.val_metrics(y_hat_hard, y)\n\n if batch_idx < 10:\n try:\n datamodule = self.trainer.datamodule # type: ignore[union-attr]\n batch[\"prediction\"] = y_hat_hard\n for key in [\"image\", \"mask\", \"prediction\"]:\n batch[key] = batch[key].cpu()\n sample = unbind_samples(batch)[0]\n fig = datamodule.plot(sample)\n summary_writer = self.logger.experiment # type: ignore[union-attr]\n summary_writer.add_figure(\n f\"image/{batch_idx}\", fig, global_step=self.global_step\n )\n except AttributeError:\n pass\n\n def validation_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level validation metrics.\n\n Args:\n outputs: list of items returned by validation_step\n \"\"\"\n self.log_dict(self.val_metrics.compute())\n self.val_metrics.reset()\n\n def test_step(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Compute test loss.\n\n Args:\n batch: the output of your DataLoader\n \"\"\"\n batch = args[0]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self.forward(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = 
self.loss(y_hat, y)\n\n # by default, the test and validation steps only log per *epoch*\n self.log(\"test_loss\", loss, on_step=False, on_epoch=True)\n self.test_metrics(y_hat_hard, y)\n\n def test_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level test metrics.\n\n Args:\n outputs: list of items returned by test_step\n \"\"\"\n self.log_dict(self.test_metrics.compute())\n self.test_metrics.reset()\n\n def configure_optimizers(self) -> Dict[str, Any]:\n \"\"\"Initialize the optimizer and learning rate scheduler.\n\n Returns:\n a \"lr dict\" according to the pytorch lightning documentation --\n https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers\n \"\"\"\n optimizer = torch.optim.Adam(\n self.model.parameters(), lr=self.hyperparams[\"learning_rate\"]\n )\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\n \"scheduler\": ReduceLROnPlateau(\n optimizer,\n patience=self.hyperparams[\"learning_rate_schedule_patience\"],\n ),\n \"monitor\": \"val_loss\",\n },\n }\n", "path": "torchgeo/trainers/segmentation.py"}]} | 3,477 | 660 |
gh_patches_debug_15880 | rasdani/github-patches | git_diff | learningequality__kolibri-1266 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot play MP3 files in Kolibri
## Summary
MP3 files are inside the `.kolibri\content\storage` folder and I can play them in Windows Media Player, but they do not load in Kolibri.
## System information
- Version: KolibriSetup-0.2.0.dev3.exe
- Operating system: Windows 8.1
- Browser: Firefox
## Screenshots

</issue>
<code>
[start of kolibri/logger/api.py]
1 from django.db.models.query import F
2 from kolibri.auth.api import KolibriAuthPermissions, KolibriAuthPermissionsFilter
3 from kolibri.auth.filters import HierarchyRelationsFilter
4 from kolibri.content.api import OptionalPageNumberPagination
5 from rest_framework import filters, viewsets
6
7 from .models import AttemptLog, ContentRatingLog, ContentSessionLog, ContentSummaryLog, ExamAttemptLog, ExamLog, MasteryLog, UserSessionLog
8 from .serializers import (
9 AttemptLogSerializer, ContentRatingLogSerializer, ContentSessionLogSerializer, ContentSummaryLogSerializer, ExamAttemptLogSerializer,
10 ExamLogSerializer, MasteryLogSerializer, UserSessionLogSerializer
11 )
12
13
14 class BaseLogFilter(filters.FilterSet):
15 facility = filters.django_filters.MethodFilter()
16 classroom = filters.django_filters.MethodFilter()
17 learner_group = filters.django_filters.MethodFilter()
18
19 # Only device owner (superuser) can filter by facilities
20 def filter_facility(self, queryset, value):
21 return queryset.filter(user__facility_id=value)
22
23 def filter_classroom(self, queryset, value):
24 return HierarchyRelationsFilter(queryset).filter_by_hierarchy(
25 ancestor_collection=value,
26 target_user=F("user"),
27 )
28
29 def filter_learner_group(self, queryset, value):
30 return HierarchyRelationsFilter(queryset).filter_by_hierarchy(
31 ancestor_collection=value,
32 target_user=F("user"),
33 )
34
35
36 class ContentSessionLogFilter(BaseLogFilter):
37
38 class Meta:
39 model = ContentSessionLog
40 fields = ['user_id', 'content_id']
41
42
43 class ContentSessionLogViewSet(viewsets.ModelViewSet):
44 permission_classes = (KolibriAuthPermissions,)
45 filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)
46 queryset = ContentSessionLog.objects.all()
47 serializer_class = ContentSessionLogSerializer
48 pagination_class = OptionalPageNumberPagination
49 filter_class = ContentSessionLogFilter
50
51
52 class ContentSummaryLogFilter(BaseLogFilter):
53
54 class Meta:
55 model = ContentSummaryLog
56 fields = ['user_id', 'content_id']
57
58
59 class ContentSummaryLogViewSet(viewsets.ModelViewSet):
60 permission_classes = (KolibriAuthPermissions,)
61 filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)
62 queryset = ContentSummaryLog.objects.all()
63 serializer_class = ContentSummaryLogSerializer
64 pagination_class = OptionalPageNumberPagination
65 filter_class = ContentSummaryLogFilter
66
67
68 class ContentRatingLogFilter(BaseLogFilter):
69
70 class Meta:
71 model = ContentRatingLog
72 fields = ['user_id', 'content_id']
73
74
75 class ContentRatingLogViewSet(viewsets.ModelViewSet):
76 permission_classes = (KolibriAuthPermissions,)
77 filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)
78 queryset = ContentRatingLog.objects.all()
79 serializer_class = ContentRatingLogSerializer
80 pagination_class = OptionalPageNumberPagination
81 filter_class = ContentRatingLogFilter
82
83
84 class UserSessionLogFilter(BaseLogFilter):
85
86 class Meta:
87 model = UserSessionLog
88 fields = ['user_id']
89
90
91 class UserSessionLogViewSet(viewsets.ModelViewSet):
92 permission_classes = (KolibriAuthPermissions,)
93 filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)
94 queryset = UserSessionLog.objects.all()
95 serializer_class = UserSessionLogSerializer
96 pagination_class = OptionalPageNumberPagination
97 filter_class = UserSessionLogFilter
98
99
100 class MasteryFilter(filters.FilterSet):
101
102 class Meta:
103 model = MasteryLog
104 fields = ['summarylog']
105
106 class MasteryLogViewSet(viewsets.ModelViewSet):
107 permission_classes = (KolibriAuthPermissions,)
108 filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)
109 queryset = MasteryLog.objects.all()
110 serializer_class = MasteryLogSerializer
111 pagination_class = OptionalPageNumberPagination
112 filter_class = MasteryFilter
113
114 class AttemptFilter(filters.FilterSet):
115 content = filters.django_filters.MethodFilter()
116 user = filters.django_filters.MethodFilter()
117
118 def filter_content(self, queryset, value):
119 return queryset.filter(masterylog__summarylog__content_id=value)
120
121 def filter_user(self, queryset, value):
122 return queryset.filter(masterylog__summarylog__user_id=value)
123
124 class Meta:
125 model = AttemptLog
126 fields = ['masterylog', 'complete']
127
128 class AttemptLogViewSet(viewsets.ModelViewSet):
129 permission_classes = (KolibriAuthPermissions,)
130 filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend, filters.OrderingFilter)
131 queryset = AttemptLog.objects.all()
132 serializer_class = AttemptLogSerializer
133 pagination_class = OptionalPageNumberPagination
134 filter_class = AttemptFilter
135 ordering_fields = ('end_timestamp',)
136 ordering = ('end_timestamp',)
137
138
139 class ExamAttemptFilter(filters.FilterSet):
140 exam = filters.django_filters.MethodFilter()
141 user = filters.django_filters.MethodFilter()
142
143 def filter_exam(self, queryset, value):
144 return queryset.filter(examlog__exam=value)
145
146 def filter_user(self, queryset, value):
147 return queryset.filter(examlog__user=value)
148
149 class Meta:
150 model = ExamAttemptLog
151 fields = ['examlog', 'exam', 'user']
152
153 class ExamAttemptLogViewSet(viewsets.ModelViewSet):
154 permission_classes = (KolibriAuthPermissions,)
155 filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend, filters.OrderingFilter)
156 queryset = ExamAttemptLog.objects.all()
157 serializer_class = ExamAttemptLogSerializer
158 pagination_class = OptionalPageNumberPagination
159 filter_class = ExamAttemptFilter
160
161 class ExamLogFilter(BaseLogFilter):
162
163 class Meta:
164 model = ExamLog
165 fields = ['user', 'exam']
166
167 class ExamLogViewSet(viewsets.ModelViewSet):
168 permission_classes = (KolibriAuthPermissions,)
169 filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)
170 queryset = ExamLog.objects.all()
171 serializer_class = ExamLogSerializer
172 pagination_class = OptionalPageNumberPagination
173 filter_class = ExamLogFilter
174
[end of kolibri/logger/api.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/logger/api.py b/kolibri/logger/api.py
--- a/kolibri/logger/api.py
+++ b/kolibri/logger/api.py
@@ -113,17 +113,13 @@
class AttemptFilter(filters.FilterSet):
content = filters.django_filters.MethodFilter()
- user = filters.django_filters.MethodFilter()
def filter_content(self, queryset, value):
return queryset.filter(masterylog__summarylog__content_id=value)
- def filter_user(self, queryset, value):
- return queryset.filter(masterylog__summarylog__user_id=value)
-
class Meta:
model = AttemptLog
- fields = ['masterylog', 'complete']
+ fields = ['masterylog', 'complete', 'user', 'content']
class AttemptLogViewSet(viewsets.ModelViewSet):
permission_classes = (KolibriAuthPermissions,)
| {"golden_diff": "diff --git a/kolibri/logger/api.py b/kolibri/logger/api.py\n--- a/kolibri/logger/api.py\n+++ b/kolibri/logger/api.py\n@@ -113,17 +113,13 @@\n \n class AttemptFilter(filters.FilterSet):\n content = filters.django_filters.MethodFilter()\n- user = filters.django_filters.MethodFilter()\n \n def filter_content(self, queryset, value):\n return queryset.filter(masterylog__summarylog__content_id=value)\n \n- def filter_user(self, queryset, value):\n- return queryset.filter(masterylog__summarylog__user_id=value)\n-\n class Meta:\n model = AttemptLog\n- fields = ['masterylog', 'complete']\n+ fields = ['masterylog', 'complete', 'user', 'content']\n \n class AttemptLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n", "issue": "Cannot play MP3 files in Kolibri\n## Summary\r\n\r\nMP3 files are inside the `.kolibri\\content\\storage` folder and I can play them in the Windows Media Player, but do not load from Kolibri.\r\n\r\n## System information\r\n\r\n - Version: KolibriSetup-0.2.0.dev3.exe\r\n - Operating system: Windows 8.1\r\n - Browser: Firefox\r\n\r\n## Screenshots\r\n\r\n\r\n\n", "before_files": [{"content": "from django.db.models.query import F\nfrom kolibri.auth.api import KolibriAuthPermissions, KolibriAuthPermissionsFilter\nfrom kolibri.auth.filters import HierarchyRelationsFilter\nfrom kolibri.content.api import OptionalPageNumberPagination\nfrom rest_framework import filters, viewsets\n\nfrom .models import AttemptLog, ContentRatingLog, ContentSessionLog, ContentSummaryLog, ExamAttemptLog, ExamLog, MasteryLog, UserSessionLog\nfrom .serializers import (\n AttemptLogSerializer, ContentRatingLogSerializer, ContentSessionLogSerializer, ContentSummaryLogSerializer, ExamAttemptLogSerializer,\n ExamLogSerializer, MasteryLogSerializer, UserSessionLogSerializer\n)\n\n\nclass BaseLogFilter(filters.FilterSet):\n facility = filters.django_filters.MethodFilter()\n classroom = filters.django_filters.MethodFilter()\n learner_group = filters.django_filters.MethodFilter()\n\n # Only device owner (superuser) can filter by facilities\n def filter_facility(self, queryset, value):\n return queryset.filter(user__facility_id=value)\n\n def filter_classroom(self, queryset, value):\n return HierarchyRelationsFilter(queryset).filter_by_hierarchy(\n ancestor_collection=value,\n target_user=F(\"user\"),\n )\n\n def filter_learner_group(self, queryset, value):\n return HierarchyRelationsFilter(queryset).filter_by_hierarchy(\n ancestor_collection=value,\n target_user=F(\"user\"),\n )\n\n\nclass ContentSessionLogFilter(BaseLogFilter):\n\n class Meta:\n model = ContentSessionLog\n fields = ['user_id', 'content_id']\n\n\nclass ContentSessionLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = ContentSessionLog.objects.all()\n serializer_class = ContentSessionLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ContentSessionLogFilter\n\n\nclass ContentSummaryLogFilter(BaseLogFilter):\n\n class Meta:\n model = ContentSummaryLog\n fields = ['user_id', 'content_id']\n\n\nclass ContentSummaryLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = ContentSummaryLog.objects.all()\n serializer_class = ContentSummaryLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = 
ContentSummaryLogFilter\n\n\nclass ContentRatingLogFilter(BaseLogFilter):\n\n class Meta:\n model = ContentRatingLog\n fields = ['user_id', 'content_id']\n\n\nclass ContentRatingLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = ContentRatingLog.objects.all()\n serializer_class = ContentRatingLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ContentRatingLogFilter\n\n\nclass UserSessionLogFilter(BaseLogFilter):\n\n class Meta:\n model = UserSessionLog\n fields = ['user_id']\n\n\nclass UserSessionLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = UserSessionLog.objects.all()\n serializer_class = UserSessionLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = UserSessionLogFilter\n\n\nclass MasteryFilter(filters.FilterSet):\n\n class Meta:\n model = MasteryLog\n fields = ['summarylog']\n\nclass MasteryLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = MasteryLog.objects.all()\n serializer_class = MasteryLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = MasteryFilter\n\nclass AttemptFilter(filters.FilterSet):\n content = filters.django_filters.MethodFilter()\n user = filters.django_filters.MethodFilter()\n\n def filter_content(self, queryset, value):\n return queryset.filter(masterylog__summarylog__content_id=value)\n\n def filter_user(self, queryset, value):\n return queryset.filter(masterylog__summarylog__user_id=value)\n\n class Meta:\n model = AttemptLog\n fields = ['masterylog', 'complete']\n\nclass AttemptLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend, filters.OrderingFilter)\n queryset = AttemptLog.objects.all()\n serializer_class = AttemptLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = AttemptFilter\n ordering_fields = ('end_timestamp',)\n ordering = ('end_timestamp',)\n\n\nclass ExamAttemptFilter(filters.FilterSet):\n exam = filters.django_filters.MethodFilter()\n user = filters.django_filters.MethodFilter()\n\n def filter_exam(self, queryset, value):\n return queryset.filter(examlog__exam=value)\n\n def filter_user(self, queryset, value):\n return queryset.filter(examlog__user=value)\n\n class Meta:\n model = ExamAttemptLog\n fields = ['examlog', 'exam', 'user']\n\nclass ExamAttemptLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend, filters.OrderingFilter)\n queryset = ExamAttemptLog.objects.all()\n serializer_class = ExamAttemptLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ExamAttemptFilter\n\nclass ExamLogFilter(BaseLogFilter):\n\n class Meta:\n model = ExamLog\n fields = ['user', 'exam']\n\nclass ExamLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)\n queryset = ExamLog.objects.all()\n serializer_class = ExamLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ExamLogFilter\n", "path": 
"kolibri/logger/api.py"}]} | 2,411 | 197 |
gh_patches_debug_17868 | rasdani/github-patches | git_diff | openai__gym-1497 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
EzPickle used incorrectly on some environments
Some environments use the `EzPickle` class incorrectly and thus the environment serialization does not result in an expected copy of the environment. An example of such a class is `HandReach-v0`: https://github.com/openai/gym/blob/a488c2fe2fbd4408ba9e193985ec2d86033cab6d/gym/envs/robotics/hand/reach.py#L66
I think the correct usage would be to move the call to the beginning of the `__init__` method and call
```python
utils.EzPickle.__init__(
distance_threshold=distance_threshold,
n_substeps=n_substeps,
relative_control=relative_control,
initial_qpos=initial_qpos,
reward_type=reward_type,
)
```
or alternatively
```python
utils.EzPickle.__init__(**locals())
```
Here's a simple script to verify the issue:
```python
import pickle
import gym
def main():
env1 = gym.envs.make('HandReach-v0', distance_threshold=1e-6)
env2 = pickle.loads(pickle.dumps(env1))
assert env1.distance_threshold == env2.distance_threshold, (
env1.distance_threshold, env2.distance_threshold)
if __name__ == '__main__':
main()
```
Running it results in the following:
```
$ python ./tests/test_hand_copy.py
Traceback (most recent call last):
File "./tests/test_hand_copy.py", line 15, in <module>
main()
File "./tests/test_hand_copy.py", line 11, in main
env1.distance_threshold, env2.distance_threshold)
AssertionError: (1e-06, 0.01)
```
</issue>
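For context on why the placement matters (an editor's paraphrase of gym's standard `EzPickle` behaviour, not a quotation of its source): `EzPickle` records whatever positional and keyword arguments it is given and, on unpickling, reconstructs the environment from them, so calling it with no arguments rebuilds the environment with all defaults.

```python
# Minimal sketch of the EzPickle idea (illustrative, not gym's exact implementation):
class EzPickleSketch:
    def __init__(self, *args, **kwargs):
        self._ezpickle_args = args        # remembered construction arguments
        self._ezpickle_kwargs = kwargs

    def __getstate__(self):
        return {"args": self._ezpickle_args, "kwargs": self._ezpickle_kwargs}

    def __setstate__(self, state):
        # Rebuild by re-running __init__ with the remembered arguments; if nothing
        # was remembered, every custom setting (e.g. distance_threshold) is lost.
        self.__init__(*state["args"], **state["kwargs"])
```

With the arguments recorded at construction time, the `distance_threshold=1e-6` in the reporter's script would survive the pickle round trip.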
<code>
[start of gym/envs/robotics/hand/reach.py]
1 import os
2 import numpy as np
3
4 from gym import utils
5 from gym.envs.robotics import hand_env
6 from gym.envs.robotics.utils import robot_get_obs
7
8
9 FINGERTIP_SITE_NAMES = [
10 'robot0:S_fftip',
11 'robot0:S_mftip',
12 'robot0:S_rftip',
13 'robot0:S_lftip',
14 'robot0:S_thtip',
15 ]
16
17
18 DEFAULT_INITIAL_QPOS = {
19 'robot0:WRJ1': -0.16514339750464327,
20 'robot0:WRJ0': -0.31973286565062153,
21 'robot0:FFJ3': 0.14340512546557435,
22 'robot0:FFJ2': 0.32028208333591573,
23 'robot0:FFJ1': 0.7126053607727917,
24 'robot0:FFJ0': 0.6705281001412586,
25 'robot0:MFJ3': 0.000246444303701037,
26 'robot0:MFJ2': 0.3152655251085491,
27 'robot0:MFJ1': 0.7659800313729842,
28 'robot0:MFJ0': 0.7323156897425923,
29 'robot0:RFJ3': 0.00038520700007378114,
30 'robot0:RFJ2': 0.36743546201985233,
31 'robot0:RFJ1': 0.7119514095008576,
32 'robot0:RFJ0': 0.6699446327514138,
33 'robot0:LFJ4': 0.0525442258033891,
34 'robot0:LFJ3': -0.13615534724474673,
35 'robot0:LFJ2': 0.39872030433433003,
36 'robot0:LFJ1': 0.7415570009679252,
37 'robot0:LFJ0': 0.704096378652974,
38 'robot0:THJ4': 0.003673823825070126,
39 'robot0:THJ3': 0.5506291436028695,
40 'robot0:THJ2': -0.014515151997119306,
41 'robot0:THJ1': -0.0015229223564485414,
42 'robot0:THJ0': -0.7894883021600622,
43 }
44
45
46 # Ensure we get the path separator correct on windows
47 MODEL_XML_PATH = os.path.join('hand', 'reach.xml')
48
49
50 def goal_distance(goal_a, goal_b):
51 assert goal_a.shape == goal_b.shape
52 return np.linalg.norm(goal_a - goal_b, axis=-1)
53
54
55 class HandReachEnv(hand_env.HandEnv, utils.EzPickle):
56 def __init__(
57 self, distance_threshold=0.01, n_substeps=20, relative_control=False,
58 initial_qpos=DEFAULT_INITIAL_QPOS, reward_type='sparse',
59 ):
60 self.distance_threshold = distance_threshold
61 self.reward_type = reward_type
62
63 hand_env.HandEnv.__init__(
64 self, MODEL_XML_PATH, n_substeps=n_substeps, initial_qpos=initial_qpos,
65 relative_control=relative_control)
66 utils.EzPickle.__init__(self)
67
68 def _get_achieved_goal(self):
69 goal = [self.sim.data.get_site_xpos(name) for name in FINGERTIP_SITE_NAMES]
70 return np.array(goal).flatten()
71
72 # GoalEnv methods
73 # ----------------------------
74
75 def compute_reward(self, achieved_goal, goal, info):
76 d = goal_distance(achieved_goal, goal)
77 if self.reward_type == 'sparse':
78 return -(d > self.distance_threshold).astype(np.float32)
79 else:
80 return -d
81
82 # RobotEnv methods
83 # ----------------------------
84
85 def _env_setup(self, initial_qpos):
86 for name, value in initial_qpos.items():
87 self.sim.data.set_joint_qpos(name, value)
88 self.sim.forward()
89
90 self.initial_goal = self._get_achieved_goal().copy()
91 self.palm_xpos = self.sim.data.body_xpos[self.sim.model.body_name2id('robot0:palm')].copy()
92
93 def _get_obs(self):
94 robot_qpos, robot_qvel = robot_get_obs(self.sim)
95 achieved_goal = self._get_achieved_goal().ravel()
96 observation = np.concatenate([robot_qpos, robot_qvel, achieved_goal])
97 return {
98 'observation': observation.copy(),
99 'achieved_goal': achieved_goal.copy(),
100 'desired_goal': self.goal.copy(),
101 }
102
103 def _sample_goal(self):
104 thumb_name = 'robot0:S_thtip'
105 finger_names = [name for name in FINGERTIP_SITE_NAMES if name != thumb_name]
106 finger_name = self.np_random.choice(finger_names)
107
108 thumb_idx = FINGERTIP_SITE_NAMES.index(thumb_name)
109 finger_idx = FINGERTIP_SITE_NAMES.index(finger_name)
110 assert thumb_idx != finger_idx
111
112 # Pick a meeting point above the hand.
113 meeting_pos = self.palm_xpos + np.array([0.0, -0.09, 0.05])
114 meeting_pos += self.np_random.normal(scale=0.005, size=meeting_pos.shape)
115
116 # Slightly move meeting goal towards the respective finger to avoid that they
117 # overlap.
118 goal = self.initial_goal.copy().reshape(-1, 3)
119 for idx in [thumb_idx, finger_idx]:
120 offset_direction = (meeting_pos - goal[idx])
121 offset_direction /= np.linalg.norm(offset_direction)
122 goal[idx] = meeting_pos - 0.005 * offset_direction
123
124 if self.np_random.uniform() < 0.1:
125 # With some probability, ask all fingers to move back to the origin.
126 # This avoids that the thumb constantly stays near the goal position already.
127 goal = self.initial_goal.copy()
128 return goal.flatten()
129
130 def _is_success(self, achieved_goal, desired_goal):
131 d = goal_distance(achieved_goal, desired_goal)
132 return (d < self.distance_threshold).astype(np.float32)
133
134 def _render_callback(self):
135 # Visualize targets.
136 sites_offset = (self.sim.data.site_xpos - self.sim.model.site_pos).copy()
137 goal = self.goal.reshape(5, 3)
138 for finger_idx in range(5):
139 site_name = 'target{}'.format(finger_idx)
140 site_id = self.sim.model.site_name2id(site_name)
141 self.sim.model.site_pos[site_id] = goal[finger_idx] - sites_offset[site_id]
142
143 # Visualize finger positions.
144 achieved_goal = self._get_achieved_goal().reshape(5, 3)
145 for finger_idx in range(5):
146 site_name = 'finger{}'.format(finger_idx)
147 site_id = self.sim.model.site_name2id(site_name)
148 self.sim.model.site_pos[site_id] = achieved_goal[finger_idx] - sites_offset[site_id]
149 self.sim.forward()
150
[end of gym/envs/robotics/hand/reach.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gym/envs/robotics/hand/reach.py b/gym/envs/robotics/hand/reach.py
--- a/gym/envs/robotics/hand/reach.py
+++ b/gym/envs/robotics/hand/reach.py
@@ -57,13 +57,13 @@
self, distance_threshold=0.01, n_substeps=20, relative_control=False,
initial_qpos=DEFAULT_INITIAL_QPOS, reward_type='sparse',
):
+ utils.EzPickle.__init__(**locals())
self.distance_threshold = distance_threshold
self.reward_type = reward_type
hand_env.HandEnv.__init__(
self, MODEL_XML_PATH, n_substeps=n_substeps, initial_qpos=initial_qpos,
relative_control=relative_control)
- utils.EzPickle.__init__(self)
def _get_achieved_goal(self):
goal = [self.sim.data.get_site_xpos(name) for name in FINGERTIP_SITE_NAMES]
| {"golden_diff": "diff --git a/gym/envs/robotics/hand/reach.py b/gym/envs/robotics/hand/reach.py\n--- a/gym/envs/robotics/hand/reach.py\n+++ b/gym/envs/robotics/hand/reach.py\n@@ -57,13 +57,13 @@\n self, distance_threshold=0.01, n_substeps=20, relative_control=False,\n initial_qpos=DEFAULT_INITIAL_QPOS, reward_type='sparse',\n ):\n+ utils.EzPickle.__init__(**locals())\n self.distance_threshold = distance_threshold\n self.reward_type = reward_type\n \n hand_env.HandEnv.__init__(\n self, MODEL_XML_PATH, n_substeps=n_substeps, initial_qpos=initial_qpos,\n relative_control=relative_control)\n- utils.EzPickle.__init__(self)\n \n def _get_achieved_goal(self):\n goal = [self.sim.data.get_site_xpos(name) for name in FINGERTIP_SITE_NAMES]\n", "issue": "EzPickle used incorrectly on some environments\nSome environments use `EzPickle` class incorrectly and thus the environment serialization does not result in an expected copy of the environment. An example of such class is the `HandReach-v0`: https://github.com/openai/gym/blob/a488c2fe2fbd4408ba9e193985ec2d86033cab6d/gym/envs/robotics/hand/reach.py#L66\r\n\r\nI think the correct usage would be to move the call in the beginning of the `__init__`-method and call\r\n```python\r\nutils.EzPickle.__init__(\r\n distance_threshold=distance_threshold,\r\n n_substeps=n_substeps,\r\n relative_control=relative_control,\r\n initial_qpos=initial_qpos,\r\n reward_type=reward_type,\r\n)\r\n```\r\nor alternatively\r\n```python\r\nutils.EzPickle.__init__(**locals())\r\n```\r\nHere's a simple script to verify the issue:\r\n```python\r\nimport pickle\r\n\r\nimport gym\r\n\r\n\r\ndef main():\r\n env1 = gym.envs.make('HandReach-v0', distance_threshold=1e-6)\r\n env2 = pickle.loads(pickle.dumps(env1))\r\n\r\n assert env1.distance_threshold == env2.distance_threshold, (\r\n env1.distance_threshold, env2.distance_threshold)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n```\r\n\r\nRunning it results in the following:\r\n```\r\n$ python ./tests/test_hand_copy.py\r\nTraceback (most recent call last):\r\n File \"./tests/test_hand_copy.py\", line 15, in <module>\r\n main()\r\n File \"./tests/test_hand_copy.py\", line 11, in main\r\n env1.distance_threshold, env2.distance_threshold)\r\nAssertionError: (1e-06, 0.01)\r\n```\r\n\n", "before_files": [{"content": "import os\nimport numpy as np\n\nfrom gym import utils\nfrom gym.envs.robotics import hand_env\nfrom gym.envs.robotics.utils import robot_get_obs\n\n\nFINGERTIP_SITE_NAMES = [\n 'robot0:S_fftip',\n 'robot0:S_mftip',\n 'robot0:S_rftip',\n 'robot0:S_lftip',\n 'robot0:S_thtip',\n]\n\n\nDEFAULT_INITIAL_QPOS = {\n 'robot0:WRJ1': -0.16514339750464327,\n 'robot0:WRJ0': -0.31973286565062153,\n 'robot0:FFJ3': 0.14340512546557435,\n 'robot0:FFJ2': 0.32028208333591573,\n 'robot0:FFJ1': 0.7126053607727917,\n 'robot0:FFJ0': 0.6705281001412586,\n 'robot0:MFJ3': 0.000246444303701037,\n 'robot0:MFJ2': 0.3152655251085491,\n 'robot0:MFJ1': 0.7659800313729842,\n 'robot0:MFJ0': 0.7323156897425923,\n 'robot0:RFJ3': 0.00038520700007378114,\n 'robot0:RFJ2': 0.36743546201985233,\n 'robot0:RFJ1': 0.7119514095008576,\n 'robot0:RFJ0': 0.6699446327514138,\n 'robot0:LFJ4': 0.0525442258033891,\n 'robot0:LFJ3': -0.13615534724474673,\n 'robot0:LFJ2': 0.39872030433433003,\n 'robot0:LFJ1': 0.7415570009679252,\n 'robot0:LFJ0': 0.704096378652974,\n 'robot0:THJ4': 0.003673823825070126,\n 'robot0:THJ3': 0.5506291436028695,\n 'robot0:THJ2': -0.014515151997119306,\n 'robot0:THJ1': -0.0015229223564485414,\n 'robot0:THJ0': -0.7894883021600622,\n}\n\n\n# Ensure 
we get the path separator correct on windows\nMODEL_XML_PATH = os.path.join('hand', 'reach.xml')\n\n\ndef goal_distance(goal_a, goal_b):\n assert goal_a.shape == goal_b.shape\n return np.linalg.norm(goal_a - goal_b, axis=-1)\n\n\nclass HandReachEnv(hand_env.HandEnv, utils.EzPickle):\n def __init__(\n self, distance_threshold=0.01, n_substeps=20, relative_control=False,\n initial_qpos=DEFAULT_INITIAL_QPOS, reward_type='sparse',\n ):\n self.distance_threshold = distance_threshold\n self.reward_type = reward_type\n\n hand_env.HandEnv.__init__(\n self, MODEL_XML_PATH, n_substeps=n_substeps, initial_qpos=initial_qpos,\n relative_control=relative_control)\n utils.EzPickle.__init__(self)\n\n def _get_achieved_goal(self):\n goal = [self.sim.data.get_site_xpos(name) for name in FINGERTIP_SITE_NAMES]\n return np.array(goal).flatten()\n\n # GoalEnv methods\n # ----------------------------\n\n def compute_reward(self, achieved_goal, goal, info):\n d = goal_distance(achieved_goal, goal)\n if self.reward_type == 'sparse':\n return -(d > self.distance_threshold).astype(np.float32)\n else:\n return -d\n\n # RobotEnv methods\n # ----------------------------\n\n def _env_setup(self, initial_qpos):\n for name, value in initial_qpos.items():\n self.sim.data.set_joint_qpos(name, value)\n self.sim.forward()\n\n self.initial_goal = self._get_achieved_goal().copy()\n self.palm_xpos = self.sim.data.body_xpos[self.sim.model.body_name2id('robot0:palm')].copy()\n\n def _get_obs(self):\n robot_qpos, robot_qvel = robot_get_obs(self.sim)\n achieved_goal = self._get_achieved_goal().ravel()\n observation = np.concatenate([robot_qpos, robot_qvel, achieved_goal])\n return {\n 'observation': observation.copy(),\n 'achieved_goal': achieved_goal.copy(),\n 'desired_goal': self.goal.copy(),\n }\n\n def _sample_goal(self):\n thumb_name = 'robot0:S_thtip'\n finger_names = [name for name in FINGERTIP_SITE_NAMES if name != thumb_name]\n finger_name = self.np_random.choice(finger_names)\n\n thumb_idx = FINGERTIP_SITE_NAMES.index(thumb_name)\n finger_idx = FINGERTIP_SITE_NAMES.index(finger_name)\n assert thumb_idx != finger_idx\n\n # Pick a meeting point above the hand.\n meeting_pos = self.palm_xpos + np.array([0.0, -0.09, 0.05])\n meeting_pos += self.np_random.normal(scale=0.005, size=meeting_pos.shape)\n\n # Slightly move meeting goal towards the respective finger to avoid that they\n # overlap.\n goal = self.initial_goal.copy().reshape(-1, 3)\n for idx in [thumb_idx, finger_idx]:\n offset_direction = (meeting_pos - goal[idx])\n offset_direction /= np.linalg.norm(offset_direction)\n goal[idx] = meeting_pos - 0.005 * offset_direction\n\n if self.np_random.uniform() < 0.1:\n # With some probability, ask all fingers to move back to the origin.\n # This avoids that the thumb constantly stays near the goal position already.\n goal = self.initial_goal.copy()\n return goal.flatten()\n\n def _is_success(self, achieved_goal, desired_goal):\n d = goal_distance(achieved_goal, desired_goal)\n return (d < self.distance_threshold).astype(np.float32)\n\n def _render_callback(self):\n # Visualize targets.\n sites_offset = (self.sim.data.site_xpos - self.sim.model.site_pos).copy()\n goal = self.goal.reshape(5, 3)\n for finger_idx in range(5):\n site_name = 'target{}'.format(finger_idx)\n site_id = self.sim.model.site_name2id(site_name)\n self.sim.model.site_pos[site_id] = goal[finger_idx] - sites_offset[site_id]\n\n # Visualize finger positions.\n achieved_goal = self._get_achieved_goal().reshape(5, 3)\n for finger_idx in range(5):\n site_name = 
'finger{}'.format(finger_idx)\n site_id = self.sim.model.site_name2id(site_name)\n self.sim.model.site_pos[site_id] = achieved_goal[finger_idx] - sites_offset[site_id]\n self.sim.forward()\n", "path": "gym/envs/robotics/hand/reach.py"}]} | 3,117 | 225 |
gh_patches_debug_16293 | rasdani/github-patches | git_diff | nf-core__tools-1261 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Linting does not recognize README Nextflow minimum version mention in Quick Start
When running `nf-core lint` with a readme file that has the following in it:
```
## Quick Start
1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)
```
A warning is triggered:
```
readme: README did not have a Nextflow minimum version mentioned in Quick Start section.
```
This warning should not be triggering as the minimum nextflow version is in the readme file.
Link to code location
https://github.com/nf-core/tools/blob/01291016652284bfba23a900399fa0155906a7c5/nf_core/lint/readme.py#L65-L66
</issue>
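(Editorial illustration, not part of the original report.) The mismatch is easy to reproduce with plain `re`: the lint pattern pins the link URL to the old nf-co.re installation page, so the newer Quick Start line can never match. The relaxed pattern below is only a sketch of one possible fix, not necessarily the change that was merged upstream.

```python
import re

quick_start = (
    "1. Install [`Nextflow`]"
    "(https://www.nextflow.io/docs/latest/getstarted.html#installation) "
    "(`>=21.04.0`)"
)

# Pattern currently used by the readme lint test (URL pinned to nf-co.re):
old_pattern = (
    r"1\.\s*Install\s*\[`Nextflow`\]\(https://nf-co.re/usage/installation\)"
    r"\s*\(`>=(\d*\.\d*\.\d*)`\)"
)
print(re.search(old_pattern, quick_start))  # None -> the lint warning fires

# Sketch of a more permissive pattern accepting either link target:
relaxed = (
    r"1\.\s*Install\s*\[`Nextflow`\]\("
    r"(?:https://nf-co\.re/usage/installation"
    r"|https://www\.nextflow\.io/docs/latest/getstarted\.html#installation)\)"
    r"\s*\(`>=(\d*\.\d*\.\d*)`\)"
)
match = re.search(relaxed, quick_start)
print(match.group(1) if match else None)  # 21.04.0
```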
<code>
[start of nf_core/lint/readme.py]
1 #!/usr/bin/env python
2
3 import os
4 import re
5
6
7 def readme(self):
8 """Repository ``README.md`` tests
9
10 The ``README.md`` files for a project are very important and must meet some requirements:
11
12 * Nextflow badge
13
14 * If no Nextflow badge is found, a warning is given
15 * If a badge is found but the version doesn't match the minimum version in the config file, the test fails
16 * Example badge code:
17
18 .. code-block:: md
19
20 [](https://www.nextflow.io/)
21
22 * Bioconda badge
23
24 * If your pipeline contains a file called ``environment.yml`` in the root directory, a bioconda badge is required
25 * Required badge code:
26
27 .. code-block:: md
28
29 [](https://bioconda.github.io/)
30
31 .. note:: These badges are a markdown image ```` *inside* a markdown link ``[markdown image](<link URL>)``, so a bit fiddly to write.
32 """
33 passed = []
34 warned = []
35 failed = []
36
37 with open(os.path.join(self.wf_path, "README.md"), "r") as fh:
38 content = fh.read()
39
40 # Check that there is a readme badge showing the minimum required version of Nextflow
41 # [](https://www.nextflow.io/)
42 # and that it has the correct version
43 nf_badge_re = r"\[!\[Nextflow\]\(https://img\.shields\.io/badge/nextflow%20DSL2-%E2%89%A5([\d\.]+)-23aa62\.svg\?labelColor=000000\)\]\(https://www\.nextflow\.io/\)"
44 match = re.search(nf_badge_re, content)
45 if match:
46 nf_badge_version = match.group(1).strip("'\"")
47 try:
48 assert nf_badge_version == self.minNextflowVersion
49 except (AssertionError, KeyError):
50 failed.append(
51 "README Nextflow minimum version badge does not match config. Badge: `{}`, Config: `{}`".format(
52 nf_badge_version, self.minNextflowVersion
53 )
54 )
55 else:
56 passed.append(
57 "README Nextflow minimum version badge matched config. Badge: `{}`, Config: `{}`".format(
58 nf_badge_version, self.minNextflowVersion
59 )
60 )
61 else:
62 warned.append("README did not have a Nextflow minimum version badge.")
63
64 # Check that the minimum version mentioned in the quick start section is consistent
65 # Looking for: "1. Install [`Nextflow`](https://nf-co.re/usage/installation) (`>=21.04.0`)"
66 nf_version_re = r"1\.\s*Install\s*\[`Nextflow`\]\(https://nf-co.re/usage/installation\)\s*\(`>=(\d*\.\d*\.\d*)`\)"
67 match = re.search(nf_version_re, content)
68 if match:
69 nf_quickstart_version = match.group(1)
70 try:
71 assert nf_quickstart_version == self.minNextflowVersion
72 except (AssertionError, KeyError):
73 failed.append(
74 f"README Nextflow minimium version in Quick Start section does not match config. README: `{nf_quickstart_version}`, Config `{self.minNextflowVersion}`"
75 )
76 else:
77 passed.append(
78 f"README Nextflow minimum version in Quick Start section matched config. README: `{nf_quickstart_version}`, Config: `{self.minNextflowVersion}`"
79 )
80 else:
81 warned.append("README did not have a Nextflow minimum version mentioned in Quick Start section.")
82
83 return {"passed": passed, "warned": warned, "failed": failed}
84
[end of nf_core/lint/readme.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nf_core/lint/readme.py b/nf_core/lint/readme.py
--- a/nf_core/lint/readme.py
+++ b/nf_core/lint/readme.py
@@ -62,8 +62,8 @@
warned.append("README did not have a Nextflow minimum version badge.")
# Check that the minimum version mentioned in the quick start section is consistent
- # Looking for: "1. Install [`Nextflow`](https://nf-co.re/usage/installation) (`>=21.04.0`)"
- nf_version_re = r"1\.\s*Install\s*\[`Nextflow`\]\(https://nf-co.re/usage/installation\)\s*\(`>=(\d*\.\d*\.\d*)`\)"
+ # Looking for: "1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)"
+ nf_version_re = r"1\.\s*Install\s*\[`Nextflow`\]\(https://www.nextflow.io/docs/latest/getstarted.html#installation\)\s*\(`>=(\d*\.\d*\.\d*)`\)"
match = re.search(nf_version_re, content)
if match:
nf_quickstart_version = match.group(1)
| {"golden_diff": "diff --git a/nf_core/lint/readme.py b/nf_core/lint/readme.py\n--- a/nf_core/lint/readme.py\n+++ b/nf_core/lint/readme.py\n@@ -62,8 +62,8 @@\n warned.append(\"README did not have a Nextflow minimum version badge.\")\n \n # Check that the minimum version mentioned in the quick start section is consistent\n- # Looking for: \"1. Install [`Nextflow`](https://nf-co.re/usage/installation) (`>=21.04.0`)\"\n- nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://nf-co.re/usage/installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n+ # Looking for: \"1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)\"\n+ nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://www.nextflow.io/docs/latest/getstarted.html#installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n match = re.search(nf_version_re, content)\n if match:\n nf_quickstart_version = match.group(1)\n", "issue": "Linting does not recognize README Nextflow minimum version mention in Quick Start\nWhen running `nf-core lint` with a readme file that has the following in it:\r\n\r\n```\r\n## Quick Start\r\n\r\n1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)\r\n```\r\n\r\nA warning is triggered:\r\n\r\n```\r\nreadme: README did not have a Nextflow minimum version mentioned in Quick Start section.\r\n```\r\n\r\nThis warning should not be triggering as the minimum nextflow version is in the readme file.\r\n\r\nLink to code location\r\n\r\nhttps://github.com/nf-core/tools/blob/01291016652284bfba23a900399fa0155906a7c5/nf_core/lint/readme.py#L65-L66\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport re\n\n\ndef readme(self):\n \"\"\"Repository ``README.md`` tests\n\n The ``README.md`` files for a project are very important and must meet some requirements:\n\n * Nextflow badge\n\n * If no Nextflow badge is found, a warning is given\n * If a badge is found but the version doesn't match the minimum version in the config file, the test fails\n * Example badge code:\n\n .. code-block:: md\n\n [](https://www.nextflow.io/)\n\n * Bioconda badge\n\n * If your pipeline contains a file called ``environment.yml`` in the root directory, a bioconda badge is required\n * Required badge code:\n\n .. code-block:: md\n\n [](https://bioconda.github.io/)\n\n .. note:: These badges are a markdown image ```` *inside* a markdown link ``[markdown image](<link URL>)``, so a bit fiddly to write.\n \"\"\"\n passed = []\n warned = []\n failed = []\n\n with open(os.path.join(self.wf_path, \"README.md\"), \"r\") as fh:\n content = fh.read()\n\n # Check that there is a readme badge showing the minimum required version of Nextflow\n # [](https://www.nextflow.io/)\n # and that it has the correct version\n nf_badge_re = r\"\\[!\\[Nextflow\\]\\(https://img\\.shields\\.io/badge/nextflow%20DSL2-%E2%89%A5([\\d\\.]+)-23aa62\\.svg\\?labelColor=000000\\)\\]\\(https://www\\.nextflow\\.io/\\)\"\n match = re.search(nf_badge_re, content)\n if match:\n nf_badge_version = match.group(1).strip(\"'\\\"\")\n try:\n assert nf_badge_version == self.minNextflowVersion\n except (AssertionError, KeyError):\n failed.append(\n \"README Nextflow minimum version badge does not match config. Badge: `{}`, Config: `{}`\".format(\n nf_badge_version, self.minNextflowVersion\n )\n )\n else:\n passed.append(\n \"README Nextflow minimum version badge matched config. 
Badge: `{}`, Config: `{}`\".format(\n nf_badge_version, self.minNextflowVersion\n )\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version badge.\")\n\n # Check that the minimum version mentioned in the quick start section is consistent\n # Looking for: \"1. Install [`Nextflow`](https://nf-co.re/usage/installation) (`>=21.04.0`)\"\n nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://nf-co.re/usage/installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n match = re.search(nf_version_re, content)\n if match:\n nf_quickstart_version = match.group(1)\n try:\n assert nf_quickstart_version == self.minNextflowVersion\n except (AssertionError, KeyError):\n failed.append(\n f\"README Nextflow minimium version in Quick Start section does not match config. README: `{nf_quickstart_version}`, Config `{self.minNextflowVersion}`\"\n )\n else:\n passed.append(\n f\"README Nextflow minimum version in Quick Start section matched config. README: `{nf_quickstart_version}`, Config: `{self.minNextflowVersion}`\"\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version mentioned in Quick Start section.\")\n\n return {\"passed\": passed, \"warned\": warned, \"failed\": failed}\n", "path": "nf_core/lint/readme.py"}]} | 1,823 | 291 |
gh_patches_debug_50355 | rasdani/github-patches | git_diff | pypi__warehouse-6747 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Brazilian Portugese to localization footer
https://hosted.weblate.org/projects/pypa/warehouse/#translations says that we're now at 100% translated for Brazilian Portugese. Therefore, let's insert the footer of available locales/translations per #6624, and add Brazilian Portugese.
@yeraydiazdiaz @nlhkabu can either of you do this? Thanks.
</issue>
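(Editorial sketch, not from the issue thread.) `KNOWN_LOCALES` drives both the locale footer and the `Accept-Language` negotiation in `warehouse/i18n/__init__.py`, so exposing the finished translation is a single dictionary entry; the `pt_BR` label below mirrors the golden diff shown later in this record, while the `best_match` helper is only a crude stand-in for the real negotiation, which also honours quality values.

```python
KNOWN_LOCALES = {"en": "English", "pt_BR": "Portuguese (Brazil)"}


def best_match(accepted, default="en"):
    # Simplified stand-in for request.accept_language.best_match(...):
    # return the first requested locale we actually know about.
    for candidate in accepted:
        if candidate in KNOWN_LOCALES:
            return candidate
    return default


print(best_match(["pt_BR", "pt", "en"]))  # pt_BR once the key is registered
print(best_match(["de", "fr"]))           # en (falls back to the default)
```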
<code>
[start of warehouse/i18n/__init__.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from babel.core import Locale
14 from pyramid.i18n import TranslationStringFactory, default_locale_negotiator
15 from pyramid.threadlocal import get_current_request
16
17 KNOWN_LOCALES = {"en": "English"}
18
19 LOCALE_ATTR = "_LOCALE_"
20
21 _translation_factory = TranslationStringFactory("messages")
22
23
24 class LazyString:
25 def __init__(self, fn, *args, **kwargs):
26 self.fn = fn
27 self.args = args
28 self.mapping = kwargs.get("mapping", {})
29 self.kwargs = kwargs
30
31 def __json__(self, request):
32 return str(self)
33
34 def __mod__(self, new_mapping):
35 mapping = self.mapping.copy()
36 mapping.update(new_mapping)
37 return LazyString(self.fn, *self.args, mapping=new_mapping, **self.kwargs)
38
39 def __str__(self):
40 return self.fn(*self.args, **self.kwargs)
41
42
43 def _locale(request):
44 """
45 Computes a babel.core:Locale() object for this request.
46 """
47 return Locale.parse(request.locale_name, sep="_")
48
49
50 def _negotiate_locale(request):
51 locale_name = getattr(request, LOCALE_ATTR, None)
52 if locale_name is not None:
53 return locale_name
54
55 locale_name = request.params.get(LOCALE_ATTR)
56 if locale_name is not None:
57 return locale_name
58
59 locale_name = request.cookies.get(LOCALE_ATTR)
60 if locale_name is not None:
61 return locale_name
62
63 if not request.accept_language:
64 return default_locale_negotiator(request)
65
66 return request.accept_language.best_match(
67 tuple(KNOWN_LOCALES.keys()), default_match=default_locale_negotiator(request)
68 )
69
70
71 def localize(message, **kwargs):
72 def _localize(message, **kwargs):
73 request = get_current_request()
74 return request.localizer.translate(_translation_factory(message, **kwargs))
75
76 return LazyString(_localize, message, **kwargs)
77
78
79 def includeme(config):
80 # Add the request attributes
81 config.add_request_method(_locale, name="locale", reify=True)
82
83 # Register our translation directory.
84 config.add_translation_dirs("warehouse:locale/")
85
86 config.set_locale_negotiator(_negotiate_locale)
87
88 # Register our i18n/l10n filters for Jinja2
89 filters = config.get_settings().setdefault("jinja2.filters", {})
90 filters.setdefault("format_date", "warehouse.i18n.filters:format_date")
91 filters.setdefault("format_datetime", "warehouse.i18n.filters:format_datetime")
92 filters.setdefault(
93 "format_rfc822_datetime", "warehouse.i18n.filters:format_rfc822_datetime"
94 )
95 filters.setdefault("format_number", "warehouse.i18n.filters:format_number")
96
97 jglobals = config.get_settings().setdefault("jinja2.globals", {})
98 jglobals.setdefault("KNOWN_LOCALES", "warehouse.i18n:KNOWN_LOCALES")
99
[end of warehouse/i18n/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/i18n/__init__.py b/warehouse/i18n/__init__.py
--- a/warehouse/i18n/__init__.py
+++ b/warehouse/i18n/__init__.py
@@ -14,7 +14,7 @@
from pyramid.i18n import TranslationStringFactory, default_locale_negotiator
from pyramid.threadlocal import get_current_request
-KNOWN_LOCALES = {"en": "English"}
+KNOWN_LOCALES = {"en": "English", "pt_BR": "Portuguese (Brazil)"}
LOCALE_ATTR = "_LOCALE_"
| {"golden_diff": "diff --git a/warehouse/i18n/__init__.py b/warehouse/i18n/__init__.py\n--- a/warehouse/i18n/__init__.py\n+++ b/warehouse/i18n/__init__.py\n@@ -14,7 +14,7 @@\n from pyramid.i18n import TranslationStringFactory, default_locale_negotiator\n from pyramid.threadlocal import get_current_request\n \n-KNOWN_LOCALES = {\"en\": \"English\"}\n+KNOWN_LOCALES = {\"en\": \"English\", \"pt_BR\": \"Portuguese (Brazil)\"}\n \n LOCALE_ATTR = \"_LOCALE_\"\n", "issue": "Add Brazilian Portugese to localization footer\nhttps://hosted.weblate.org/projects/pypa/warehouse/#translations says that we're now at 100% translated for Brazilian Portugese. Therefore, let's insert the footer of available locales/translations per #6624, and add Brazilian Portugese.\r\n\r\n@yeraydiazdiaz @nlhkabu can either of you do this? Thanks.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom babel.core import Locale\nfrom pyramid.i18n import TranslationStringFactory, default_locale_negotiator\nfrom pyramid.threadlocal import get_current_request\n\nKNOWN_LOCALES = {\"en\": \"English\"}\n\nLOCALE_ATTR = \"_LOCALE_\"\n\n_translation_factory = TranslationStringFactory(\"messages\")\n\n\nclass LazyString:\n def __init__(self, fn, *args, **kwargs):\n self.fn = fn\n self.args = args\n self.mapping = kwargs.get(\"mapping\", {})\n self.kwargs = kwargs\n\n def __json__(self, request):\n return str(self)\n\n def __mod__(self, new_mapping):\n mapping = self.mapping.copy()\n mapping.update(new_mapping)\n return LazyString(self.fn, *self.args, mapping=new_mapping, **self.kwargs)\n\n def __str__(self):\n return self.fn(*self.args, **self.kwargs)\n\n\ndef _locale(request):\n \"\"\"\n Computes a babel.core:Locale() object for this request.\n \"\"\"\n return Locale.parse(request.locale_name, sep=\"_\")\n\n\ndef _negotiate_locale(request):\n locale_name = getattr(request, LOCALE_ATTR, None)\n if locale_name is not None:\n return locale_name\n\n locale_name = request.params.get(LOCALE_ATTR)\n if locale_name is not None:\n return locale_name\n\n locale_name = request.cookies.get(LOCALE_ATTR)\n if locale_name is not None:\n return locale_name\n\n if not request.accept_language:\n return default_locale_negotiator(request)\n\n return request.accept_language.best_match(\n tuple(KNOWN_LOCALES.keys()), default_match=default_locale_negotiator(request)\n )\n\n\ndef localize(message, **kwargs):\n def _localize(message, **kwargs):\n request = get_current_request()\n return request.localizer.translate(_translation_factory(message, **kwargs))\n\n return LazyString(_localize, message, **kwargs)\n\n\ndef includeme(config):\n # Add the request attributes\n config.add_request_method(_locale, name=\"locale\", reify=True)\n\n # Register our translation directory.\n config.add_translation_dirs(\"warehouse:locale/\")\n\n config.set_locale_negotiator(_negotiate_locale)\n\n # Register our i18n/l10n filters for Jinja2\n filters = config.get_settings().setdefault(\"jinja2.filters\", {})\n filters.setdefault(\"format_date\", 
\"warehouse.i18n.filters:format_date\")\n filters.setdefault(\"format_datetime\", \"warehouse.i18n.filters:format_datetime\")\n filters.setdefault(\n \"format_rfc822_datetime\", \"warehouse.i18n.filters:format_rfc822_datetime\"\n )\n filters.setdefault(\"format_number\", \"warehouse.i18n.filters:format_number\")\n\n jglobals = config.get_settings().setdefault(\"jinja2.globals\", {})\n jglobals.setdefault(\"KNOWN_LOCALES\", \"warehouse.i18n:KNOWN_LOCALES\")\n", "path": "warehouse/i18n/__init__.py"}]} | 1,579 | 137 |
gh_patches_debug_31430 | rasdani/github-patches | git_diff | chaoss__augur-2541 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue data cntrb_id null for closed issues
Cntrb_id is stated to be "The ID of the person that closed the issue" and even for closed issues I am seeing nulls
</issue>
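(Editorial sketch, not part of the issue.) The fix recorded later in this entry backfills `issues.cntrb_id` from the actor of each issue's most recent `closed` event. The same idea can be written as a single PostgreSQL statement wrapped in SQLAlchemy; table and column names follow the golden diff, everything else is simplified.

```python
import sqlalchemy as s

backfill = s.text("""
    WITH ranked AS (
        SELECT issue_id, cntrb_id,
               ROW_NUMBER() OVER (PARTITION BY issue_id
                                  ORDER BY created_at DESC) AS rn
        FROM issue_events
        WHERE "action" = 'closed'
    )
    UPDATE issues
    SET cntrb_id = ranked.cntrb_id
    FROM ranked
    WHERE ranked.rn = 1
      AND issues.issue_id = ranked.issue_id
""")

# Executed against the augur database this would be, roughly:
# with engine.connect() as connection:
#     connection.execute(backfill)
```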
<code>
[start of augur/tasks/github/events/tasks.py]
1 import time
2 import logging
3 import traceback
4
5 from augur.tasks.init.celery_app import celery_app as celery
6 from augur.tasks.init.celery_app import AugurCoreRepoCollectionTask
7 from augur.application.db.data_parse import *
8 from augur.tasks.github.util.github_paginator import GithubPaginator, hit_api
9 from augur.tasks.github.util.github_task_session import GithubTaskManifest
10 from augur.application.db.session import DatabaseSession
11 from augur.tasks.github.util.util import get_owner_repo
12 from augur.tasks.util.worker_util import remove_duplicate_dicts
13 from augur.application.db.models import PullRequest, Message, PullRequestReview, PullRequestLabel, PullRequestReviewer, PullRequestEvent, PullRequestMeta, PullRequestAssignee, PullRequestReviewMessageRef, Issue, IssueEvent, IssueLabel, IssueAssignee, PullRequestMessageRef, IssueMessageRef, Contributor, Repo
14 from augur.application.db.util import execute_session_query
15
16 platform_id = 1
17
18 @celery.task(base=AugurCoreRepoCollectionTask)
19 def collect_events(repo_git: str):
20
21 logger = logging.getLogger(collect_events.__name__)
22
23 with GithubTaskManifest(logger) as manifest:
24
25 augur_db = manifest.augur_db
26
27 try:
28
29 query = augur_db.session.query(Repo).filter(Repo.repo_git == repo_git)
30 repo_obj = execute_session_query(query, 'one')
31 repo_id = repo_obj.repo_id
32
33 owner, repo = get_owner_repo(repo_git)
34
35 logger.info(f"Collecting Github events for {owner}/{repo}")
36
37 url = f"https://api.github.com/repos/{owner}/{repo}/issues/events"
38
39 event_data = retrieve_all_event_data(repo_git, logger, manifest.key_auth)
40
41 if event_data:
42
43 process_events(event_data, f"{owner}/{repo}: Event task", repo_id, logger, manifest.augur_db)
44
45 else:
46 logger.info(f"{owner}/{repo} has no events")
47 except Exception as e:
48 logger.error(f"Could not collect events for {repo_git}\n Reason: {e} \n Traceback: {''.join(traceback.format_exception(None, e, e.__traceback__))}")
49
50
51 def retrieve_all_event_data(repo_git: str, logger, key_auth):
52
53 owner, repo = get_owner_repo(repo_git)
54
55 logger.info(f"Collecting Github events for {owner}/{repo}")
56
57 url = f"https://api.github.com/repos/{owner}/{repo}/issues/events"
58
59 # returns an iterable of all issues at this url (this essentially means you can treat the issues variable as a list of the issues)
60 events = GithubPaginator(url, key_auth, logger)
61
62
63 num_pages = events.get_num_pages()
64 all_data = []
65 for page_data, page in events.iter_pages():
66
67 if page_data is None:
68 return all_data
69
70 elif len(page_data) == 0:
71 logger.debug(f"{repo.capitalize()} Events Page {page} contains no data...returning")
72 logger.info(f"Events Page {page} of {num_pages}")
73 return all_data
74
75 logger.info(f"{repo} Events Page {page} of {num_pages}")
76
77 all_data += page_data
78
79 return all_data
80
81 def process_events(events, task_name, repo_id, logger, augur_db):
82
83 tool_source = "Github events task"
84 tool_version = "2.0"
85 data_source = "Github API"
86
87 pr_event_dicts = []
88 issue_event_dicts = []
89 contributors = []
90
91
92 # create mapping from issue url to issue id of current issues
93 issue_url_to_id_map = {}
94 issues = augur_db.session.query(Issue).filter(Issue.repo_id == repo_id).all()
95 for issue in issues:
96 issue_url_to_id_map[issue.issue_url] = issue.issue_id
97
98 # create mapping from pr url to pr id of current pull requests
99 pr_url_to_id_map = {}
100 prs = augur_db.session.query(PullRequest).filter(PullRequest.repo_id == repo_id).all()
101 for pr in prs:
102 pr_url_to_id_map[pr.pr_url] = pr.pull_request_id
103
104 not_mapable_event_count = 0
105 event_len = len(events)
106 for event in events:
107
108 event, contributor = process_github_event_contributors(logger, event, tool_source, tool_version, data_source)
109
110 # event_mapping_data is the pr or issue data needed to relate the event to an issue or pr
111 event_mapping_data = event["issue"]
112
113 if event_mapping_data is None:
114 not_mapable_event_count += 1
115 continue
116
117 pull_request = event_mapping_data.get('pull_request', None)
118 if pull_request:
119 pr_url = pull_request["url"]
120
121 try:
122 pull_request_id = pr_url_to_id_map[pr_url]
123
124 # query = augur_db.session.query(PullRequest).filter(PullRequest.pr_url == pr_url)
125 # related_pr = execute_session_query(query, 'one')
126 except KeyError:
127 logger.info(f"{task_name}: Could not find related pr")
128 logger.info(f"{task_name}: We were searching for: {pr_url}")
129 logger.info(f"{task_name}: Skipping")
130 continue
131
132 pr_event_dicts.append(
133 extract_pr_event_data(event, pull_request_id, platform_id, repo_id,
134 tool_source, tool_version, data_source)
135 )
136
137 else:
138 issue_url = event_mapping_data["url"]
139
140 try:
141 issue_id = issue_url_to_id_map[issue_url]
142 # query = augur_db.session.query(Issue).filter(Issue.issue_url == issue_url)
143 # related_issue = execute_session_query(query, 'one')
144 except KeyError:
145 logger.info(f"{task_name}: Could not find related pr")
146 logger.info(f"{task_name}: We were searching for: {issue_url}")
147 logger.info(f"{task_name}: Skipping")
148 continue
149
150 issue_event_dicts.append(
151 extract_issue_event_data(event, issue_id, platform_id, repo_id,
152 tool_source, tool_version, data_source)
153 )
154
155 # add contributor to list after porcessing the event,
156 # so if it fails processing for some reason the contributor is not inserted
157 # NOTE: contributor is none when there is no contributor data on the event
158 if contributor:
159 contributors.append(contributor)
160
161 # remove contributors that were found in the data more than once
162 contributors = remove_duplicate_dicts(contributors)
163
164 augur_db.insert_data(contributors, Contributor, ["cntrb_id"])
165
166 issue_events_len = len(issue_event_dicts)
167 pr_events_len = len(pr_event_dicts)
168 if event_len != (issue_events_len + pr_events_len):
169
170 unassigned_events = event_len - issue_events_len - pr_events_len
171
172 logger.error(f"{task_name}: {event_len} events were processed, but {pr_events_len} pr events were found and related to a pr, and {issue_events_len} issue events were found and related to an issue. {not_mapable_event_count} events were not related to a pr or issue due to the api returning insufficient data. For some reason {unassigned_events} events were not able to be processed even when the api returned sufficient data. This is usually because pull requests or issues have not been collected, and the events are skipped because they cannot be related to a pr or issue")
173
174 logger.info(f"{task_name}: Inserting {len(pr_event_dicts)} pr events and {len(issue_event_dicts)} issue events")
175
176 # TODO: Could replace this with "id" but it isn't stored on the table for some reason
177 pr_event_natural_keys = ["node_id"]
178 augur_db.insert_data(pr_event_dicts, PullRequestEvent, pr_event_natural_keys)
179
180 issue_event_natural_keys = ["issue_id", "issue_event_src_id"]
181 augur_db.insert_data(issue_event_dicts, IssueEvent, issue_event_natural_keys)
182
183
184 # TODO: Should we skip an event if there is no contributor to resolve it o
185 def process_github_event_contributors(logger, event, tool_source, tool_version, data_source):
186
187 if event["actor"]:
188
189 event_cntrb = extract_needed_contributor_data(event["actor"], tool_source, tool_version, data_source)
190 event["cntrb_id"] = event_cntrb["cntrb_id"]
191
192 else:
193 event["cntrb_id"] = None
194 return event, None
195
196 return event, event_cntrb
197
[end of augur/tasks/github/events/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/augur/tasks/github/events/tasks.py b/augur/tasks/github/events/tasks.py
--- a/augur/tasks/github/events/tasks.py
+++ b/augur/tasks/github/events/tasks.py
@@ -1,6 +1,7 @@
import time
import logging
import traceback
+import sqlalchemy as s
from augur.tasks.init.celery_app import celery_app as celery
from augur.tasks.init.celery_app import AugurCoreRepoCollectionTask
@@ -180,6 +181,7 @@
issue_event_natural_keys = ["issue_id", "issue_event_src_id"]
augur_db.insert_data(issue_event_dicts, IssueEvent, issue_event_natural_keys)
+ update_issue_closed_cntrbs_from_events(augur_db.engine, repo_id)
# TODO: Should we skip an event if there is no contributor to resolve it o
def process_github_event_contributors(logger, event, tool_source, tool_version, data_source):
@@ -194,3 +196,30 @@
return event, None
return event, event_cntrb
+
+
+def update_issue_closed_cntrbs_from_events(engine, repo_id):
+
+ get_ranked_issues = s.text(f"""
+ WITH RankedIssues AS (
+ SELECT repo_id, issue_id, cntrb_id,
+ ROW_NUMBER() OVER(PARTITION BY issue_id ORDER BY created_at DESC) AS rn
+ FROM issue_events
+ WHERE "action" = 'closed'
+ )
+
+ SELECT issue_id, cntrb_id from RankedIssues where rn=1 and repo_id={repo_id}
+ """)
+ result = engine.execute(get_ranked_issues).fetchall()
+
+ update_data = [{'issue_id': row['issue_id'], 'cntrb_id': row['cntrb_id'], 'repo_id': repo_id} for row in result]
+ with engine.connect() as connection:
+ update_stmt = s.text("""
+ UPDATE issues
+ SET cntrb_id = :cntrb_id
+ WHERE issue_id = :issue_id
+ AND repo_id = :repo_id
+ """)
+ connection.execute(update_stmt, update_data)
+
+
| {"golden_diff": "diff --git a/augur/tasks/github/events/tasks.py b/augur/tasks/github/events/tasks.py\n--- a/augur/tasks/github/events/tasks.py\n+++ b/augur/tasks/github/events/tasks.py\n@@ -1,6 +1,7 @@\n import time\n import logging\n import traceback\n+import sqlalchemy as s\n \n from augur.tasks.init.celery_app import celery_app as celery\n from augur.tasks.init.celery_app import AugurCoreRepoCollectionTask\n@@ -180,6 +181,7 @@\n issue_event_natural_keys = [\"issue_id\", \"issue_event_src_id\"]\n augur_db.insert_data(issue_event_dicts, IssueEvent, issue_event_natural_keys)\n \n+ update_issue_closed_cntrbs_from_events(augur_db.engine, repo_id)\n \n # TODO: Should we skip an event if there is no contributor to resolve it o\n def process_github_event_contributors(logger, event, tool_source, tool_version, data_source):\n@@ -194,3 +196,30 @@\n return event, None\n \n return event, event_cntrb\n+\n+\n+def update_issue_closed_cntrbs_from_events(engine, repo_id):\n+\n+ get_ranked_issues = s.text(f\"\"\"\n+ WITH RankedIssues AS (\n+ SELECT repo_id, issue_id, cntrb_id, \n+ ROW_NUMBER() OVER(PARTITION BY issue_id ORDER BY created_at DESC) AS rn\n+ FROM issue_events \n+ WHERE \"action\" = 'closed'\n+ )\n+ \n+ SELECT issue_id, cntrb_id from RankedIssues where rn=1 and repo_id={repo_id}\n+ \"\"\")\n+ result = engine.execute(get_ranked_issues).fetchall()\n+\n+ update_data = [{'issue_id': row['issue_id'], 'cntrb_id': row['cntrb_id'], 'repo_id': repo_id} for row in result]\n+ with engine.connect() as connection:\n+ update_stmt = s.text(\"\"\"\n+ UPDATE issues\n+ SET cntrb_id = :cntrb_id\n+ WHERE issue_id = :issue_id\n+ AND repo_id = :repo_id\n+ \"\"\")\n+ connection.execute(update_stmt, update_data)\n+\n+\n", "issue": "Issue data cntrb_id null for closed issues \nCntrb_id is stated to be \"The ID of the person that closed the issue\" and even for closed issues I am seeing nulls \n", "before_files": [{"content": "import time\nimport logging\nimport traceback\n\nfrom augur.tasks.init.celery_app import celery_app as celery\nfrom augur.tasks.init.celery_app import AugurCoreRepoCollectionTask\nfrom augur.application.db.data_parse import *\nfrom augur.tasks.github.util.github_paginator import GithubPaginator, hit_api\nfrom augur.tasks.github.util.github_task_session import GithubTaskManifest\nfrom augur.application.db.session import DatabaseSession\nfrom augur.tasks.github.util.util import get_owner_repo\nfrom augur.tasks.util.worker_util import remove_duplicate_dicts\nfrom augur.application.db.models import PullRequest, Message, PullRequestReview, PullRequestLabel, PullRequestReviewer, PullRequestEvent, PullRequestMeta, PullRequestAssignee, PullRequestReviewMessageRef, Issue, IssueEvent, IssueLabel, IssueAssignee, PullRequestMessageRef, IssueMessageRef, Contributor, Repo\nfrom augur.application.db.util import execute_session_query\n\nplatform_id = 1\n\[email protected](base=AugurCoreRepoCollectionTask)\ndef collect_events(repo_git: str):\n\n logger = logging.getLogger(collect_events.__name__)\n \n with GithubTaskManifest(logger) as manifest:\n\n augur_db = manifest.augur_db\n\n try:\n \n query = augur_db.session.query(Repo).filter(Repo.repo_git == repo_git)\n repo_obj = execute_session_query(query, 'one')\n repo_id = repo_obj.repo_id\n\n owner, repo = get_owner_repo(repo_git)\n\n logger.info(f\"Collecting Github events for {owner}/{repo}\")\n\n url = f\"https://api.github.com/repos/{owner}/{repo}/issues/events\"\n\n event_data = retrieve_all_event_data(repo_git, logger, manifest.key_auth)\n\n if 
event_data:\n \n process_events(event_data, f\"{owner}/{repo}: Event task\", repo_id, logger, manifest.augur_db)\n\n else:\n logger.info(f\"{owner}/{repo} has no events\")\n except Exception as e:\n logger.error(f\"Could not collect events for {repo_git}\\n Reason: {e} \\n Traceback: {''.join(traceback.format_exception(None, e, e.__traceback__))}\")\n\n\ndef retrieve_all_event_data(repo_git: str, logger, key_auth):\n\n owner, repo = get_owner_repo(repo_git)\n\n logger.info(f\"Collecting Github events for {owner}/{repo}\")\n\n url = f\"https://api.github.com/repos/{owner}/{repo}/issues/events\"\n \n # returns an iterable of all issues at this url (this essentially means you can treat the issues variable as a list of the issues)\n events = GithubPaginator(url, key_auth, logger)\n\n\n num_pages = events.get_num_pages()\n all_data = []\n for page_data, page in events.iter_pages():\n\n if page_data is None:\n return all_data\n \n elif len(page_data) == 0:\n logger.debug(f\"{repo.capitalize()} Events Page {page} contains no data...returning\")\n logger.info(f\"Events Page {page} of {num_pages}\")\n return all_data\n\n logger.info(f\"{repo} Events Page {page} of {num_pages}\")\n\n all_data += page_data\n\n return all_data \n\ndef process_events(events, task_name, repo_id, logger, augur_db):\n \n tool_source = \"Github events task\"\n tool_version = \"2.0\"\n data_source = \"Github API\"\n \n pr_event_dicts = []\n issue_event_dicts = []\n contributors = []\n\n\n # create mapping from issue url to issue id of current issues\n issue_url_to_id_map = {}\n issues = augur_db.session.query(Issue).filter(Issue.repo_id == repo_id).all()\n for issue in issues:\n issue_url_to_id_map[issue.issue_url] = issue.issue_id\n\n # create mapping from pr url to pr id of current pull requests\n pr_url_to_id_map = {}\n prs = augur_db.session.query(PullRequest).filter(PullRequest.repo_id == repo_id).all()\n for pr in prs:\n pr_url_to_id_map[pr.pr_url] = pr.pull_request_id\n\n not_mapable_event_count = 0\n event_len = len(events)\n for event in events:\n\n event, contributor = process_github_event_contributors(logger, event, tool_source, tool_version, data_source)\n\n # event_mapping_data is the pr or issue data needed to relate the event to an issue or pr\n event_mapping_data = event[\"issue\"]\n\n if event_mapping_data is None:\n not_mapable_event_count += 1\n continue\n\n pull_request = event_mapping_data.get('pull_request', None)\n if pull_request:\n pr_url = pull_request[\"url\"]\n\n try:\n pull_request_id = pr_url_to_id_map[pr_url]\n\n # query = augur_db.session.query(PullRequest).filter(PullRequest.pr_url == pr_url)\n # related_pr = execute_session_query(query, 'one')\n except KeyError:\n logger.info(f\"{task_name}: Could not find related pr\")\n logger.info(f\"{task_name}: We were searching for: {pr_url}\")\n logger.info(f\"{task_name}: Skipping\")\n continue\n\n pr_event_dicts.append(\n extract_pr_event_data(event, pull_request_id, platform_id, repo_id,\n tool_source, tool_version, data_source)\n )\n\n else:\n issue_url = event_mapping_data[\"url\"]\n\n try:\n issue_id = issue_url_to_id_map[issue_url]\n # query = augur_db.session.query(Issue).filter(Issue.issue_url == issue_url)\n # related_issue = execute_session_query(query, 'one')\n except KeyError:\n logger.info(f\"{task_name}: Could not find related pr\")\n logger.info(f\"{task_name}: We were searching for: {issue_url}\")\n logger.info(f\"{task_name}: Skipping\")\n continue\n\n issue_event_dicts.append(\n extract_issue_event_data(event, issue_id, 
platform_id, repo_id,\n tool_source, tool_version, data_source)\n )\n \n # add contributor to list after porcessing the event, \n # so if it fails processing for some reason the contributor is not inserted\n # NOTE: contributor is none when there is no contributor data on the event\n if contributor:\n contributors.append(contributor)\n\n # remove contributors that were found in the data more than once\n contributors = remove_duplicate_dicts(contributors)\n\n augur_db.insert_data(contributors, Contributor, [\"cntrb_id\"])\n\n issue_events_len = len(issue_event_dicts)\n pr_events_len = len(pr_event_dicts)\n if event_len != (issue_events_len + pr_events_len):\n\n unassigned_events = event_len - issue_events_len - pr_events_len\n\n logger.error(f\"{task_name}: {event_len} events were processed, but {pr_events_len} pr events were found and related to a pr, and {issue_events_len} issue events were found and related to an issue. {not_mapable_event_count} events were not related to a pr or issue due to the api returning insufficient data. For some reason {unassigned_events} events were not able to be processed even when the api returned sufficient data. This is usually because pull requests or issues have not been collected, and the events are skipped because they cannot be related to a pr or issue\")\n\n logger.info(f\"{task_name}: Inserting {len(pr_event_dicts)} pr events and {len(issue_event_dicts)} issue events\")\n\n # TODO: Could replace this with \"id\" but it isn't stored on the table for some reason\n pr_event_natural_keys = [\"node_id\"]\n augur_db.insert_data(pr_event_dicts, PullRequestEvent, pr_event_natural_keys)\n\n issue_event_natural_keys = [\"issue_id\", \"issue_event_src_id\"]\n augur_db.insert_data(issue_event_dicts, IssueEvent, issue_event_natural_keys)\n\n\n# TODO: Should we skip an event if there is no contributor to resolve it o\ndef process_github_event_contributors(logger, event, tool_source, tool_version, data_source):\n\n if event[\"actor\"]:\n\n event_cntrb = extract_needed_contributor_data(event[\"actor\"], tool_source, tool_version, data_source)\n event[\"cntrb_id\"] = event_cntrb[\"cntrb_id\"]\n\n else:\n event[\"cntrb_id\"] = None\n return event, None\n \n return event, event_cntrb\n", "path": "augur/tasks/github/events/tasks.py"}]} | 2,904 | 487 |
gh_patches_debug_15882 | rasdani/github-patches | git_diff | beeware__toga-850 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Table rows not rendering correctly in `update_data` winforms
## Expected Behavior
View text in the rows
## Current Behavior
Rows are created but text is not displayed.

When I insert a new row (with the insert button) it works ok:

I found that a change was made in `update_data` (https://github.com/beeware/toga/commit/cb326e79ea1884f9e71fadfb1d7daf0688e78753) and `update_data` uses a different ListViewItem creation than the `insert` method, which works ok.
The specific line is the change made from this one:
```
item._impl = WinForms.ListViewItem([
str(getattr(item, attr)) for attr in self.interface._accessors
])
```
to this:
```
item._impl = WinForms.ListViewItem(self.row_data(item))
```
## Steps to reproduce
1. Open example and that's it
## Your Environment
* Python Version (list the specific version number)
3.7.7
* Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well)
- [ ] macOS - version:
- [ ] Linux - distro: - version:
- [x] Windows - version: 10 Pro
- [ ] Other - name: - version:
* Toga Version (list the specific version number or git hash)
Master branch of toga.
* Toga Target (the type of app you are trying to generate)
- [ ] android
- [ ] cocoa
- [ ] django
- [ ] gtk
- [ ] iOS
- [ ] tvOS
- [ ] watchOS
- [x] winforms
- [ ] win32
- [ ] Other (please specify)
</issue>
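(Editorial illustration, not from the report.) The shape difference between the two code paths is the whole story: `row_data()` prepends the data-row object itself, so the list handed to `WinForms.ListViewItem` in `update_data()` starts with a non-string element, while `insert()` passes plain strings; the golden diff later in this record drops that leading element and makes `insert()` reuse `row_data()`. The class and sample values below are made up purely for demonstration.

```python
class Row:
    name, year = "Yellow Submarine", "1968"


accessors = ("name", "year")
row = Row()

# What insert() builds today (rows render fine):
insert_payload = [str(getattr(row, attr)) for attr in accessors]
print(insert_payload)       # ['Yellow Submarine', '1968']

# What update_data() builds via row_data() (leading object, text never shows):
update_payload = [row] + [str(getattr(row, attr)) for attr in accessors]
print(update_payload[0])    # <__main__.Row object at 0x...>
```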
<code>
[start of src/winforms/toga_winforms/widgets/table.py]
1 from travertino.size import at_least
2
3 from toga_winforms.libs import WinForms
4
5 from .base import Widget
6
7
8 class Table(Widget):
9 def create(self):
10 self._container = self
11 self.native = WinForms.ListView()
12 self.native.View = WinForms.View.Details
13
14 dataColumn = []
15 for heading in self.interface.headings:
16 col = WinForms.ColumnHeader()
17 col.Text = heading
18 dataColumn.append(col)
19
20 self.native.FullRowSelect = True
21 self.native.Multiselect = self.interface.multiple_select
22 self.native.DoubleBuffered = True
23 self.native.Columns.AddRange(dataColumn)
24
25 def change_source(self, source):
26 self.update_data()
27
28 def row_data(self, item):
29 # TODO: Winforms can't support icons in tree cells; so, if the data source
30 # specifies an icon, strip it when converting to row data.
31 def strip_icon(item, attr):
32 val = getattr(item, attr)
33 if isinstance(val, tuple):
34 return str(val[1])
35 return str(val)
36
37 return [item] + [
38 strip_icon(item, attr)
39 for attr in self.interface._accessors
40 ]
41
42 def update_data(self):
43 self.native.BeginUpdate()
44 self.native.Items.Clear()
45 items = []
46 for item in self.interface.data:
47 item._impl = WinForms.ListViewItem(self.row_data(item))
48 items.append(item._impl)
49 self.native.Items.AddRange(items)
50 self.native.EndUpdate()
51
52 def insert(self, index, item):
53 self.native.BeginUpdate()
54 item._impl = WinForms.ListViewItem([
55 str(getattr(item, attr)) for attr in self.interface._accessors
56 ])
57 self.native.Items.Insert(index, item._impl)
58 self.native.EndUpdate()
59
60 def change(self, item):
61 self.interface.factory.not_implemented('Table.change()')
62
63 def remove(self, item):
64 self.update_data()
65
66 def clear(self):
67 self.native.Items.Clear()
68
69 def set_on_select(self, handler):
70 self.interface.factory.not_implemented('Table.set_on_select()')
71
72 def scroll_to_row(self, row):
73 self.native.EnsureVisible(row)
74 self.interface.factory.not_implemented('Table.scroll_to_row()')
75
76 def rehint(self):
77 self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)
78 self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)
79
[end of src/winforms/toga_winforms/widgets/table.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/winforms/toga_winforms/widgets/table.py b/src/winforms/toga_winforms/widgets/table.py
--- a/src/winforms/toga_winforms/widgets/table.py
+++ b/src/winforms/toga_winforms/widgets/table.py
@@ -34,7 +34,7 @@
return str(val[1])
return str(val)
- return [item] + [
+ return [
strip_icon(item, attr)
for attr in self.interface._accessors
]
@@ -51,9 +51,7 @@
def insert(self, index, item):
self.native.BeginUpdate()
- item._impl = WinForms.ListViewItem([
- str(getattr(item, attr)) for attr in self.interface._accessors
- ])
+ item._impl = WinForms.ListViewItem(self.row_data(item))
self.native.Items.Insert(index, item._impl)
self.native.EndUpdate()
| {"golden_diff": "diff --git a/src/winforms/toga_winforms/widgets/table.py b/src/winforms/toga_winforms/widgets/table.py\n--- a/src/winforms/toga_winforms/widgets/table.py\n+++ b/src/winforms/toga_winforms/widgets/table.py\n@@ -34,7 +34,7 @@\n return str(val[1])\n return str(val)\n \n- return [item] + [\n+ return [\n strip_icon(item, attr)\n for attr in self.interface._accessors\n ]\n@@ -51,9 +51,7 @@\n \n def insert(self, index, item):\n self.native.BeginUpdate()\n- item._impl = WinForms.ListViewItem([\n- str(getattr(item, attr)) for attr in self.interface._accessors\n- ])\n+ item._impl = WinForms.ListViewItem(self.row_data(item))\n self.native.Items.Insert(index, item._impl)\n self.native.EndUpdate()\n", "issue": "Table rows not rendering correctly in `update_data` winforms\n## Expected Behavior\r\nView text in the rows\r\n\r\n## Current Behavior\r\nRows are created but text is not displayed.\r\n\r\nWhen I insert a new row (with insert button) is working ok:\r\n\r\n\r\nIn found that a change was made in `update_data` ( https://github.com/beeware/toga/commit/cb326e79ea1884f9e71fadfb1d7daf0688e78753) and `update_data` use a different ListViewItem creation than `insert` method which is working ok.\r\n\r\nThe specific line is the change made from this one:\r\n```\r\nitem._impl = WinForms.ListViewItem([\r\n str(getattr(item, attr)) for attr in self.interface._accessors\r\n ])\r\n```\r\nto this:\r\n```\r\nitem._impl = WinForms.ListViewItem(self.row_data(item))\r\n```\r\n\r\n## Steps to reproduce\r\n1. Open example and that's it\r\n\r\n## Your Environment\r\n\r\n* Python Version (list the specific version number)\r\n3.7.7\r\n\r\n* Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well)\r\n\r\n - [ ] macOS - version:\r\n - [ ] Linux - distro: - version:\r\n - [x] Windows - version: 10 Pro\r\n - [ ] Other - name: - version:\r\n\r\n* Toga Version (list the specific version number or git hash)\r\nMaster branch of toga.\r\n\r\n* Toga Target (the type of app you are trying to generate)\r\n\r\n - [ ] android\r\n - [ ] cocoa\r\n - [ ] django\r\n - [ ] gtk\r\n - [ ] iOS\r\n - [ ] tvOS\r\n - [ ] watchOS\r\n - [x] winforms\r\n - [ ] win32\r\n - [ ] Other (please specify)\r\n\n", "before_files": [{"content": "from travertino.size import at_least\n\nfrom toga_winforms.libs import WinForms\n\nfrom .base import Widget\n\n\nclass Table(Widget):\n def create(self):\n self._container = self\n self.native = WinForms.ListView()\n self.native.View = WinForms.View.Details\n\n dataColumn = []\n for heading in self.interface.headings:\n col = WinForms.ColumnHeader()\n col.Text = heading\n dataColumn.append(col)\n\n self.native.FullRowSelect = True\n self.native.Multiselect = self.interface.multiple_select\n self.native.DoubleBuffered = True\n self.native.Columns.AddRange(dataColumn)\n\n def change_source(self, source):\n self.update_data()\n\n def row_data(self, item):\n # TODO: Winforms can't support icons in tree cells; so, if the data source\n # specifies an icon, strip it when converting to row data.\n def strip_icon(item, attr):\n val = getattr(item, attr)\n if isinstance(val, tuple):\n return str(val[1])\n return str(val)\n\n return [item] + [\n strip_icon(item, attr)\n for attr in self.interface._accessors\n ]\n\n def update_data(self):\n self.native.BeginUpdate()\n self.native.Items.Clear()\n items = []\n for item in self.interface.data:\n item._impl = WinForms.ListViewItem(self.row_data(item))\n items.append(item._impl)\n 
self.native.Items.AddRange(items)\n self.native.EndUpdate()\n\n def insert(self, index, item):\n self.native.BeginUpdate()\n item._impl = WinForms.ListViewItem([\n str(getattr(item, attr)) for attr in self.interface._accessors\n ])\n self.native.Items.Insert(index, item._impl)\n self.native.EndUpdate()\n\n def change(self, item):\n self.interface.factory.not_implemented('Table.change()')\n\n def remove(self, item):\n self.update_data()\n\n def clear(self):\n self.native.Items.Clear()\n\n def set_on_select(self, handler):\n self.interface.factory.not_implemented('Table.set_on_select()')\n\n def scroll_to_row(self, row):\n self.native.EnsureVisible(row)\n self.interface.factory.not_implemented('Table.scroll_to_row()')\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)\n self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)\n", "path": "src/winforms/toga_winforms/widgets/table.py"}]} | 1,742 | 200 |
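The diff in the record above comes down to one rule: both `insert` and the bulk `update_data` path must build their `ListViewItem` from the same `row_data(item)` helper. A minimal sketch of that rule, using plain-Python stand-ins (`FakeListViewItem`, `Row`, and a free-function `row_data` are illustrative names, not the real WinForms bindings):

```python
# Plain-Python stand-ins; the real code wraps System.Windows.Forms.ListViewItem.
class FakeListViewItem:
    def __init__(self, values):
        self.values = list(values)


def row_data(item, accessors):
    # Single source of truth for turning a data row into display strings.
    return [str(getattr(item, attr)) for attr in accessors]


class Row:
    def __init__(self, name, size):
        self.name, self.size = name, size


accessors = ("name", "size")
rows = [Row("a.txt", 12), Row("b.txt", 34)]

# Both the bulk-refresh path and the single-insert path now agree on the row shape.
refreshed = [FakeListViewItem(row_data(r, accessors)) for r in rows]
inserted = FakeListViewItem(row_data(Row("c.txt", 56), accessors))
print([i.values for i in refreshed], inserted.values)
```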
gh_patches_debug_9260 | rasdani/github-patches | git_diff | translate__pootle-6524 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Changing source (with xliff) can create submissions with no submitter
# Steps to reproduce:
- change source in file of existing unit
- run update_stores
# Results
- submissions have no submitter
- stuff breaks
## Expected result:
- nothing breaks
This only happens in XLIFF, as far as I can tell, since I don't think it's possible to change the source of units in this way with PO.
</issue>
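To make the failure mode concrete, here is a minimal plain-Python sketch, with `Unit`, `UnitChange`, and `pre_save_unit_change` as simplified stand-ins for the Django models and the `handle_unit_pre_change` receiver shown in the file below: a source-only update leaves the unit untranslated, so the pre-save hook wipes the submitter, and the resulting change record has none. The patch further down removes that receiver.

```python
# Simplified stand-ins, no Django required.
UNTRANSLATED = 0


class Unit:
    def __init__(self, source, target, state):
        self.source, self.target, self.state = source, target, state


class UnitChange:
    def __init__(self, unit, submitted_by):
        self.unit, self.submitted_by = unit, submitted_by


def pre_save_unit_change(change):
    # Mirrors handle_unit_pre_change below: wipe submitter data when the
    # unit is untranslated, even though this change was a source update.
    if change.unit.state == UNTRANSLATED:
        change.submitted_by = None


unit = Unit(source="new source text", target="", state=UNTRANSLATED)
change = UnitChange(unit, submitted_by="some-user")
pre_save_unit_change(change)
assert change.submitted_by is None
print(change.submitted_by)  # None: the submission has lost its submitter
```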
<code>
[start of pootle/apps/pootle_store/receivers.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from hashlib import md5
10
11 from django.db.models.signals import post_save, pre_save
12 from django.dispatch import receiver
13 from django.utils.encoding import force_bytes
14
15 from pootle.core.delegate import lifecycle, uniqueid
16 from pootle.core.models import Revision
17 from pootle.core.signals import update_checks, update_data
18
19 from .constants import FUZZY, TRANSLATED, UNTRANSLATED
20 from .models import Suggestion, Unit, UnitChange, UnitSource
21
22
23 @receiver(post_save, sender=Suggestion)
24 def handle_suggestion_added(**kwargs):
25 created = kwargs.get("created")
26 if not created:
27 return
28 store = kwargs["instance"].unit.store
29 update_data.send(store.__class__, instance=store)
30
31
32 @receiver(post_save, sender=Suggestion)
33 def handle_suggestion_accepted(**kwargs):
34 created = kwargs.get("created")
35 suggestion = kwargs["instance"]
36 if created or not suggestion.is_accepted:
37 return
38 update_data.send(
39 suggestion.unit.store.__class__,
40 instance=suggestion.unit.store)
41
42
43 @receiver(pre_save, sender=UnitSource)
44 def handle_unit_source_pre_save(**kwargs):
45 unit_source = kwargs["instance"]
46 created = not unit_source.pk
47 unit = unit_source.unit
48 if created:
49 unit_source.creation_revision = unit.revision
50 if created or unit.source_updated:
51 unit_source.source_hash = md5(force_bytes(unit.source_f)).hexdigest()
52 unit_source.source_length = len(unit.source_f)
53 unit_source.source_wordcount = max(
54 1, (unit.counter.count_words(unit.source_f.strings) or 0))
55
56
57 @receiver(pre_save, sender=Unit)
58 def handle_unit_pre_save(**kwargs):
59 unit = kwargs["instance"]
60 auto_translated = False
61
62 if unit.source_updated:
63 # update source related fields
64 wc = unit.counter.count_words(unit.source_f.strings)
65 if not wc and not bool(filter(None, unit.target_f.strings)):
66 # auto-translate untranslated strings
67 unit.target = unit.source
68 unit.state = FUZZY
69 auto_translated = True
70 if unit.target_updated:
71 # update target related fields
72 unit.target_wordcount = unit.counter.count_words(
73 unit.target_f.strings)
74 unit.target_length = len(unit.target_f)
75 if filter(None, unit.target_f.strings):
76 if unit.state == UNTRANSLATED:
77 unit.state = TRANSLATED
78 else:
79 # if it was TRANSLATED then set to UNTRANSLATED
80 if unit.state > FUZZY:
81 unit.state = UNTRANSLATED
82
83 # Updating unit from the .po file set its revision property to
84 # a new value (the same for all units during its store updated)
85 # since that change doesn't require further sync but note that
86 # auto_translated units require further sync
87 update_revision = (
88 unit.revision is None
89 or (not unit.revision_updated
90 and (unit.updated and not auto_translated)))
91 if update_revision:
92 unit.revision = Revision.incr()
93
94 if unit.index is None:
95 unit.index = unit.store.max_index() + 1
96 unitid = uniqueid.get(unit.__class__)(unit)
97 if unitid.changed:
98 unit.setid(unitid.getid())
99
100
101 @receiver(pre_save, sender=UnitChange)
102 def handle_unit_pre_change(**kwargs):
103 unit_change = kwargs["instance"]
104 unit = unit_change.unit
105 if unit.state == UNTRANSLATED:
106 # clear reviewer and translator data if translation
107 # has been deleted
108 unit_change.submitted_by = None
109 unit_change.submitted_on = None
110
111
112 @receiver(post_save, sender=UnitChange)
113 def handle_unit_change(**kwargs):
114 unit_change = kwargs["instance"]
115 unit = unit_change.unit
116 created = not unit._frozen.pk
117
118 if not created:
119 lifecycle.get(Unit)(unit).change()
120 if not unit.source_updated and not unit.target_updated:
121 return
122 new_untranslated = (created and unit.state == UNTRANSLATED)
123 if not new_untranslated:
124 update_checks.send(unit.__class__, instance=unit)
125 if unit.istranslated():
126 unit.update_tmserver()
127
[end of pootle/apps/pootle_store/receivers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_store/receivers.py b/pootle/apps/pootle_store/receivers.py
--- a/pootle/apps/pootle_store/receivers.py
+++ b/pootle/apps/pootle_store/receivers.py
@@ -98,17 +98,6 @@
unit.setid(unitid.getid())
-@receiver(pre_save, sender=UnitChange)
-def handle_unit_pre_change(**kwargs):
- unit_change = kwargs["instance"]
- unit = unit_change.unit
- if unit.state == UNTRANSLATED:
- # clear reviewer and translator data if translation
- # has been deleted
- unit_change.submitted_by = None
- unit_change.submitted_on = None
-
-
@receiver(post_save, sender=UnitChange)
def handle_unit_change(**kwargs):
unit_change = kwargs["instance"]
| {"golden_diff": "diff --git a/pootle/apps/pootle_store/receivers.py b/pootle/apps/pootle_store/receivers.py\n--- a/pootle/apps/pootle_store/receivers.py\n+++ b/pootle/apps/pootle_store/receivers.py\n@@ -98,17 +98,6 @@\n unit.setid(unitid.getid())\n \n \n-@receiver(pre_save, sender=UnitChange)\n-def handle_unit_pre_change(**kwargs):\n- unit_change = kwargs[\"instance\"]\n- unit = unit_change.unit\n- if unit.state == UNTRANSLATED:\n- # clear reviewer and translator data if translation\n- # has been deleted\n- unit_change.submitted_by = None\n- unit_change.submitted_on = None\n-\n-\n @receiver(post_save, sender=UnitChange)\n def handle_unit_change(**kwargs):\n unit_change = kwargs[\"instance\"]\n", "issue": "Changing source (with xliff) can create submissions with no submitter\n# Steps to reproduce:\r\n\r\n- change source in file of existing unit\r\n- run update_stores\r\n\r\n# Results\r\n\r\n- submissions have no submitter\r\n- stuff breaks\r\n\r\n## Expected result:\r\n\r\n- nothing breaks\r\n\r\n\r\nthis only happens in xliff afaict - as i dont think its possible to change source of units in this way with po\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom hashlib import md5\n\nfrom django.db.models.signals import post_save, pre_save\nfrom django.dispatch import receiver\nfrom django.utils.encoding import force_bytes\n\nfrom pootle.core.delegate import lifecycle, uniqueid\nfrom pootle.core.models import Revision\nfrom pootle.core.signals import update_checks, update_data\n\nfrom .constants import FUZZY, TRANSLATED, UNTRANSLATED\nfrom .models import Suggestion, Unit, UnitChange, UnitSource\n\n\n@receiver(post_save, sender=Suggestion)\ndef handle_suggestion_added(**kwargs):\n created = kwargs.get(\"created\")\n if not created:\n return\n store = kwargs[\"instance\"].unit.store\n update_data.send(store.__class__, instance=store)\n\n\n@receiver(post_save, sender=Suggestion)\ndef handle_suggestion_accepted(**kwargs):\n created = kwargs.get(\"created\")\n suggestion = kwargs[\"instance\"]\n if created or not suggestion.is_accepted:\n return\n update_data.send(\n suggestion.unit.store.__class__,\n instance=suggestion.unit.store)\n\n\n@receiver(pre_save, sender=UnitSource)\ndef handle_unit_source_pre_save(**kwargs):\n unit_source = kwargs[\"instance\"]\n created = not unit_source.pk\n unit = unit_source.unit\n if created:\n unit_source.creation_revision = unit.revision\n if created or unit.source_updated:\n unit_source.source_hash = md5(force_bytes(unit.source_f)).hexdigest()\n unit_source.source_length = len(unit.source_f)\n unit_source.source_wordcount = max(\n 1, (unit.counter.count_words(unit.source_f.strings) or 0))\n\n\n@receiver(pre_save, sender=Unit)\ndef handle_unit_pre_save(**kwargs):\n unit = kwargs[\"instance\"]\n auto_translated = False\n\n if unit.source_updated:\n # update source related fields\n wc = unit.counter.count_words(unit.source_f.strings)\n if not wc and not bool(filter(None, unit.target_f.strings)):\n # auto-translate untranslated strings\n unit.target = unit.source\n unit.state = FUZZY\n auto_translated = True\n if unit.target_updated:\n # update target related fields\n unit.target_wordcount = unit.counter.count_words(\n unit.target_f.strings)\n unit.target_length = len(unit.target_f)\n if 
filter(None, unit.target_f.strings):\n if unit.state == UNTRANSLATED:\n unit.state = TRANSLATED\n else:\n # if it was TRANSLATED then set to UNTRANSLATED\n if unit.state > FUZZY:\n unit.state = UNTRANSLATED\n\n # Updating unit from the .po file set its revision property to\n # a new value (the same for all units during its store updated)\n # since that change doesn't require further sync but note that\n # auto_translated units require further sync\n update_revision = (\n unit.revision is None\n or (not unit.revision_updated\n and (unit.updated and not auto_translated)))\n if update_revision:\n unit.revision = Revision.incr()\n\n if unit.index is None:\n unit.index = unit.store.max_index() + 1\n unitid = uniqueid.get(unit.__class__)(unit)\n if unitid.changed:\n unit.setid(unitid.getid())\n\n\n@receiver(pre_save, sender=UnitChange)\ndef handle_unit_pre_change(**kwargs):\n unit_change = kwargs[\"instance\"]\n unit = unit_change.unit\n if unit.state == UNTRANSLATED:\n # clear reviewer and translator data if translation\n # has been deleted\n unit_change.submitted_by = None\n unit_change.submitted_on = None\n\n\n@receiver(post_save, sender=UnitChange)\ndef handle_unit_change(**kwargs):\n unit_change = kwargs[\"instance\"]\n unit = unit_change.unit\n created = not unit._frozen.pk\n\n if not created:\n lifecycle.get(Unit)(unit).change()\n if not unit.source_updated and not unit.target_updated:\n return\n new_untranslated = (created and unit.state == UNTRANSLATED)\n if not new_untranslated:\n update_checks.send(unit.__class__, instance=unit)\n if unit.istranslated():\n unit.update_tmserver()\n", "path": "pootle/apps/pootle_store/receivers.py"}]} | 1,882 | 195 |
gh_patches_debug_6221 | rasdani/github-patches | git_diff | scikit-hep__pyhf-837 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bump jsonschema to v3.2.0+ to support draft 6
Currently we rely on an alpha of the 3.0.x line, but we can bump to 3.2.0, which has since been released.
</issue>
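A trimmed sketch of what the dependency bump looks like in `setup.py`'s `install_requires` (only the `jsonschema` line changes; the other entries are copied from the file below and the rest is omitted):

```python
# Trimmed view of setup.py's install_requires; only the jsonschema pin changes.
install_requires = [
    "scipy",
    "click>=6.0",
    "tqdm",
    # was: "jsonschema>=v3.0.0a2"  (alpha pin, kept only for draft-6 support)
    "jsonschema>=3.2.0",  # stable release that covers JSON Schema draft 6
    "jsonpatch",
    "pyyaml",
]
```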
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 from pathlib import Path
3
4 this_directory = Path(__file__).parent.resolve()
5 with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:
6 long_description = readme_rst.read()
7
8 extras_require = {
9 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
10 'torch': ['torch~=1.2'],
11 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
12 'xmlio': ['uproot'],
13 'minuit': ['iminuit'],
14 }
15 extras_require['backends'] = sorted(
16 set(
17 extras_require['tensorflow']
18 + extras_require['torch']
19 + extras_require['jax']
20 + extras_require['minuit']
21 )
22 )
23 extras_require['contrib'] = sorted(set(['matplotlib']))
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + [
31 'pyflakes',
32 'pytest~=3.5',
33 'pytest-cov>=2.5.1',
34 'pytest-mock',
35 'pytest-benchmark[histogram]',
36 'pytest-console-scripts',
37 'pytest-mpl',
38 'pydocstyle',
39 'coverage>=4.0', # coveralls
40 'papermill~=1.0',
41 'nteract-scrapbook~=0.2',
42 'check-manifest',
43 'jupyter',
44 'uproot~=3.3',
45 'graphviz',
46 'jsonpatch',
47 'black',
48 ]
49 )
50 )
51 extras_require['docs'] = sorted(
52 set(
53 [
54 'sphinx',
55 'sphinxcontrib-bibtex',
56 'sphinx-click',
57 'sphinx_rtd_theme',
58 'nbsphinx',
59 'ipywidgets',
60 'sphinx-issues',
61 'sphinx-copybutton>0.2.9',
62 ]
63 )
64 )
65 extras_require['develop'] = sorted(
66 set(
67 extras_require['docs']
68 + extras_require['test']
69 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']
70 )
71 )
72 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
73
74
75 setup(
76 name='pyhf',
77 version='0.4.1',
78 description='(partial) pure python histfactory implementation',
79 long_description=long_description,
80 long_description_content_type='text/x-rst',
81 url='https://github.com/scikit-hep/pyhf',
82 author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
83 author_email='[email protected], [email protected], [email protected]',
84 license='Apache',
85 keywords='physics fitting numpy scipy tensorflow pytorch',
86 classifiers=[
87 "Programming Language :: Python :: 3",
88 "Programming Language :: Python :: 3.6",
89 "Programming Language :: Python :: 3.7",
90 ],
91 package_dir={'': 'src'},
92 packages=find_packages(where='src'),
93 include_package_data=True,
94 python_requires=">=3.6",
95 install_requires=[
96 'scipy', # requires numpy, which is required by pyhf and tensorflow
97 'click>=6.0', # for console scripts,
98 'tqdm', # for readxml
99 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
100 'jsonpatch',
101 'pyyaml', # for parsing CLI equal-delimited options
102 ],
103 extras_require=extras_require,
104 entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},
105 dependency_links=[],
106 use_scm_version=lambda: {'local_scheme': lambda version: ''},
107 )
108
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -96,7 +96,7 @@
'scipy', # requires numpy, which is required by pyhf and tensorflow
'click>=6.0', # for console scripts,
'tqdm', # for readxml
- 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
+ 'jsonschema>=3.2.0', # for utils
'jsonpatch',
'pyyaml', # for parsing CLI equal-delimited options
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -96,7 +96,7 @@\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n- 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n+ 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n", "issue": "bump jsonschema to v3.2.0+ to support draft 6\nCurrently on alpha release 3.0.x but can bump to 3.2.0 which was released.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=1.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,641 | 143 |
gh_patches_debug_925 | rasdani/github-patches | git_diff | dynamiqs__dynamiqs-196 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
implement a ver() method
As a user if I want to make sure my setup is up to date with the latest version, I want to be able to call dq.ver() to know which version I am running
</issue>
<code>
[start of dynamiqs/__init__.py]
1 from .mesolve import mesolve
2 from .sesolve import sesolve
3 from .smesolve import smesolve
4 from .utils import *
5
[end of dynamiqs/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dynamiqs/__init__.py b/dynamiqs/__init__.py
--- a/dynamiqs/__init__.py
+++ b/dynamiqs/__init__.py
@@ -1,4 +1,9 @@
+from importlib.metadata import version
+
from .mesolve import mesolve
from .sesolve import sesolve
from .smesolve import smesolve
from .utils import *
+
+# get version from pyproject.toml
+__version__ = version(__package__)
| {"golden_diff": "diff --git a/dynamiqs/__init__.py b/dynamiqs/__init__.py\n--- a/dynamiqs/__init__.py\n+++ b/dynamiqs/__init__.py\n@@ -1,4 +1,9 @@\n+from importlib.metadata import version\n+\n from .mesolve import mesolve\n from .sesolve import sesolve\n from .smesolve import smesolve\n from .utils import *\n+\n+# get version from pyproject.toml\n+__version__ = version(__package__)\n", "issue": "implement a ver() method\nAs a user if I want to make sure my setup is up to date with the latest version, I want to be able to call dq.ver() to know which version I am running\n", "before_files": [{"content": "from .mesolve import mesolve\nfrom .sesolve import sesolve\nfrom .smesolve import smesolve\nfrom .utils import *\n", "path": "dynamiqs/__init__.py"}]} | 619 | 113 |
gh_patches_debug_132 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-3433 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ci - failure around mismatched versions of tabulate
Per current CI failures we're getting tabulate 0.8.3 installed even though azure-cli-core calls out a pin to under 0.8.2.
This mirrors the issue we had with fakeredis, where it properly declared a dependency for six == 0.12.0 and we picked up the version pin in requirements.txt.
Digging around a bit more: pip released a new 19.x release series in the last 72 hours, which I'm currently examining for regressions that allow installs to ignore package dependencies when given requirements.
</issue>
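A trimmed sketch of the kind of pin that resolves this, mirroring the patch further down (entries besides `tabulate` are abbreviated from the `setup.py` below):

```python
# Trimmed view of c7n's setup.py dependencies; pinning tabulate keeps pip from
# resolving 0.8.3, which conflicts with azure-cli-core's tighter constraint.
install_requires = [
    "boto3>=1.9.62",
    "botocore>=1.12.62",
    "argcomplete",
    # was: "tabulate"  (unpinned, so the new pip resolver pulled in 0.8.3)
    "tabulate==0.8.2",
]
```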
<code>
[start of setup.py]
1 import os
2 from io import open
3 from setuptools import setup, find_packages
4
5
6 def read(fname):
7 return open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()
8
9
10 setup(
11 name="c7n",
12 version='0.8.33.1',
13 description="Cloud Custodian - Policy Rules Engine",
14 long_description=read('README.rst'),
15 classifiers=[
16 "Topic :: System :: Systems Administration",
17 "Topic :: System :: Distributed Computing"
18 ],
19 url="https://github.com/capitalone/cloud-custodian",
20 license="Apache-2.0",
21 packages=find_packages(),
22 entry_points={
23 'console_scripts': [
24 'custodian = c7n.cli:main']},
25 install_requires=[
26 "boto3>=1.9.62",
27 "botocore>=1.12.62",
28 "python-dateutil>=2.6,<3.0.0",
29 "pyyaml",
30 "jsonschema",
31 "jsonpatch>=1.21",
32 "argcomplete",
33 "tabulate"
34 ],
35 )
36
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,6 +30,6 @@
"jsonschema",
"jsonpatch>=1.21",
"argcomplete",
- "tabulate"
+ "tabulate==0.8.2"
],
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,6 +30,6 @@\n \"jsonschema\",\n \"jsonpatch>=1.21\",\n \"argcomplete\",\n- \"tabulate\"\n+ \"tabulate==0.8.2\"\n ],\n )\n", "issue": "ci - failure around mismatched versions of tabulate\nPer current ci failures we're getting 0.8.3 of tabulate installed even though azure-cli-core calls out a pin to under 0.8.2.\r\n\r\nThis mirrors the issue we had with fakeredis, where it properly declared a dependency for six == 0.12.0 and we picked up the version pin in requirements.txt.\r\n\r\ndigging around a bit more, pip released a new 19 release series in the last 72hrs, that i'm currently examining for regressions that allowed for installs that ignore package dependencies, when given requirements.\r\n\n", "before_files": [{"content": "import os\nfrom io import open\nfrom setuptools import setup, find_packages\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()\n\n\nsetup(\n name=\"c7n\",\n version='0.8.33.1',\n description=\"Cloud Custodian - Policy Rules Engine\",\n long_description=read('README.rst'),\n classifiers=[\n \"Topic :: System :: Systems Administration\",\n \"Topic :: System :: Distributed Computing\"\n ],\n url=\"https://github.com/capitalone/cloud-custodian\",\n license=\"Apache-2.0\",\n packages=find_packages(),\n entry_points={\n 'console_scripts': [\n 'custodian = c7n.cli:main']},\n install_requires=[\n \"boto3>=1.9.62\",\n \"botocore>=1.12.62\",\n \"python-dateutil>=2.6,<3.0.0\",\n \"pyyaml\",\n \"jsonschema\",\n \"jsonpatch>=1.21\",\n \"argcomplete\",\n \"tabulate\"\n ],\n)\n", "path": "setup.py"}]} | 964 | 73 |
gh_patches_debug_26997 | rasdani/github-patches | git_diff | mdn__kuma-6098 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
search_phase_execution_exception in ES on huuuge ?page params
https://sentry.prod.mozaws.net/operations/mdn-prod/issues/6620806/
```
TransportError: TransportError(500, u'search_phase_execution_exception', u'Result window is too large, from + size must be less than or equal to: [10000] but was [33010]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.')
(24 additional frame(s) were not displayed)
...
File "rest_framework/views.py", line 492, in dispatch
response = handler(request, *args, **kwargs)
File "rest_framework/generics.py", line 201, in get
return self.list(request, *args, **kwargs)
File "rest_framework/mixins.py", line 42, in list
page = self.paginate_queryset(queryset)
File "rest_framework/generics.py", line 173, in paginate_queryset
return self.paginator.paginate_queryset(queryset, self.request, view=self)
File "rest_framework/pagination.py", line 204, in paginate_queryset
self.page = paginator.page(page_number)
TransportError: TransportError(500, u'search_phase_execution_exception', u'Result window is too large, from + size must be less than or equal to: [10000] but was [33010]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.')
```
</issue>
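The failing number is just `from + size` for the requested slice, far past Elasticsearch's default `index.max_result_window` of 10 000. A minimal sketch of the guard the patch below adds, with `MAX_PAGE` as an illustrative constant and the Django paginator exceptions simplified to `ValueError`:

```python
# Sketch of the page-number guard, mirroring the validate_number() change in
# the patch below (real code raises PageNotAnInteger/EmptyPage/InvalidPage).
MAX_PAGE = 1000  # anything >= 1000 hits ES's hard result-window error


def validate_number(number):
    try:
        number = int(number)
    except (TypeError, ValueError):
        raise ValueError("That page number is not an integer")
    if number < 1:
        raise ValueError("That page number is less than 1")
    if number >= MAX_PAGE:
        # Fail fast instead of letting Elasticsearch raise
        # search_phase_execution_exception on a huge ?page= value.
        raise ValueError("Page number too large")
    return number


print(validate_number("7"))   # 7
try:
    validate_number("3301")
except ValueError as exc:
    print(exc)                # Page number too large
```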
<code>
[start of kuma/search/paginator.py]
1 from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator
2 from django.utils.functional import cached_property
3
4
5 class SearchPaginator(Paginator):
6 """
7 A better paginator for search results
8
9 The normal Paginator does a .count() query and then a slice. Since ES
10 results contain the total number of results, we can take an optimistic
11 slice and then adjust the count.
12 """
13
14 def __init__(self, *args, **kwargs):
15 super(SearchPaginator, self).__init__(*args, **kwargs)
16 self._result_total = None
17
18 def validate_number(self, number):
19 """
20 Validates the given 1-based page number.
21
22 This class overrides the default behavior and ignores the upper bound.
23 """
24 try:
25 number = int(number)
26 except (TypeError, ValueError):
27 raise PageNotAnInteger('That page number is not an integer')
28 if number < 1:
29 raise EmptyPage('That page number is less than 1')
30 return number
31
32 def page(self, number):
33 """
34 Returns a page object.
35
36 This class overrides the default behavior and ignores "orphans" and
37 assigns the count from the ES result to the Paginator.
38 """
39 number = self.validate_number(number)
40 bottom = (number - 1) * self.per_page
41 top = bottom + self.per_page
42
43 # Force the search to evaluate and then attach the count. We want to
44 # avoid an extra useless query even if there are no results, so we
45 # directly fetch the count from hits.
46 result = self.object_list[bottom:top].execute()
47 page = Page(result.hits, number, self)
48 # Set the count to the results after post_filter
49 self._result_total = result.hits.total
50 # Also store the aggregations, if any.
51 page.aggregations = getattr(result, 'aggregations', None)
52
53 # Now that we have the count validate that the page number isn't higher
54 # than the possible number of pages and adjust accordingly.
55 if number > self.num_pages:
56 if number == 1 and self.allow_empty_first_page:
57 pass
58 else:
59 raise EmptyPage('That page contains no results')
60 return page
61
62 @cached_property
63 def count(self):
64 """
65 Returns the total number of results.
66
67 Paginator's count property will call .count() on the search object,
68 which returns results before the pre_filter. This will result in a
69 count that is too high. Instead, use 'total' from the results,
70 executing if needed.
71 """
72 if self._result_total is not None:
73 return self._result_total
74 return self.object_list.execute().hits.total
75
[end of kuma/search/paginator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kuma/search/paginator.py b/kuma/search/paginator.py
--- a/kuma/search/paginator.py
+++ b/kuma/search/paginator.py
@@ -1,4 +1,5 @@
-from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator
+from django.core.paginator import (
+ EmptyPage, InvalidPage, Page, PageNotAnInteger, Paginator)
from django.utils.functional import cached_property
@@ -19,7 +20,7 @@
"""
Validates the given 1-based page number.
- This class overrides the default behavior and ignores the upper bound.
+ We also check that the number isn't too large.
"""
try:
number = int(number)
@@ -27,6 +28,19 @@
raise PageNotAnInteger('That page number is not an integer')
if number < 1:
raise EmptyPage('That page number is less than 1')
+
+ if number >= 1000:
+ # Anything >=1,000 will result in a hard error in
+ # Elasticsearch which would happen before we even get a chance
+ # to validate that the range is too big. The error you would
+ # get from Elasticsearch 6.x is something like this:
+ #
+ # Result window is too large, from + size must be less
+ # than or equal to: [10000] but was [11000].
+ #
+ # See https://github.com/mdn/kuma/issues/6092
+ raise InvalidPage('Page number too large')
+
return number
def page(self, number):
| {"golden_diff": "diff --git a/kuma/search/paginator.py b/kuma/search/paginator.py\n--- a/kuma/search/paginator.py\n+++ b/kuma/search/paginator.py\n@@ -1,4 +1,5 @@\n-from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator\n+from django.core.paginator import (\n+ EmptyPage, InvalidPage, Page, PageNotAnInteger, Paginator)\n from django.utils.functional import cached_property\n \n \n@@ -19,7 +20,7 @@\n \"\"\"\n Validates the given 1-based page number.\n \n- This class overrides the default behavior and ignores the upper bound.\n+ We also check that the number isn't too large.\n \"\"\"\n try:\n number = int(number)\n@@ -27,6 +28,19 @@\n raise PageNotAnInteger('That page number is not an integer')\n if number < 1:\n raise EmptyPage('That page number is less than 1')\n+\n+ if number >= 1000:\n+ # Anything >=1,000 will result in a hard error in\n+ # Elasticsearch which would happen before we even get a chance\n+ # to validate that the range is too big. The error you would\n+ # get from Elasticsearch 6.x is something like this:\n+ #\n+ # Result window is too large, from + size must be less\n+ # than or equal to: [10000] but was [11000].\n+ #\n+ # See https://github.com/mdn/kuma/issues/6092\n+ raise InvalidPage('Page number too large')\n+\n return number\n \n def page(self, number):\n", "issue": "search_phase_execution_exception in ES on huuuge ?page params\nhttps://sentry.prod.mozaws.net/operations/mdn-prod/issues/6620806/\n\n```\nTransportError: TransportError(500, u'search_phase_execution_exception', u'Result window is too large, from + size must be less than or equal to: [10000] but was [33010]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.')\n(24 additional frame(s) were not displayed)\n...\n File \"rest_framework/views.py\", line 492, in dispatch\n response = handler(request, *args, **kwargs)\n File \"rest_framework/generics.py\", line 201, in get\n return self.list(request, *args, **kwargs)\n File \"rest_framework/mixins.py\", line 42, in list\n page = self.paginate_queryset(queryset)\n File \"rest_framework/generics.py\", line 173, in paginate_queryset\n return self.paginator.paginate_queryset(queryset, self.request, view=self)\n File \"rest_framework/pagination.py\", line 204, in paginate_queryset\n self.page = paginator.page(page_number)\n\nTransportError: TransportError(500, u'search_phase_execution_exception', u'Result window is too large, from + size must be less than or equal to: [10000] but was [33010]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.')\n```\n", "before_files": [{"content": "from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator\nfrom django.utils.functional import cached_property\n\n\nclass SearchPaginator(Paginator):\n \"\"\"\n A better paginator for search results\n\n The normal Paginator does a .count() query and then a slice. 
Since ES\n results contain the total number of results, we can take an optimistic\n slice and then adjust the count.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(SearchPaginator, self).__init__(*args, **kwargs)\n self._result_total = None\n\n def validate_number(self, number):\n \"\"\"\n Validates the given 1-based page number.\n\n This class overrides the default behavior and ignores the upper bound.\n \"\"\"\n try:\n number = int(number)\n except (TypeError, ValueError):\n raise PageNotAnInteger('That page number is not an integer')\n if number < 1:\n raise EmptyPage('That page number is less than 1')\n return number\n\n def page(self, number):\n \"\"\"\n Returns a page object.\n\n This class overrides the default behavior and ignores \"orphans\" and\n assigns the count from the ES result to the Paginator.\n \"\"\"\n number = self.validate_number(number)\n bottom = (number - 1) * self.per_page\n top = bottom + self.per_page\n\n # Force the search to evaluate and then attach the count. We want to\n # avoid an extra useless query even if there are no results, so we\n # directly fetch the count from hits.\n result = self.object_list[bottom:top].execute()\n page = Page(result.hits, number, self)\n # Set the count to the results after post_filter\n self._result_total = result.hits.total\n # Also store the aggregations, if any.\n page.aggregations = getattr(result, 'aggregations', None)\n\n # Now that we have the count validate that the page number isn't higher\n # than the possible number of pages and adjust accordingly.\n if number > self.num_pages:\n if number == 1 and self.allow_empty_first_page:\n pass\n else:\n raise EmptyPage('That page contains no results')\n return page\n\n @cached_property\n def count(self):\n \"\"\"\n Returns the total number of results.\n\n Paginator's count property will call .count() on the search object,\n which returns results before the pre_filter. This will result in a\n count that is too high. Instead, use 'total' from the results,\n executing if needed.\n \"\"\"\n if self._result_total is not None:\n return self._result_total\n return self.object_list.execute().hits.total\n", "path": "kuma/search/paginator.py"}]} | 1,642 | 373 |
gh_patches_debug_11772 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-19204 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ERROR: Unable to extract OpenGraph title;
## Please follow the guide below
- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly
- Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`)
- Use the *Preview* tab to see what your issue will actually look like
---
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.12.17*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
- [x] I've **verified** and **I assure** that I'm running youtube-dl **2018.12.17**
### Before submitting an *issue* make sure you have:
- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
- [x] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser
### What is the purpose of your *issue*?
- [x] Bug report (encountered problems with youtube-dl)
- [ ] Site support request (request for adding support for a new site)
- [ ] Feature request (request for a new functionality)
- [ ] Question
- [ ] Other
---
```
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: [u'--verbose', u'https://i.imgur.com/jxBXAMC.gifv']
[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2018.12.17
[debug] Python version 2.7.12 (CPython) - Linux-4.4.0-112-generic-x86_64-with-Ubuntu-16.04-xenial
[debug] exe versions: ffmpeg 2.8.15-0ubuntu0.16.04.1, ffprobe 2.8.15-0ubuntu0.16.04.1, rtmpdump 2.4
[debug] Proxy map: {}
[Imgur] jxBXAMC: Downloading webpage
ERROR: Unable to extract OpenGraph title; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 793, in extract_info
ie_result = ie.extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 508, in extract
ie_result = self._real_extract(url)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/imgur.py", line 90, in _real_extract
'title': self._og_search_title(webpage),
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 1095, in _og_search_title
return self._og_search_property('title', html, **kargs)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 1083, in _og_search_property
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 983, in _search_regex
raise RegexNotFoundError('Unable to extract %s' % _name)
RegexNotFoundError: Unable to extract OpenGraph title; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
```
</issue>
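The gifv page for that clip has no `og:title` meta tag, so the strict lookup raises instead of falling back. A minimal sketch of the fallback the patch below applies, where `og_search_title` is a toy stand-in for youtube-dl's `_og_search_title` helper:

```python
import re


# Toy stand-in for youtube-dl's OpenGraph title lookup; the real helper lives
# in extractor/common.py and understands many more meta-tag variants.
def og_search_title(webpage, default=None):
    match = re.search(
        r'<meta[^>]+property=["\']og:title["\'][^>]+content=["\']([^"\']+)', webpage)
    if match:
        return match.group(1)
    if default is not None:
        return default
    raise ValueError("Unable to extract OpenGraph title")


video_id = "jxBXAMC"
webpage = "<html><head></head><body>no og tags here</body></html>"

# Mirrors the fix: fall back to the video id instead of erroring out.
print(og_search_title(webpage, default=video_id))  # jxBXAMC
```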
<code>
[start of youtube_dl/extractor/imgur.py]
1 from __future__ import unicode_literals
2
3 import re
4
5 from .common import InfoExtractor
6 from ..utils import (
7 int_or_none,
8 js_to_json,
9 mimetype2ext,
10 ExtractorError,
11 )
12
13
14 class ImgurIE(InfoExtractor):
15 _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?!(?:a|gallery|(?:t(?:opic)?|r)/[^/]+)/)(?P<id>[a-zA-Z0-9]+)'
16
17 _TESTS = [{
18 'url': 'https://i.imgur.com/A61SaA1.gifv',
19 'info_dict': {
20 'id': 'A61SaA1',
21 'ext': 'mp4',
22 'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
23 },
24 }, {
25 'url': 'https://imgur.com/A61SaA1',
26 'only_matching': True,
27 }, {
28 'url': 'https://i.imgur.com/crGpqCV.mp4',
29 'only_matching': True,
30 }]
31
32 def _real_extract(self, url):
33 video_id = self._match_id(url)
34 webpage = self._download_webpage(
35 'https://i.imgur.com/{id}.gifv'.format(id=video_id), video_id)
36
37 width = int_or_none(self._og_search_property(
38 'video:width', webpage, default=None))
39 height = int_or_none(self._og_search_property(
40 'video:height', webpage, default=None))
41
42 video_elements = self._search_regex(
43 r'(?s)<div class="video-elements">(.*?)</div>',
44 webpage, 'video elements', default=None)
45 if not video_elements:
46 raise ExtractorError(
47 'No sources found for video %s. Maybe an image?' % video_id,
48 expected=True)
49
50 formats = []
51 for m in re.finditer(r'<source\s+src="(?P<src>[^"]+)"\s+type="(?P<type>[^"]+)"', video_elements):
52 formats.append({
53 'format_id': m.group('type').partition('/')[2],
54 'url': self._proto_relative_url(m.group('src')),
55 'ext': mimetype2ext(m.group('type')),
56 'width': width,
57 'height': height,
58 'http_headers': {
59 'User-Agent': 'youtube-dl (like wget)',
60 },
61 })
62
63 gif_json = self._search_regex(
64 r'(?s)var\s+videoItem\s*=\s*(\{.*?\})',
65 webpage, 'GIF code', fatal=False)
66 if gif_json:
67 gifd = self._parse_json(
68 gif_json, video_id, transform_source=js_to_json)
69 formats.append({
70 'format_id': 'gif',
71 'preference': -10,
72 'width': width,
73 'height': height,
74 'ext': 'gif',
75 'acodec': 'none',
76 'vcodec': 'gif',
77 'container': 'gif',
78 'url': self._proto_relative_url(gifd['gifUrl']),
79 'filesize': gifd.get('size'),
80 'http_headers': {
81 'User-Agent': 'youtube-dl (like wget)',
82 },
83 })
84
85 self._sort_formats(formats)
86
87 return {
88 'id': video_id,
89 'formats': formats,
90 'title': self._og_search_title(webpage),
91 }
92
93
94 class ImgurGalleryIE(InfoExtractor):
95 IE_NAME = 'imgur:gallery'
96 _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?:gallery|(?:t(?:opic)?|r)/[^/]+)/(?P<id>[a-zA-Z0-9]+)'
97
98 _TESTS = [{
99 'url': 'http://imgur.com/gallery/Q95ko',
100 'info_dict': {
101 'id': 'Q95ko',
102 'title': 'Adding faces make every GIF better',
103 },
104 'playlist_count': 25,
105 }, {
106 'url': 'http://imgur.com/topic/Aww/ll5Vk',
107 'only_matching': True,
108 }, {
109 'url': 'https://imgur.com/gallery/YcAQlkx',
110 'info_dict': {
111 'id': 'YcAQlkx',
112 'ext': 'mp4',
113 'title': 'Classic Steve Carell gif...cracks me up everytime....damn the repost downvotes....',
114 }
115 }, {
116 'url': 'http://imgur.com/topic/Funny/N8rOudd',
117 'only_matching': True,
118 }, {
119 'url': 'http://imgur.com/r/aww/VQcQPhM',
120 'only_matching': True,
121 }]
122
123 def _real_extract(self, url):
124 gallery_id = self._match_id(url)
125
126 data = self._download_json(
127 'https://imgur.com/gallery/%s.json' % gallery_id,
128 gallery_id)['data']['image']
129
130 if data.get('is_album'):
131 entries = [
132 self.url_result('http://imgur.com/%s' % image['hash'], ImgurIE.ie_key(), image['hash'])
133 for image in data['album_images']['images'] if image.get('hash')]
134 return self.playlist_result(entries, gallery_id, data.get('title'), data.get('description'))
135
136 return self.url_result('http://imgur.com/%s' % gallery_id, ImgurIE.ie_key(), gallery_id)
137
138
139 class ImgurAlbumIE(ImgurGalleryIE):
140 IE_NAME = 'imgur:album'
141 _VALID_URL = r'https?://(?:i\.)?imgur\.com/a/(?P<id>[a-zA-Z0-9]+)'
142
143 _TESTS = [{
144 'url': 'http://imgur.com/a/j6Orj',
145 'info_dict': {
146 'id': 'j6Orj',
147 'title': 'A Literary Analysis of "Star Wars: The Force Awakens"',
148 },
149 'playlist_count': 12,
150 }]
151
[end of youtube_dl/extractor/imgur.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/youtube_dl/extractor/imgur.py b/youtube_dl/extractor/imgur.py
--- a/youtube_dl/extractor/imgur.py
+++ b/youtube_dl/extractor/imgur.py
@@ -27,6 +27,10 @@
}, {
'url': 'https://i.imgur.com/crGpqCV.mp4',
'only_matching': True,
+ }, {
+ # no title
+ 'url': 'https://i.imgur.com/jxBXAMC.gifv',
+ 'only_matching': True,
}]
def _real_extract(self, url):
@@ -87,7 +91,7 @@
return {
'id': video_id,
'formats': formats,
- 'title': self._og_search_title(webpage),
+ 'title': self._og_search_title(webpage, default=video_id),
}
| {"golden_diff": "diff --git a/youtube_dl/extractor/imgur.py b/youtube_dl/extractor/imgur.py\n--- a/youtube_dl/extractor/imgur.py\n+++ b/youtube_dl/extractor/imgur.py\n@@ -27,6 +27,10 @@\n }, {\n 'url': 'https://i.imgur.com/crGpqCV.mp4',\n 'only_matching': True,\n+ }, {\n+ # no title\n+ 'url': 'https://i.imgur.com/jxBXAMC.gifv',\n+ 'only_matching': True,\n }]\n \n def _real_extract(self, url):\n@@ -87,7 +91,7 @@\n return {\n 'id': video_id,\n 'formats': formats,\n- 'title': self._og_search_title(webpage),\n+ 'title': self._og_search_title(webpage, default=video_id),\n }\n", "issue": "ERROR: Unable to extract OpenGraph title;\n## Please follow the guide below\r\n\r\n- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly\r\n- Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`)\r\n- Use the *Preview* tab to see what your issue will actually look like\r\n\r\n---\r\n\r\n### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.12.17*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.\r\n- [x] I've **verified** and **I assure** that I'm running youtube-dl **2018.12.17**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections\r\n- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n- [x] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser\r\n\r\n### What is the purpose of your *issue*?\r\n- [x] Bug report (encountered problems with youtube-dl)\r\n- [ ] Site support request (request for adding support for a new site)\r\n- [ ] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n\r\n---\r\n\r\n```\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: [u'--verbose', u'https://i.imgur.com/jxBXAMC.gifv']\r\n[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8\r\n[debug] youtube-dl version 2018.12.17\r\n[debug] Python version 2.7.12 (CPython) - Linux-4.4.0-112-generic-x86_64-with-Ubuntu-16.04-xenial\r\n[debug] exe versions: ffmpeg 2.8.15-0ubuntu0.16.04.1, ffprobe 2.8.15-0ubuntu0.16.04.1, rtmpdump 2.4\r\n[debug] Proxy map: {}\r\n[Imgur] jxBXAMC: Downloading webpage\r\nERROR: Unable to extract OpenGraph title; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. 
Be sure to call youtube-dl with the --verbose flag and include its complete output.\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 793, in extract_info\r\n ie_result = ie.extract(url)\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py\", line 508, in extract\r\n ie_result = self._real_extract(url)\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/imgur.py\", line 90, in _real_extract\r\n 'title': self._og_search_title(webpage),\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py\", line 1095, in _og_search_title\r\n return self._og_search_property('title', html, **kargs)\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py\", line 1083, in _og_search_property\r\n escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)\r\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py\", line 983, in _search_regex\r\n raise RegexNotFoundError('Unable to extract %s' % _name)\r\nRegexNotFoundError: Unable to extract OpenGraph title; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n int_or_none,\n js_to_json,\n mimetype2ext,\n ExtractorError,\n)\n\n\nclass ImgurIE(InfoExtractor):\n _VALID_URL = r'https?://(?:i\\.)?imgur\\.com/(?!(?:a|gallery|(?:t(?:opic)?|r)/[^/]+)/)(?P<id>[a-zA-Z0-9]+)'\n\n _TESTS = [{\n 'url': 'https://i.imgur.com/A61SaA1.gifv',\n 'info_dict': {\n 'id': 'A61SaA1',\n 'ext': 'mp4',\n 'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',\n },\n }, {\n 'url': 'https://imgur.com/A61SaA1',\n 'only_matching': True,\n }, {\n 'url': 'https://i.imgur.com/crGpqCV.mp4',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(\n 'https://i.imgur.com/{id}.gifv'.format(id=video_id), video_id)\n\n width = int_or_none(self._og_search_property(\n 'video:width', webpage, default=None))\n height = int_or_none(self._og_search_property(\n 'video:height', webpage, default=None))\n\n video_elements = self._search_regex(\n r'(?s)<div class=\"video-elements\">(.*?)</div>',\n webpage, 'video elements', default=None)\n if not video_elements:\n raise ExtractorError(\n 'No sources found for video %s. Maybe an image?' 
% video_id,\n expected=True)\n\n formats = []\n for m in re.finditer(r'<source\\s+src=\"(?P<src>[^\"]+)\"\\s+type=\"(?P<type>[^\"]+)\"', video_elements):\n formats.append({\n 'format_id': m.group('type').partition('/')[2],\n 'url': self._proto_relative_url(m.group('src')),\n 'ext': mimetype2ext(m.group('type')),\n 'width': width,\n 'height': height,\n 'http_headers': {\n 'User-Agent': 'youtube-dl (like wget)',\n },\n })\n\n gif_json = self._search_regex(\n r'(?s)var\\s+videoItem\\s*=\\s*(\\{.*?\\})',\n webpage, 'GIF code', fatal=False)\n if gif_json:\n gifd = self._parse_json(\n gif_json, video_id, transform_source=js_to_json)\n formats.append({\n 'format_id': 'gif',\n 'preference': -10,\n 'width': width,\n 'height': height,\n 'ext': 'gif',\n 'acodec': 'none',\n 'vcodec': 'gif',\n 'container': 'gif',\n 'url': self._proto_relative_url(gifd['gifUrl']),\n 'filesize': gifd.get('size'),\n 'http_headers': {\n 'User-Agent': 'youtube-dl (like wget)',\n },\n })\n\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'formats': formats,\n 'title': self._og_search_title(webpage),\n }\n\n\nclass ImgurGalleryIE(InfoExtractor):\n IE_NAME = 'imgur:gallery'\n _VALID_URL = r'https?://(?:i\\.)?imgur\\.com/(?:gallery|(?:t(?:opic)?|r)/[^/]+)/(?P<id>[a-zA-Z0-9]+)'\n\n _TESTS = [{\n 'url': 'http://imgur.com/gallery/Q95ko',\n 'info_dict': {\n 'id': 'Q95ko',\n 'title': 'Adding faces make every GIF better',\n },\n 'playlist_count': 25,\n }, {\n 'url': 'http://imgur.com/topic/Aww/ll5Vk',\n 'only_matching': True,\n }, {\n 'url': 'https://imgur.com/gallery/YcAQlkx',\n 'info_dict': {\n 'id': 'YcAQlkx',\n 'ext': 'mp4',\n 'title': 'Classic Steve Carell gif...cracks me up everytime....damn the repost downvotes....',\n }\n }, {\n 'url': 'http://imgur.com/topic/Funny/N8rOudd',\n 'only_matching': True,\n }, {\n 'url': 'http://imgur.com/r/aww/VQcQPhM',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n gallery_id = self._match_id(url)\n\n data = self._download_json(\n 'https://imgur.com/gallery/%s.json' % gallery_id,\n gallery_id)['data']['image']\n\n if data.get('is_album'):\n entries = [\n self.url_result('http://imgur.com/%s' % image['hash'], ImgurIE.ie_key(), image['hash'])\n for image in data['album_images']['images'] if image.get('hash')]\n return self.playlist_result(entries, gallery_id, data.get('title'), data.get('description'))\n\n return self.url_result('http://imgur.com/%s' % gallery_id, ImgurIE.ie_key(), gallery_id)\n\n\nclass ImgurAlbumIE(ImgurGalleryIE):\n IE_NAME = 'imgur:album'\n _VALID_URL = r'https?://(?:i\\.)?imgur\\.com/a/(?P<id>[a-zA-Z0-9]+)'\n\n _TESTS = [{\n 'url': 'http://imgur.com/a/j6Orj',\n 'info_dict': {\n 'id': 'j6Orj',\n 'title': 'A Literary Analysis of \"Star Wars: The Force Awakens\"',\n },\n 'playlist_count': 12,\n }]\n", "path": "youtube_dl/extractor/imgur.py"}]} | 3,258 | 199 |
gh_patches_debug_400 | rasdani/github-patches | git_diff | mozilla__bugbug-3941 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[model:accessibility] Add the model to `http_service` and `data_pipeline`
Depends on merging: #3775
</issue>
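For orientation, the `http_service` half of this request amounts to registering the new model name so the service can load and serve it. The sketch below is a minimal illustration, not the project's actual change set: the only assumption (suggested by the `Model.load(f"{m}model")` lambda in the file below) is that a registered name `m` resolves to an artifact called `{m}model`. The `data_pipeline` half (scheduling the model for training) is not shown in this record and is therefore not sketched here.

```
# Minimal sketch of the registration step (assumption: names in MODELS_NAMES
# map to "<name>model" artifacts, mirroring Model.load(f"{m}model") below).
MODELS_NAMES = [
    "defectenhancementtask",
    "component",
    "accessibility",  # the new entry this issue asks for
]

def artifact_name(model_name: str) -> str:
    if model_name not in MODELS_NAMES:
        raise ValueError(f"unknown model: {model_name}")
    return f"{model_name}model"

assert artifact_name("accessibility") == "accessibilitymodel"
```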
<code>
[start of http_service/bugbug_http/models.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import logging
7 import os
8 from datetime import timedelta
9 from functools import lru_cache
10 from typing import Sequence
11
12 import orjson
13 import requests
14 import zstandard
15 from redis import Redis
16
17 from bugbug import bugzilla, repository, test_scheduling
18 from bugbug.github import Github
19 from bugbug.model import Model
20 from bugbug.models import testselect
21 from bugbug.utils import get_hgmo_stack
22 from bugbug_http.readthrough_cache import ReadthroughTTLCache
23
24 logging.basicConfig(level=logging.INFO)
25 LOGGER = logging.getLogger()
26
27 MODELS_NAMES = [
28 "defectenhancementtask",
29 "component",
30 "invalidcompatibilityreport",
31 "needsdiagnosis",
32 "regression",
33 "stepstoreproduce",
34 "spambug",
35 "testlabelselect",
36 "testgroupselect",
37 ]
38
39 DEFAULT_EXPIRATION_TTL = 7 * 24 * 3600 # A week
40 redis = Redis.from_url(os.environ.get("REDIS_URL", "redis://localhost/0"))
41
42 MODEL_CACHE: ReadthroughTTLCache[str, Model] = ReadthroughTTLCache(
43 timedelta(hours=1), lambda m: Model.load(f"{m}model")
44 )
45 MODEL_CACHE.start_ttl_thread()
46
47 cctx = zstandard.ZstdCompressor(level=10)
48
49
50 def setkey(key: str, value: bytes, compress: bool = False) -> None:
51 LOGGER.debug(f"Storing data at {key}: {value!r}")
52 if compress:
53 value = cctx.compress(value)
54 redis.set(key, value)
55 redis.expire(key, DEFAULT_EXPIRATION_TTL)
56
57
58 def classify_bug(model_name: str, bug_ids: Sequence[int], bugzilla_token: str) -> str:
59 from bugbug_http.app import JobInfo
60
61 # This should be called in a process worker so it should be safe to set
62 # the token here
63 bug_ids_set = set(map(int, bug_ids))
64 bugzilla.set_token(bugzilla_token)
65
66 bugs = bugzilla.get(bug_ids)
67
68 missing_bugs = bug_ids_set.difference(bugs.keys())
69
70 for bug_id in missing_bugs:
71 job = JobInfo(classify_bug, model_name, bug_id)
72
73 # TODO: Find a better error format
74 setkey(job.result_key, orjson.dumps({"available": False}))
75
76 if not bugs:
77 return "NOK"
78
79 model = MODEL_CACHE.get(model_name)
80
81 if not model:
82 LOGGER.info("Missing model %r, aborting" % model_name)
83 return "NOK"
84
85 model_extra_data = model.get_extra_data()
86
87 # TODO: Classify could choke on a single bug which could make the whole
88 # job to fails. What should we do here?
89 probs = model.classify(list(bugs.values()), True)
90 indexes = probs.argmax(axis=-1)
91 suggestions = model.le.inverse_transform(indexes)
92
93 probs_list = probs.tolist()
94 indexes_list = indexes.tolist()
95 suggestions_list = suggestions.tolist()
96
97 for i, bug_id in enumerate(bugs.keys()):
98 data = {
99 "prob": probs_list[i],
100 "index": indexes_list[i],
101 "class": suggestions_list[i],
102 "extra_data": model_extra_data,
103 }
104
105 job = JobInfo(classify_bug, model_name, bug_id)
106 setkey(job.result_key, orjson.dumps(data), compress=True)
107
108 # Save the bug last change
109 setkey(job.change_time_key, bugs[bug_id]["last_change_time"].encode())
110
111 return "OK"
112
113
114 def classify_issue(
115 model_name: str, owner: str, repo: str, issue_nums: Sequence[int]
116 ) -> str:
117 from bugbug_http.app import JobInfo
118
119 github = Github(owner=owner, repo=repo)
120
121 issue_ids_set = set(map(int, issue_nums))
122
123 issues = {
124 issue_num: github.fetch_issue_by_number(owner, repo, issue_num, True)
125 for issue_num in issue_nums
126 }
127
128 missing_issues = issue_ids_set.difference(issues.keys())
129
130 for issue_id in missing_issues:
131 job = JobInfo(classify_issue, model_name, owner, repo, issue_id)
132
133 # TODO: Find a better error format
134 setkey(job.result_key, orjson.dumps({"available": False}))
135
136 if not issues:
137 return "NOK"
138
139 model = MODEL_CACHE.get(model_name)
140
141 if not model:
142 LOGGER.info("Missing model %r, aborting" % model_name)
143 return "NOK"
144
145 model_extra_data = model.get_extra_data()
146
147 # TODO: Classify could choke on a single bug which could make the whole
148 # job to fail. What should we do here?
149 probs = model.classify(list(issues.values()), True)
150 indexes = probs.argmax(axis=-1)
151 suggestions = model.le.inverse_transform(indexes)
152
153 probs_list = probs.tolist()
154 indexes_list = indexes.tolist()
155 suggestions_list = suggestions.tolist()
156
157 for i, issue_id in enumerate(issues.keys()):
158 data = {
159 "prob": probs_list[i],
160 "index": indexes_list[i],
161 "class": suggestions_list[i],
162 "extra_data": model_extra_data,
163 }
164
165 job = JobInfo(classify_issue, model_name, owner, repo, issue_id)
166 setkey(job.result_key, orjson.dumps(data), compress=True)
167
168 # Save the bug last change
169 setkey(job.change_time_key, issues[issue_id]["updated_at"].encode())
170
171 return "OK"
172
173
174 def classify_broken_site_report(model_name: str, reports_data: list[dict]) -> str:
175 from bugbug_http.app import JobInfo
176
177 reports = {
178 report["uuid"]: {"title": report["title"], "body": report["body"]}
179 for report in reports_data
180 }
181
182 if not reports:
183 return "NOK"
184
185 model = MODEL_CACHE.get(model_name)
186
187 if not model:
188 LOGGER.info("Missing model %r, aborting" % model_name)
189 return "NOK"
190
191 model_extra_data = model.get_extra_data()
192 probs = model.classify(list(reports.values()), True)
193 indexes = probs.argmax(axis=-1)
194 suggestions = model.le.inverse_transform(indexes)
195
196 probs_list = probs.tolist()
197 indexes_list = indexes.tolist()
198 suggestions_list = suggestions.tolist()
199
200 for i, report_uuid in enumerate(reports.keys()):
201 data = {
202 "prob": probs_list[i],
203 "index": indexes_list[i],
204 "class": suggestions_list[i],
205 "extra_data": model_extra_data,
206 }
207
208 job = JobInfo(classify_broken_site_report, model_name, report_uuid)
209 setkey(job.result_key, orjson.dumps(data), compress=True)
210
211 return "OK"
212
213
214 @lru_cache(maxsize=None)
215 def get_known_tasks() -> tuple[str, ...]:
216 with open("known_tasks", "r") as f:
217 return tuple(line.strip() for line in f)
218
219
220 def schedule_tests(branch: str, rev: str) -> str:
221 from bugbug_http import REPO_DIR
222 from bugbug_http.app import JobInfo
223
224 job = JobInfo(schedule_tests, branch, rev)
225 LOGGER.info("Processing %s...", job)
226
227 # Pull the revision to the local repository
228 LOGGER.info("Pulling commits from the remote repository...")
229 repository.pull(REPO_DIR, branch, rev)
230
231 # Load the full stack of patches leading to that revision
232 LOGGER.info("Loading commits to analyze using automationrelevance...")
233 try:
234 revs = get_hgmo_stack(branch, rev)
235 except requests.exceptions.RequestException:
236 LOGGER.warning(f"Push not found for {branch} @ {rev}!")
237 return "NOK"
238
239 test_selection_threshold = float(
240 os.environ.get("TEST_SELECTION_CONFIDENCE_THRESHOLD", 0.5)
241 )
242
243 # On "try", consider commits from other branches too (see https://bugzilla.mozilla.org/show_bug.cgi?id=1790493).
244 # On other repos, only consider "tip" commits (to exclude commits such as https://hg.mozilla.org/integration/autoland/rev/961f253985a4388008700a6a6fde80f4e17c0b4b).
245 if branch == "try":
246 repo_branch = None
247 else:
248 repo_branch = "tip"
249
250 # Analyze patches.
251 commits = repository.download_commits(
252 REPO_DIR,
253 revs=revs,
254 branch=repo_branch,
255 save=False,
256 use_single_process=True,
257 include_no_bug=True,
258 )
259
260 if len(commits) > 0:
261 testlabelselect_model = MODEL_CACHE.get("testlabelselect")
262 testgroupselect_model = MODEL_CACHE.get("testgroupselect")
263
264 tasks = testlabelselect_model.select_tests(commits, test_selection_threshold)
265
266 reduced = testselect.reduce_configs(
267 set(t for t, c in tasks.items() if c >= 0.8), 1.0
268 )
269
270 reduced_higher = testselect.reduce_configs(
271 set(t for t, c in tasks.items() if c >= 0.9), 1.0
272 )
273
274 groups = testgroupselect_model.select_tests(commits, test_selection_threshold)
275
276 config_groups = testselect.select_configs(groups.keys(), 0.9)
277 else:
278 tasks = {}
279 reduced = set()
280 groups = {}
281 config_groups = {}
282
283 data = {
284 "tasks": tasks,
285 "groups": groups,
286 "config_groups": config_groups,
287 "reduced_tasks": {t: c for t, c in tasks.items() if t in reduced},
288 "reduced_tasks_higher": {t: c for t, c in tasks.items() if t in reduced_higher},
289 "known_tasks": get_known_tasks(),
290 }
291 setkey(job.result_key, orjson.dumps(data), compress=True)
292
293 return "OK"
294
295
296 def get_config_specific_groups(config: str) -> str:
297 from bugbug_http.app import JobInfo
298
299 job = JobInfo(get_config_specific_groups, config)
300 LOGGER.info("Processing %s...", job)
301
302 equivalence_sets = testselect._get_equivalence_sets(0.9)
303
304 past_failures_data = test_scheduling.PastFailures("group", True)
305
306 setkey(
307 job.result_key,
308 orjson.dumps(
309 [
310 {"name": group}
311 for group in past_failures_data.all_runnables
312 if any(
313 equivalence_set == {config}
314 for equivalence_set in equivalence_sets[group]
315 )
316 ]
317 ),
318 compress=True,
319 )
320
321 return "OK"
322
[end of http_service/bugbug_http/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/http_service/bugbug_http/models.py b/http_service/bugbug_http/models.py
--- a/http_service/bugbug_http/models.py
+++ b/http_service/bugbug_http/models.py
@@ -34,6 +34,7 @@
"spambug",
"testlabelselect",
"testgroupselect",
+ "accessibility",
]
DEFAULT_EXPIRATION_TTL = 7 * 24 * 3600 # A week
| {"golden_diff": "diff --git a/http_service/bugbug_http/models.py b/http_service/bugbug_http/models.py\n--- a/http_service/bugbug_http/models.py\n+++ b/http_service/bugbug_http/models.py\n@@ -34,6 +34,7 @@\n \"spambug\",\n \"testlabelselect\",\n \"testgroupselect\",\n+ \"accessibility\",\n ]\n \n DEFAULT_EXPIRATION_TTL = 7 * 24 * 3600 # A week\n", "issue": "[model:accessibility] Add the model to `http_service` and `data_pipeline`\nDepends on merging: #3775\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nimport os\nfrom datetime import timedelta\nfrom functools import lru_cache\nfrom typing import Sequence\n\nimport orjson\nimport requests\nimport zstandard\nfrom redis import Redis\n\nfrom bugbug import bugzilla, repository, test_scheduling\nfrom bugbug.github import Github\nfrom bugbug.model import Model\nfrom bugbug.models import testselect\nfrom bugbug.utils import get_hgmo_stack\nfrom bugbug_http.readthrough_cache import ReadthroughTTLCache\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger()\n\nMODELS_NAMES = [\n \"defectenhancementtask\",\n \"component\",\n \"invalidcompatibilityreport\",\n \"needsdiagnosis\",\n \"regression\",\n \"stepstoreproduce\",\n \"spambug\",\n \"testlabelselect\",\n \"testgroupselect\",\n]\n\nDEFAULT_EXPIRATION_TTL = 7 * 24 * 3600 # A week\nredis = Redis.from_url(os.environ.get(\"REDIS_URL\", \"redis://localhost/0\"))\n\nMODEL_CACHE: ReadthroughTTLCache[str, Model] = ReadthroughTTLCache(\n timedelta(hours=1), lambda m: Model.load(f\"{m}model\")\n)\nMODEL_CACHE.start_ttl_thread()\n\ncctx = zstandard.ZstdCompressor(level=10)\n\n\ndef setkey(key: str, value: bytes, compress: bool = False) -> None:\n LOGGER.debug(f\"Storing data at {key}: {value!r}\")\n if compress:\n value = cctx.compress(value)\n redis.set(key, value)\n redis.expire(key, DEFAULT_EXPIRATION_TTL)\n\n\ndef classify_bug(model_name: str, bug_ids: Sequence[int], bugzilla_token: str) -> str:\n from bugbug_http.app import JobInfo\n\n # This should be called in a process worker so it should be safe to set\n # the token here\n bug_ids_set = set(map(int, bug_ids))\n bugzilla.set_token(bugzilla_token)\n\n bugs = bugzilla.get(bug_ids)\n\n missing_bugs = bug_ids_set.difference(bugs.keys())\n\n for bug_id in missing_bugs:\n job = JobInfo(classify_bug, model_name, bug_id)\n\n # TODO: Find a better error format\n setkey(job.result_key, orjson.dumps({\"available\": False}))\n\n if not bugs:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n\n # TODO: Classify could choke on a single bug which could make the whole\n # job to fails. 
What should we do here?\n probs = model.classify(list(bugs.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, bug_id in enumerate(bugs.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_bug, model_name, bug_id)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n # Save the bug last change\n setkey(job.change_time_key, bugs[bug_id][\"last_change_time\"].encode())\n\n return \"OK\"\n\n\ndef classify_issue(\n model_name: str, owner: str, repo: str, issue_nums: Sequence[int]\n) -> str:\n from bugbug_http.app import JobInfo\n\n github = Github(owner=owner, repo=repo)\n\n issue_ids_set = set(map(int, issue_nums))\n\n issues = {\n issue_num: github.fetch_issue_by_number(owner, repo, issue_num, True)\n for issue_num in issue_nums\n }\n\n missing_issues = issue_ids_set.difference(issues.keys())\n\n for issue_id in missing_issues:\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\n\n # TODO: Find a better error format\n setkey(job.result_key, orjson.dumps({\"available\": False}))\n\n if not issues:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n\n # TODO: Classify could choke on a single bug which could make the whole\n # job to fail. What should we do here?\n probs = model.classify(list(issues.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, issue_id in enumerate(issues.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n # Save the bug last change\n setkey(job.change_time_key, issues[issue_id][\"updated_at\"].encode())\n\n return \"OK\"\n\n\ndef classify_broken_site_report(model_name: str, reports_data: list[dict]) -> str:\n from bugbug_http.app import JobInfo\n\n reports = {\n report[\"uuid\"]: {\"title\": report[\"title\"], \"body\": report[\"body\"]}\n for report in reports_data\n }\n\n if not reports:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n probs = model.classify(list(reports.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, report_uuid in enumerate(reports.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_broken_site_report, model_name, report_uuid)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n return \"OK\"\n\n\n@lru_cache(maxsize=None)\ndef get_known_tasks() -> tuple[str, ...]:\n with open(\"known_tasks\", \"r\") as f:\n return tuple(line.strip() for line in 
f)\n\n\ndef schedule_tests(branch: str, rev: str) -> str:\n from bugbug_http import REPO_DIR\n from bugbug_http.app import JobInfo\n\n job = JobInfo(schedule_tests, branch, rev)\n LOGGER.info(\"Processing %s...\", job)\n\n # Pull the revision to the local repository\n LOGGER.info(\"Pulling commits from the remote repository...\")\n repository.pull(REPO_DIR, branch, rev)\n\n # Load the full stack of patches leading to that revision\n LOGGER.info(\"Loading commits to analyze using automationrelevance...\")\n try:\n revs = get_hgmo_stack(branch, rev)\n except requests.exceptions.RequestException:\n LOGGER.warning(f\"Push not found for {branch} @ {rev}!\")\n return \"NOK\"\n\n test_selection_threshold = float(\n os.environ.get(\"TEST_SELECTION_CONFIDENCE_THRESHOLD\", 0.5)\n )\n\n # On \"try\", consider commits from other branches too (see https://bugzilla.mozilla.org/show_bug.cgi?id=1790493).\n # On other repos, only consider \"tip\" commits (to exclude commits such as https://hg.mozilla.org/integration/autoland/rev/961f253985a4388008700a6a6fde80f4e17c0b4b).\n if branch == \"try\":\n repo_branch = None\n else:\n repo_branch = \"tip\"\n\n # Analyze patches.\n commits = repository.download_commits(\n REPO_DIR,\n revs=revs,\n branch=repo_branch,\n save=False,\n use_single_process=True,\n include_no_bug=True,\n )\n\n if len(commits) > 0:\n testlabelselect_model = MODEL_CACHE.get(\"testlabelselect\")\n testgroupselect_model = MODEL_CACHE.get(\"testgroupselect\")\n\n tasks = testlabelselect_model.select_tests(commits, test_selection_threshold)\n\n reduced = testselect.reduce_configs(\n set(t for t, c in tasks.items() if c >= 0.8), 1.0\n )\n\n reduced_higher = testselect.reduce_configs(\n set(t for t, c in tasks.items() if c >= 0.9), 1.0\n )\n\n groups = testgroupselect_model.select_tests(commits, test_selection_threshold)\n\n config_groups = testselect.select_configs(groups.keys(), 0.9)\n else:\n tasks = {}\n reduced = set()\n groups = {}\n config_groups = {}\n\n data = {\n \"tasks\": tasks,\n \"groups\": groups,\n \"config_groups\": config_groups,\n \"reduced_tasks\": {t: c for t, c in tasks.items() if t in reduced},\n \"reduced_tasks_higher\": {t: c for t, c in tasks.items() if t in reduced_higher},\n \"known_tasks\": get_known_tasks(),\n }\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n return \"OK\"\n\n\ndef get_config_specific_groups(config: str) -> str:\n from bugbug_http.app import JobInfo\n\n job = JobInfo(get_config_specific_groups, config)\n LOGGER.info(\"Processing %s...\", job)\n\n equivalence_sets = testselect._get_equivalence_sets(0.9)\n\n past_failures_data = test_scheduling.PastFailures(\"group\", True)\n\n setkey(\n job.result_key,\n orjson.dumps(\n [\n {\"name\": group}\n for group in past_failures_data.all_runnables\n if any(\n equivalence_set == {config}\n for equivalence_set in equivalence_sets[group]\n )\n ]\n ),\n compress=True,\n )\n\n return \"OK\"\n", "path": "http_service/bugbug_http/models.py"}]} | 3,828 | 104 |
gh_patches_debug_8276 | rasdani/github-patches | git_diff | archlinux__archinstall-666 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
re_rank_mirrors bug
`re_rank_mirrors` from `archinstall/lib/mirrors.py` does not work. Since `SysCommand` doesn't run the command through a shell, the stdout redirection (`foo > bar`) is never interpreted.
Also, using the source file as the destination file does not work: the file ends up empty.
</issue>
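Both failure modes described above (no shell, so `>` is never interpreted, and truncating the source file before it has been read) can be avoided by capturing the command's output in Python and writing it out only after the command has finished. The sketch below is a hedged illustration using `subprocess.run` as a stand-in for `SysCommand`; treat the signature and paths as assumptions rather than archinstall's actual implementation.

```
import subprocess

def re_rank_mirrors(top: int = 10,
                    src: str = "/etc/pacman.d/mirrorlist",
                    dst: str = "/etc/pacman.d/mirrorlist") -> bool:
    # No shell is involved, so there is no "> file" redirection to rely on.
    proc = subprocess.run(
        ["/usr/bin/rankmirrors", "-n", str(top), src],
        capture_output=True, text=True,
    )
    if proc.returncode != 0:
        return False
    # The ranked list is fully buffered in memory, so writing back to the
    # same path (src == dst) no longer truncates the input mid-read.
    with open(dst, "w") as f:
        f.write(proc.stdout)
    return True
```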
<code>
[start of archinstall/lib/mirrors.py]
1 import urllib.error
2 import urllib.request
3 from typing import Union
4
5 from .general import *
6 from .output import log
7
8 def sort_mirrorlist(raw_data :bytes, sort_order=["https", "http"]) -> bytes:
9 """
10 This function can sort /etc/pacman.d/mirrorlist according to the
11 mirror's URL prefix. By default places HTTPS before HTTP but it also
12 preserves the country/rank-order.
13
14 This assumes /etc/pacman.d/mirrorlist looks like the following:
15
16 ## Comment
17 Server = url
18
19 or
20
21 ## Comment
22 #Server = url
23
24 But the Comments need to start with double-hashmarks to be distringuished
25 from server url definitions (commented or uncommented).
26 """
27 comments_and_whitespaces = b""
28
29 categories = {key: [] for key in sort_order+["Unknown"]}
30 for line in raw_data.split(b"\n"):
31 if line[0:2] in (b'##', b''):
32 comments_and_whitespaces += line + b'\n'
33 elif line[:6].lower() == b'server' or line[:7].lower() == b'#server':
34 opening, url = line.split(b'=', 1)
35 opening, url = opening.strip(), url.strip()
36 if (category := url.split(b'://',1)[0].decode('UTF-8')) in categories:
37 categories[category].append(comments_and_whitespaces)
38 categories[category].append(opening+b' = '+url+b'\n')
39 else:
40 categories["Unknown"].append(comments_and_whitespaces)
41 categories["Unknown"].append(opening+b' = '+url+b'\n')
42
43 comments_and_whitespaces = b""
44
45
46 new_raw_data = b''
47 for category in sort_order+["Unknown"]:
48 for line in categories[category]:
49 new_raw_data += line
50
51 return new_raw_data
52
53
54 def filter_mirrors_by_region(regions, destination='/etc/pacman.d/mirrorlist', sort_order=["https", "http"], *args, **kwargs) -> Union[bool, bytes]:
55 """
56 This function will change the active mirrors on the live medium by
57 filtering which regions are active based on `regions`.
58
59 :param regions: A series of country codes separated by `,`. For instance `SE,US` for sweden and United States.
60 :type regions: str
61 """
62 region_list = [f'country={region}' for region in regions.split(',')]
63 response = urllib.request.urlopen(urllib.request.Request(f"https://archlinux.org/mirrorlist/?{'&'.join(region_list)}&protocol=https&protocol=http&ip_version=4&ip_version=6&use_mirror_status=on'", headers={'User-Agent': 'ArchInstall'}))
64 new_list = response.read().replace(b"#Server", b"Server")
65
66 if sort_order:
67 new_list = sort_mirrorlist(new_list, sort_order=sort_order)
68
69 if destination:
70 with open(destination, "wb") as mirrorlist:
71 mirrorlist.write(new_list)
72
73 return True
74 else:
75 return new_list.decode('UTF-8')
76
77
78 def add_custom_mirrors(mirrors: list, *args, **kwargs):
79 """
80 This will append custom mirror definitions in pacman.conf
81
82 :param mirrors: A list of mirror data according to: `{'url': 'http://url.com', 'signcheck': 'Optional', 'signoptions': 'TrustAll', 'name': 'testmirror'}`
83 :type mirrors: dict
84 """
85 with open('/etc/pacman.conf', 'a') as pacman:
86 for mirror in mirrors:
87 pacman.write(f"[{mirror['name']}]\n")
88 pacman.write(f"SigLevel = {mirror['signcheck']} {mirror['signoptions']}\n")
89 pacman.write(f"Server = {mirror['url']}\n")
90
91 return True
92
93
94 def insert_mirrors(mirrors, *args, **kwargs):
95 """
96 This function will insert a given mirror-list at the top of `/etc/pacman.d/mirrorlist`.
97 It will not flush any other mirrors, just insert new ones.
98
99 :param mirrors: A dictionary of `{'url' : 'country', 'url2' : 'country'}`
100 :type mirrors: dict
101 """
102 original_mirrorlist = ''
103 with open('/etc/pacman.d/mirrorlist', 'r') as original:
104 original_mirrorlist = original.read()
105
106 with open('/etc/pacman.d/mirrorlist', 'w') as new_mirrorlist:
107 for mirror, country in mirrors.items():
108 new_mirrorlist.write(f'## {country}\n')
109 new_mirrorlist.write(f'Server = {mirror}\n')
110 new_mirrorlist.write('\n')
111 new_mirrorlist.write(original_mirrorlist)
112
113 return True
114
115
116 def use_mirrors(regions: dict, destination='/etc/pacman.d/mirrorlist'):
117 log(f'A new package mirror-list has been created: {destination}', level=logging.INFO)
118 for region, mirrors in regions.items():
119 with open(destination, 'w') as mirrorlist:
120 for mirror in mirrors:
121 mirrorlist.write(f'## {region}\n')
122 mirrorlist.write(f'Server = {mirror}\n')
123 return True
124
125
126 def re_rank_mirrors(top=10, *positionals, **kwargs):
127 if SysCommand(f'/usr/bin/rankmirrors -n {top} /etc/pacman.d/mirrorlist > /etc/pacman.d/mirrorlist').exit_code == 0:
128 return True
129 return False
130
131
132 def list_mirrors(sort_order=["https", "http"]):
133 url = "https://archlinux.org/mirrorlist/?protocol=https&protocol=http&ip_version=4&ip_version=6&use_mirror_status=on"
134 regions = {}
135
136 try:
137 response = urllib.request.urlopen(url)
138 except urllib.error.URLError as err:
139 log(f'Could not fetch an active mirror-list: {err}', level=logging.WARNING, fg="yellow")
140 return regions
141
142 mirrorlist = response.read()
143 if sort_order:
144 mirrorlist = sort_mirrorlist(mirrorlist, sort_order=sort_order)
145
146 region = 'Unknown region'
147 for line in mirrorlist.split(b'\n'):
148 if len(line.strip()) == 0:
149 continue
150
151 line = line.decode('UTF-8').strip('\n').strip('\r')
152 if line[:3] == '## ':
153 region = line[3:]
154 elif line[:10] == '#Server = ':
155 regions.setdefault(region, {})
156
157 url = line.lstrip('#Server = ')
158 regions[region][url] = True
159
160 return regions
161
[end of archinstall/lib/mirrors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/archinstall/lib/mirrors.py b/archinstall/lib/mirrors.py
--- a/archinstall/lib/mirrors.py
+++ b/archinstall/lib/mirrors.py
@@ -123,10 +123,17 @@
return True
-def re_rank_mirrors(top=10, *positionals, **kwargs):
- if SysCommand(f'/usr/bin/rankmirrors -n {top} /etc/pacman.d/mirrorlist > /etc/pacman.d/mirrorlist').exit_code == 0:
- return True
- return False
+def re_rank_mirrors(
+ top: int = 10,
+ src: str = '/etc/pacman.d/mirrorlist',
+ dst: str = '/etc/pacman.d/mirrorlist',
+) -> bool:
+ cmd = SysCommand(f"/usr/bin/rankmirrors -n {top} {src}")
+ if cmd.exit_code != 0:
+ return False
+ with open(dst, 'w') as f:
+ f.write(str(cmd))
+ return True
def list_mirrors(sort_order=["https", "http"]):
| {"golden_diff": "diff --git a/archinstall/lib/mirrors.py b/archinstall/lib/mirrors.py\n--- a/archinstall/lib/mirrors.py\n+++ b/archinstall/lib/mirrors.py\n@@ -123,10 +123,17 @@\n \treturn True\n \n \n-def re_rank_mirrors(top=10, *positionals, **kwargs):\n-\tif SysCommand(f'/usr/bin/rankmirrors -n {top} /etc/pacman.d/mirrorlist > /etc/pacman.d/mirrorlist').exit_code == 0:\n-\t\treturn True\n-\treturn False\n+def re_rank_mirrors(\n+\ttop: int = 10,\n+\tsrc: str = '/etc/pacman.d/mirrorlist',\n+\tdst: str = '/etc/pacman.d/mirrorlist',\n+) -> bool:\n+\tcmd = SysCommand(f\"/usr/bin/rankmirrors -n {top} {src}\")\n+\tif cmd.exit_code != 0:\n+\t\treturn False\n+\twith open(dst, 'w') as f:\n+\t\tf.write(str(cmd))\n+\treturn True\n \n \n def list_mirrors(sort_order=[\"https\", \"http\"]):\n", "issue": "re_rank_mirrors bug\n`re_rank_mirrors` from `archinstall/lib/mirrors.py` does not work. Since `SyncCommand` doesn't run command from shell, stdout redirection (`foo > bar`) doesn't work. \r\n\r\nAlso, using source file as destination file does not seem to work. File becomes empty.\n", "before_files": [{"content": "import urllib.error\nimport urllib.request\nfrom typing import Union\n\nfrom .general import *\nfrom .output import log\n\ndef sort_mirrorlist(raw_data :bytes, sort_order=[\"https\", \"http\"]) -> bytes:\n\t\"\"\"\n\tThis function can sort /etc/pacman.d/mirrorlist according to the\n\tmirror's URL prefix. By default places HTTPS before HTTP but it also\n\tpreserves the country/rank-order.\n\n\tThis assumes /etc/pacman.d/mirrorlist looks like the following:\n\n\t## Comment\n\tServer = url\n\n\tor\n\n\t## Comment\n\t#Server = url\n\n\tBut the Comments need to start with double-hashmarks to be distringuished\n\tfrom server url definitions (commented or uncommented).\n\t\"\"\"\n\tcomments_and_whitespaces = b\"\"\n\n\tcategories = {key: [] for key in sort_order+[\"Unknown\"]}\n\tfor line in raw_data.split(b\"\\n\"):\n\t\tif line[0:2] in (b'##', b''):\n\t\t\tcomments_and_whitespaces += line + b'\\n'\n\t\telif line[:6].lower() == b'server' or line[:7].lower() == b'#server':\n\t\t\topening, url = line.split(b'=', 1)\n\t\t\topening, url = opening.strip(), url.strip()\n\t\t\tif (category := url.split(b'://',1)[0].decode('UTF-8')) in categories:\n\t\t\t\tcategories[category].append(comments_and_whitespaces)\n\t\t\t\tcategories[category].append(opening+b' = '+url+b'\\n')\n\t\t\telse:\n\t\t\t\tcategories[\"Unknown\"].append(comments_and_whitespaces)\n\t\t\t\tcategories[\"Unknown\"].append(opening+b' = '+url+b'\\n')\n\n\t\t\tcomments_and_whitespaces = b\"\"\n\n\n\tnew_raw_data = b''\n\tfor category in sort_order+[\"Unknown\"]:\n\t\tfor line in categories[category]:\n\t\t\tnew_raw_data += line\n\n\treturn new_raw_data\n\n\ndef filter_mirrors_by_region(regions, destination='/etc/pacman.d/mirrorlist', sort_order=[\"https\", \"http\"], *args, **kwargs) -> Union[bool, bytes]:\n\t\"\"\"\n\tThis function will change the active mirrors on the live medium by\n\tfiltering which regions are active based on `regions`.\n\n\t:param regions: A series of country codes separated by `,`. 
For instance `SE,US` for sweden and United States.\n\t:type regions: str\n\t\"\"\"\n\tregion_list = [f'country={region}' for region in regions.split(',')]\n\tresponse = urllib.request.urlopen(urllib.request.Request(f\"https://archlinux.org/mirrorlist/?{'&'.join(region_list)}&protocol=https&protocol=http&ip_version=4&ip_version=6&use_mirror_status=on'\", headers={'User-Agent': 'ArchInstall'}))\n\tnew_list = response.read().replace(b\"#Server\", b\"Server\")\n\n\tif sort_order:\n\t\tnew_list = sort_mirrorlist(new_list, sort_order=sort_order)\n\n\tif destination:\n\t\twith open(destination, \"wb\") as mirrorlist:\n\t\t\tmirrorlist.write(new_list)\n\n\t\treturn True\n\telse:\n\t\treturn new_list.decode('UTF-8')\n\n\ndef add_custom_mirrors(mirrors: list, *args, **kwargs):\n\t\"\"\"\n\tThis will append custom mirror definitions in pacman.conf\n\n\t:param mirrors: A list of mirror data according to: `{'url': 'http://url.com', 'signcheck': 'Optional', 'signoptions': 'TrustAll', 'name': 'testmirror'}`\n\t:type mirrors: dict\n\t\"\"\"\n\twith open('/etc/pacman.conf', 'a') as pacman:\n\t\tfor mirror in mirrors:\n\t\t\tpacman.write(f\"[{mirror['name']}]\\n\")\n\t\t\tpacman.write(f\"SigLevel = {mirror['signcheck']} {mirror['signoptions']}\\n\")\n\t\t\tpacman.write(f\"Server = {mirror['url']}\\n\")\n\n\treturn True\n\n\ndef insert_mirrors(mirrors, *args, **kwargs):\n\t\"\"\"\n\tThis function will insert a given mirror-list at the top of `/etc/pacman.d/mirrorlist`.\n\tIt will not flush any other mirrors, just insert new ones.\n\n\t:param mirrors: A dictionary of `{'url' : 'country', 'url2' : 'country'}`\n\t:type mirrors: dict\n\t\"\"\"\n\toriginal_mirrorlist = ''\n\twith open('/etc/pacman.d/mirrorlist', 'r') as original:\n\t\toriginal_mirrorlist = original.read()\n\n\twith open('/etc/pacman.d/mirrorlist', 'w') as new_mirrorlist:\n\t\tfor mirror, country in mirrors.items():\n\t\t\tnew_mirrorlist.write(f'## {country}\\n')\n\t\t\tnew_mirrorlist.write(f'Server = {mirror}\\n')\n\t\tnew_mirrorlist.write('\\n')\n\t\tnew_mirrorlist.write(original_mirrorlist)\n\n\treturn True\n\n\ndef use_mirrors(regions: dict, destination='/etc/pacman.d/mirrorlist'):\n\tlog(f'A new package mirror-list has been created: {destination}', level=logging.INFO)\n\tfor region, mirrors in regions.items():\n\t\twith open(destination, 'w') as mirrorlist:\n\t\t\tfor mirror in mirrors:\n\t\t\t\tmirrorlist.write(f'## {region}\\n')\n\t\t\t\tmirrorlist.write(f'Server = {mirror}\\n')\n\treturn True\n\n\ndef re_rank_mirrors(top=10, *positionals, **kwargs):\n\tif SysCommand(f'/usr/bin/rankmirrors -n {top} /etc/pacman.d/mirrorlist > /etc/pacman.d/mirrorlist').exit_code == 0:\n\t\treturn True\n\treturn False\n\n\ndef list_mirrors(sort_order=[\"https\", \"http\"]):\n\turl = \"https://archlinux.org/mirrorlist/?protocol=https&protocol=http&ip_version=4&ip_version=6&use_mirror_status=on\"\n\tregions = {}\n\n\ttry:\n\t\tresponse = urllib.request.urlopen(url)\n\texcept urllib.error.URLError as err:\n\t\tlog(f'Could not fetch an active mirror-list: {err}', level=logging.WARNING, fg=\"yellow\")\n\t\treturn regions\n\n\tmirrorlist = response.read()\n\tif sort_order:\n\t\tmirrorlist = sort_mirrorlist(mirrorlist, sort_order=sort_order)\n\n\tregion = 'Unknown region'\n\tfor line in mirrorlist.split(b'\\n'):\n\t\tif len(line.strip()) == 0:\n\t\t\tcontinue\n\n\t\tline = line.decode('UTF-8').strip('\\n').strip('\\r')\n\t\tif line[:3] == '## ':\n\t\t\tregion = line[3:]\n\t\telif line[:10] == '#Server = ':\n\t\t\tregions.setdefault(region, {})\n\n\t\t\turl = 
line.lstrip('#Server = ')\n\t\t\tregions[region][url] = True\n\n\treturn regions\n", "path": "archinstall/lib/mirrors.py"}]} | 2,489 | 250 |
gh_patches_debug_3331 | rasdani/github-patches | git_diff | fidals__shopelectro-885 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stale import db issue
PO says the last import was at `<yml_catalog date="2019-05-23 00:38">`
Check whether the DB import can launch automatically.
The DB import is run via the catalog update command.
</issue>
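One concrete reason an automated update can fail silently is visible in `update_pack.py` below: `main` is declared as `def main(*args, kwargs)`, which makes `kwargs` a required keyword-only parameter instead of a catch-all (this is also what the patch at the end of this record changes). A short, self-contained illustration of the difference; the callers shown here are hypothetical:

```
def broken_main(*args, kwargs):   # "kwargs" is a required keyword-only parameter
    return kwargs

def fixed_main(*args, **kwargs):  # accepts any keyword arguments, including none
    return kwargs

try:
    broken_main()                 # TypeError: missing keyword-only argument 'kwargs'
except TypeError as exc:
    print(exc)

print(fixed_main())               # {} -- safe for an automated catalog update to call
```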
<code>
[start of shopelectro/management/commands/_update_catalog/update_pack.py]
1 """
2 Update Product.in_pack and prices.
3
4 The update_catalog command always resets product prices to per unit format, so:
5 1. Parse in pack quantity from Tag.name and save it to Product.in_pack
6 2. Multiply product prices by in_pack value and save.
7 """
8 import logging
9
10 from django.db import models, transaction
11
12 from shopelectro.models import TagQuerySet, TagGroup
13
14 logger = logging.getLogger(__name__)
15 PRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']
16
17
18 def update_in_packs(packs: TagQuerySet):
19 """Parse and save in pack quantity values."""
20 # @todo #859:60m Implement update_pack and render prices properly.
21
22
23 def update_prices(packs: TagQuerySet):
24 """Multiply product prices on in pack quantity."""
25 fields_to_update = {}
26 for price in PRICES:
27 fields_to_update[price] = models.F(price) * models.F('in_pack')
28
29 with transaction.atomic():
30 packs.products().update(**fields_to_update)
31
32
33 def main(*args, kwargs):
34 uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'
35 pack_group = TagGroup.objects.filter(uuid=uuid).first()
36 if not pack_group:
37 logger.error(f'Couldn\'t find "Упаковка" tag group with uuid = "{uuid}".')
38 return
39
40 return
41
42 packs = pack_group.tags.all().prefetch_related('products')
43 update_in_packs(packs)
44 update_prices(packs)
45
[end of shopelectro/management/commands/_update_catalog/update_pack.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/shopelectro/management/commands/_update_catalog/update_pack.py b/shopelectro/management/commands/_update_catalog/update_pack.py
--- a/shopelectro/management/commands/_update_catalog/update_pack.py
+++ b/shopelectro/management/commands/_update_catalog/update_pack.py
@@ -30,7 +30,7 @@
packs.products().update(**fields_to_update)
-def main(*args, kwargs):
+def main(*args, **kwargs):
uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'
pack_group = TagGroup.objects.filter(uuid=uuid).first()
if not pack_group:
| {"golden_diff": "diff --git a/shopelectro/management/commands/_update_catalog/update_pack.py b/shopelectro/management/commands/_update_catalog/update_pack.py\n--- a/shopelectro/management/commands/_update_catalog/update_pack.py\n+++ b/shopelectro/management/commands/_update_catalog/update_pack.py\n@@ -30,7 +30,7 @@\n packs.products().update(**fields_to_update)\n \n \n-def main(*args, kwargs):\n+def main(*args, **kwargs):\n uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'\n pack_group = TagGroup.objects.filter(uuid=uuid).first()\n if not pack_group:\n", "issue": "Stale import db issue\nPO says the last import was at `<yml_catalog date=\"2019-05-23 00:38\">`\r\nCheck if import db can autolaunch.\r\n\r\nImport db is called as catalog update command\r\n\n", "before_files": [{"content": "\"\"\"\nUpdate Product.in_pack and prices.\n\nThe update_catalog command always resets product prices to per unit format, so:\n1. Parse in pack quantity from Tag.name and save it to Product.in_pack\n2. Multiply product prices by in_pack value and save.\n\"\"\"\nimport logging\n\nfrom django.db import models, transaction\n\nfrom shopelectro.models import TagQuerySet, TagGroup\n\nlogger = logging.getLogger(__name__)\nPRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']\n\n\ndef update_in_packs(packs: TagQuerySet):\n \"\"\"Parse and save in pack quantity values.\"\"\"\n # @todo #859:60m Implement update_pack and render prices properly.\n\n\ndef update_prices(packs: TagQuerySet):\n \"\"\"Multiply product prices on in pack quantity.\"\"\"\n fields_to_update = {}\n for price in PRICES:\n fields_to_update[price] = models.F(price) * models.F('in_pack')\n\n with transaction.atomic():\n packs.products().update(**fields_to_update)\n\n\ndef main(*args, kwargs):\n uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'\n pack_group = TagGroup.objects.filter(uuid=uuid).first()\n if not pack_group:\n logger.error(f'Couldn\\'t find \"\u0423\u043f\u0430\u043a\u043e\u0432\u043a\u0430\" tag group with uuid = \"{uuid}\".')\n return\n\n return\n\n packs = pack_group.tags.all().prefetch_related('products')\n update_in_packs(packs)\n update_prices(packs)\n", "path": "shopelectro/management/commands/_update_catalog/update_pack.py"}]} | 1,047 | 164 |
gh_patches_debug_4833 | rasdani/github-patches | git_diff | Theano__Theano-146 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Get rid of warning when using Scipy 0.10.x
WARNING: scipy version = 0.10.0b2. We request version >=0.7.0 for the sparse code as it has bugs fixed in the sparse matrix code.
</issue>
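The warning fires because the check compares version strings lexicographically: `'0.10.0b2' >= '0.7'` is `False`, since `'1' < '7'` character by character, so 0.10.x looks "older" than 0.7. Comparing parsed version objects avoids this; the snippet below is a minimal demonstration using `pkg_resources.parse_version`, the same helper the patch at the end of this record adopts.

```
# Lexicographic string comparison misorders 0.10.x relative to 0.7:
assert not ('0.10.0b2' >= '0.7')

# Version-aware comparison gets it right:
from pkg_resources import parse_version as V
assert V('0.10.0b2') >= V('0.7')
```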
<code>
[start of theano/sparse/__init__.py]
1 import sys
2 try:
3 import scipy
4 enable_sparse = scipy.__version__ >= '0.7'
5 if not enable_sparse:
6 sys.stderr.write("WARNING: scipy version = %s."
7 " We request version >=0.7.0 for the sparse code as it has"
8 " bugs fixed in the sparse matrix code.\n" % scipy.__version__)
9 except ImportError:
10 enable_sparse = False
11 sys.stderr.write("WARNING: scipy can't be imported."
12 " We disable the sparse matrix code.")
13
14 if enable_sparse:
15 from basic import *
16 import sharedvar
17 from sharedvar import sparse_constructor as shared
18
19
[end of theano/sparse/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/theano/sparse/__init__.py b/theano/sparse/__init__.py
--- a/theano/sparse/__init__.py
+++ b/theano/sparse/__init__.py
@@ -1,7 +1,10 @@
+from pkg_resources import parse_version as V
import sys
+
try:
import scipy
- enable_sparse = scipy.__version__ >= '0.7'
+ enable_sparse = V(scipy.__version__) >= V('0.7')
+
if not enable_sparse:
sys.stderr.write("WARNING: scipy version = %s."
" We request version >=0.7.0 for the sparse code as it has"
| {"golden_diff": "diff --git a/theano/sparse/__init__.py b/theano/sparse/__init__.py\n--- a/theano/sparse/__init__.py\n+++ b/theano/sparse/__init__.py\n@@ -1,7 +1,10 @@\n+from pkg_resources import parse_version as V\n import sys\n+\n try:\n import scipy\n- enable_sparse = scipy.__version__ >= '0.7'\n+ enable_sparse = V(scipy.__version__) >= V('0.7')\n+\n if not enable_sparse:\n sys.stderr.write(\"WARNING: scipy version = %s.\"\n \" We request version >=0.7.0 for the sparse code as it has\"\n", "issue": "Get rid of warning when using Scipy 0.10.x\nWARNING: scipy version = 0.10.0b2. We request version >=0.7.0 for the sparse code as it has bugs fixed in the sparse matrix code.\n\n", "before_files": [{"content": "import sys\ntry:\n import scipy\n enable_sparse = scipy.__version__ >= '0.7'\n if not enable_sparse:\n sys.stderr.write(\"WARNING: scipy version = %s.\"\n \" We request version >=0.7.0 for the sparse code as it has\"\n \" bugs fixed in the sparse matrix code.\\n\" % scipy.__version__)\nexcept ImportError:\n enable_sparse = False\n sys.stderr.write(\"WARNING: scipy can't be imported.\"\n \" We disable the sparse matrix code.\")\n\nif enable_sparse:\n from basic import *\n import sharedvar\n from sharedvar import sparse_constructor as shared\n\n", "path": "theano/sparse/__init__.py"}]} | 757 | 147 |
gh_patches_debug_16010 | rasdani/github-patches | git_diff | boto__botocore-15 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Using botocore without config files, env vars or IAM roles
It's desirable to be able to use botocore entirely standalone - no reliance on configuration files, environment variables or IAM roles.
Currently, it's necessary to do something hacky like this:
```
session = botocore.session.get_session()
service = session.get_service('ec2')
# HACK manually set the botocore credentials object
session._credentials = botocore.credentials.Credentials(
access_key=__opts__['AWS.id'],
secret_key=__opts__['AWS.key'],
)
endpoint = service.get_endpoint(region)
```
</issue>
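A public setter on the session removes the need to poke the private `_credentials` attribute. The sketch below shows how such an API reads from the caller's side against the 2012-era botocore listed in this record; the method name `set_credentials` matches the patch at the end of this record, but the exact call pattern and placeholder keys are illustrative, not authoritative.

```
import botocore.session

session = botocore.session.get_session()
# Explicit credentials: no config file, environment variable, or IAM role involved.
session.set_credentials(access_key='AKIDEXAMPLE', secret_key='wJalr...EXAMPLEKEY')

service = session.get_service('ec2')
endpoint = service.get_endpoint('us-east-1')
```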
<code>
[start of botocore/session.py]
1 # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
2 # Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining a
5 # copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish, dis-
8 # tribute, sublicense, and/or sell copies of the Software, and to permit
9 # persons to whom the Software is furnished to do so, subject to the fol-
10 # lowing conditions:
11 #
12 # The above copyright notice and this permission notice shall be included
13 # in all copies or substantial portions of the Software.
14 #
15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
17 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
18 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
19 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 # IN THE SOFTWARE.
22 #
23 """
24 This module contains the main interface to the botocore package, the
25 Session object.
26 """
27
28 import logging
29 import platform
30 import os
31 import copy
32 import botocore.config
33 import botocore.credentials
34 import botocore.base
35 import botocore.service
36 from . import __version__
37
38
39 EnvironmentVariables = {
40 'profile': 'BOTO_DEFAULT_PROFILE',
41 'region': 'BOTO_DEFAULT_REGION',
42 'data_path': 'BOTO_DATA_PATH',
43 'config_file': 'AWS_CONFIG_FILE',
44 'access_key': 'AWS_ACCESS_KEY_ID',
45 'secret_key': 'AWS_SECRET_ACCESS_KEY'
46 }
47 """
48 A dictionary mapping logical names to specific environment variable
49 names. When creating a new Session object, you can pass in your own
50 dictionary to remap the logical names to the environment variable names
51 you want to use for your application. The logical variable names are:
52
53 * profile - Default profile name you want to use.
54 * region - Default region name to use, if not otherwise specified.
55 * data_path - Additional directories to search for data files.
56 * config_file - Location of a Boto config file.
57 * access_key - The AWS access key part of your credentials.
58 * secret_key - The AWS secret key part of your credentials.
59 """
60
61
62 class Session(object):
63 """
64 The Session object collects together useful functionality
65 from `botocore` as well as important data such as configuration
66 information and credentials into a single, easy-to-use object.
67
68 :ivar available_profiles: A list of profiles defined in the config
69 file associated with this session.
70 :ivar profile: The current profile.
71 """
72
73 FmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
74
75 def __init__(self, env_vars=None):
76 """
77 Create a new Session object.
78
79 :type env_vars: dict
80 :param env_vars: A dictionary that is used to override some or all
81 of the environment variables associated with this session. The
82 key/value pairs defined in this dictionary will override the
83 corresponding variables defined in `EnvironmentVariables`.
84 """
85 self.env_vars = copy.copy(EnvironmentVariables)
86 if env_vars:
87 self.env_vars.update(env_vars)
88 self.user_agent_name = 'Boto'
89 self.user_agent_version = __version__
90 self._profile = os.environ.get(self.env_vars['profile'], 'default')
91 self._config = None
92 self._credentials = None
93
94 @property
95 def available_profiles(self):
96 profiles = []
97 self.get_config()
98 for key in self._config.keys():
99 if not key.startswith('_'):
100 profiles.append(key)
101 return profiles
102
103 @property
104 def profile(self):
105 return self._profile
106
107 @profile.setter
108 def profile(self, profile):
109 if profile != self._profile:
110 self._credentials = None
111 self._profile = profile
112
113 def get_envvar(self, logical_name):
114 """
115 Retrieve the value associated with the specified logical_name
116 from the environment.
117
118 :type logical_name: str
119 :param logical_name: The logical name of the environment variable
120 you want to retrieve. This name will be mapped to the
121 appropriate environment variable name for this session.
122
123 * profile - Default profile name you want to use.
124 * region - Default region name to use, if not otherwise specified.
125 * data_path - Additional directories to search for data files.
126 * config_file - Location of a Boto config file.
127 * access_key - The AWS access key part of your credentials.
128 * secret_key - The AWS secret key part of your credentials.
129
130 :returns: str value of variable of None if not defined.
131 """
132 value = None
133 if logical_name in self.env_vars:
134 value = os.environ.get(self.env_vars[logical_name], None)
135 return value
136
137 def get_config(self):
138 """
139 Returns the configuration associated with this session. If
140 the configuration has not yet been loaded, it will be loaded
141 using the current `profile` attribute value. If it has already been
142 loaded, the cached configuration will be returned.
143
144 :raises: ConfigNotFound, ConfigParseError
145 """
146 if self._config is None:
147 self._config = botocore.config.get_config(self)
148 return self._config.get(self._profile, None)
149
150 def get_credentials(self, metadata=None):
151 """
152 Return the :class:`botocore.credential.Credential` object
153 associated with this session. If the credentials have not
154 yet been loaded, this will attempt to load them. If they
155 have already been loaded, this will return the cached
156 credentials.
157
158 :type metadata: dict
159 :param metadata: This parameter allows you to pass in
160 EC2 instance metadata containing IAM Role credentials.
161 This metadata will be used rather than retrieving the
162 metadata from the metadata service. This is mainly used
163 for unit testing.
164 """
165 if self._credentials is None:
166 cfg = self.get_config()
167 self._credentials = botocore.credentials.get_credentials(cfg,
168 metadata)
169 return self._credentials
170
171 def user_agent(self):
172 """
173 Return a string suitable for use as a User-Agent header.
174 The string will be of the form:
175
176 <agent_name>/<agent_version> Python/<py_ver> <plat_name>/<plat_ver>
177
178 Where:
179
180 - agent_name is the value of the `user_agent_name` attribute
181 of the session object (`Boto` by default).
182 - agent_version is the value of the `user_agent_version`
183 attribute of the session object (the botocore version by default).
184 by default.
185 - py_ver is the version of the Python interpreter beng used.
186 - plat_name is the name of the platform (e.g. Darwin)
187 - plat_ver is the version of the platform
188
189 """
190 return '%s/%s Python/%s %s/%s' % (self.user_agent_name,
191 self.user_agent_version,
192 platform.python_version(),
193 platform.system(),
194 platform.release())
195
196 def get_data(self, data_path):
197 """
198 Retrieve the data associated with `data_path`.
199
200 :type data_path: str
201 :param data_path: The path to the data you wish to retrieve.
202 """
203 return botocore.base.get_data(self, data_path)
204
205 def get_service_data(self, service_name, provider_name='aws'):
206 """
207 Retrieve the fully merged data associated with a service.
208 """
209 return botocore.base.get_service_data(self, service_name,
210 provider_name)
211
212 def get_service(self, service_name, provider_name='aws'):
213 """
214 Get information about a service.
215
216 :type service_name: str
217 :param service_name: The name of the service (e.g. 'ec2')
218
219 :type provider_name: str
220 :param provider_name: The name of the provider. Defaults
221 to 'aws'.
222
223 :returns: :class:`botocore.service.Service`
224 """
225 return botocore.service.get_service(self, service_name, provider_name)
226
227 def set_debug_logger(self):
228 """
229 Convenience function to quickly configure full debug output
230 to go to the console.
231 """
232 log = logging.getLogger('botocore')
233 log.setLevel(logging.DEBUG)
234
235 # create console handler and set level to debug
236 ch = logging.StreamHandler()
237 ch.setLevel(logging.DEBUG)
238
239 # create formatter
240 formatter = logging.Formatter(self.FmtString)
241
242 # add formatter to ch
243 ch.setFormatter(formatter)
244
245 # add ch to logger
246 log.addHandler(ch)
247
248 def set_file_logger(self, log_level, path):
249 """
250 Convenience function to quickly configure any level of logging
251 to a file.
252
253 :type log_level: int
254 :param log_level: A log level as specified in the `logging` module
255
256 :type path: string
257 :param path: Path to the log file. The file will be created
258 if it doesn't already exist.
259 """
260 log = logging.getLogger('botocore')
261 log.setLevel(log_level)
262
263 # create console handler and set level to debug
264 ch = logging.FileHandler(path)
265 ch.setLevel(log_level)
266
267 # create formatter
268 formatter = logging.Formatter(self.FmtString)
269
270 # add formatter to ch
271 ch.setFormatter(formatter)
272
273 # add ch to logger
274 log.addHandler(ch)
275
276
277 def get_session(env_vars=None):
278 """
279 Return a new session object.
280 """
281 return Session(env_vars)
282
[end of botocore/session.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/botocore/session.py b/botocore/session.py
--- a/botocore/session.py
+++ b/botocore/session.py
@@ -147,6 +147,20 @@
self._config = botocore.config.get_config(self)
return self._config.get(self._profile, None)
+ def set_credentials(self, access_key, secret_key):
+ """
+ Create the :class:`botocore.credential.Credential` object
+ associated with this session using the supplied AccessKey and SecretKey
+
+ :type access_key: str
+ :param access_key: The access key part of the credentials.
+
+ :type secret_key: str
+ :param secret_key: The secret key part of the credentials.
+ """
+ self._credentials = botocore.credentials.Credentials(access_key,
+ secret_key)
+
def get_credentials(self, metadata=None):
"""
Return the :class:`botocore.credential.Credential` object
| {"golden_diff": "diff --git a/botocore/session.py b/botocore/session.py\n--- a/botocore/session.py\n+++ b/botocore/session.py\n@@ -147,6 +147,20 @@\n self._config = botocore.config.get_config(self)\n return self._config.get(self._profile, None)\n \n+ def set_credentials(self, access_key, secret_key):\n+ \"\"\"\n+ Create the :class:`botocore.credential.Credential` object\n+ associated with this session using the supplied AccessKey and SecretKey\n+\n+ :type access_key: str\n+ :param access_key: The access key part of the credentials.\n+\n+ :type secret_key: str\n+ :param secret_key: The secret key part of the credentials.\n+ \"\"\"\n+ self._credentials = botocore.credentials.Credentials(access_key,\n+ secret_key)\n+\n def get_credentials(self, metadata=None):\n \"\"\"\n Return the :class:`botocore.credential.Credential` object\n", "issue": "Using botocore without config files, env vars or IAM roles\nIt's desirable to be able to use botocore entirely standalone - no reliance on configuration files, environment variables or IAM roles.\n\nCurrently, it's necessary to do something hacky like this:\n\n```\nsession = botocore.session.get_session()\nservice = session.get_service('ec2')\n\n# HACK manually set the botocore credentials object\nsession._credentials = botocore.credentials.Credentials(\n access_key=__opts__['AWS.id'],\n secret_key=__opts__['AWS.key'],\n )\n\nendpoint = service.get_endpoint(region)\n```\n\n", "before_files": [{"content": "# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/\n# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\n\"\"\"\nThis module contains the main interface to the botocore package, the\nSession object.\n\"\"\"\n\nimport logging\nimport platform\nimport os\nimport copy\nimport botocore.config\nimport botocore.credentials\nimport botocore.base\nimport botocore.service\nfrom . import __version__\n\n\nEnvironmentVariables = {\n 'profile': 'BOTO_DEFAULT_PROFILE',\n 'region': 'BOTO_DEFAULT_REGION',\n 'data_path': 'BOTO_DATA_PATH',\n 'config_file': 'AWS_CONFIG_FILE',\n 'access_key': 'AWS_ACCESS_KEY_ID',\n 'secret_key': 'AWS_SECRET_ACCESS_KEY'\n }\n\"\"\"\nA dictionary mapping logical names to specific environment variable\nnames. When creating a new Session object, you can pass in your own\ndictionary to remap the logical names to the environment variable names\nyou want to use for your application. 
The logical variable names are:\n\n* profile - Default profile name you want to use.\n* region - Default region name to use, if not otherwise specified.\n* data_path - Additional directories to search for data files.\n* config_file - Location of a Boto config file.\n* access_key - The AWS access key part of your credentials.\n* secret_key - The AWS secret key part of your credentials.\n\"\"\"\n\n\nclass Session(object):\n \"\"\"\n The Session object collects together useful functionality\n from `botocore` as well as important data such as configuration\n information and credentials into a single, easy-to-use object.\n\n :ivar available_profiles: A list of profiles defined in the config\n file associated with this session.\n :ivar profile: The current profile.\n \"\"\"\n\n FmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n\n def __init__(self, env_vars=None):\n \"\"\"\n Create a new Session object.\n\n :type env_vars: dict\n :param env_vars: A dictionary that is used to override some or all\n of the environment variables associated with this session. The\n key/value pairs defined in this dictionary will override the\n corresponding variables defined in `EnvironmentVariables`.\n \"\"\"\n self.env_vars = copy.copy(EnvironmentVariables)\n if env_vars:\n self.env_vars.update(env_vars)\n self.user_agent_name = 'Boto'\n self.user_agent_version = __version__\n self._profile = os.environ.get(self.env_vars['profile'], 'default')\n self._config = None\n self._credentials = None\n\n @property\n def available_profiles(self):\n profiles = []\n self.get_config()\n for key in self._config.keys():\n if not key.startswith('_'):\n profiles.append(key)\n return profiles\n\n @property\n def profile(self):\n return self._profile\n\n @profile.setter\n def profile(self, profile):\n if profile != self._profile:\n self._credentials = None\n self._profile = profile\n\n def get_envvar(self, logical_name):\n \"\"\"\n Retrieve the value associated with the specified logical_name\n from the environment.\n\n :type logical_name: str\n :param logical_name: The logical name of the environment variable\n you want to retrieve. This name will be mapped to the\n appropriate environment variable name for this session.\n\n * profile - Default profile name you want to use.\n * region - Default region name to use, if not otherwise specified.\n * data_path - Additional directories to search for data files.\n * config_file - Location of a Boto config file.\n * access_key - The AWS access key part of your credentials.\n * secret_key - The AWS secret key part of your credentials.\n\n :returns: str value of variable of None if not defined.\n \"\"\"\n value = None\n if logical_name in self.env_vars:\n value = os.environ.get(self.env_vars[logical_name], None)\n return value\n\n def get_config(self):\n \"\"\"\n Returns the configuration associated with this session. If\n the configuration has not yet been loaded, it will be loaded\n using the current `profile` attribute value. If it has already been\n loaded, the cached configuration will be returned.\n\n :raises: ConfigNotFound, ConfigParseError\n \"\"\"\n if self._config is None:\n self._config = botocore.config.get_config(self)\n return self._config.get(self._profile, None)\n\n def get_credentials(self, metadata=None):\n \"\"\"\n Return the :class:`botocore.credential.Credential` object\n associated with this session. If the credentials have not\n yet been loaded, this will attempt to load them. 
If they\n have already been loaded, this will return the cached\n credentials.\n\n :type metadata: dict\n :param metadata: This parameter allows you to pass in\n EC2 instance metadata containing IAM Role credentials.\n This metadata will be used rather than retrieving the\n metadata from the metadata service. This is mainly used\n for unit testing.\n \"\"\"\n if self._credentials is None:\n cfg = self.get_config()\n self._credentials = botocore.credentials.get_credentials(cfg,\n metadata)\n return self._credentials\n\n def user_agent(self):\n \"\"\"\n Return a string suitable for use as a User-Agent header.\n The string will be of the form:\n\n <agent_name>/<agent_version> Python/<py_ver> <plat_name>/<plat_ver>\n\n Where:\n\n - agent_name is the value of the `user_agent_name` attribute\n of the session object (`Boto` by default).\n - agent_version is the value of the `user_agent_version`\n attribute of the session object (the botocore version by default).\n by default.\n - py_ver is the version of the Python interpreter beng used.\n - plat_name is the name of the platform (e.g. Darwin)\n - plat_ver is the version of the platform\n\n \"\"\"\n return '%s/%s Python/%s %s/%s' % (self.user_agent_name,\n self.user_agent_version,\n platform.python_version(),\n platform.system(),\n platform.release())\n\n def get_data(self, data_path):\n \"\"\"\n Retrieve the data associated with `data_path`.\n\n :type data_path: str\n :param data_path: The path to the data you wish to retrieve.\n \"\"\"\n return botocore.base.get_data(self, data_path)\n\n def get_service_data(self, service_name, provider_name='aws'):\n \"\"\"\n Retrieve the fully merged data associated with a service.\n \"\"\"\n return botocore.base.get_service_data(self, service_name,\n provider_name)\n\n def get_service(self, service_name, provider_name='aws'):\n \"\"\"\n Get information about a service.\n\n :type service_name: str\n :param service_name: The name of the service (e.g. 'ec2')\n\n :type provider_name: str\n :param provider_name: The name of the provider. Defaults\n to 'aws'.\n\n :returns: :class:`botocore.service.Service`\n \"\"\"\n return botocore.service.get_service(self, service_name, provider_name)\n\n def set_debug_logger(self):\n \"\"\"\n Convenience function to quickly configure full debug output\n to go to the console.\n \"\"\"\n log = logging.getLogger('botocore')\n log.setLevel(logging.DEBUG)\n\n # create console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # create formatter\n formatter = logging.Formatter(self.FmtString)\n\n # add formatter to ch\n ch.setFormatter(formatter)\n\n # add ch to logger\n log.addHandler(ch)\n\n def set_file_logger(self, log_level, path):\n \"\"\"\n Convenience function to quickly configure any level of logging\n to a file.\n\n :type log_level: int\n :param log_level: A log level as specified in the `logging` module\n\n :type path: string\n :param path: Path to the log file. The file will be created\n if it doesn't already exist.\n \"\"\"\n log = logging.getLogger('botocore')\n log.setLevel(log_level)\n\n # create console handler and set level to debug\n ch = logging.FileHandler(path)\n ch.setLevel(log_level)\n\n # create formatter\n formatter = logging.Formatter(self.FmtString)\n\n # add formatter to ch\n ch.setFormatter(formatter)\n\n # add ch to logger\n log.addHandler(ch)\n\n\ndef get_session(env_vars=None):\n \"\"\"\n Return a new session object.\n \"\"\"\n return Session(env_vars)\n", "path": "botocore/session.py"}]} | 3,523 | 226 |
gh_patches_debug_1163 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-2712 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Document that RTD uses `rel` branch for production
Hi, I'd like to add a new builder for Doxygen documentation (but native, not with Breathe). Since there are a lot of branches like real/relcorp which are far ahead of master, I'd like to know which branch to choose for development.
Thanks in advance!
Oli
</issue>
<code>
[start of docs/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 import os
4 import sys
5
6 from recommonmark.parser import CommonMarkParser
7
8 sys.path.insert(0, os.path.abspath('..'))
9 sys.path.append(os.path.dirname(__file__))
10 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "readthedocs.settings.dev")
11
12 from django.conf import settings
13
14 import django
15 django.setup()
16
17
18 sys.path.append(os.path.abspath('_ext'))
19 extensions = [
20 'sphinx.ext.autodoc',
21 'sphinx.ext.intersphinx',
22 'sphinxcontrib.httpdomain',
23 'djangodocs',
24 'doc_extensions',
25 ]
26 templates_path = ['_templates']
27
28 source_suffix = ['.rst', '.md']
29 source_parsers = {
30 '.md': CommonMarkParser,
31 }
32
33 master_doc = 'index'
34 project = u'Read The Docs'
35 copyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace'
36 version = '1.0'
37 release = '1.0'
38 exclude_patterns = ['_build']
39 default_role = 'obj'
40 pygments_style = 'sphinx'
41 intersphinx_mapping = {
42 'python': ('http://python.readthedocs.io/en/latest/', None),
43 'django': ('http://django.readthedocs.io/en/1.8.x/', None),
44 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None),
45 }
46 # This doesn't exist since we aren't shipping any static files ourselves.
47 #html_static_path = ['_static']
48 htmlhelp_basename = 'ReadTheDocsdoc'
49 latex_documents = [
50 ('index', 'ReadTheDocs.tex', u'Read The Docs Documentation',
51 u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),
52 ]
53 man_pages = [
54 ('index', 'read-the-docs', u'Read The Docs Documentation',
55 [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)
56 ]
57
58 exclude_patterns = [
59 # 'api' # needed for ``make gettext`` to not die.
60 ]
61
62 language = 'en'
63
64 locale_dirs = [
65 'locale/',
66 ]
67 gettext_compact = False
68
69
70 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
71 if not on_rtd: # only import and set the theme if we're building docs locally
72 import sphinx_rtd_theme
73 html_theme = 'sphinx_rtd_theme'
74 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
75
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -32,7 +32,7 @@
master_doc = 'index'
project = u'Read The Docs'
-copyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace'
+copyright = u'2010-2017, Read the Docs, Inc & contributors'
version = '1.0'
release = '1.0'
exclude_patterns = ['_build']
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -32,7 +32,7 @@\n \n master_doc = 'index'\n project = u'Read The Docs'\n-copyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace'\n+copyright = u'2010-2017, Read the Docs, Inc & contributors'\n version = '1.0'\n release = '1.0'\n exclude_patterns = ['_build']\n", "issue": "Document that RTD uses `rel` branch for production\nHi, i'd like to add a new builder for doxygen documentation (but native, not with breath). Since there are a lot of branches like real/relcorp which a far ahead of master, i'd like to know, which branch to choose for development.\r\n\r\nThanks in advance!\r\nOli\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\nimport os\nimport sys\n\nfrom recommonmark.parser import CommonMarkParser\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"readthedocs.settings.dev\")\n\nfrom django.conf import settings\n\nimport django\ndjango.setup()\n\n\nsys.path.append(os.path.abspath('_ext'))\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.httpdomain',\n 'djangodocs',\n 'doc_extensions',\n]\ntemplates_path = ['_templates']\n\nsource_suffix = ['.rst', '.md']\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nmaster_doc = 'index'\nproject = u'Read The Docs'\ncopyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace'\nversion = '1.0'\nrelease = '1.0'\nexclude_patterns = ['_build']\ndefault_role = 'obj'\npygments_style = 'sphinx'\nintersphinx_mapping = {\n 'python': ('http://python.readthedocs.io/en/latest/', None),\n 'django': ('http://django.readthedocs.io/en/1.8.x/', None),\n 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None),\n}\n# This doesn't exist since we aren't shipping any static files ourselves.\n#html_static_path = ['_static']\nhtmlhelp_basename = 'ReadTheDocsdoc'\nlatex_documents = [\n ('index', 'ReadTheDocs.tex', u'Read The Docs Documentation',\n u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),\n]\nman_pages = [\n ('index', 'read-the-docs', u'Read The Docs Documentation',\n [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)\n]\n\nexclude_patterns = [\n # 'api' # needed for ``make gettext`` to not die.\n]\n\nlanguage = 'en'\n\nlocale_dirs = [\n 'locale/',\n]\ngettext_compact = False\n\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n", "path": "docs/conf.py"}]} | 1,279 | 117 |
gh_patches_debug_8285 | rasdani/github-patches | git_diff | apache__airflow-35167 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dry_run not optional in api for set task instances state
### Apache Airflow version
2.7.2
### What happened
Sent a request without the dry_run parameter to dags/{dag_id}/updateTaskInstancesState and got a 500 error.
### What you think should happen instead
I should be able to send a request to update task instances state and get a valid response.
### How to reproduce
You can see this by commenting out line 215 in tests/api_connexion/schemas/test_task_instance_schema.py and running the tests. This is an error similar to #34563.
### Operating System
Ubuntu 20.04
### Versions of Apache Airflow Providers
_No response_
### Deployment
Docker-Compose
### Deployment details
_No response_
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
</issue>
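For context on the failure, marshmallow's `load_default` supplies a value when a field is missing from the incoming payload, while `dump_default` only applies when serializing, so a schema that declares only `dump_default` gets no value at all from `load({})`. A minimal, self-contained sketch of the difference (the schema and field names here are illustrative, not Airflow's own code):
```python
from marshmallow import Schema, fields


class WithLoadDefault(Schema):
    dry_run = fields.Boolean(load_default=True)


class WithDumpDefault(Schema):
    dry_run = fields.Boolean(dump_default=True)


# load_default fills in the value when the key is absent from the request body.
print(WithLoadDefault().load({}))  # {'dry_run': True}

# dump_default only applies on dump(); load({}) leaves the key out entirely,
# so downstream code that indexes data['dry_run'] can raise.
print(WithDumpDefault().load({}))  # {}
```
This is consistent with the direction of the golden diff later in this record, which switches `SetTaskInstanceStateFormSchema.dry_run` from `dump_default=True` to `load_default=True`.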
<code>
[start of airflow/api_connexion/schemas/task_instance_schema.py]
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 from __future__ import annotations
18
19 from typing import TYPE_CHECKING, NamedTuple
20
21 from marshmallow import Schema, ValidationError, fields, validate, validates_schema
22 from marshmallow.utils import get_value
23 from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
24
25 from airflow.api_connexion.parameters import validate_istimezone
26 from airflow.api_connexion.schemas.common_schema import JsonObjectField
27 from airflow.api_connexion.schemas.enum_schemas import TaskInstanceStateField
28 from airflow.api_connexion.schemas.job_schema import JobSchema
29 from airflow.api_connexion.schemas.sla_miss_schema import SlaMissSchema
30 from airflow.api_connexion.schemas.trigger_schema import TriggerSchema
31 from airflow.models import TaskInstance
32 from airflow.utils.helpers import exactly_one
33 from airflow.utils.state import TaskInstanceState
34
35 if TYPE_CHECKING:
36 from airflow.models import SlaMiss
37
38
39 class TaskInstanceSchema(SQLAlchemySchema):
40 """Task instance schema."""
41
42 class Meta:
43 """Meta."""
44
45 model = TaskInstance
46
47 task_id = auto_field()
48 dag_id = auto_field()
49 run_id = auto_field(data_key="dag_run_id")
50 map_index = auto_field()
51 execution_date = auto_field()
52 start_date = auto_field()
53 end_date = auto_field()
54 duration = auto_field()
55 state = TaskInstanceStateField()
56 _try_number = auto_field(data_key="try_number")
57 max_tries = auto_field()
58 hostname = auto_field()
59 unixname = auto_field()
60 pool = auto_field()
61 pool_slots = auto_field()
62 queue = auto_field()
63 priority_weight = auto_field()
64 operator = auto_field()
65 queued_dttm = auto_field(data_key="queued_when")
66 pid = auto_field()
67 executor_config = auto_field()
68 note = auto_field()
69 sla_miss = fields.Nested(SlaMissSchema, dump_default=None)
70 rendered_fields = JsonObjectField(dump_default={})
71 trigger = fields.Nested(TriggerSchema)
72 triggerer_job = fields.Nested(JobSchema)
73
74 def get_attribute(self, obj, attr, default):
75 if attr == "sla_miss":
76 # Object is a tuple of task_instance and slamiss
77 # and the get_value expects a dict with key, value
78 # corresponding to the attr.
79 slamiss_instance = {"sla_miss": obj[1]}
80 return get_value(slamiss_instance, attr, default)
81 elif attr == "rendered_fields":
82 return get_value(obj[0], "rendered_task_instance_fields.rendered_fields", default)
83 return get_value(obj[0], attr, default)
84
85
86 class TaskInstanceCollection(NamedTuple):
87 """List of task instances with metadata."""
88
89 task_instances: list[tuple[TaskInstance, SlaMiss | None]]
90 total_entries: int
91
92
93 class TaskInstanceCollectionSchema(Schema):
94 """Task instance collection schema."""
95
96 task_instances = fields.List(fields.Nested(TaskInstanceSchema))
97 total_entries = fields.Int()
98
99
100 class TaskInstanceBatchFormSchema(Schema):
101 """Schema for the request form passed to Task Instance Batch endpoint."""
102
103 page_offset = fields.Int(load_default=0, validate=validate.Range(min=0))
104 page_limit = fields.Int(load_default=100, validate=validate.Range(min=1))
105 dag_ids = fields.List(fields.Str(), load_default=None)
106 dag_run_ids = fields.List(fields.Str(), load_default=None)
107 task_ids = fields.List(fields.Str(), load_default=None)
108 execution_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone)
109 execution_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)
110 start_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone)
111 start_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)
112 end_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone)
113 end_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)
114 duration_gte = fields.Int(load_default=None)
115 duration_lte = fields.Int(load_default=None)
116 state = fields.List(fields.Str(allow_none=True), load_default=None)
117 pool = fields.List(fields.Str(), load_default=None)
118 queue = fields.List(fields.Str(), load_default=None)
119
120
121 class ClearTaskInstanceFormSchema(Schema):
122 """Schema for handling the request of clearing task instance of a Dag."""
123
124 dry_run = fields.Boolean(load_default=True)
125 start_date = fields.DateTime(load_default=None, validate=validate_istimezone)
126 end_date = fields.DateTime(load_default=None, validate=validate_istimezone)
127 only_failed = fields.Boolean(load_default=True)
128 only_running = fields.Boolean(load_default=False)
129 include_subdags = fields.Boolean(load_default=False)
130 include_parentdag = fields.Boolean(load_default=False)
131 reset_dag_runs = fields.Boolean(load_default=False)
132 task_ids = fields.List(fields.String(), validate=validate.Length(min=1))
133 dag_run_id = fields.Str(load_default=None)
134 include_upstream = fields.Boolean(load_default=False)
135 include_downstream = fields.Boolean(load_default=False)
136 include_future = fields.Boolean(load_default=False)
137 include_past = fields.Boolean(load_default=False)
138
139 @validates_schema
140 def validate_form(self, data, **kwargs):
141 """Validate clear task instance form."""
142 if data["only_failed"] and data["only_running"]:
143 raise ValidationError("only_failed and only_running both are set to True")
144 if data["start_date"] and data["end_date"]:
145 if data["start_date"] > data["end_date"]:
146 raise ValidationError("end_date is sooner than start_date")
147 if data["start_date"] and data["end_date"] and data["dag_run_id"]:
148 raise ValidationError("Exactly one of dag_run_id or (start_date and end_date) must be provided")
149 if data["start_date"] and data["dag_run_id"]:
150 raise ValidationError("Exactly one of dag_run_id or start_date must be provided")
151 if data["end_date"] and data["dag_run_id"]:
152 raise ValidationError("Exactly one of dag_run_id or end_date must be provided")
153
154
155 class SetTaskInstanceStateFormSchema(Schema):
156 """Schema for handling the request of setting state of task instance of a DAG."""
157
158 dry_run = fields.Boolean(dump_default=True)
159 task_id = fields.Str(required=True)
160 execution_date = fields.DateTime(validate=validate_istimezone)
161 dag_run_id = fields.Str()
162 include_upstream = fields.Boolean(required=True)
163 include_downstream = fields.Boolean(required=True)
164 include_future = fields.Boolean(required=True)
165 include_past = fields.Boolean(required=True)
166 new_state = TaskInstanceStateField(
167 required=True,
168 validate=validate.OneOf(
169 [TaskInstanceState.SUCCESS, TaskInstanceState.FAILED, TaskInstanceState.SKIPPED]
170 ),
171 )
172
173 @validates_schema
174 def validate_form(self, data, **kwargs):
175 """Validate set task instance state form."""
176 if not exactly_one(data.get("execution_date"), data.get("dag_run_id")):
177 raise ValidationError("Exactly one of execution_date or dag_run_id must be provided")
178
179
180 class SetSingleTaskInstanceStateFormSchema(Schema):
181 """Schema for handling the request of updating state of a single task instance."""
182
183 dry_run = fields.Boolean(load_default=True)
184 new_state = TaskInstanceStateField(
185 required=True,
186 validate=validate.OneOf(
187 [TaskInstanceState.SUCCESS, TaskInstanceState.FAILED, TaskInstanceState.SKIPPED]
188 ),
189 )
190
191
192 class TaskInstanceReferenceSchema(Schema):
193 """Schema for the task instance reference schema."""
194
195 task_id = fields.Str()
196 run_id = fields.Str(data_key="dag_run_id")
197 dag_id = fields.Str()
198 execution_date = fields.DateTime()
199
200
201 class TaskInstanceReferenceCollection(NamedTuple):
202 """List of objects with metadata about taskinstance and dag_run_id."""
203
204 task_instances: list[tuple[TaskInstance, str]]
205
206
207 class TaskInstanceReferenceCollectionSchema(Schema):
208 """Collection schema for task reference."""
209
210 task_instances = fields.List(fields.Nested(TaskInstanceReferenceSchema))
211
212
213 class SetTaskInstanceNoteFormSchema(Schema):
214 """Schema for settings a note for a TaskInstance."""
215
216 # Note: We can't add map_index to the url as subpaths can't start with dashes.
217 map_index = fields.Int(allow_none=False)
218 note = fields.String(allow_none=True, validate=validate.Length(max=1000))
219
220
221 task_instance_schema = TaskInstanceSchema()
222 task_instance_collection_schema = TaskInstanceCollectionSchema()
223 task_instance_batch_form = TaskInstanceBatchFormSchema()
224 clear_task_instance_form = ClearTaskInstanceFormSchema()
225 set_task_instance_state_form = SetTaskInstanceStateFormSchema()
226 set_single_task_instance_state_form = SetSingleTaskInstanceStateFormSchema()
227 task_instance_reference_schema = TaskInstanceReferenceSchema()
228 task_instance_reference_collection_schema = TaskInstanceReferenceCollectionSchema()
229 set_task_instance_note_form_schema = SetTaskInstanceNoteFormSchema()
230
[end of airflow/api_connexion/schemas/task_instance_schema.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/api_connexion/schemas/task_instance_schema.py b/airflow/api_connexion/schemas/task_instance_schema.py
--- a/airflow/api_connexion/schemas/task_instance_schema.py
+++ b/airflow/api_connexion/schemas/task_instance_schema.py
@@ -155,7 +155,7 @@
class SetTaskInstanceStateFormSchema(Schema):
"""Schema for handling the request of setting state of task instance of a DAG."""
- dry_run = fields.Boolean(dump_default=True)
+ dry_run = fields.Boolean(load_default=True)
task_id = fields.Str(required=True)
execution_date = fields.DateTime(validate=validate_istimezone)
dag_run_id = fields.Str()
| {"golden_diff": "diff --git a/airflow/api_connexion/schemas/task_instance_schema.py b/airflow/api_connexion/schemas/task_instance_schema.py\n--- a/airflow/api_connexion/schemas/task_instance_schema.py\n+++ b/airflow/api_connexion/schemas/task_instance_schema.py\n@@ -155,7 +155,7 @@\n class SetTaskInstanceStateFormSchema(Schema):\n \"\"\"Schema for handling the request of setting state of task instance of a DAG.\"\"\"\n \n- dry_run = fields.Boolean(dump_default=True)\n+ dry_run = fields.Boolean(load_default=True)\n task_id = fields.Str(required=True)\n execution_date = fields.DateTime(validate=validate_istimezone)\n dag_run_id = fields.Str()\n", "issue": "dry_run not optional in api for set task instances state\n### Apache Airflow version\n\n2.7.2\n\n### What happened\n\nSent request without dry_run parameter to dags/{dag_id}/updateTaskInstancesState and got a 500 error.\n\n### What you think should happen instead\n\nI should be able to send a request to update task instances state and get a valid response.\n\n### How to reproduce\n\nYou can see this by commenting out line 215 in tests/api_connexion/schemas/test_task_instance_schema.py and running tests. This is a similar error to #34563.\n\n### Operating System\n\nUbuntu 20.04\n\n### Versions of Apache Airflow Providers\n\n_No response_\n\n### Deployment\n\nDocker-Compose\n\n### Deployment details\n\n_No response_\n\n### Anything else\n\n_No response_\n\n### Are you willing to submit PR?\n\n- [X] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, NamedTuple\n\nfrom marshmallow import Schema, ValidationError, fields, validate, validates_schema\nfrom marshmallow.utils import get_value\nfrom marshmallow_sqlalchemy import SQLAlchemySchema, auto_field\n\nfrom airflow.api_connexion.parameters import validate_istimezone\nfrom airflow.api_connexion.schemas.common_schema import JsonObjectField\nfrom airflow.api_connexion.schemas.enum_schemas import TaskInstanceStateField\nfrom airflow.api_connexion.schemas.job_schema import JobSchema\nfrom airflow.api_connexion.schemas.sla_miss_schema import SlaMissSchema\nfrom airflow.api_connexion.schemas.trigger_schema import TriggerSchema\nfrom airflow.models import TaskInstance\nfrom airflow.utils.helpers import exactly_one\nfrom airflow.utils.state import TaskInstanceState\n\nif TYPE_CHECKING:\n from airflow.models import SlaMiss\n\n\nclass TaskInstanceSchema(SQLAlchemySchema):\n \"\"\"Task instance schema.\"\"\"\n\n class Meta:\n \"\"\"Meta.\"\"\"\n\n model = TaskInstance\n\n task_id = auto_field()\n dag_id = auto_field()\n run_id = auto_field(data_key=\"dag_run_id\")\n map_index = auto_field()\n execution_date = auto_field()\n start_date = auto_field()\n end_date = auto_field()\n duration = auto_field()\n state = TaskInstanceStateField()\n _try_number = auto_field(data_key=\"try_number\")\n max_tries = auto_field()\n hostname = auto_field()\n unixname = auto_field()\n pool = auto_field()\n pool_slots = auto_field()\n queue = auto_field()\n priority_weight = auto_field()\n operator = auto_field()\n queued_dttm = auto_field(data_key=\"queued_when\")\n pid = auto_field()\n executor_config = auto_field()\n note = auto_field()\n sla_miss = fields.Nested(SlaMissSchema, dump_default=None)\n rendered_fields = JsonObjectField(dump_default={})\n trigger = fields.Nested(TriggerSchema)\n triggerer_job = fields.Nested(JobSchema)\n\n def get_attribute(self, obj, attr, default):\n if attr == \"sla_miss\":\n # Object is a tuple of task_instance and slamiss\n # and the get_value expects a dict with key, value\n # corresponding to the attr.\n slamiss_instance = {\"sla_miss\": obj[1]}\n return get_value(slamiss_instance, attr, default)\n elif attr == \"rendered_fields\":\n return get_value(obj[0], \"rendered_task_instance_fields.rendered_fields\", default)\n return get_value(obj[0], attr, default)\n\n\nclass TaskInstanceCollection(NamedTuple):\n \"\"\"List of task instances with metadata.\"\"\"\n\n task_instances: list[tuple[TaskInstance, SlaMiss | None]]\n total_entries: int\n\n\nclass TaskInstanceCollectionSchema(Schema):\n \"\"\"Task instance collection schema.\"\"\"\n\n task_instances = fields.List(fields.Nested(TaskInstanceSchema))\n total_entries = fields.Int()\n\n\nclass TaskInstanceBatchFormSchema(Schema):\n \"\"\"Schema for the request form passed to Task Instance Batch endpoint.\"\"\"\n\n page_offset = fields.Int(load_default=0, validate=validate.Range(min=0))\n page_limit = fields.Int(load_default=100, validate=validate.Range(min=1))\n dag_ids = fields.List(fields.Str(), load_default=None)\n dag_run_ids = fields.List(fields.Str(), load_default=None)\n task_ids = fields.List(fields.Str(), load_default=None)\n execution_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone)\n execution_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)\n start_date_gte = fields.DateTime(load_default=None, 
validate=validate_istimezone)\n start_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)\n end_date_gte = fields.DateTime(load_default=None, validate=validate_istimezone)\n end_date_lte = fields.DateTime(load_default=None, validate=validate_istimezone)\n duration_gte = fields.Int(load_default=None)\n duration_lte = fields.Int(load_default=None)\n state = fields.List(fields.Str(allow_none=True), load_default=None)\n pool = fields.List(fields.Str(), load_default=None)\n queue = fields.List(fields.Str(), load_default=None)\n\n\nclass ClearTaskInstanceFormSchema(Schema):\n \"\"\"Schema for handling the request of clearing task instance of a Dag.\"\"\"\n\n dry_run = fields.Boolean(load_default=True)\n start_date = fields.DateTime(load_default=None, validate=validate_istimezone)\n end_date = fields.DateTime(load_default=None, validate=validate_istimezone)\n only_failed = fields.Boolean(load_default=True)\n only_running = fields.Boolean(load_default=False)\n include_subdags = fields.Boolean(load_default=False)\n include_parentdag = fields.Boolean(load_default=False)\n reset_dag_runs = fields.Boolean(load_default=False)\n task_ids = fields.List(fields.String(), validate=validate.Length(min=1))\n dag_run_id = fields.Str(load_default=None)\n include_upstream = fields.Boolean(load_default=False)\n include_downstream = fields.Boolean(load_default=False)\n include_future = fields.Boolean(load_default=False)\n include_past = fields.Boolean(load_default=False)\n\n @validates_schema\n def validate_form(self, data, **kwargs):\n \"\"\"Validate clear task instance form.\"\"\"\n if data[\"only_failed\"] and data[\"only_running\"]:\n raise ValidationError(\"only_failed and only_running both are set to True\")\n if data[\"start_date\"] and data[\"end_date\"]:\n if data[\"start_date\"] > data[\"end_date\"]:\n raise ValidationError(\"end_date is sooner than start_date\")\n if data[\"start_date\"] and data[\"end_date\"] and data[\"dag_run_id\"]:\n raise ValidationError(\"Exactly one of dag_run_id or (start_date and end_date) must be provided\")\n if data[\"start_date\"] and data[\"dag_run_id\"]:\n raise ValidationError(\"Exactly one of dag_run_id or start_date must be provided\")\n if data[\"end_date\"] and data[\"dag_run_id\"]:\n raise ValidationError(\"Exactly one of dag_run_id or end_date must be provided\")\n\n\nclass SetTaskInstanceStateFormSchema(Schema):\n \"\"\"Schema for handling the request of setting state of task instance of a DAG.\"\"\"\n\n dry_run = fields.Boolean(dump_default=True)\n task_id = fields.Str(required=True)\n execution_date = fields.DateTime(validate=validate_istimezone)\n dag_run_id = fields.Str()\n include_upstream = fields.Boolean(required=True)\n include_downstream = fields.Boolean(required=True)\n include_future = fields.Boolean(required=True)\n include_past = fields.Boolean(required=True)\n new_state = TaskInstanceStateField(\n required=True,\n validate=validate.OneOf(\n [TaskInstanceState.SUCCESS, TaskInstanceState.FAILED, TaskInstanceState.SKIPPED]\n ),\n )\n\n @validates_schema\n def validate_form(self, data, **kwargs):\n \"\"\"Validate set task instance state form.\"\"\"\n if not exactly_one(data.get(\"execution_date\"), data.get(\"dag_run_id\")):\n raise ValidationError(\"Exactly one of execution_date or dag_run_id must be provided\")\n\n\nclass SetSingleTaskInstanceStateFormSchema(Schema):\n \"\"\"Schema for handling the request of updating state of a single task instance.\"\"\"\n\n dry_run = fields.Boolean(load_default=True)\n new_state = 
TaskInstanceStateField(\n required=True,\n validate=validate.OneOf(\n [TaskInstanceState.SUCCESS, TaskInstanceState.FAILED, TaskInstanceState.SKIPPED]\n ),\n )\n\n\nclass TaskInstanceReferenceSchema(Schema):\n \"\"\"Schema for the task instance reference schema.\"\"\"\n\n task_id = fields.Str()\n run_id = fields.Str(data_key=\"dag_run_id\")\n dag_id = fields.Str()\n execution_date = fields.DateTime()\n\n\nclass TaskInstanceReferenceCollection(NamedTuple):\n \"\"\"List of objects with metadata about taskinstance and dag_run_id.\"\"\"\n\n task_instances: list[tuple[TaskInstance, str]]\n\n\nclass TaskInstanceReferenceCollectionSchema(Schema):\n \"\"\"Collection schema for task reference.\"\"\"\n\n task_instances = fields.List(fields.Nested(TaskInstanceReferenceSchema))\n\n\nclass SetTaskInstanceNoteFormSchema(Schema):\n \"\"\"Schema for settings a note for a TaskInstance.\"\"\"\n\n # Note: We can't add map_index to the url as subpaths can't start with dashes.\n map_index = fields.Int(allow_none=False)\n note = fields.String(allow_none=True, validate=validate.Length(max=1000))\n\n\ntask_instance_schema = TaskInstanceSchema()\ntask_instance_collection_schema = TaskInstanceCollectionSchema()\ntask_instance_batch_form = TaskInstanceBatchFormSchema()\nclear_task_instance_form = ClearTaskInstanceFormSchema()\nset_task_instance_state_form = SetTaskInstanceStateFormSchema()\nset_single_task_instance_state_form = SetSingleTaskInstanceStateFormSchema()\ntask_instance_reference_schema = TaskInstanceReferenceSchema()\ntask_instance_reference_collection_schema = TaskInstanceReferenceCollectionSchema()\nset_task_instance_note_form_schema = SetTaskInstanceNoteFormSchema()\n", "path": "airflow/api_connexion/schemas/task_instance_schema.py"}]} | 3,447 | 155 |
gh_patches_debug_6901 | rasdani/github-patches | git_diff | pymedusa__Medusa-2851 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[APP SUBMITTED]: ValueError: hour must be in 0..23
### INFO
**Python Version**: `2.7.9 (default, Sep 17 2016, 20:26:04) [GCC 4.9.2]`
**Operating System**: `Linux-4.4.38-v7+-armv7l-with-debian-8.0`
**Locale**: `UTF-8`
**Branch**: [develop](../tree/develop)
**Database**: `44.8`
**Commit**: pymedusa/Medusa@94e0649919e9e05ac380f8e6d3817c18255931ee
**Link to Log**: https://gist.github.com/31ec35e671692a5cc7ba13b4d2d07353
### ERROR
<pre>
2017-05-30 15:24:07 ERROR SEARCHQUEUE-BACKLOG-75340 :: [TorrentProject] :: [94e0649] Failed parsing publishing date: 24 hours ago
Traceback (most recent call last):
File "/home/pi/Medusa/<a href="../blob/94e0649919e9e05ac380f8e6d3817c18255931ee/medusa/providers/generic_provider.py#L512">medusa/providers/generic_provider.py</a>", line 512, in parse_pubdate
dt = parser.parse(pubdate, fuzzy=True)
File "/home/pi/Medusa/<a href="../blob/94e0649919e9e05ac380f8e6d3817c18255931ee/lib/dateutil/parser.py#L1181">lib/dateutil/parser.py</a>", line 1181, in parse
return DEFAULTPARSER.parse(timestr, **kwargs)
File "/home/pi/Medusa/<a href="../blob/94e0649919e9e05ac380f8e6d3817c18255931ee/lib/dateutil/parser.py#L584">lib/dateutil/parser.py</a>", line 584, in parse
ret = default.replace(**repl)
ValueError: hour must be in 0..23
</pre>
---
_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators
</issue>
<code>
[start of medusa/providers/torrent/html/torrentproject.py]
1 # coding=utf-8
2
3 """Provider code for TorrentProject."""
4
5 from __future__ import unicode_literals
6
7 import logging
8 import traceback
9
10 from medusa import tv
11 from medusa.bs4_parser import BS4Parser
12 from medusa.common import USER_AGENT
13 from medusa.helper.common import (
14 convert_size,
15 try_int,
16 )
17 from medusa.logger.adapters.style import BraceAdapter
18 from medusa.providers.torrent.torrent_provider import TorrentProvider
19
20 import validators
21
22 log = BraceAdapter(logging.getLogger(__name__))
23 log.logger.addHandler(logging.NullHandler())
24
25
26 class TorrentProjectProvider(TorrentProvider):
27 """TorrentProject Torrent provider."""
28
29 def __init__(self):
30 """Initialize the class."""
31 super(TorrentProjectProvider, self).__init__('TorrentProject')
32
33 # Credentials
34 self.public = True
35
36 # URLs
37 self.url = 'https://torrentproject.se'
38 self.custom_url = None
39
40 # Proper Strings
41
42 # Miscellaneous Options
43 self.headers.update({'User-Agent': USER_AGENT})
44
45 # Torrent Stats
46 self.minseed = None
47 self.minleech = None
48
49 # Cache
50 self.cache = tv.Cache(self, min_time=20)
51
52 def search(self, search_strings, age=0, ep_obj=None):
53 """
54 Search a provider and parse the results.
55
56 :param search_strings: A dict with mode (key) and the search value (value)
57 :param age: Not used
58 :param ep_obj: Not used
59 :returns: A list of search results (structure)
60 """
61 results = []
62
63 if self.custom_url:
64 if not validators.url(self.custom_url):
65 log.warning('Invalid custom url: {0}', self.custom_url)
66 return results
67 search_url = self.custom_url
68 else:
69 search_url = self.url
70
71 search_params = {
72 'hl': 'en',
73 'num': 40,
74 'start': 0,
75 'filter': 2101,
76 'safe': 'on',
77 'orderby': 'latest',
78 's': '',
79 }
80
81 for mode in search_strings:
82 log.debug('Search mode: {0}', mode)
83
84 for search_string in search_strings[mode]:
85 if mode != 'RSS':
86 search_params['s'] = search_string
87 log.debug('Search string: {search}',
88 {'search': search_string})
89
90 response = self.get_url(search_url, params=search_params, returns='response')
91 if not response or not response.text:
92 log.debug('No data returned from provider')
93 continue
94
95 results += self.parse(response.text, mode)
96
97 return results
98
99 def parse(self, data, mode):
100 """
101 Parse search results for items.
102
103 :param data: The raw response from a search
104 :param mode: The current mode used to search, e.g. RSS
105
106 :return: A list of items found
107 """
108 items = []
109
110 with BS4Parser(data, 'html5lib') as html:
111 torrent_rows = html.find_all('div', class_='torrent')
112
113 # Continue only if at least one release is found
114 if not torrent_rows:
115 log.debug('Data returned from provider does not contain any torrents')
116 return items
117
118 for row in torrent_rows:
119 anchor = row.find('a')
120
121 try:
122 # Removes ' torrent' in the end
123 title = anchor.get('title')[:-8]
124 download_url = anchor.get('href')
125 if not all([title, download_url]):
126 continue
127
128 info_hash = download_url.split('/')[1]
129 download_url = 'magnet:?xt=urn:btih:{hash}&dn={title}{trackers}'.format(
130 hash=info_hash, title=title, trackers=self._custom_trackers)
131
132 seeders = try_int(row.find('span', class_='bc seeders').find('span').get_text(), 1)
133 leechers = try_int(row.find('span', class_='bc leechers').find('span').get_text())
134
135 # Filter unseeded torrent
136 if seeders < min(self.minseed, 1):
137 if mode != 'RSS':
138 log.debug("Discarding torrent because it doesn't meet the"
139 " minimum seeders: {0}. Seeders: {1}",
140 title, seeders)
141 continue
142
143 torrent_size = row.find('span', class_='bc torrent-size').get_text().rstrip()
144 size = convert_size(torrent_size) or -1
145
146 pubdate_raw = row.find('span', class_='bc cated').get_text(strip=True)
147 pubdate = self.parse_pubdate(pubdate_raw)
148
149 item = {
150 'title': title,
151 'link': download_url,
152 'size': size,
153 'seeders': seeders,
154 'leechers': leechers,
155 'pubdate': pubdate,
156 }
157 if mode != 'RSS':
158 log.debug('Found result: {0} with {1} seeders and {2} leechers',
159 title, seeders, leechers)
160
161 items.append(item)
162 except (AttributeError, TypeError, KeyError, ValueError, IndexError):
163 log.error('Failed parsing provider. Traceback: {0!r}',
164 traceback.format_exc())
165
166 return items
167
168
169 provider = TorrentProjectProvider()
170
[end of medusa/providers/torrent/html/torrentproject.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/medusa/providers/torrent/html/torrentproject.py b/medusa/providers/torrent/html/torrentproject.py
--- a/medusa/providers/torrent/html/torrentproject.py
+++ b/medusa/providers/torrent/html/torrentproject.py
@@ -144,7 +144,7 @@
size = convert_size(torrent_size) or -1
pubdate_raw = row.find('span', class_='bc cated').get_text(strip=True)
- pubdate = self.parse_pubdate(pubdate_raw)
+ pubdate = self.parse_pubdate(pubdate_raw, human_time=True)
item = {
'title': title,
| {"golden_diff": "diff --git a/medusa/providers/torrent/html/torrentproject.py b/medusa/providers/torrent/html/torrentproject.py\n--- a/medusa/providers/torrent/html/torrentproject.py\n+++ b/medusa/providers/torrent/html/torrentproject.py\n@@ -144,7 +144,7 @@\n size = convert_size(torrent_size) or -1\n \n pubdate_raw = row.find('span', class_='bc cated').get_text(strip=True)\n- pubdate = self.parse_pubdate(pubdate_raw)\n+ pubdate = self.parse_pubdate(pubdate_raw, human_time=True)\n \n item = {\n 'title': title,\n", "issue": "[APP SUBMITTED]: ValueError: hour must be in 0..23\n### INFO\n**Python Version**: `2.7.9 (default, Sep 17 2016, 20:26:04) [GCC 4.9.2]`\n**Operating System**: `Linux-4.4.38-v7+-armv7l-with-debian-8.0`\n**Locale**: `UTF-8`\n**Branch**: [develop](../tree/develop)\n**Database**: `44.8`\n**Commit**: pymedusa/Medusa@94e0649919e9e05ac380f8e6d3817c18255931ee\n**Link to Log**: https://gist.github.com/31ec35e671692a5cc7ba13b4d2d07353\n### ERROR\n<pre>\n2017-05-30 15:24:07 ERROR SEARCHQUEUE-BACKLOG-75340 :: [TorrentProject] :: [94e0649] Failed parsing publishing date: 24 hours ago\nTraceback (most recent call last):\n File \"/home/pi/Medusa/<a href=\"../blob/94e0649919e9e05ac380f8e6d3817c18255931ee/medusa/providers/generic_provider.py#L512\">medusa/providers/generic_provider.py</a>\", line 512, in parse_pubdate\n dt = parser.parse(pubdate, fuzzy=True)\n File \"/home/pi/Medusa/<a href=\"../blob/94e0649919e9e05ac380f8e6d3817c18255931ee/lib/dateutil/parser.py#L1181\">lib/dateutil/parser.py</a>\", line 1181, in parse\n return DEFAULTPARSER.parse(timestr, **kwargs)\n File \"/home/pi/Medusa/<a href=\"../blob/94e0649919e9e05ac380f8e6d3817c18255931ee/lib/dateutil/parser.py#L584\">lib/dateutil/parser.py</a>\", line 584, in parse\n ret = default.replace(**repl)\nValueError: hour must be in 0..23\n</pre>\n---\n_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for TorrentProject.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport traceback\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.common import USER_AGENT\nfrom medusa.helper.common import (\n convert_size,\n try_int,\n)\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nimport validators\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass TorrentProjectProvider(TorrentProvider):\n \"\"\"TorrentProject Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(TorrentProjectProvider, self).__init__('TorrentProject')\n\n # Credentials\n self.public = True\n\n # URLs\n self.url = 'https://torrentproject.se'\n self.custom_url = None\n\n # Proper Strings\n\n # Miscellaneous Options\n self.headers.update({'User-Agent': USER_AGENT})\n\n # Torrent Stats\n self.minseed = None\n self.minleech = None\n\n # Cache\n self.cache = tv.Cache(self, min_time=20)\n\n def search(self, search_strings, age=0, ep_obj=None):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :param age: Not used\n :param ep_obj: Not used\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n\n if self.custom_url:\n if not validators.url(self.custom_url):\n log.warning('Invalid custom url: {0}', self.custom_url)\n return results\n search_url = self.custom_url\n else:\n search_url = 
self.url\n\n search_params = {\n 'hl': 'en',\n 'num': 40,\n 'start': 0,\n 'filter': 2101,\n 'safe': 'on',\n 'orderby': 'latest',\n 's': '',\n }\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n if mode != 'RSS':\n search_params['s'] = search_string\n log.debug('Search string: {search}',\n {'search': search_string})\n\n response = self.get_url(search_url, params=search_params, returns='response')\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. RSS\n\n :return: A list of items found\n \"\"\"\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n torrent_rows = html.find_all('div', class_='torrent')\n\n # Continue only if at least one release is found\n if not torrent_rows:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n for row in torrent_rows:\n anchor = row.find('a')\n\n try:\n # Removes ' torrent' in the end\n title = anchor.get('title')[:-8]\n download_url = anchor.get('href')\n if not all([title, download_url]):\n continue\n\n info_hash = download_url.split('/')[1]\n download_url = 'magnet:?xt=urn:btih:{hash}&dn={title}{trackers}'.format(\n hash=info_hash, title=title, trackers=self._custom_trackers)\n\n seeders = try_int(row.find('span', class_='bc seeders').find('span').get_text(), 1)\n leechers = try_int(row.find('span', class_='bc leechers').find('span').get_text())\n\n # Filter unseeded torrent\n if seeders < min(self.minseed, 1):\n if mode != 'RSS':\n log.debug(\"Discarding torrent because it doesn't meet the\"\n \" minimum seeders: {0}. Seeders: {1}\",\n title, seeders)\n continue\n\n torrent_size = row.find('span', class_='bc torrent-size').get_text().rstrip()\n size = convert_size(torrent_size) or -1\n\n pubdate_raw = row.find('span', class_='bc cated').get_text(strip=True)\n pubdate = self.parse_pubdate(pubdate_raw)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.error('Failed parsing provider. Traceback: {0!r}',\n traceback.format_exc())\n\n return items\n\n\nprovider = TorrentProjectProvider()\n", "path": "medusa/providers/torrent/html/torrentproject.py"}]} | 2,706 | 145 |
gh_patches_debug_5807 | rasdani/github-patches | git_diff | scikit-hep__awkward-970 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Doubly jagged workaround broken in awkward 1.4.0rc2
There was a quick fix (made for KM3NeT data 😄) for doubly jagged arrays, discussed here: https://github.com/scikit-hep/uproot4/issues/90, and it uses the `awkward._io` submodule, which is no longer directly accessible (`AttributeError`) in `1.4.0rc2`.
See here:
https://github.com/scikit-hep/awkward-1.0/blob/main/src/awkward/_connect/_uproot.py#L35
I am not sure what the desired fix is: to import `_io` in `_connect/_uproot.py`, or whether this fix is now covered by further developments, so I thought I would ask first before I do a PR 😉
Here is the full MWE (needs `pip install km3net-testdata`, I am not sure if this test
```python
>>> import uproot
>>> uproot.__version__
'4.0.7'
>>> import awkward as ak
>>> ak.__version__
'1.4.0rc2'
>>> from km3net_testdata import data_path
>>> f = uproot.open(data_path("offline/mcv5.11r2.gsg_muonCChigherE-CC_50-5000GeV.km3_AAv1.jterbr00004695.jchain.aanet.498.root"))
>>> f["E/Evt/trks/trks.rec_stages"].array()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-18-80472509fad7> in <module>
----> 1 f["E/Evt/trks/trks.rec_stages"].array()
~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in array(self, interpretation, entry_start, entry_stop, decompression_executor, interpretation_executor, array_cache, library)
2076 ranges_or_baskets.append((branch, basket_num, range_or_basket))
2077
-> 2078 _ranges_or_baskets_to_arrays(
2079 self,
2080 ranges_or_baskets,
~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in _ranges_or_baskets_to_arrays(hasbranches, ranges_or_baskets, branchid_interpretation, entry_start, entry_stop, decompression_executor, interpretation_executor, library, arrays, update_ranges_or_baskets)
3476
3477 elif isinstance(obj, tuple) and len(obj) == 3:
-> 3478 uproot.source.futures.delayed_raise(*obj)
3479
3480 else:
~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/source/futures.py in delayed_raise(exception_class, exception_value, traceback)
44 exec("raise exception_class, exception_value, traceback")
45 else:
---> 46 raise exception_value.with_traceback(traceback)
47
48
~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in basket_to_array(basket)
3420 basket_arrays = branchid_arrays[branch.cache_key]
3421
-> 3422 basket_arrays[basket.basket_num] = interpretation.basket_array(
3423 basket.data,
3424 basket.byte_offsets,
~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/interpretation/objects.py in basket_array(self, data, byte_offsets, basket, branch, context, cursor_offset, library)
151 "cursor_offset": cursor_offset,
152 }
--> 153 output = awkward._connect._uproot.basket_array(
154 form, data, byte_offsets, extra
155 )
~/Dev/km3io/venv/lib/python3.9/site-packages/awkward/_connect/_uproot.py in basket_array(form, data, byte_offsets, extra)
36 # FIXME: uproot_issue_90 is just a placeholder, to show how it would be done
37
---> 38 return ak._io.uproot_issue_90(
39 form,
40 ak.layout.NumpyArray(data),
AttributeError: module 'awkward' has no attribute '_io'
```
</issue>
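Before the code listing, here is a minimal standalone sketch of the local-import workaround the reporter hints at (and which the patch at the end of this record also uses); it assumes the helper still lives at `awkward._io.uproot_issue_90` and that only the top-level `ak._io` attribute disappeared in 1.4.0rc2:

```python
# Sketch only: lazy-import the private submodule inside the function so that
# awkward/__init__.py no longer needs to expose `_io` at the package level.
import awkward as ak


def basket_array(form, data, byte_offsets, extra):
    # Assumption: uproot_issue_90 is still available under awkward._io; it
    # just stopped being reachable as ak._io in 1.4.0rc2.
    import awkward._io

    return awkward._io.uproot_issue_90(
        form,
        ak.layout.NumpyArray(data),
        ak.layout.Index32(byte_offsets),
    )
```

The lazy import keeps `awkward/__init__.py` free of the private module while still letting uproot reach it at call time.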
<code>
[start of src/awkward/_connect/_uproot.py]
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2
3 from __future__ import absolute_import
4
5 import json
6
7 # don't import awkward._connect._uproot in awkward/__init__.py!
8 import uproot
9
10 import awkward as ak
11
12
13 def can_optimize(interpretation, form):
14 if isinstance(interpretation, uproot.interpretation.objects.AsObjects):
15 jsonform = json.loads(form.tojson(verbose=True))
16 if (
17 jsonform["class"] == "ListOffsetArray64"
18 and jsonform["parameters"].get("uproot")
19 == {"as": "array", "header": True, "speedbump": False}
20 and jsonform["content"]["class"] == "ListOffsetArray64"
21 and jsonform["content"]["parameters"].get("uproot")
22 == {"as": "vector", "header": False}
23 and jsonform["content"]["content"]["class"] == "NumpyArray"
24 and jsonform["content"]["content"]["inner_shape"] == []
25 and (
26 jsonform["content"]["content"].get("primitive") == "float64"
27 or jsonform["content"]["content"].get("primitive") == "int32"
28 )
29 ):
30 return True
31
32 return False
33
34
35 def basket_array(form, data, byte_offsets, extra):
36 # FIXME: uproot_issue_90 is just a placeholder, to show how it would be done
37
38 return ak._io.uproot_issue_90(
39 form,
40 ak.layout.NumpyArray(data),
41 ak.layout.Index32(byte_offsets),
42 )
43
[end of src/awkward/_connect/_uproot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/awkward/_connect/_uproot.py b/src/awkward/_connect/_uproot.py
--- a/src/awkward/_connect/_uproot.py
+++ b/src/awkward/_connect/_uproot.py
@@ -33,9 +33,11 @@
def basket_array(form, data, byte_offsets, extra):
+ import awkward._io
+
# FIXME: uproot_issue_90 is just a placeholder, to show how it would be done
- return ak._io.uproot_issue_90(
+ return awkward._io.uproot_issue_90(
form,
ak.layout.NumpyArray(data),
ak.layout.Index32(byte_offsets),
| {"golden_diff": "diff --git a/src/awkward/_connect/_uproot.py b/src/awkward/_connect/_uproot.py\n--- a/src/awkward/_connect/_uproot.py\n+++ b/src/awkward/_connect/_uproot.py\n@@ -33,9 +33,11 @@\n \n \n def basket_array(form, data, byte_offsets, extra):\n+ import awkward._io\n+\n # FIXME: uproot_issue_90 is just a placeholder, to show how it would be done\n \n- return ak._io.uproot_issue_90(\n+ return awkward._io.uproot_issue_90(\n form,\n ak.layout.NumpyArray(data),\n ak.layout.Index32(byte_offsets),\n", "issue": "Doubly jagged workaround broken in awkward 1.4.0rc2\nThere was a quick fix (made for KM3NeT data \ud83d\ude04) for doubly jagged arrays which were discussed here https://github.com/scikit-hep/uproot4/issues/90 and the it uses the `akward._io` submodule which is not accessible anymore directly (`AttributeError`) in `1.4.0rc2`.\r\n\r\nSee here:\r\n\r\nhttps://github.com/scikit-hep/awkward-1.0/blob/main/src/awkward/_connect/_uproot.py#L35\r\n\r\nI am not sure what the desired fix is, to import `_io` in `_connect/_uproot.py` or if this fix is now covered by further developments, so I thought I ask first before I do a PR \ud83d\ude09 \r\n\r\nHere is the full MWE (needs `pip install km3net-testdata`, I am not sure if this test\r\n\r\n```python\r\n>>> import uproot\r\n\r\n>>> uproot.__version__\r\n'4.0.7'\r\n\r\n>>> import awkward as ak\r\n\r\n>>> ak.__version__\r\n'1.4.0rc2'\r\n\r\n>>> from km3net_testdata import data_path\r\n\r\n>>> f = uproot.open(data_path(\"offline/mcv5.11r2.gsg_muonCChigherE-CC_50-5000GeV.km3_AAv1.jterbr00004695.jchain.aanet.498.root\"))\r\n\r\n>>> f[\"E/Evt/trks/trks.rec_stages\"].array()\r\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\n<ipython-input-18-80472509fad7> in <module>\r\n----> 1 f[\"E/Evt/trks/trks.rec_stages\"].array()\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in array(self, interpretation, entry_start, entry_stop, decompression_executor, interpretation_executor, array_cache, library)\r\n 2076 ranges_or_baskets.append((branch, basket_num, range_or_basket))\r\n 2077\r\n-> 2078 _ranges_or_baskets_to_arrays(\r\n 2079 self,\r\n 2080 ranges_or_baskets,\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in _ranges_or_baskets_to_arrays(hasbranches, ranges_or_baskets, branchid_interpretation, entry_start, entry_stop, decompression_executor, interpretation_executor, library, arrays, update_ranges_or_baskets)\r\n 3476\r\n 3477 elif isinstance(obj, tuple) and len(obj) == 3:\r\n-> 3478 uproot.source.futures.delayed_raise(*obj)\r\n 3479\r\n 3480 else:\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/source/futures.py in delayed_raise(exception_class, exception_value, traceback)\r\n 44 exec(\"raise exception_class, exception_value, traceback\")\r\n 45 else:\r\n---> 46 raise exception_value.with_traceback(traceback)\r\n 47\r\n 48\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in basket_to_array(basket)\r\n 3420 basket_arrays = branchid_arrays[branch.cache_key]\r\n 3421\r\n-> 3422 basket_arrays[basket.basket_num] = interpretation.basket_array(\r\n 3423 basket.data,\r\n 3424 basket.byte_offsets,\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/interpretation/objects.py in basket_array(self, data, byte_offsets, basket, branch, context, cursor_offset, library)\r\n 151 \"cursor_offset\": cursor_offset,\r\n 152 }\r\n--> 153 output = 
awkward._connect._uproot.basket_array(\r\n 154 form, data, byte_offsets, extra\r\n 155 )\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/awkward/_connect/_uproot.py in basket_array(form, data, byte_offsets, extra)\r\n 36 # FIXME: uproot_issue_90 is just a placeholder, to show how it would be done\r\n 37\r\n---> 38 return ak._io.uproot_issue_90(\r\n 39 form,\r\n 40 ak.layout.NumpyArray(data),\r\n\r\nAttributeError: module 'awkward' has no attribute '_io'\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nfrom __future__ import absolute_import\n\nimport json\n\n# don't import awkward._connect._uproot in awkward/__init__.py!\nimport uproot\n\nimport awkward as ak\n\n\ndef can_optimize(interpretation, form):\n if isinstance(interpretation, uproot.interpretation.objects.AsObjects):\n jsonform = json.loads(form.tojson(verbose=True))\n if (\n jsonform[\"class\"] == \"ListOffsetArray64\"\n and jsonform[\"parameters\"].get(\"uproot\")\n == {\"as\": \"array\", \"header\": True, \"speedbump\": False}\n and jsonform[\"content\"][\"class\"] == \"ListOffsetArray64\"\n and jsonform[\"content\"][\"parameters\"].get(\"uproot\")\n == {\"as\": \"vector\", \"header\": False}\n and jsonform[\"content\"][\"content\"][\"class\"] == \"NumpyArray\"\n and jsonform[\"content\"][\"content\"][\"inner_shape\"] == []\n and (\n jsonform[\"content\"][\"content\"].get(\"primitive\") == \"float64\"\n or jsonform[\"content\"][\"content\"].get(\"primitive\") == \"int32\"\n )\n ):\n return True\n\n return False\n\n\ndef basket_array(form, data, byte_offsets, extra):\n # FIXME: uproot_issue_90 is just a placeholder, to show how it would be done\n\n return ak._io.uproot_issue_90(\n form,\n ak.layout.NumpyArray(data),\n ak.layout.Index32(byte_offsets),\n )\n", "path": "src/awkward/_connect/_uproot.py"}]} | 2,020 | 155 |
gh_patches_debug_25846 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-1591 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Could not parse docx document
### Description
I can't upload a docx document after updating to the latest release candidate.
### Steps to reproduce
1. Update paperless-ngx to 1.9.0-rc1 (ghcr.io/paperless-ngx/paperless-ngx:beta)
2. Upload a docx-document and an error occurs "_`Error while consuming document khms-immoconsult_20170924_2.docx: Could not parse /tmp/paperless/paperless-upload-iwgwe9h0 with tika server at http://tika:9998: 'PosixPath' object has no attribute 'decode'`_"
### Webserver logs
```bash
[2022-09-14 05:45:14,789] [INFO] [paperless.consumer] Consuming khms-immoconsult_20170924_2.docx
[2022-09-14 05:45:14,791] [DEBUG] [paperless.consumer] Detected mime type: application/vnd.openxmlformats-officedocument.wordprocessingml.document
[2022-09-14 05:45:14,795] [DEBUG] [paperless.consumer] Parser: TikaDocumentParser
[2022-09-14 05:45:14,800] [DEBUG] [paperless.consumer] Parsing khms-immoconsult_20170924_2.docx...
[2022-09-14 05:45:14,801] [INFO] [paperless.parsing.tika] Sending /tmp/paperless/paperless-upload-iwgwe9h0 to Tika server
[2022-09-14 05:45:14,802] [DEBUG] [paperless.parsing.tika] Deleting directory /tmp/paperless/paperless-86t5km9q
[2022-09-14 05:45:14,806] [ERROR] [paperless.consumer] Error while consuming document khms-immoconsult_20170924_2.docx: Could not parse /tmp/paperless/paperless-upload-iwgwe9h0 with tika server at http://tika:9998: 'PosixPath' object has no attribute 'decode'
Traceback (most recent call last):
File "/usr/src/paperless/src/paperless_tika/parsers.py", line 55, in parse
parsed = parser.from_file(document_path, tika_server)
File "/usr/local/lib/python3.9/site-packages/tika/parser.py", line 40, in from_file
output = parse1(service, filename, serverEndpoint, headers=headers, config_path=config_path, requestOptions=requestOptions)
File "/usr/local/lib/python3.9/site-packages/tika/tika.py", line 327, in parse1
path, file_type = getRemoteFile(urlOrPath, TikaFilesPath)
File "/usr/local/lib/python3.9/site-packages/tika/tika.py", line 762, in getRemoteFile
urlp = urlparse(urlOrPath)
File "/usr/local/lib/python3.9/urllib/parse.py", line 392, in urlparse
url, scheme, _coerce_result = _coerce_args(url, scheme)
File "/usr/local/lib/python3.9/urllib/parse.py", line 128, in _coerce_args
return _decode_args(args) + (_encode_result,)
File "/usr/local/lib/python3.9/urllib/parse.py", line 112, in _decode_args
return tuple(x.decode(encoding, errors) if x else '' for x in args)
File "/usr/local/lib/python3.9/urllib/parse.py", line 112, in <genexpr>
return tuple(x.decode(encoding, errors) if x else '' for x in args)
AttributeError: 'PosixPath' object has no attribute 'decode'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/src/paperless/src/documents/consumer.py", line 320, in try_consume_file
document_parser.parse(self.path, mime_type, self.filename)
File "/usr/src/paperless/src/paperless_tika/parsers.py", line 57, in parse
raise ParseError(
documents.parsers.ParseError: Could not parse /tmp/paperless/paperless-upload-iwgwe9h0 with tika server at http://tika:9998: 'PosixPath' object has no attribute 'decode'
```
### Paperless-ngx version
1.9.0-rc1
### Host OS
Ubuntu 22.04 LTS
### Installation method
Docker - official image
### Browser
Google Chrome
### Configuration changes
_No response_
### Other
_No response_
</issue>
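Before the code listing, a minimal sketch of the coercion the traceback points at: `tika.parser.from_file` hands the path to `urllib.parse.urlparse`, which cannot handle a `pathlib.Path`, so converting to `str` first avoids the `AttributeError`. The helper name below is hypothetical; only the `parser.from_file(path, server)` call is taken from the record itself:

```python
# Sketch only: coerce PathLike input to str before handing it to tika.
from pathlib import Path

from tika import parser


def parse_with_tika(document_path: Path, tika_server: str) -> dict:
    # urlparse() inside tika only accepts str/bytes; a PosixPath falls into
    # the .decode() branch and raises AttributeError, hence the explicit str().
    return parser.from_file(str(document_path), tika_server)
```

For example, calling `parse_with_tika(Path('/tmp/upload.docx'), 'http://tika:9998')` mirrors the failing call from the log, just with the string conversion in place.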
<code>
[start of src/paperless_tika/parsers.py]
1 import os
2
3 import dateutil.parser
4 import requests
5 from django.conf import settings
6 from documents.parsers import DocumentParser
7 from documents.parsers import make_thumbnail_from_pdf
8 from documents.parsers import ParseError
9 from tika import parser
10
11
12 class TikaDocumentParser(DocumentParser):
13 """
14 This parser sends documents to a local tika server
15 """
16
17 logging_name = "paperless.parsing.tika"
18
19 def get_thumbnail(self, document_path, mime_type, file_name=None):
20 if not self.archive_path:
21 self.archive_path = self.convert_to_pdf(document_path, file_name)
22
23 return make_thumbnail_from_pdf(
24 self.archive_path,
25 self.tempdir,
26 self.logging_group,
27 )
28
29 def extract_metadata(self, document_path, mime_type):
30 tika_server = settings.TIKA_ENDPOINT
31 try:
32 parsed = parser.from_file(document_path, tika_server)
33 except Exception as e:
34 self.log(
35 "warning",
36 f"Error while fetching document metadata for " f"{document_path}: {e}",
37 )
38 return []
39
40 return [
41 {
42 "namespace": "",
43 "prefix": "",
44 "key": key,
45 "value": parsed["metadata"][key],
46 }
47 for key in parsed["metadata"]
48 ]
49
50 def parse(self, document_path, mime_type, file_name=None):
51 self.log("info", f"Sending {document_path} to Tika server")
52 tika_server = settings.TIKA_ENDPOINT
53
54 try:
55 parsed = parser.from_file(document_path, tika_server)
56 except Exception as err:
57 raise ParseError(
58 f"Could not parse {document_path} with tika server at "
59 f"{tika_server}: {err}",
60 ) from err
61
62 self.text = parsed["content"].strip()
63
64 try:
65 self.date = dateutil.parser.isoparse(parsed["metadata"]["Creation-Date"])
66 except Exception as e:
67 self.log(
68 "warning",
69 f"Unable to extract date for document " f"{document_path}: {e}",
70 )
71
72 self.archive_path = self.convert_to_pdf(document_path, file_name)
73
74 def convert_to_pdf(self, document_path, file_name):
75 pdf_path = os.path.join(self.tempdir, "convert.pdf")
76 gotenberg_server = settings.TIKA_GOTENBERG_ENDPOINT
77 url = gotenberg_server + "/forms/libreoffice/convert"
78
79 self.log("info", f"Converting {document_path} to PDF as {pdf_path}")
80 with open(document_path, "rb") as document_handle:
81 files = {
82 "files": (
83 file_name or os.path.basename(document_path),
84 document_handle,
85 ),
86 }
87 headers = {}
88
89 try:
90 response = requests.post(url, files=files, headers=headers)
91 response.raise_for_status() # ensure we notice bad responses
92 except Exception as err:
93 raise ParseError(
94 f"Error while converting document to PDF: {err}",
95 ) from err
96
97 with open(pdf_path, "wb") as file:
98 file.write(response.content)
99 file.close()
100
101 return pdf_path
102
[end of src/paperless_tika/parsers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/paperless_tika/parsers.py b/src/paperless_tika/parsers.py
--- a/src/paperless_tika/parsers.py
+++ b/src/paperless_tika/parsers.py
@@ -1,4 +1,5 @@
import os
+from pathlib import Path
import dateutil.parser
import requests
@@ -28,6 +29,11 @@
def extract_metadata(self, document_path, mime_type):
tika_server = settings.TIKA_ENDPOINT
+
+ # tika does not support a PathLike, only strings
+ # ensure this is a string
+ document_path = str(document_path)
+
try:
parsed = parser.from_file(document_path, tika_server)
except Exception as e:
@@ -47,10 +53,14 @@
for key in parsed["metadata"]
]
- def parse(self, document_path, mime_type, file_name=None):
+ def parse(self, document_path: Path, mime_type, file_name=None):
self.log("info", f"Sending {document_path} to Tika server")
tika_server = settings.TIKA_ENDPOINT
+ # tika does not support a PathLike, only strings
+ # ensure this is a string
+ document_path = str(document_path)
+
try:
parsed = parser.from_file(document_path, tika_server)
except Exception as err:
| {"golden_diff": "diff --git a/src/paperless_tika/parsers.py b/src/paperless_tika/parsers.py\n--- a/src/paperless_tika/parsers.py\n+++ b/src/paperless_tika/parsers.py\n@@ -1,4 +1,5 @@\n import os\n+from pathlib import Path\n \n import dateutil.parser\n import requests\n@@ -28,6 +29,11 @@\n \n def extract_metadata(self, document_path, mime_type):\n tika_server = settings.TIKA_ENDPOINT\n+\n+ # tika does not support a PathLike, only strings\n+ # ensure this is a string\n+ document_path = str(document_path)\n+\n try:\n parsed = parser.from_file(document_path, tika_server)\n except Exception as e:\n@@ -47,10 +53,14 @@\n for key in parsed[\"metadata\"]\n ]\n \n- def parse(self, document_path, mime_type, file_name=None):\n+ def parse(self, document_path: Path, mime_type, file_name=None):\n self.log(\"info\", f\"Sending {document_path} to Tika server\")\n tika_server = settings.TIKA_ENDPOINT\n \n+ # tika does not support a PathLike, only strings\n+ # ensure this is a string\n+ document_path = str(document_path)\n+\n try:\n parsed = parser.from_file(document_path, tika_server)\n except Exception as err:\n", "issue": "[BUG] Could not parse docx document\n### Description\n\nI can't upload a docx document after updating to the latest release candidate.\n\n### Steps to reproduce\n\n1. Update paperless-ngx to 1.9.0-rc1 (ghcr.io/paperless-ngx/paperless-ngx:beta)\r\n2. Upload a docx-document and an error occurs \"_`Error while consuming document khms-immoconsult_20170924_2.docx: Could not parse /tmp/paperless/paperless-upload-iwgwe9h0 with tika server at http://tika:9998: 'PosixPath' object has no attribute 'decode'`_\"\n\n### Webserver logs\n\n```bash\n2022-09-14 05:45:14,789] [INFO] [paperless.consumer] Consuming khms-immoconsult_20170924_2.docx\r\n\r\n[2022-09-14 05:45:14,791] [DEBUG] [paperless.consumer] Detected mime type: application/vnd.openxmlformats-officedocument.wordprocessingml.document\r\n\r\n[2022-09-14 05:45:14,795] [DEBUG] [paperless.consumer] Parser: TikaDocumentParser\r\n\r\n[2022-09-14 05:45:14,800] [DEBUG] [paperless.consumer] Parsing khms-immoconsult_20170924_2.docx...\r\n\r\n[2022-09-14 05:45:14,801] [INFO] [paperless.parsing.tika] Sending /tmp/paperless/paperless-upload-iwgwe9h0 to Tika server\r\n\r\n[2022-09-14 05:45:14,802] [DEBUG] [paperless.parsing.tika] Deleting directory /tmp/paperless/paperless-86t5km9q\r\n\r\n[2022-09-14 05:45:14,806] [ERROR] [paperless.consumer] Error while consuming document khms-immoconsult_20170924_2.docx: Could not parse /tmp/paperless/paperless-upload-iwgwe9h0 with tika server at http://tika:9998: 'PosixPath' object has no attribute 'decode'\r\n\r\nTraceback (most recent call last):\r\n\r\n File \"/usr/src/paperless/src/paperless_tika/parsers.py\", line 55, in parse\r\n\r\n parsed = parser.from_file(document_path, tika_server)\r\n\r\n File \"/usr/local/lib/python3.9/site-packages/tika/parser.py\", line 40, in from_file\r\n\r\n output = parse1(service, filename, serverEndpoint, headers=headers, config_path=config_path, requestOptions=requestOptions)\r\n\r\n File \"/usr/local/lib/python3.9/site-packages/tika/tika.py\", line 327, in parse1\r\n\r\n path, file_type = getRemoteFile(urlOrPath, TikaFilesPath)\r\n\r\n File \"/usr/local/lib/python3.9/site-packages/tika/tika.py\", line 762, in getRemoteFile\r\n\r\n urlp = urlparse(urlOrPath)\r\n\r\n File \"/usr/local/lib/python3.9/urllib/parse.py\", line 392, in urlparse\r\n\r\n url, scheme, _coerce_result = _coerce_args(url, scheme)\r\n\r\n File \"/usr/local/lib/python3.9/urllib/parse.py\", line 128, in 
_coerce_args\r\n\r\n return _decode_args(args) + (_encode_result,)\r\n\r\n File \"/usr/local/lib/python3.9/urllib/parse.py\", line 112, in _decode_args\r\n\r\n return tuple(x.decode(encoding, errors) if x else '' for x in args)\r\n\r\n File \"/usr/local/lib/python3.9/urllib/parse.py\", line 112, in <genexpr>\r\n\r\n return tuple(x.decode(encoding, errors) if x else '' for x in args)\r\n\r\nAttributeError: 'PosixPath' object has no attribute 'decode'\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n\r\n File \"/usr/src/paperless/src/documents/consumer.py\", line 320, in try_consume_file\r\n\r\n document_parser.parse(self.path, mime_type, self.filename)\r\n\r\n File \"/usr/src/paperless/src/paperless_tika/parsers.py\", line 57, in parse\r\n\r\n raise ParseError(\r\n\r\ndocuments.parsers.ParseError: Could not parse /tmp/paperless/paperless-upload-iwgwe9h0 with tika server at http://tika:9998: 'PosixPath' object has no attribute 'decode'\n```\n\n\n### Paperless-ngx version\n\n1.9.0-rc1\n\n### Host OS\n\nUbuntu 22.04 LTS\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\nGoogle Chrome\n\n### Configuration changes\n\n_No response_\n\n### Other\n\n_No response_\n", "before_files": [{"content": "import os\n\nimport dateutil.parser\nimport requests\nfrom django.conf import settings\nfrom documents.parsers import DocumentParser\nfrom documents.parsers import make_thumbnail_from_pdf\nfrom documents.parsers import ParseError\nfrom tika import parser\n\n\nclass TikaDocumentParser(DocumentParser):\n \"\"\"\n This parser sends documents to a local tika server\n \"\"\"\n\n logging_name = \"paperless.parsing.tika\"\n\n def get_thumbnail(self, document_path, mime_type, file_name=None):\n if not self.archive_path:\n self.archive_path = self.convert_to_pdf(document_path, file_name)\n\n return make_thumbnail_from_pdf(\n self.archive_path,\n self.tempdir,\n self.logging_group,\n )\n\n def extract_metadata(self, document_path, mime_type):\n tika_server = settings.TIKA_ENDPOINT\n try:\n parsed = parser.from_file(document_path, tika_server)\n except Exception as e:\n self.log(\n \"warning\",\n f\"Error while fetching document metadata for \" f\"{document_path}: {e}\",\n )\n return []\n\n return [\n {\n \"namespace\": \"\",\n \"prefix\": \"\",\n \"key\": key,\n \"value\": parsed[\"metadata\"][key],\n }\n for key in parsed[\"metadata\"]\n ]\n\n def parse(self, document_path, mime_type, file_name=None):\n self.log(\"info\", f\"Sending {document_path} to Tika server\")\n tika_server = settings.TIKA_ENDPOINT\n\n try:\n parsed = parser.from_file(document_path, tika_server)\n except Exception as err:\n raise ParseError(\n f\"Could not parse {document_path} with tika server at \"\n f\"{tika_server}: {err}\",\n ) from err\n\n self.text = parsed[\"content\"].strip()\n\n try:\n self.date = dateutil.parser.isoparse(parsed[\"metadata\"][\"Creation-Date\"])\n except Exception as e:\n self.log(\n \"warning\",\n f\"Unable to extract date for document \" f\"{document_path}: {e}\",\n )\n\n self.archive_path = self.convert_to_pdf(document_path, file_name)\n\n def convert_to_pdf(self, document_path, file_name):\n pdf_path = os.path.join(self.tempdir, \"convert.pdf\")\n gotenberg_server = settings.TIKA_GOTENBERG_ENDPOINT\n url = gotenberg_server + \"/forms/libreoffice/convert\"\n\n self.log(\"info\", f\"Converting {document_path} to PDF as {pdf_path}\")\n with open(document_path, \"rb\") as document_handle:\n files = {\n \"files\": (\n 
file_name or os.path.basename(document_path),\n document_handle,\n ),\n }\n headers = {}\n\n try:\n response = requests.post(url, files=files, headers=headers)\n response.raise_for_status() # ensure we notice bad responses\n except Exception as err:\n raise ParseError(\n f\"Error while converting document to PDF: {err}\",\n ) from err\n\n with open(pdf_path, \"wb\") as file:\n file.write(response.content)\n file.close()\n\n return pdf_path\n", "path": "src/paperless_tika/parsers.py"}]} | 2,560 | 313 |
gh_patches_debug_30613 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-2729 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
404 errors not raised on certain pages
Some pages should raise 404 errors instead of displaying their content.
I discovered this bug while trying to fix link errors on the CGU (terms of use) page: a forgotten mailto currently produces a link to http://zestedesavoir.com/pages/cgu/[email protected], which displays the CGU page, whereas a 404 error should be raised. Whatever follows the URL, it still works, and this happens on several pages...
A `$` is missing at the end of some regular expressions in zds/pages/urls.py.
Moreover, the urls.py file imports pages.views but never uses it.
So I am wondering whether I should remove the views import (line 5) or instead use, for example, `views.about` instead of `'zds.pages.views.about'`?
I am happy to take care of a PR to fix these bugs :)
EDIT: I cannot assign myself to this issue (as suggested by CONTRIBUTING.md); permission problem?
</issue>
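Before the code listing, a small self-contained demonstration (plain `re`, no Django needed) of why the missing `$` matters; it assumes Django's regex-based `url()` matches the request path against these same patterns, which is how the unanchored `r'^cgu/'` ends up serving `/pages/cgu/anything`:

```python
# Sketch only: compare an unanchored and an anchored pattern against the two
# paths from the report.
import re

unanchored = re.compile(r'^cgu/')
anchored = re.compile(r'^cgu/$')

for path in ('cgu/', 'cgu/[email protected]'):
    print(path, bool(unanchored.match(path)), bool(anchored.match(path)))

# cgu/                        True  True
# cgu/[email protected]  True  False  -> unmatched, so Django can fall through to a 404
```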
<code>
[start of zds/pages/urls.py]
1 # coding: utf-8
2
3 from django.conf.urls import patterns, url
4
5 from . import views
6
7
8 urlpatterns = patterns('',
9
10 url(r'^apropos/$', 'zds.pages.views.about'),
11 url(r'^association/$', 'zds.pages.views.association'),
12 url(r'^contact/', 'zds.pages.views.contact'),
13 url(r'^cgu/', 'zds.pages.views.eula'),
14 url(r'^alertes/', 'zds.pages.views.alerts'),
15 url(r'^cookies/', 'zds.pages.views.cookies'),
16 url(r'^association/inscription/$', 'zds.pages.views.assoc_subscribe'),
17
18 url(r'^$', 'zds.pages.views.index'),
19 )
20
[end of zds/pages/urls.py]
[start of zds/search/urls.py]
1 # coding: utf-8
2
3 from django.conf.urls import patterns, url
4
5 from . import views
6 from haystack.views import search_view_factory
7 from zds.search.views import CustomSearchView
8 from zds.search.forms import CustomSearchForm
9
10 urlpatterns = patterns('haystack.views',
11 url(r'^$', search_view_factory(
12 view_class=CustomSearchView,
13 template='search/search.html',
14 form_class=CustomSearchForm
15 ), name='haystack_search'))
16
17 urlpatterns += patterns('',
18 url(r'^opensearch.xml', 'zds.search.views.opensearch')
19 )
20
[end of zds/search/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zds/pages/urls.py b/zds/pages/urls.py
--- a/zds/pages/urls.py
+++ b/zds/pages/urls.py
@@ -2,17 +2,15 @@
from django.conf.urls import patterns, url
-from . import views
-
urlpatterns = patterns('',
url(r'^apropos/$', 'zds.pages.views.about'),
url(r'^association/$', 'zds.pages.views.association'),
- url(r'^contact/', 'zds.pages.views.contact'),
- url(r'^cgu/', 'zds.pages.views.eula'),
- url(r'^alertes/', 'zds.pages.views.alerts'),
- url(r'^cookies/', 'zds.pages.views.cookies'),
+ url(r'^contact/$', 'zds.pages.views.contact'),
+ url(r'^cgu/$', 'zds.pages.views.eula'),
+ url(r'^alertes/$', 'zds.pages.views.alerts'),
+ url(r'^cookies/$', 'zds.pages.views.cookies'),
url(r'^association/inscription/$', 'zds.pages.views.assoc_subscribe'),
url(r'^$', 'zds.pages.views.index'),
diff --git a/zds/search/urls.py b/zds/search/urls.py
--- a/zds/search/urls.py
+++ b/zds/search/urls.py
@@ -2,7 +2,6 @@
from django.conf.urls import patterns, url
-from . import views
from haystack.views import search_view_factory
from zds.search.views import CustomSearchView
from zds.search.forms import CustomSearchForm
@@ -15,5 +14,5 @@
), name='haystack_search'))
urlpatterns += patterns('',
- url(r'^opensearch.xml', 'zds.search.views.opensearch')
+ url(r'^opensearch\.xml$', 'zds.search.views.opensearch')
)
| {"golden_diff": "diff --git a/zds/pages/urls.py b/zds/pages/urls.py\n--- a/zds/pages/urls.py\n+++ b/zds/pages/urls.py\n@@ -2,17 +2,15 @@\n \n from django.conf.urls import patterns, url\n \n-from . import views\n-\n \n urlpatterns = patterns('',\n \n url(r'^apropos/$', 'zds.pages.views.about'),\n url(r'^association/$', 'zds.pages.views.association'),\n- url(r'^contact/', 'zds.pages.views.contact'),\n- url(r'^cgu/', 'zds.pages.views.eula'),\n- url(r'^alertes/', 'zds.pages.views.alerts'),\n- url(r'^cookies/', 'zds.pages.views.cookies'),\n+ url(r'^contact/$', 'zds.pages.views.contact'),\n+ url(r'^cgu/$', 'zds.pages.views.eula'),\n+ url(r'^alertes/$', 'zds.pages.views.alerts'),\n+ url(r'^cookies/$', 'zds.pages.views.cookies'),\n url(r'^association/inscription/$', 'zds.pages.views.assoc_subscribe'),\n \n url(r'^$', 'zds.pages.views.index'),\ndiff --git a/zds/search/urls.py b/zds/search/urls.py\n--- a/zds/search/urls.py\n+++ b/zds/search/urls.py\n@@ -2,7 +2,6 @@\n \n from django.conf.urls import patterns, url\n \n-from . import views\n from haystack.views import search_view_factory\n from zds.search.views import CustomSearchView\n from zds.search.forms import CustomSearchForm\n@@ -15,5 +14,5 @@\n ), name='haystack_search'))\n \n urlpatterns += patterns('',\n- url(r'^opensearch.xml', 'zds.search.views.opensearch')\n+ url(r'^opensearch\\.xml$', 'zds.search.views.opensearch')\n )\n", "issue": "Erreurs 404 non g\u00e9n\u00e9r\u00e9es sur certaines pages\nCertaines pages devraient g\u00e9n\u00e9r\u00e9es des erreurs 404 au lieu d'afficher le contenu des pages.\nJ'ai d\u00e9couvert ce bug en voulant corriger des erreurs dans les liens sur la page des CGU, un oubli de mailto produit actuellement un lien vers http://zestedesavoir.com/pages/cgu/[email protected] qui affiche la page des CGU. Or une erreur 404 devrait \u00eatre g\u00e9n\u00e9r\u00e9e. Peu importe la suite de l'URL cela fonctionne et ce pour plusieurs pages...\nIl manque un `$` \u00e0 la fin de certaines expressions r\u00e9guli\u00e8res dans zds/pages/urls.py.\n\nDe plus le fichier urls.py importe pages.views mais ne l'utilise \u00e0 aucun moment.\nDu coup je me demande si je supprime l'import de views (ligne 5) ou bien si j'utilise par exemple `views.about` au lieu de `'zds.pages.views.about'` ?\n\nJe veux bien bien me charger de faire une PR pour corriger ces bugs :)\n\nEDIT: Je n'arrive pas \u00e0 m'assigner \u00e0 cette issue (comme sugg\u00e9r\u00e9 par CONTRIBUTING.md), probl\u00e8mes de droits ?\n\n", "before_files": [{"content": "# coding: utf-8\n\nfrom django.conf.urls import patterns, url\n\nfrom . import views\n\n\nurlpatterns = patterns('',\n\n url(r'^apropos/$', 'zds.pages.views.about'),\n url(r'^association/$', 'zds.pages.views.association'),\n url(r'^contact/', 'zds.pages.views.contact'),\n url(r'^cgu/', 'zds.pages.views.eula'),\n url(r'^alertes/', 'zds.pages.views.alerts'),\n url(r'^cookies/', 'zds.pages.views.cookies'),\n url(r'^association/inscription/$', 'zds.pages.views.assoc_subscribe'),\n\n url(r'^$', 'zds.pages.views.index'),\n )\n", "path": "zds/pages/urls.py"}, {"content": "# coding: utf-8\n\nfrom django.conf.urls import patterns, url\n\nfrom . 
import views\nfrom haystack.views import search_view_factory\nfrom zds.search.views import CustomSearchView\nfrom zds.search.forms import CustomSearchForm\n\nurlpatterns = patterns('haystack.views',\n url(r'^$', search_view_factory(\n view_class=CustomSearchView,\n template='search/search.html',\n form_class=CustomSearchForm\n ), name='haystack_search'))\n\nurlpatterns += patterns('',\n url(r'^opensearch.xml', 'zds.search.views.opensearch')\n )\n", "path": "zds/search/urls.py"}]} | 1,165 | 405 |
gh_patches_debug_912 | rasdani/github-patches | git_diff | AUTOMATIC1111__stable-diffusion-webui-60 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FileNotFoundError after new update
Getting a FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\Users\\admin\\stable-diffusion-webui\\scripts' after the new update.
I'm not exactly good at all the coding stuff; it was working just fine yesterday, but I had downloaded the repo instead of using git clone. For the sake of easier updates I started a new installation by git cloning into my user folder. The installation went well, but I ran into this while launching through webui.py.
Python 3.10.6
venv C:\Users\admin\stable-diffusion-webui\venv\Scripts\Python.exe
Launching webui.py...
Loading model from C:\Users\admin\stable-diffusion-webui\model.ckpt
Global Step: 470000
LatentDiffusion: Running in eps-prediction mode
DiffusionWrapper has 859.52 M params.
making attention of type 'vanilla' with 512 in_channels
Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
making attention of type 'vanilla' with 512 in_channels
Traceback (most recent call last):
File "C:\Users\admin\stable-diffusion-webui\webui.py", line 135, in <module>
modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
File "C:\Users\admin\stable-diffusion-webui\modules\scripts.py", line 32, in load_scripts
for filename in os.listdir(basedir):
FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\Users\\admin\\stable-diffusion-webui\\scripts'
</issue>
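Before the code listing, a minimal sketch of the guard implied by the traceback, assuming the intended behaviour is simply to skip script loading when the `scripts` directory is absent (for example on a fresh clone without custom scripts); this mirrors the patch shown at the end of this record:

```python
# Sketch only: bail out early instead of letting os.listdir() raise
# FileNotFoundError on a missing scripts/ directory.
import os


def load_scripts(basedir):
    if not os.path.exists(basedir):
        return  # nothing to load; a fresh checkout may not ship a scripts/ dir

    for filename in os.listdir(basedir):
        path = os.path.join(basedir, filename)
        if not os.path.isfile(path):
            continue
        # ... per-script compilation and registration as in modules/scripts.py ...
```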
<code>
[start of modules/scripts.py]
1 import os
2 import sys
3 import traceback
4
5 import modules.ui as ui
6 import gradio as gr
7
8 from modules.processing import StableDiffusionProcessing
9
10 class Script:
11 filename = None
12 args_from = None
13 args_to = None
14
15 def title(self):
16 raise NotImplementedError()
17
18 def ui(self, is_img2img):
19 pass
20
21 def run(self, *args):
22 raise NotImplementedError()
23
24 def describe(self):
25 return ""
26
27
28 scripts = []
29
30
31 def load_scripts(basedir):
32 for filename in os.listdir(basedir):
33 path = os.path.join(basedir, filename)
34
35 if not os.path.isfile(path):
36 continue
37
38 with open(path, "r", encoding="utf8") as file:
39 text = file.read()
40
41 from types import ModuleType
42 compiled = compile(text, path, 'exec')
43 module = ModuleType(filename)
44 exec(compiled, module.__dict__)
45
46 for key, script_class in module.__dict__.items():
47 if type(script_class) == type and issubclass(script_class, Script):
48 obj = script_class()
49 obj.filename = path
50
51 scripts.append(obj)
52
53
54 def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
55 try:
56 res = func(*args, **kwargs)
57 return res
58 except Exception:
59 print(f"Error calling: {filename}/{funcname}", file=sys.stderr)
60 print(traceback.format_exc(), file=sys.stderr)
61
62 return default
63
64
65 def setup_ui(is_img2img):
66 titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in scripts]
67
68 dropdown = gr.Dropdown(label="Script", choices=["None"] + titles, value="None", type="index")
69
70 inputs = [dropdown]
71
72 for script in scripts:
73 script.args_from = len(inputs)
74 controls = script.ui(is_img2img)
75
76 for control in controls:
77 control.visible = False
78
79 inputs += controls
80 script.args_to = len(inputs)
81
82 def select_script(index):
83 if index > 0:
84 script = scripts[index-1]
85 args_from = script.args_from
86 args_to = script.args_to
87 else:
88 args_from = 0
89 args_to = 0
90
91 return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))]
92
93 dropdown.change(
94 fn=select_script,
95 inputs=[dropdown],
96 outputs=inputs
97 )
98
99 return inputs
100
101
102 def run(p: StableDiffusionProcessing, *args):
103 script_index = args[0] - 1
104
105 if script_index < 0 or script_index >= len(scripts):
106 return None
107
108 script = scripts[script_index]
109
110 script_args = args[script.args_from:script.args_to]
111 processed = script.run(p, *script_args)
112
113 return processed
114
[end of modules/scripts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modules/scripts.py b/modules/scripts.py
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -29,6 +29,9 @@
def load_scripts(basedir):
+ if not os.path.exists(basedir):
+ return
+
for filename in os.listdir(basedir):
path = os.path.join(basedir, filename)
| {"golden_diff": "diff --git a/modules/scripts.py b/modules/scripts.py\n--- a/modules/scripts.py\n+++ b/modules/scripts.py\n@@ -29,6 +29,9 @@\n \r\n \r\n def load_scripts(basedir):\r\n+ if not os.path.exists(basedir):\r\n+ return\r\n+\r\n for filename in os.listdir(basedir):\r\n path = os.path.join(basedir, filename)\n", "issue": "FileNotFoundError after new update\nGetting a FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\\\Users\\\\admin\\\\stable-diffusion-webui\\\\scripts' after the new update. \r\n\r\nNot exactly good at all the coding stuff, using it just fine yesterday but I downloaded the repo instead of git clone, for the sake of easier update I started a new installation by git cloning into user folder and the installation went well but ran into this while launching through webui.py.\r\n\r\nPython 3.10.6\r\nvenv C:\\Users\\admin\\stable-diffusion-webui\\venv\\Scripts\\Python.exe\r\nLaunching webui.py...\r\nLoading model from C:\\Users\\admin\\stable-diffusion-webui\\model.ckpt\r\nGlobal Step: 470000\r\nLatentDiffusion: Running in eps-prediction mode\r\nDiffusionWrapper has 859.52 M params.\r\nmaking attention of type 'vanilla' with 512 in_channels\r\nWorking with z of shape (1, 4, 32, 32) = 4096 dimensions.\r\nmaking attention of type 'vanilla' with 512 in_channels\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\admin\\stable-diffusion-webui\\webui.py\", line 135, in <module>\r\n modules.scripts.load_scripts(os.path.join(script_path, \"scripts\"))\r\n File \"C:\\Users\\admin\\stable-diffusion-webui\\modules\\scripts.py\", line 32, in load_scripts\r\n for filename in os.listdir(basedir):\r\nFileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\\\Users\\\\admin\\\\stable-diffusion-webui\\\\scripts'\n", "before_files": [{"content": "import os\r\nimport sys\r\nimport traceback\r\n\r\nimport modules.ui as ui\r\nimport gradio as gr\r\n\r\nfrom modules.processing import StableDiffusionProcessing\r\n\r\nclass Script:\r\n filename = None\r\n args_from = None\r\n args_to = None\r\n\r\n def title(self):\r\n raise NotImplementedError()\r\n\r\n def ui(self, is_img2img):\r\n pass\r\n\r\n def run(self, *args):\r\n raise NotImplementedError()\r\n\r\n def describe(self):\r\n return \"\"\r\n\r\n\r\nscripts = []\r\n\r\n\r\ndef load_scripts(basedir):\r\n for filename in os.listdir(basedir):\r\n path = os.path.join(basedir, filename)\r\n\r\n if not os.path.isfile(path):\r\n continue\r\n\r\n with open(path, \"r\", encoding=\"utf8\") as file:\r\n text = file.read()\r\n\r\n from types import ModuleType\r\n compiled = compile(text, path, 'exec')\r\n module = ModuleType(filename)\r\n exec(compiled, module.__dict__)\r\n\r\n for key, script_class in module.__dict__.items():\r\n if type(script_class) == type and issubclass(script_class, Script):\r\n obj = script_class()\r\n obj.filename = path\r\n\r\n scripts.append(obj)\r\n\r\n\r\ndef wrap_call(func, filename, funcname, *args, default=None, **kwargs):\r\n try:\r\n res = func(*args, **kwargs)\r\n return res\r\n except Exception:\r\n print(f\"Error calling: {filename}/{funcname}\", file=sys.stderr)\r\n print(traceback.format_exc(), file=sys.stderr)\r\n\r\n return default\r\n\r\n\r\ndef setup_ui(is_img2img):\r\n titles = [wrap_call(script.title, script.filename, \"title\") or f\"{script.filename} [error]\" for script in scripts]\r\n\r\n dropdown = gr.Dropdown(label=\"Script\", choices=[\"None\"] + titles, value=\"None\", type=\"index\")\r\n\r\n inputs = [dropdown]\r\n\r\n for script in 
scripts:\r\n script.args_from = len(inputs)\r\n controls = script.ui(is_img2img)\r\n\r\n for control in controls:\r\n control.visible = False\r\n\r\n inputs += controls\r\n script.args_to = len(inputs)\r\n\r\n def select_script(index):\r\n if index > 0:\r\n script = scripts[index-1]\r\n args_from = script.args_from\r\n args_to = script.args_to\r\n else:\r\n args_from = 0\r\n args_to = 0\r\n\r\n return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))]\r\n\r\n dropdown.change(\r\n fn=select_script,\r\n inputs=[dropdown],\r\n outputs=inputs\r\n )\r\n\r\n return inputs\r\n\r\n\r\ndef run(p: StableDiffusionProcessing, *args):\r\n script_index = args[0] - 1\r\n\r\n if script_index < 0 or script_index >= len(scripts):\r\n return None\r\n\r\n script = scripts[script_index]\r\n\r\n script_args = args[script.args_from:script.args_to]\r\n processed = script.run(p, *script_args)\r\n\r\n return processed\r\n", "path": "modules/scripts.py"}]} | 1,794 | 83 |
gh_patches_debug_18976 | rasdani/github-patches | git_diff | ros2__ros2_documentation-1163 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add the ROS distro version above the fold
With #1150 we have the new versioning tabs, which are moderately small and compact, but that makes it hard to tell which version you're looking at. Having a subtitle or other content above the fold that states the version would be valuable. It could likely go in as a subtitle under ros2_documentation or elsewhere near the top of the left menu.
</issue>
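Before the conf.py listing, a minimal sketch of one way to surface the distro name, assuming the sphinx-multiversion setup used below: once `smv_current_version` is known (only after `config-inited` fires), fold it into the Sphinx `project` string so it shows up as the sidebar title. This matches the approach taken in the patch at the end of this record:

```python
# Sketch only: a Sphinx extension hook; smv_current_version is provided by
# sphinx-multiversion and is empty for a plain single-version build.
def smv_rewrite_project(app, config):
    if app.config.smv_current_version != '':
        # e.g. "ROS 2 Documentation: Foxy" rendered at the top of the left menu
        app.config.project = (
            'ROS 2 Documentation: ' + app.config.smv_current_version.title()
        )


def setup(app):
    app.connect('config-inited', smv_rewrite_project)
```

Whether the version string belongs in `project`, `html_title`, or a theme-specific option is a design choice for the maintainers; the hook mechanism is the same either way.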
<code>
[start of conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 # rosindex documentation build configuration file, created by
4 # sphinx-quickstart on Tue Oct 2 16:34:57 2018.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 # If extensions (or modules to document with autodoc) are in another directory,
16 # add these directories to sys.path here. If the directory is relative to the
17 # documentation root, use os.path.abspath to make it absolute, like shown here.
18 #
19
20 import itertools
21 import os
22 import sys
23 import time
24
25 from docutils.parsers.rst import Directive
26
27 sys.path.append(os.path.abspath('./sphinx-multiversion'))
28
29
30 # The suffix(es) of source filenames.
31 # You can specify multiple suffix as a list of string:
32 #
33 # The master toctree document.
34 master_doc = 'index'
35
36 # The default role
37 default_role = 'any'
38
39 # The set of warnings to suppress.
40 suppress_warnings = ['image.nonlocal_uri']
41
42 # General information about the project.
43 project = 'ros2 documentation'
44 author = 'Open Robotics'
45 copyright = '{}, {}'.format(time.strftime('%Y'), author)
46
47 # The version info for the project you're documenting, acts as replacement for
48 # |version| and |release|, also used in various other places throughout the
49 # built documents.
50 #
51 # The short X.Y version.
52 version = u''
53 # The full version, including alpha/beta/rc tags.
54 release = u''
55
56 # Define the default role to use for links
57 default_role = 'any'
58
59 # The language for content autogenerated by Sphinx. Refer to documentation
60 # for a list of supported languages.
61 #
62 # This is also used if you do content translation via gettext catalogs.
63 # Usually you set "language" from the command line for these cases.
64 language = None
65
66 # List of patterns, relative to source directory, that match files and
67 # directories to ignore when looking for source files.
68 # This patterns also effect to html_static_path and html_extra_path
69 exclude_patterns = ['**/_*.rst']
70
71 # The name of the Pygments (syntax highlighting) style to use.
72 pygments_style = 'sphinx'
73
74 # Add any Sphinx extension module names here, as strings. They can be
75 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
76 extensions = ['sphinx.ext.intersphinx', 'sphinx_tabs.tabs', 'sphinx_multiversion', 'sphinx_rtd_theme']
77
78 # Intersphinx mapping
79
80 intersphinx_mapping = {
81 'catkin_pkg': ('http://docs.ros.org/en/independent/api/catkin_pkg/html', None),
82 'jenkins_tools': ('http://docs.ros.org/en/independent/api/jenkins_tools/html', None),
83 'rosdep': ('http://docs.ros.org/en/independent/api/rosdep/html', None),
84 'rosdistro': ('http://docs.ros.org/en/independent/api/rosdistro/html', None),
85 'rosinstall': ('http://docs.ros.org/en/independent/api/rosinstall/html', None),
86 'rospkg': ('http://docs.ros.org/en/independent/api/rospkg/html', None),
87 'vcstools': ('http://docs.ros.org/en/independent/api/vcstools/html', None)
88 }
89
90 # -- Options for HTML output ----------------------------------------------
91
92 # The theme to use for HTML and HTML Help pages. See the documentation for
93 # a list of builtin themes.
94 #
95 html_theme = 'sphinx_rtd_theme'
96 html_theme_options = {
97 'collapse_navigation': False,
98 'sticky_navigation': True,
99 'navigation_depth': -1,
100 }
101
102 html_context = {
103 'display_github': True,
104 'github_user': 'ros2',
105 'github_repo': 'ros2_documentation',
106 'github_version': 'rolling/source/', # Will be overridden when building multiversion
107 }
108
109 templates_path = [
110 "source/_templates",
111 ]
112
113 # smv_tag_whitelist = None
114
115 smv_branch_whitelist = r'^(rolling|foxy|eloquent|dashing|crystal)$'
116
117
118 smv_released_pattern = r'^refs/(heads|remotes/[^/]+)/(foxy|eloquent|dashing|crystal).*$'
119 smv_remote_whitelist = r'^(origin)$'
120 smv_latest_version = 'foxy'
121
122
123
124 html_favicon = 'favicon.ico'
125
126 # Add any paths that contain custom static files (such as style sheets) here,
127 # relative to this directory. They are copied after the builtin static files,
128 # so a file named "default.css" will overwrite the builtin "default.css".
129 # html_static_path = ['_static']
130
131 # Drop any source link suffix
132 html_sourcelink_suffix = ''
133
134 # -- Options for HTMLHelp output ------------------------------------------
135
136 # Output file base name for HTML help builder.
137 htmlhelp_basename = 'ros2_docsdoc'
138
139 html_baseurl = 'https://docs.ros.org/en'
140
141 class RedirectFrom(Directive):
142
143 has_content = True
144 template_name = 'layout.html'
145 redirections = {}
146
147 @classmethod
148 def register(cls, app):
149 app.connect('html-collect-pages', cls.generate)
150 app.add_directive('redirect-from', cls)
151 return app
152
153 @classmethod
154 def generate(cls, app):
155 from sphinx.builders.html import StandaloneHTMLBuilder
156 if not isinstance(app.builder, StandaloneHTMLBuilder):
157 return
158
159 redirect_html_fragment = """
160 <link rel="canonical" href="{base_url}/{url}" />
161 <meta http-equiv="refresh" content="0; url={url}" />
162 <script>
163 window.location.href = '{url}';
164 </script>
165 """
166 redirections = {
167 os.path.splitext(os.path.relpath(
168 document_path, app.srcdir
169 ))[0]: redirect_urls
170 for document_path, redirect_urls in cls.redirections.items()
171 }
172 redirection_conflict = next((
173 (canon_1, canon_2, redirs_1.intersection(redirs_2))
174 for (canon_1, redirs_1), (canon_2, redirs_2)
175 in itertools.combinations(redirections.items(), 2)
176 if redirs_1.intersection(redirs_2)
177 ), None)
178 if redirection_conflict:
179 canonical_url_1, canonical_url_2 = redirection_conflict[:2]
180 conflicting_redirect_urls = redirection_conflict[-1]
181 raise RuntimeError(
182 'Documents {} and {} define conflicting redirects: {}'.format(
183 canonical_url_1, canonical_url_2, conflicting_redirect_urls
184 )
185 )
186 all_canonical_urls = set(redirections.keys())
187 all_redirect_urls = {
188 redirect_url
189 for redirect_urls in redirections.values()
190 for redirect_url in redirect_urls
191 }
192 conflicting_urls = all_canonical_urls.intersection(all_redirect_urls)
193 if conflicting_urls:
194 raise RuntimeError(
195 'Some redirects conflict with existing documents: {}'.format(
196 conflicting_urls
197 )
198 )
199
200 for canonical_url, redirect_urls in redirections.items():
201 for redirect_url in redirect_urls:
202 context = {
203 'canonical_url': os.path.relpath(
204 canonical_url, redirect_url
205 ),
206 'title': os.path.basename(redirect_url),
207 'metatags': redirect_html_fragment.format(
208 base_url=app.config.html_baseurl,
209 url=app.builder.get_relative_uri(
210 redirect_url, canonical_url
211 )
212 )
213 }
214 yield (redirect_url, context, cls.template_name)
215
216 def run(self):
217 document_path = self.state.document.current_source
218 if document_path not in RedirectFrom.redirections:
219 RedirectFrom.redirections[document_path] = set()
220 RedirectFrom.redirections[document_path].update(self.content)
221 return []
222
223
224 def make_router(origin, destination):
225 def _missing_reference(app, env, node, contnode):
226 from docutils import nodes
227 from docutils.utils import relative_path
228 from sphinx.util import docname_join
229
230 doctarget = docname_join(node['refdoc'], node['reftarget'])
231 if doctarget.startswith(origin):
232 routed_doctarget = doctarget.replace(origin, destination)
233 if routed_doctarget in env.all_docs:
234 newnode = nodes.reference(
235 '', contnode.astext(), internal=True
236 )
237 newnode['refuri'] = app.builder.get_relative_uri(
238 node['refdoc'], routed_doctarget
239 )
240 return newnode
241 return _missing_reference
242
243 def smv_rewrite_baseurl(app, config):
244 # When using Sphinx multiversion, there is no way at initial configuration time
245 # to determine the distribution we are currently targeting (conf.py is read before
246 # external defines are setup, and environment variables aren't passed through to
247 # conf.py). Instead, hook into the 'config-inited' event which is late enough
248 # to rewrite the html_baseurl with the current version.
249 if app.config.smv_current_version != '':
250 app.config.html_baseurl = app.config.html_baseurl + '/' + app.config.smv_current_version
251
252 def github_link_rewrite_branch(app, pagename, templatename, context, doctree):
253 if app.config.smv_current_version != '':
254 context['github_version'] = app.config.smv_current_version + '/source/'
255
256 def setup(app):
257 app.connect('config-inited', smv_rewrite_baseurl)
258 app.connect('html-page-context', github_link_rewrite_branch)
259 RedirectFrom.register(app)
260
[end of conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conf.py b/conf.py
--- a/conf.py
+++ b/conf.py
@@ -40,7 +40,7 @@
suppress_warnings = ['image.nonlocal_uri']
# General information about the project.
-project = 'ros2 documentation'
+project = 'ROS 2 documentation'
author = 'Open Robotics'
copyright = '{}, {}'.format(time.strftime('%Y'), author)
@@ -248,6 +248,7 @@
# to rewrite the html_baseurl with the current version.
if app.config.smv_current_version != '':
app.config.html_baseurl = app.config.html_baseurl + '/' + app.config.smv_current_version
+ app.config.project = 'ROS 2 Documentation: ' + app.config.smv_current_version.title()
def github_link_rewrite_branch(app, pagename, templatename, context, doctree):
if app.config.smv_current_version != '':
| {"golden_diff": "diff --git a/conf.py b/conf.py\n--- a/conf.py\n+++ b/conf.py\n@@ -40,7 +40,7 @@\n suppress_warnings = ['image.nonlocal_uri']\n \n # General information about the project.\n-project = 'ros2 documentation'\n+project = 'ROS 2 documentation'\n author = 'Open Robotics'\n copyright = '{}, {}'.format(time.strftime('%Y'), author)\n \n@@ -248,6 +248,7 @@\n # to rewrite the html_baseurl with the current version.\n if app.config.smv_current_version != '':\n app.config.html_baseurl = app.config.html_baseurl + '/' + app.config.smv_current_version\n+ app.config.project = 'ROS 2 Documentation: ' + app.config.smv_current_version.title()\n \n def github_link_rewrite_branch(app, pagename, templatename, context, doctree):\n if app.config.smv_current_version != '':\n", "issue": "Add the ROS distro version above the fold\nWith #1150 we have the new versioning tabs which are moderately small and compact, but that makes it hard to tell what version you're looking at. Having a subtitle or other content at the top to know what version you're looking at at the top would be valuable. It could likely go in as a subtitle under ros2_documentation or elsewhere near the top of the left menu.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# rosindex documentation build configuration file, created by\n# sphinx-quickstart on Tue Oct 2 16:34:57 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n\nimport itertools\nimport os\nimport sys\nimport time\n\nfrom docutils.parsers.rst import Directive\n\nsys.path.append(os.path.abspath('./sphinx-multiversion'))\n\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# The master toctree document.\nmaster_doc = 'index'\n\n# The default role\ndefault_role = 'any'\n\n# The set of warnings to suppress.\nsuppress_warnings = ['image.nonlocal_uri']\n\n# General information about the project.\nproject = 'ros2 documentation'\nauthor = 'Open Robotics'\ncopyright = '{}, {}'.format(time.strftime('%Y'), author)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = u''\n# The full version, including alpha/beta/rc tags.\nrelease = u''\n\n# Define the default role to use for links\ndefault_role = 'any'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['**/_*.rst']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\nextensions = ['sphinx.ext.intersphinx', 'sphinx_tabs.tabs', 'sphinx_multiversion', 'sphinx_rtd_theme']\n\n# Intersphinx mapping\n\nintersphinx_mapping = {\n 'catkin_pkg': ('http://docs.ros.org/en/independent/api/catkin_pkg/html', None),\n 'jenkins_tools': ('http://docs.ros.org/en/independent/api/jenkins_tools/html', None),\n 'rosdep': ('http://docs.ros.org/en/independent/api/rosdep/html', None),\n 'rosdistro': ('http://docs.ros.org/en/independent/api/rosdistro/html', None),\n 'rosinstall': ('http://docs.ros.org/en/independent/api/rosinstall/html', None),\n 'rospkg': ('http://docs.ros.org/en/independent/api/rospkg/html', None),\n 'vcstools': ('http://docs.ros.org/en/independent/api/vcstools/html', None)\n}\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_options = {\n 'collapse_navigation': False,\n 'sticky_navigation': True,\n 'navigation_depth': -1,\n}\n\nhtml_context = {\n 'display_github': True,\n 'github_user': 'ros2',\n 'github_repo': 'ros2_documentation',\n 'github_version': 'rolling/source/', # Will be overridden when building multiversion\n}\n\ntemplates_path = [\n \"source/_templates\",\n]\n\n# smv_tag_whitelist = None\n\nsmv_branch_whitelist = r'^(rolling|foxy|eloquent|dashing|crystal)$'\n\n\nsmv_released_pattern = r'^refs/(heads|remotes/[^/]+)/(foxy|eloquent|dashing|crystal).*$'\nsmv_remote_whitelist = r'^(origin)$'\nsmv_latest_version = 'foxy'\n\n\n\nhtml_favicon = 'favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Drop any source link suffix\nhtml_sourcelink_suffix = ''\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ros2_docsdoc'\n\nhtml_baseurl = 'https://docs.ros.org/en'\n\nclass RedirectFrom(Directive):\n\n has_content = True\n template_name = 'layout.html'\n redirections = {}\n\n @classmethod\n def register(cls, app):\n app.connect('html-collect-pages', cls.generate)\n app.add_directive('redirect-from', cls)\n return app\n\n @classmethod\n def generate(cls, app):\n from sphinx.builders.html import StandaloneHTMLBuilder\n if not isinstance(app.builder, StandaloneHTMLBuilder):\n return\n\n redirect_html_fragment = \"\"\"\n <link rel=\"canonical\" href=\"{base_url}/{url}\" />\n <meta http-equiv=\"refresh\" content=\"0; url={url}\" />\n <script>\n window.location.href = '{url}';\n </script>\n \"\"\"\n redirections = {\n os.path.splitext(os.path.relpath(\n document_path, app.srcdir\n ))[0]: redirect_urls\n for document_path, redirect_urls in cls.redirections.items()\n }\n redirection_conflict = next((\n (canon_1, canon_2, redirs_1.intersection(redirs_2))\n for (canon_1, redirs_1), (canon_2, redirs_2)\n in itertools.combinations(redirections.items(), 2)\n if redirs_1.intersection(redirs_2)\n ), None)\n if redirection_conflict:\n canonical_url_1, canonical_url_2 = redirection_conflict[:2]\n conflicting_redirect_urls = redirection_conflict[-1]\n raise RuntimeError(\n 'Documents {} and {} define conflicting redirects: {}'.format(\n canonical_url_1, canonical_url_2, conflicting_redirect_urls\n )\n )\n all_canonical_urls = set(redirections.keys())\n all_redirect_urls = {\n redirect_url\n for redirect_urls in redirections.values()\n for redirect_url in redirect_urls\n }\n conflicting_urls = all_canonical_urls.intersection(all_redirect_urls)\n if conflicting_urls:\n raise RuntimeError(\n 'Some redirects conflict with existing documents: {}'.format(\n conflicting_urls\n )\n )\n\n for canonical_url, redirect_urls in redirections.items():\n for redirect_url in redirect_urls:\n context = {\n 'canonical_url': os.path.relpath(\n canonical_url, redirect_url\n ),\n 'title': os.path.basename(redirect_url),\n 'metatags': redirect_html_fragment.format(\n base_url=app.config.html_baseurl,\n url=app.builder.get_relative_uri(\n redirect_url, canonical_url\n )\n )\n }\n yield (redirect_url, context, cls.template_name)\n\n def run(self):\n document_path = self.state.document.current_source\n if document_path not in RedirectFrom.redirections:\n RedirectFrom.redirections[document_path] = set()\n RedirectFrom.redirections[document_path].update(self.content)\n return []\n\n\ndef make_router(origin, destination):\n def _missing_reference(app, env, node, contnode):\n from docutils import nodes\n from docutils.utils import relative_path\n from sphinx.util import docname_join\n\n doctarget = docname_join(node['refdoc'], node['reftarget'])\n if doctarget.startswith(origin):\n routed_doctarget = doctarget.replace(origin, destination)\n if routed_doctarget in env.all_docs:\n newnode = nodes.reference(\n '', contnode.astext(), internal=True\n )\n newnode['refuri'] = app.builder.get_relative_uri(\n node['refdoc'], routed_doctarget\n )\n return newnode\n return _missing_reference\n\ndef smv_rewrite_baseurl(app, config):\n # When using Sphinx multiversion, there is no way 
at initial configuration time\n # to determine the distribution we are currently targeting (conf.py is read before\n # external defines are setup, and environment variables aren't passed through to\n # conf.py). Instead, hook into the 'config-inited' event which is late enough\n # to rewrite the html_baseurl with the current version.\n if app.config.smv_current_version != '':\n app.config.html_baseurl = app.config.html_baseurl + '/' + app.config.smv_current_version\n\ndef github_link_rewrite_branch(app, pagename, templatename, context, doctree):\n if app.config.smv_current_version != '':\n context['github_version'] = app.config.smv_current_version + '/source/'\n\ndef setup(app):\n app.connect('config-inited', smv_rewrite_baseurl)\n app.connect('html-page-context', github_link_rewrite_branch)\n RedirectFrom.register(app)\n", "path": "conf.py"}]} | 3,405 | 197 |
gh_patches_debug_12710 | rasdani/github-patches | git_diff | autorope__donkeycar-273 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support WIFI network that does not have internet access
REF: https://github.com/wroscoe/donkey/blob/dev/donkeycar/util/web.py
The system determines its IP address by connecting a UDP socket to 8.8.8.8 (port 53).
This approach fails when the WIFI network does not have internet access.
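For illustration, a minimal sketch of a guarded lookup (the loopback fallback is an assumption, not necessarily the fix the project will adopt):

```python
import socket


def get_ip_address():
    """Best-effort LAN IP lookup that tolerates WIFI networks without internet."""
    try:
        # Connecting a UDP socket sends no traffic; it only asks the OS to pick
        # a route (and therefore a local source address) towards 8.8.8.8.
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 53))
            return s.getsockname()[0]
    except OSError:
        # No route available (e.g. a WIFI network with no internet access).
        return "127.0.0.1"
```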
</issue>
<code>
[start of donkeycar/util/web.py]
1 import socket
2
3 def get_ip_address():
4 ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1],
5 [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in
6 [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])
7 return ip
8
[end of donkeycar/util/web.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/donkeycar/util/web.py b/donkeycar/util/web.py
--- a/donkeycar/util/web.py
+++ b/donkeycar/util/web.py
@@ -1,7 +1,10 @@
import socket
def get_ip_address():
- ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1],
- [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in
- [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])
- return ip
+ try:
+ ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1],
+ [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in
+ [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])
+ return ip
+ except OSError: #occurs when cannot connect to '8.8.8.8'
+ return "127.0.0.1" #loopback
\ No newline at end of file
| {"golden_diff": "diff --git a/donkeycar/util/web.py b/donkeycar/util/web.py\n--- a/donkeycar/util/web.py\n+++ b/donkeycar/util/web.py\n@@ -1,7 +1,10 @@\n import socket\n \n def get_ip_address():\n- ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith(\"127.\")][:1],\n- [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in\n- [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])\n- return ip\n+ try:\n+ ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith(\"127.\")][:1],\n+ [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in\n+ [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])\n+ return ip\n+ except OSError: #occurs when cannot connect to '8.8.8.8' \n+ return \"127.0.0.1\" #loopback\n\\ No newline at end of file\n", "issue": "Support WIFI network that does not have internet access\nREF: https://github.com/wroscoe/donkey/blob/dev/donkeycar/util/web.py\r\n\r\nThe system determines its IP address using a ping to 8.8.8.8\r\nThis approach fails when the WIFI network does not have internet access.\r\n\r\n\r\n\n", "before_files": [{"content": "import socket\n\ndef get_ip_address():\n ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith(\"127.\")][:1],\n [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in\n [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])\n return ip\n", "path": "donkeycar/util/web.py"}]} | 716 | 311 |
gh_patches_debug_26766 | rasdani/github-patches | git_diff | modin-project__modin-1045 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Capitalization of "S" in "S3://" results in inconsistent behaviors when reading from S3 path
<!--
General questions should be asked on the mailing list [email protected].
Before submitting an issue, please fill out the following form.
-->
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux
- **Modin installed from (source or binary)**: binary
- **Modin version**: 0.7.0
- **Python version**: 3.6.8
- **Exact command to reproduce**:
<!--
You can obtain the Modin version with
python -c "import modin; print(modin.__version__)"
-->
### Describe the problem
<!-- Describe the problem clearly here. -->
Reading data from an S3 path (e.g. with `read_csv` or `read_json`) behaves differently depending on the capitalization of the "S" in the path. See the code example below.
### Source code / logs
<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->
```
import pandas as pd
import ray
import modin.pandas as mpd
filepath = "s3://my-bucket/data/traffic.json"
filepath_2 = "s3://my-bucket/data/BikeSharingDaily.csv"
filepath_3 = "S3://my-bucket/data/BikeSharingDaily.csv"
# working
df_native = pd.read_json(filepath, lines=True)
df_native_2 = pd.read_csv(filepath_2)
# not working (FileNotFoundError: [Errno 2] No such file or directory: 's3://my-bucket/data/traffic.json')
df_modin = mpd.read_json(filepath, lines=True)
# working (but it prints, defaulting to pandas implementation)
df_modin_2 = mpd.read_csv(filepath_2)
# working (no additional print)
df_modin_3 = mpd.read_csv(filepath_3)
```
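The inconsistency comes from `S3_ADDRESS_REGEX` in `modin/engines/base/io/file_reader.py` (shown below), which only matches a lowercase `s3://` scheme, so paths written as `S3://` are treated as local files by `file_open`/`file_exists`. A small illustrative sketch of a case-insensitive check (not necessarily the exact fix adopted upstream):

```python
import re

# Accept both "s3://..." and "S3://..." by matching the scheme case-insensitively.
S3_ADDRESS_REGEX = re.compile(r"s3://(.*?)/(.*)", re.IGNORECASE)

assert S3_ADDRESS_REGEX.search("s3://my-bucket/data/traffic.json") is not None
assert S3_ADDRESS_REGEX.search("S3://my-bucket/data/BikeSharingDaily.csv") is not None
```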
</issue>
<code>
[start of modin/engines/base/io/file_reader.py]
1 import os
2 import re
3
4 S3_ADDRESS_REGEX = re.compile("s3://(.*?)/(.*)")
5 NOT_IMPLEMENTED_MESSAGE = "Implement in children classes!"
6
7
8 class FileReader:
9 frame_cls = None
10 frame_partition_cls = None
11 query_compiler_cls = None
12
13 @classmethod
14 def get_path(cls, file_path):
15 if S3_ADDRESS_REGEX.search(file_path):
16 return file_path
17 else:
18 return os.path.abspath(file_path)
19
20 @classmethod
21 def file_open(cls, file_path, mode="rb", compression="infer"):
22 if isinstance(file_path, str):
23 match = S3_ADDRESS_REGEX.search(file_path)
24 if match:
25 import s3fs as S3FS
26 from botocore.exceptions import NoCredentialsError
27
28 s3fs = S3FS.S3FileSystem(anon=False)
29 try:
30 return s3fs.open(file_path)
31 except NoCredentialsError:
32 s3fs = S3FS.S3FileSystem(anon=True)
33 return s3fs.open(file_path)
34 elif compression == "gzip":
35 import gzip
36
37 return gzip.open(file_path, mode=mode)
38 elif compression == "bz2":
39 import bz2
40
41 return bz2.BZ2File(file_path, mode=mode)
42 elif compression == "xz":
43 import lzma
44
45 return lzma.LZMAFile(file_path, mode=mode)
46 elif compression == "zip":
47 import zipfile
48
49 zf = zipfile.ZipFile(file_path, mode=mode.replace("b", ""))
50 if zf.mode == "w":
51 return zf
52 elif zf.mode == "r":
53 zip_names = zf.namelist()
54 if len(zip_names) == 1:
55 f = zf.open(zip_names.pop())
56 return f
57 elif len(zip_names) == 0:
58 raise ValueError(
59 "Zero files found in ZIP file {}".format(file_path)
60 )
61 else:
62 raise ValueError(
63 "Multiple files found in ZIP file."
64 " Only one file per ZIP: {}".format(zip_names)
65 )
66
67 return open(file_path, mode=mode)
68
69 @classmethod
70 def file_size(cls, f):
71 cur_pos = f.tell()
72 f.seek(0, os.SEEK_END)
73 size = f.tell()
74 f.seek(cur_pos, os.SEEK_SET)
75 return size
76
77 @classmethod
78 def file_exists(cls, file_path):
79 if isinstance(file_path, str):
80 match = S3_ADDRESS_REGEX.search(file_path)
81 if match:
82 import s3fs as S3FS
83 from botocore.exceptions import NoCredentialsError
84
85 s3fs = S3FS.S3FileSystem(anon=False)
86 exists = False
87 try:
88 exists = s3fs.exists(file_path) or exists
89 except NoCredentialsError:
90 pass
91 s3fs = S3FS.S3FileSystem(anon=True)
92 return exists or s3fs.exists(file_path)
93 return os.path.exists(file_path)
94
95 @classmethod
96 def deploy(cls, func, args, num_return_vals):
97 raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
98
99 def parse(self, func, args, num_return_vals):
100 raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
101
102 @classmethod
103 def materialize(cls, obj_id):
104 raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
105
[end of modin/engines/base/io/file_reader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modin/engines/base/io/file_reader.py b/modin/engines/base/io/file_reader.py
--- a/modin/engines/base/io/file_reader.py
+++ b/modin/engines/base/io/file_reader.py
@@ -1,7 +1,7 @@
import os
import re
-S3_ADDRESS_REGEX = re.compile("s3://(.*?)/(.*)")
+S3_ADDRESS_REGEX = re.compile("[sS]3://(.*?)/(.*)")
NOT_IMPLEMENTED_MESSAGE = "Implement in children classes!"
@@ -21,7 +21,9 @@
def file_open(cls, file_path, mode="rb", compression="infer"):
if isinstance(file_path, str):
match = S3_ADDRESS_REGEX.search(file_path)
- if match:
+ if match is not None:
+ if file_path[0] == "S":
+ file_path = "{}{}".format("s", file_path[1:])
import s3fs as S3FS
from botocore.exceptions import NoCredentialsError
@@ -78,7 +80,9 @@
def file_exists(cls, file_path):
if isinstance(file_path, str):
match = S3_ADDRESS_REGEX.search(file_path)
- if match:
+ if match is not None:
+ if file_path[0] == "S":
+ file_path = "{}{}".format("s", file_path[1:])
import s3fs as S3FS
from botocore.exceptions import NoCredentialsError
| {"golden_diff": "diff --git a/modin/engines/base/io/file_reader.py b/modin/engines/base/io/file_reader.py\n--- a/modin/engines/base/io/file_reader.py\n+++ b/modin/engines/base/io/file_reader.py\n@@ -1,7 +1,7 @@\n import os\n import re\n \n-S3_ADDRESS_REGEX = re.compile(\"s3://(.*?)/(.*)\")\n+S3_ADDRESS_REGEX = re.compile(\"[sS]3://(.*?)/(.*)\")\n NOT_IMPLEMENTED_MESSAGE = \"Implement in children classes!\"\n \n \n@@ -21,7 +21,9 @@\n def file_open(cls, file_path, mode=\"rb\", compression=\"infer\"):\n if isinstance(file_path, str):\n match = S3_ADDRESS_REGEX.search(file_path)\n- if match:\n+ if match is not None:\n+ if file_path[0] == \"S\":\n+ file_path = \"{}{}\".format(\"s\", file_path[1:])\n import s3fs as S3FS\n from botocore.exceptions import NoCredentialsError\n \n@@ -78,7 +80,9 @@\n def file_exists(cls, file_path):\n if isinstance(file_path, str):\n match = S3_ADDRESS_REGEX.search(file_path)\n- if match:\n+ if match is not None:\n+ if file_path[0] == \"S\":\n+ file_path = \"{}{}\".format(\"s\", file_path[1:])\n import s3fs as S3FS\n from botocore.exceptions import NoCredentialsError\n", "issue": "Capitalization of \"S\" in \"S3://\" results in inconsistent behaviors when reading from S3 path\n<!--\r\nGeneral questions should be asked on the mailing list [email protected].\r\n\r\nBefore submitting an issue, please fill out the following form.\r\n-->\r\n\r\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux\r\n- **Modin installed from (source or binary)**: binary\r\n- **Modin version**: 0.7.0\r\n- **Python version**: 3.6.8\r\n- **Exact command to reproduce**: \r\n\r\n<!--\r\nYou can obtain the Modin version with\r\n\r\npython -c \"import modin; print(modin.__version__)\"\r\n-->\r\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\nReading data from a S3 path, e.g. `read_csv`, `read_json`, behaves differently based on the capitalization of \"S\" in the path. See below code example.\r\n\r\n### Source code / logs\r\n<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. 
-->\r\n```\r\nimport pandas as pd\r\nimport ray\r\nimport modin.pandas as mpd\r\n\r\nfilepath = \"s3://my-bucket/data/traffic.json\"\r\nfilepath_2 = \"s3://my-bucket/data/BikeSharingDaily.csv\"\r\nfilepath_3 = \"S3://my-bucket/data/BikeSharingDaily.csv\"\r\n\r\n# working\r\ndf_native = pd.read_json(filepath, lines=True)\r\ndf_native_2 = pd.read_csv(filepath_2)\r\n\r\n# not working (FileNotFoundError: [Errno 2] No such file or directory: 's3://my-bucket/data/traffic.json')\r\ndf_modin = mpd.read_json(filepath, lines=True)\r\n\r\n# working (but it prints, defaulting to pandas implementation)\r\ndf_modin_2 = mpd.read_csv(filepath_2)\r\n\r\n# working (no additional print)\r\ndf_modin_3 = mpd.read_csv(filepath_3)\r\n```\n", "before_files": [{"content": "import os\nimport re\n\nS3_ADDRESS_REGEX = re.compile(\"s3://(.*?)/(.*)\")\nNOT_IMPLEMENTED_MESSAGE = \"Implement in children classes!\"\n\n\nclass FileReader:\n frame_cls = None\n frame_partition_cls = None\n query_compiler_cls = None\n\n @classmethod\n def get_path(cls, file_path):\n if S3_ADDRESS_REGEX.search(file_path):\n return file_path\n else:\n return os.path.abspath(file_path)\n\n @classmethod\n def file_open(cls, file_path, mode=\"rb\", compression=\"infer\"):\n if isinstance(file_path, str):\n match = S3_ADDRESS_REGEX.search(file_path)\n if match:\n import s3fs as S3FS\n from botocore.exceptions import NoCredentialsError\n\n s3fs = S3FS.S3FileSystem(anon=False)\n try:\n return s3fs.open(file_path)\n except NoCredentialsError:\n s3fs = S3FS.S3FileSystem(anon=True)\n return s3fs.open(file_path)\n elif compression == \"gzip\":\n import gzip\n\n return gzip.open(file_path, mode=mode)\n elif compression == \"bz2\":\n import bz2\n\n return bz2.BZ2File(file_path, mode=mode)\n elif compression == \"xz\":\n import lzma\n\n return lzma.LZMAFile(file_path, mode=mode)\n elif compression == \"zip\":\n import zipfile\n\n zf = zipfile.ZipFile(file_path, mode=mode.replace(\"b\", \"\"))\n if zf.mode == \"w\":\n return zf\n elif zf.mode == \"r\":\n zip_names = zf.namelist()\n if len(zip_names) == 1:\n f = zf.open(zip_names.pop())\n return f\n elif len(zip_names) == 0:\n raise ValueError(\n \"Zero files found in ZIP file {}\".format(file_path)\n )\n else:\n raise ValueError(\n \"Multiple files found in ZIP file.\"\n \" Only one file per ZIP: {}\".format(zip_names)\n )\n\n return open(file_path, mode=mode)\n\n @classmethod\n def file_size(cls, f):\n cur_pos = f.tell()\n f.seek(0, os.SEEK_END)\n size = f.tell()\n f.seek(cur_pos, os.SEEK_SET)\n return size\n\n @classmethod\n def file_exists(cls, file_path):\n if isinstance(file_path, str):\n match = S3_ADDRESS_REGEX.search(file_path)\n if match:\n import s3fs as S3FS\n from botocore.exceptions import NoCredentialsError\n\n s3fs = S3FS.S3FileSystem(anon=False)\n exists = False\n try:\n exists = s3fs.exists(file_path) or exists\n except NoCredentialsError:\n pass\n s3fs = S3FS.S3FileSystem(anon=True)\n return exists or s3fs.exists(file_path)\n return os.path.exists(file_path)\n\n @classmethod\n def deploy(cls, func, args, num_return_vals):\n raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)\n\n def parse(self, func, args, num_return_vals):\n raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)\n\n @classmethod\n def materialize(cls, obj_id):\n raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)\n", "path": "modin/engines/base/io/file_reader.py"}]} | 1,931 | 330 |
gh_patches_debug_35876 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-127 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add testing against each PyTorch feature release
## 🚀 Feature
Add a conda setup for testing against all PyTorch feature releases such as 1.4, 1.5, 1.6, ...
### Motivation
Have better validation when some functions are not supported in old PyTorch versions.
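For example, version gates like the flags already defined in `torchmetrics/utilities/imports.py` (included below) can be combined with `pytest.mark.skipif`, so a conda matrix covering PyTorch 1.4/1.5/1.6 runs the same test suite and simply skips what an old version cannot support (a sketch; the test name and body are illustrative):

```python
from distutils.version import LooseVersion

import pytest
import torch

# Mirrors the existing flags in torchmetrics/utilities/imports.py.
_TORCH_LOWER_1_6 = LooseVersion(torch.__version__) < LooseVersion("1.6.0")


@pytest.mark.skipif(_TORCH_LOWER_1_6, reason="test requires torch>=1.6")
def test_feature_only_available_in_new_torch():
    # Skipped automatically on older PyTorch, so every conda environment in the
    # matrix can run the unchanged test suite.
    assert not _TORCH_LOWER_1_6
```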
### Pitch
<!-- A clear and concise description of what you want to happen. -->
### Alternatives
Use a CI action with a conda setup; there is probably no need to pull a large Docker image.
### Additional context
Take inspiration from the past Conda matrix in PyTorch Lightning (PL).
</issue>
<code>
[start of integrations/lightning_models.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import torch
15 from pytorch_lightning import LightningModule
16 from torch.utils.data import Dataset
17
18
19 class RandomDictStringDataset(Dataset):
20
21 def __init__(self, size, length):
22 self.len = length
23 self.data = torch.randn(length, size)
24
25 def __getitem__(self, index):
26 return {"id": str(index), "x": self.data[index]}
27
28 def __len__(self):
29 return self.len
30
31
32 class RandomDataset(Dataset):
33
34 def __init__(self, size, length):
35 self.len = length
36 self.data = torch.randn(length, size)
37
38 def __getitem__(self, index):
39 return self.data[index]
40
41 def __len__(self):
42 return self.len
43
44
45 class BoringModel(LightningModule):
46
47 def __init__(self):
48 """
49 Testing PL Module
50
51 Use as follows:
52 - subclass
53 - modify the behavior for what you want
54
55 class TestModel(BaseTestModel):
56 def training_step(...):
57 # do your own thing
58
59 or:
60
61 model = BaseTestModel()
62 model.training_epoch_end = None
63
64 """
65 super().__init__()
66 self.layer = torch.nn.Linear(32, 2)
67
68 def forward(self, x):
69 return self.layer(x)
70
71 @staticmethod
72 def loss(_, prediction):
73 # An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls
74 return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))
75
76 def step(self, x):
77 x = self(x)
78 out = torch.nn.functional.mse_loss(x, torch.ones_like(x))
79 return out
80
81 def training_step(self, batch, batch_idx):
82 output = self.layer(batch)
83 loss = self.loss(batch, output)
84 return {"loss": loss}
85
86 def training_step_end(self, training_step_outputs):
87 return training_step_outputs
88
89 def training_epoch_end(self, outputs) -> None:
90 torch.stack([x["loss"] for x in outputs]).mean()
91
92 def validation_step(self, batch, batch_idx):
93 output = self.layer(batch)
94 loss = self.loss(batch, output)
95 return {"x": loss}
96
97 def validation_epoch_end(self, outputs) -> None:
98 torch.stack([x['x'] for x in outputs]).mean()
99
100 def test_step(self, batch, batch_idx):
101 output = self.layer(batch)
102 loss = self.loss(batch, output)
103 return {"y": loss}
104
105 def test_epoch_end(self, outputs) -> None:
106 torch.stack([x["y"] for x in outputs]).mean()
107
108 def configure_optimizers(self):
109 optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
110 lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
111 return [optimizer], [lr_scheduler]
112
113 def train_dataloader(self):
114 return torch.utils.data.DataLoader(RandomDataset(32, 64))
115
116 def val_dataloader(self):
117 return torch.utils.data.DataLoader(RandomDataset(32, 64))
118
119 def test_dataloader(self):
120 return torch.utils.data.DataLoader(RandomDataset(32, 64))
121
[end of integrations/lightning_models.py]
[start of torchmetrics/utilities/imports.py]
1 from distutils.version import LooseVersion
2
3 import torch
4
5 _TORCH_LOWER_1_4 = LooseVersion(torch.__version__) < LooseVersion("1.4.0")
6 _TORCH_LOWER_1_5 = LooseVersion(torch.__version__) < LooseVersion("1.5.0")
7 _TORCH_LOWER_1_6 = LooseVersion(torch.__version__) < LooseVersion("1.6.0")
8
[end of torchmetrics/utilities/imports.py]
[start of integrations/__init__.py]
[end of integrations/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/integrations/__init__.py b/integrations/__init__.py
--- a/integrations/__init__.py
+++ b/integrations/__init__.py
@@ -0,0 +1,3 @@
+from torchmetrics.utilities.imports import _module_available
+
+_PL_AVAILABLE = _module_available('pytorch_lightning')
diff --git a/integrations/lightning_models.py b/integrations/lightning_models.py
--- a/integrations/lightning_models.py
+++ b/integrations/lightning_models.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
import torch
from pytorch_lightning import LightningModule
from torch.utils.data import Dataset
diff --git a/torchmetrics/utilities/imports.py b/torchmetrics/utilities/imports.py
--- a/torchmetrics/utilities/imports.py
+++ b/torchmetrics/utilities/imports.py
@@ -1,6 +1,64 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from distutils.version import LooseVersion
+from importlib import import_module
+from importlib.util import find_spec
import torch
+from pkg_resources import DistributionNotFound
+
+
+def _module_available(module_path: str) -> bool:
+ """
+ Check if a path is available in your environment
+
+ >>> _module_available('os')
+ True
+ >>> _module_available('bla.bla')
+ False
+ """
+ try:
+ return find_spec(module_path) is not None
+ except AttributeError:
+ # Python 3.6
+ return False
+ except ModuleNotFoundError:
+ # Python 3.7+
+ return False
+
+
+def _compare_version(package: str, op, version) -> bool:
+ """
+ Compare package version with some requirements
+
+ >>> import operator
+ >>> _compare_version("torch", operator.ge, "0.1")
+ True
+ """
+ try:
+ pkg = import_module(package)
+ except (ModuleNotFoundError, DistributionNotFound):
+ return False
+ try:
+ pkg_version = LooseVersion(pkg.__version__)
+ except AttributeError:
+ return False
+ if not (hasattr(pkg_version, "vstring") and hasattr(pkg_version, "version")):
+ # this is mock by sphinx, so it shall return True ro generate all summaries
+ return True
+ return op(pkg_version, LooseVersion(version))
+
_TORCH_LOWER_1_4 = LooseVersion(torch.__version__) < LooseVersion("1.4.0")
_TORCH_LOWER_1_5 = LooseVersion(torch.__version__) < LooseVersion("1.5.0")
| {"golden_diff": "diff --git a/integrations/__init__.py b/integrations/__init__.py\n--- a/integrations/__init__.py\n+++ b/integrations/__init__.py\n@@ -0,0 +1,3 @@\n+from torchmetrics.utilities.imports import _module_available\n+\n+_PL_AVAILABLE = _module_available('pytorch_lightning')\ndiff --git a/integrations/lightning_models.py b/integrations/lightning_models.py\n--- a/integrations/lightning_models.py\n+++ b/integrations/lightning_models.py\n@@ -11,6 +11,7 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n+\n import torch\n from pytorch_lightning import LightningModule\n from torch.utils.data import Dataset\ndiff --git a/torchmetrics/utilities/imports.py b/torchmetrics/utilities/imports.py\n--- a/torchmetrics/utilities/imports.py\n+++ b/torchmetrics/utilities/imports.py\n@@ -1,6 +1,64 @@\n+# Copyright The PyTorch Lightning team.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n from distutils.version import LooseVersion\n+from importlib import import_module\n+from importlib.util import find_spec\n \n import torch\n+from pkg_resources import DistributionNotFound\n+\n+\n+def _module_available(module_path: str) -> bool:\n+ \"\"\"\n+ Check if a path is available in your environment\n+\n+ >>> _module_available('os')\n+ True\n+ >>> _module_available('bla.bla')\n+ False\n+ \"\"\"\n+ try:\n+ return find_spec(module_path) is not None\n+ except AttributeError:\n+ # Python 3.6\n+ return False\n+ except ModuleNotFoundError:\n+ # Python 3.7+\n+ return False\n+\n+\n+def _compare_version(package: str, op, version) -> bool:\n+ \"\"\"\n+ Compare package version with some requirements\n+\n+ >>> import operator\n+ >>> _compare_version(\"torch\", operator.ge, \"0.1\")\n+ True\n+ \"\"\"\n+ try:\n+ pkg = import_module(package)\n+ except (ModuleNotFoundError, DistributionNotFound):\n+ return False\n+ try:\n+ pkg_version = LooseVersion(pkg.__version__)\n+ except AttributeError:\n+ return False\n+ if not (hasattr(pkg_version, \"vstring\") and hasattr(pkg_version, \"version\")):\n+ # this is mock by sphinx, so it shall return True ro generate all summaries\n+ return True\n+ return op(pkg_version, LooseVersion(version))\n+\n \n _TORCH_LOWER_1_4 = LooseVersion(torch.__version__) < LooseVersion(\"1.4.0\")\n _TORCH_LOWER_1_5 = LooseVersion(torch.__version__) < LooseVersion(\"1.5.0\")\n", "issue": "Add testing agains each feat PT version\n## \ud83d\ude80 Feature\r\n\r\nAdd a conda setup for testing against all PyTorch feature releases such as 1.4, 1.5, 1.6, ...\r\n\r\n### Motivation\r\n\r\nhave better validation if some functions are not supported in old PT versions\r\n\r\n### Pitch\r\n\r\n<!-- A clear and concise description of what you want to happen. 
-->\r\n\r\n### Alternatives\r\n\r\nuse CI action with conda setup, probably no need for pull large docker image\r\n\r\n### Additional context\r\n\r\ntake inspiration from past Conda matrix in PL\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport torch\nfrom pytorch_lightning import LightningModule\nfrom torch.utils.data import Dataset\n\n\nclass RandomDictStringDataset(Dataset):\n\n def __init__(self, size, length):\n self.len = length\n self.data = torch.randn(length, size)\n\n def __getitem__(self, index):\n return {\"id\": str(index), \"x\": self.data[index]}\n\n def __len__(self):\n return self.len\n\n\nclass RandomDataset(Dataset):\n\n def __init__(self, size, length):\n self.len = length\n self.data = torch.randn(length, size)\n\n def __getitem__(self, index):\n return self.data[index]\n\n def __len__(self):\n return self.len\n\n\nclass BoringModel(LightningModule):\n\n def __init__(self):\n \"\"\"\n Testing PL Module\n\n Use as follows:\n - subclass\n - modify the behavior for what you want\n\n class TestModel(BaseTestModel):\n def training_step(...):\n # do your own thing\n\n or:\n\n model = BaseTestModel()\n model.training_epoch_end = None\n\n \"\"\"\n super().__init__()\n self.layer = torch.nn.Linear(32, 2)\n\n def forward(self, x):\n return self.layer(x)\n\n @staticmethod\n def loss(_, prediction):\n # An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls\n return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))\n\n def step(self, x):\n x = self(x)\n out = torch.nn.functional.mse_loss(x, torch.ones_like(x))\n return out\n\n def training_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"loss\": loss}\n\n def training_step_end(self, training_step_outputs):\n return training_step_outputs\n\n def training_epoch_end(self, outputs) -> None:\n torch.stack([x[\"loss\"] for x in outputs]).mean()\n\n def validation_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"x\": loss}\n\n def validation_epoch_end(self, outputs) -> None:\n torch.stack([x['x'] for x in outputs]).mean()\n\n def test_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"y\": loss}\n\n def test_epoch_end(self, outputs) -> None:\n torch.stack([x[\"y\"] for x in outputs]).mean()\n\n def configure_optimizers(self):\n optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)\n return [optimizer], [lr_scheduler]\n\n def train_dataloader(self):\n return torch.utils.data.DataLoader(RandomDataset(32, 64))\n\n def val_dataloader(self):\n return torch.utils.data.DataLoader(RandomDataset(32, 64))\n\n def test_dataloader(self):\n return torch.utils.data.DataLoader(RandomDataset(32, 64))\n", "path": "integrations/lightning_models.py"}, {"content": "from 
distutils.version import LooseVersion\n\nimport torch\n\n_TORCH_LOWER_1_4 = LooseVersion(torch.__version__) < LooseVersion(\"1.4.0\")\n_TORCH_LOWER_1_5 = LooseVersion(torch.__version__) < LooseVersion(\"1.5.0\")\n_TORCH_LOWER_1_6 = LooseVersion(torch.__version__) < LooseVersion(\"1.6.0\")\n", "path": "torchmetrics/utilities/imports.py"}, {"content": "", "path": "integrations/__init__.py"}]} | 1,875 | 744 |
gh_patches_debug_26374 | rasdani/github-patches | git_diff | nextcloud__appstore-697 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
App details are not updated to the latest upload if only pre-releases or nightlies exist
Hi,
I've put a new app on the Nextcloud app repo with what seems to be a correct info.xml, but it doesn't take the description, categories, and screenshots into account; see:
https://apps.nextcloud.com/apps/carnet
Any ideas why? Thanks!
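The behaviour traces back to `AppImporter._should_update_everything` in the importer shown below: app metadata (description, categories, screenshots) is only refreshed when a stable release at least as new as every existing release is uploaded, so an app that has only pre-releases or nightlies never gets its details imported. A sketch of one possible guard (illustrative; the function is a restatement of that rule, assuming release objects expose `version` and `is_nightly` as in the model):

```python
from semantic_version import Version


def should_update_everything(releases, uploaded_version: str, is_nightly: bool) -> bool:
    """Illustrative restatement of the update rule with a pre-release escape hatch."""
    def is_stable(version: str, nightly: bool) -> bool:
        return '-' not in version and not nightly

    # If the app has no stable release yet (only pre-releases/nightlies, or no
    # releases at all), let the newest upload fill in the details.
    if not any(is_stable(r.version, r.is_nightly) for r in releases):
        return True

    # Otherwise keep the existing rule: only a new stable release updates them.
    return is_stable(uploaded_version, is_nightly) and all(
        Version(uploaded_version) >= Version(r.version) for r in releases
    )
```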
</issue>
<code>
[start of nextcloudappstore/api/v1/release/importer.py]
1 from typing import Dict, Any, Set, Tuple # type: ignore
2
3 from django.utils import timezone
4 from django.conf import settings # type: ignore
5 from semantic_version import Version # type: ignore
6
7 from nextcloudappstore.core.facades import any_match
8 from nextcloudappstore.core.models import App, Screenshot, Category, \
9 AppRelease, ShellCommand, License, Database, DatabaseDependency, \
10 PhpExtensionDependency, PhpExtension, AppAuthor
11 from nextcloudappstore.core.versioning import to_spec, to_raw_spec
12
13
14 def none_to_empty_string(value: str) -> str:
15 if value is None:
16 return ''
17 else:
18 return value.strip()
19
20
21 class Importer:
22 def __init__(self, importers: Dict[str, 'Importer'],
23 ignored_fields: Set[str]) -> None:
24 self.importers = importers
25 self.ignored_fields = ignored_fields
26
27 def import_data(self, key: str, value: Any, obj: Any) -> None:
28 obj = self._get_object(key, value, obj)
29 value, obj = self._before_import(key, value, obj)
30 for key, val in value.items():
31 if key not in self.ignored_fields:
32 self.importers[key].import_data(key, val, obj)
33 obj.save()
34
35 def _get_object(self, key: str, value: Any, obj: Any) -> Any:
36 raise NotImplementedError
37
38 def _before_import(self, key: str, value: Any, obj: Any) -> Tuple[Any,
39 Any]:
40 raise NotImplementedError
41
42
43 class ScalarImporter(Importer):
44 def __init__(self) -> None:
45 super().__init__({}, set())
46
47
48 class PhpExtensionImporter(ScalarImporter):
49 def import_data(self, key: str, value: Any, obj: Any) -> None:
50 for ext in value:
51 version_spec = to_spec(ext['php_extension']['min_version'],
52 ext['php_extension']['max_version'])
53 raw_version_spec = to_raw_spec(
54 ext['php_extension']['raw_min_version'],
55 ext['php_extension']['raw_max_version'])
56 extension, created = PhpExtension.objects.get_or_create(
57 id=ext['php_extension']['id'])
58 PhpExtensionDependency.objects.create(
59 version_spec=version_spec,
60 raw_version_spec=raw_version_spec,
61 app_release=obj, php_extension=extension,
62 )
63
64
65 class DatabaseImporter(ScalarImporter):
66 def import_data(self, key: str, value: Any, obj: Any) -> None:
67 for db in value:
68 version_spec = to_spec(db['database']['min_version'],
69 db['database']['max_version'])
70 raw_version_spec = to_raw_spec(db['database']['raw_min_version'],
71 db['database']['raw_max_version'])
72 # all dbs should be known already
73 database = Database.objects.get(id=db['database']['id'])
74 DatabaseDependency.objects.create(
75 version_spec=version_spec,
76 raw_version_spec=raw_version_spec,
77 app_release=obj, database=database,
78 )
79
80
81 class LicenseImporter(ScalarImporter):
82 def import_data(self, key: str, value: Any, obj: Any) -> None:
83 def map_models(data: Dict) -> License:
84 id = data['license']['id']
85 model, created = License.objects.get_or_create(id=id)
86 return model
87
88 obj.licenses.set(list(map(map_models, value)))
89
90
91 class ShellCommandImporter(ScalarImporter):
92 def import_data(self, key: str, value: Any, obj: Any) -> None:
93 def map_commands(data: Dict) -> ShellCommand:
94 name = data['shell_command']['name']
95 command, created = ShellCommand.objects.get_or_create(name=name)
96 return command
97
98 obj.shell_commands.set(list(map(map_commands, value)))
99
100
101 class AuthorImporter(ScalarImporter):
102 def import_data(self, key: str, value: Any, obj: Any) -> None:
103 def map_authors(data: Dict) -> AppAuthor:
104 author = data['author']
105 return AppAuthor.objects.create(
106 name=author['name'],
107 mail=none_to_empty_string(author['mail']),
108 homepage=none_to_empty_string(author['homepage'])
109 )
110
111 obj.authors.set(list(map(map_authors, value)))
112
113
114 class DefaultAttributeImporter(ScalarImporter):
115 def import_data(self, key: str, value: Any, obj: Any) -> None:
116 setattr(obj, key, value)
117
118
119 class StringAttributeImporter(ScalarImporter):
120 def import_data(self, key: str, value: Any, obj: Any) -> None:
121 setattr(obj, key, none_to_empty_string(value))
122
123
124 class ScreenshotsImporter(ScalarImporter):
125 def import_data(self, key: str, value: Any, obj: Any) -> None:
126 def create_screenshot(img: Dict[str, str]) -> Screenshot:
127 return Screenshot.objects.create(
128 url=img['url'], app=obj, ordering=img['ordering'],
129 small_thumbnail=none_to_empty_string(img['small_thumbnail']))
130
131 shots = map(lambda val: create_screenshot(val['screenshot']), value)
132 obj.screenshots.set(list(shots))
133
134
135 class CategoryImporter(ScalarImporter):
136 def import_data(self, key: str, value: Any, obj: Any) -> None:
137 def map_categories(cat: Dict) -> Category:
138 id = cat['category']['id']
139 category, created = Category.objects.get_or_create(id=id)
140 return category
141
142 obj.categories.set(list(map(map_categories, value)))
143
144
145 class L10NImporter(ScalarImporter):
146 def import_data(self, key: str, value: Any, obj: Any) -> None:
147 for lang, translation in value.items():
148 obj.set_current_language(lang)
149 setattr(obj, key, translation.strip())
150 obj.save()
151
152
153 class AppReleaseImporter(Importer):
154 def __init__(self, php_extension_importer: PhpExtensionImporter,
155 database_importer: DatabaseImporter,
156 license_importer: LicenseImporter,
157 shell_command_importer: ShellCommandImporter,
158 string_attribute_importer: StringAttributeImporter,
159 default_attribute_importer: DefaultAttributeImporter,
160 l10n_importer: L10NImporter) -> None:
161 super().__init__({
162 'php_extensions': php_extension_importer,
163 'databases': database_importer,
164 'licenses': license_importer,
165 'php_version_spec': string_attribute_importer,
166 'platform_version_spec': string_attribute_importer,
167 'raw_php_version_spec': string_attribute_importer,
168 'raw_platform_version_spec': string_attribute_importer,
169 'min_int_size': default_attribute_importer,
170 'shell_commands': shell_command_importer,
171 'signature': string_attribute_importer,
172 'download': string_attribute_importer,
173 'changelog': l10n_importer,
174 'is_nightly': default_attribute_importer,
175 }, {
176 'version',
177 'raw_version',
178 'php_min_version',
179 'php_max_version',
180 'raw_php_min_version',
181 'raw_php_max_version',
182 'platform_min_version',
183 'platform_max_version',
184 'raw_platform_min_version',
185 'raw_platform_max_version',
186 })
187
188 def _before_import(self, key: str, value: Any, obj: Any) -> Tuple[Any,
189 Any]:
190 # combine versions into specs
191 value['platform_version_spec'] = to_spec(
192 value['platform_min_version'], value['platform_max_version'])
193 value['php_version_spec'] = to_spec(value['php_min_version'],
194 value['php_max_version'])
195 value['raw_platform_version_spec'] = to_raw_spec(
196 value['raw_platform_min_version'],
197 value['raw_platform_max_version'])
198 value['raw_php_version_spec'] = to_raw_spec(
199 value['raw_php_min_version'],
200 value['raw_php_max_version'])
201 obj.licenses.clear()
202 obj.shell_commands.clear()
203 obj.licenses.clear()
204 obj.php_extensions.clear()
205 obj.databases.clear()
206 obj.signature_digest = settings.CERTIFICATE_DIGEST
207 return value, obj
208
209 def _get_object(self, key: str, value: Any, obj: Any) -> Any:
210 release, created = AppRelease.objects.get_or_create(
211 version=value['version'], app=obj, is_nightly=value['is_nightly']
212 )
213 return release
214
215
216 class AppImporter(Importer):
217 def __init__(self, release_importer: AppReleaseImporter,
218 screenshots_importer: ScreenshotsImporter,
219 attribute_importer: StringAttributeImporter,
220 l10n_importer: L10NImporter,
221 category_importer: CategoryImporter,
222 author_importer: AuthorImporter,
223 default_attribute_importer: DefaultAttributeImporter) -> None:
224 super().__init__({
225 'release': release_importer,
226 'screenshots': screenshots_importer,
227 'user_docs': attribute_importer,
228 'admin_docs': attribute_importer,
229 'website': attribute_importer,
230 'discussion': attribute_importer,
231 'developer_docs': attribute_importer,
232 'issue_tracker': attribute_importer,
233 'certificate': attribute_importer,
234 'name': l10n_importer,
235 'summary': l10n_importer,
236 'description': l10n_importer,
237 'categories': category_importer,
238 'authors': author_importer
239 }, {'id'})
240
241 def _get_object(self, key: str, value: Any, obj: Any) -> Any:
242 # only update app if newest or equal to newest release
243 app, created = App.objects.get_or_create(pk=value['id'])
244 return app
245
246 def _before_import(self, key: str, value: Any, obj: Any) -> Tuple[Any,
247 Any]:
248 obj.last_release = timezone.now()
249
250 if 'is_nightly' not in value['release']:
251 value['release']['is_nightly'] = False
252 if value['release']['is_nightly']:
253 AppRelease.objects.filter(app__id=obj.id, is_nightly=True).delete()
254
255 # only new releases update an app's data
256 if self._should_update_everything(value):
257 # clear all relations
258 obj.screenshots.all().delete()
259 obj.authors.all().delete()
260 obj.categories.clear()
261 for translation in obj.translations.all():
262 translation.delete()
263 else:
264 value = {'id': value['id'], 'release': value['release']}
265
266 return value, obj
267
268 def _should_update_everything(self, value: Any) -> bool:
269 releases = AppRelease.objects.filter(app__id=value['id'])
270
271 # if its the first release it should always set the required initial
272 # data
273 if len(releases) == 0:
274 return True
275
276 current_version = value['release']['version']
277
278 # we do not care about nightlies here so it's fine to just use a
279 # normal semver
280 uploaded_version = Version(current_version)
281 is_prerelease = '-' in current_version
282 is_nightly = value['release']['is_nightly']
283 is_stable = not is_prerelease and not is_nightly
284
285 def is_newer_version(release: Any) -> bool:
286 return uploaded_version >= Version(release.version)
287
288 # the main page should only be updated when stable and new releases
289 # are uploaded
290 is_latest_version = any_match(is_newer_version, releases)
291 return is_latest_version and is_stable
292
[end of nextcloudappstore/api/v1/release/importer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nextcloudappstore/api/v1/release/importer.py b/nextcloudappstore/api/v1/release/importer.py
--- a/nextcloudappstore/api/v1/release/importer.py
+++ b/nextcloudappstore/api/v1/release/importer.py
@@ -273,6 +273,16 @@
if len(releases) == 0:
return True
+ # if the app has no stable releases update everything
+ has_stable_release = False
+ for release in releases:
+ if '-' not in release.version and not release.is_nightly:
+ has_stable_release = True
+ break
+
+ if not has_stable_release:
+ return True
+
current_version = value['release']['version']
# we do not care about nightlies here so it's fine to just use a
@@ -282,10 +292,14 @@
is_nightly = value['release']['is_nightly']
is_stable = not is_prerelease and not is_nightly
+ # let's go out quickly
+ if not is_stable:
+ return False
+
def is_newer_version(release: Any) -> bool:
return uploaded_version >= Version(release.version)
# the main page should only be updated when stable and new releases
# are uploaded
is_latest_version = any_match(is_newer_version, releases)
- return is_latest_version and is_stable
+ return is_latest_version
| {"golden_diff": "diff --git a/nextcloudappstore/api/v1/release/importer.py b/nextcloudappstore/api/v1/release/importer.py\n--- a/nextcloudappstore/api/v1/release/importer.py\n+++ b/nextcloudappstore/api/v1/release/importer.py\n@@ -273,6 +273,16 @@\n if len(releases) == 0:\n return True\n \n+ # if the app has no stable releases update everything\n+ has_stable_release = False\n+ for release in releases:\n+ if '-' not in release.version and not release.is_nightly:\n+ has_stable_release = True\n+ break\n+\n+ if not has_stable_release:\n+ return True\n+\n current_version = value['release']['version']\n \n # we do not care about nightlies here so it's fine to just use a\n@@ -282,10 +292,14 @@\n is_nightly = value['release']['is_nightly']\n is_stable = not is_prerelease and not is_nightly\n \n+ # let's go out quickly\n+ if not is_stable:\n+ return False\n+\n def is_newer_version(release: Any) -> bool:\n return uploaded_version >= Version(release.version)\n \n # the main page should only be updated when stable and new releases\n # are uploaded\n is_latest_version = any_match(is_newer_version, releases)\n- return is_latest_version and is_stable\n+ return is_latest_version\n", "issue": "App details are not updated to the latest upload if only pre-releases or nightlies exist\nHi, \r\n\r\nI've put a new app on the nextcloud app repo, with what seems to be a correct info.xml, but it doesn't take description categories and screenshot into account, \r\n\r\nsee there\r\n\r\nhttps://apps.nextcloud.com/apps/carnet\r\n\r\nAny ideas why ? Thanks !\nApp details are not updated to the latest upload if only pre-releases or nightlies exist\nHi, \r\n\r\nI've put a new app on the nextcloud app repo, with what seems to be a correct info.xml, but it doesn't take description categories and screenshot into account, \r\n\r\nsee there\r\n\r\nhttps://apps.nextcloud.com/apps/carnet\r\n\r\nAny ideas why ? 
Thanks !\n", "before_files": [{"content": "from typing import Dict, Any, Set, Tuple # type: ignore\n\nfrom django.utils import timezone\nfrom django.conf import settings # type: ignore\nfrom semantic_version import Version # type: ignore\n\nfrom nextcloudappstore.core.facades import any_match\nfrom nextcloudappstore.core.models import App, Screenshot, Category, \\\n AppRelease, ShellCommand, License, Database, DatabaseDependency, \\\n PhpExtensionDependency, PhpExtension, AppAuthor\nfrom nextcloudappstore.core.versioning import to_spec, to_raw_spec\n\n\ndef none_to_empty_string(value: str) -> str:\n if value is None:\n return ''\n else:\n return value.strip()\n\n\nclass Importer:\n def __init__(self, importers: Dict[str, 'Importer'],\n ignored_fields: Set[str]) -> None:\n self.importers = importers\n self.ignored_fields = ignored_fields\n\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n obj = self._get_object(key, value, obj)\n value, obj = self._before_import(key, value, obj)\n for key, val in value.items():\n if key not in self.ignored_fields:\n self.importers[key].import_data(key, val, obj)\n obj.save()\n\n def _get_object(self, key: str, value: Any, obj: Any) -> Any:\n raise NotImplementedError\n\n def _before_import(self, key: str, value: Any, obj: Any) -> Tuple[Any,\n Any]:\n raise NotImplementedError\n\n\nclass ScalarImporter(Importer):\n def __init__(self) -> None:\n super().__init__({}, set())\n\n\nclass PhpExtensionImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n for ext in value:\n version_spec = to_spec(ext['php_extension']['min_version'],\n ext['php_extension']['max_version'])\n raw_version_spec = to_raw_spec(\n ext['php_extension']['raw_min_version'],\n ext['php_extension']['raw_max_version'])\n extension, created = PhpExtension.objects.get_or_create(\n id=ext['php_extension']['id'])\n PhpExtensionDependency.objects.create(\n version_spec=version_spec,\n raw_version_spec=raw_version_spec,\n app_release=obj, php_extension=extension,\n )\n\n\nclass DatabaseImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n for db in value:\n version_spec = to_spec(db['database']['min_version'],\n db['database']['max_version'])\n raw_version_spec = to_raw_spec(db['database']['raw_min_version'],\n db['database']['raw_max_version'])\n # all dbs should be known already\n database = Database.objects.get(id=db['database']['id'])\n DatabaseDependency.objects.create(\n version_spec=version_spec,\n raw_version_spec=raw_version_spec,\n app_release=obj, database=database,\n )\n\n\nclass LicenseImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n def map_models(data: Dict) -> License:\n id = data['license']['id']\n model, created = License.objects.get_or_create(id=id)\n return model\n\n obj.licenses.set(list(map(map_models, value)))\n\n\nclass ShellCommandImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n def map_commands(data: Dict) -> ShellCommand:\n name = data['shell_command']['name']\n command, created = ShellCommand.objects.get_or_create(name=name)\n return command\n\n obj.shell_commands.set(list(map(map_commands, value)))\n\n\nclass AuthorImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n def map_authors(data: Dict) -> AppAuthor:\n author = data['author']\n return AppAuthor.objects.create(\n name=author['name'],\n mail=none_to_empty_string(author['mail']),\n 
homepage=none_to_empty_string(author['homepage'])\n )\n\n obj.authors.set(list(map(map_authors, value)))\n\n\nclass DefaultAttributeImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n setattr(obj, key, value)\n\n\nclass StringAttributeImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n setattr(obj, key, none_to_empty_string(value))\n\n\nclass ScreenshotsImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n def create_screenshot(img: Dict[str, str]) -> Screenshot:\n return Screenshot.objects.create(\n url=img['url'], app=obj, ordering=img['ordering'],\n small_thumbnail=none_to_empty_string(img['small_thumbnail']))\n\n shots = map(lambda val: create_screenshot(val['screenshot']), value)\n obj.screenshots.set(list(shots))\n\n\nclass CategoryImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n def map_categories(cat: Dict) -> Category:\n id = cat['category']['id']\n category, created = Category.objects.get_or_create(id=id)\n return category\n\n obj.categories.set(list(map(map_categories, value)))\n\n\nclass L10NImporter(ScalarImporter):\n def import_data(self, key: str, value: Any, obj: Any) -> None:\n for lang, translation in value.items():\n obj.set_current_language(lang)\n setattr(obj, key, translation.strip())\n obj.save()\n\n\nclass AppReleaseImporter(Importer):\n def __init__(self, php_extension_importer: PhpExtensionImporter,\n database_importer: DatabaseImporter,\n license_importer: LicenseImporter,\n shell_command_importer: ShellCommandImporter,\n string_attribute_importer: StringAttributeImporter,\n default_attribute_importer: DefaultAttributeImporter,\n l10n_importer: L10NImporter) -> None:\n super().__init__({\n 'php_extensions': php_extension_importer,\n 'databases': database_importer,\n 'licenses': license_importer,\n 'php_version_spec': string_attribute_importer,\n 'platform_version_spec': string_attribute_importer,\n 'raw_php_version_spec': string_attribute_importer,\n 'raw_platform_version_spec': string_attribute_importer,\n 'min_int_size': default_attribute_importer,\n 'shell_commands': shell_command_importer,\n 'signature': string_attribute_importer,\n 'download': string_attribute_importer,\n 'changelog': l10n_importer,\n 'is_nightly': default_attribute_importer,\n }, {\n 'version',\n 'raw_version',\n 'php_min_version',\n 'php_max_version',\n 'raw_php_min_version',\n 'raw_php_max_version',\n 'platform_min_version',\n 'platform_max_version',\n 'raw_platform_min_version',\n 'raw_platform_max_version',\n })\n\n def _before_import(self, key: str, value: Any, obj: Any) -> Tuple[Any,\n Any]:\n # combine versions into specs\n value['platform_version_spec'] = to_spec(\n value['platform_min_version'], value['platform_max_version'])\n value['php_version_spec'] = to_spec(value['php_min_version'],\n value['php_max_version'])\n value['raw_platform_version_spec'] = to_raw_spec(\n value['raw_platform_min_version'],\n value['raw_platform_max_version'])\n value['raw_php_version_spec'] = to_raw_spec(\n value['raw_php_min_version'],\n value['raw_php_max_version'])\n obj.licenses.clear()\n obj.shell_commands.clear()\n obj.licenses.clear()\n obj.php_extensions.clear()\n obj.databases.clear()\n obj.signature_digest = settings.CERTIFICATE_DIGEST\n return value, obj\n\n def _get_object(self, key: str, value: Any, obj: Any) -> Any:\n release, created = AppRelease.objects.get_or_create(\n version=value['version'], app=obj, 
is_nightly=value['is_nightly']\n )\n return release\n\n\nclass AppImporter(Importer):\n def __init__(self, release_importer: AppReleaseImporter,\n screenshots_importer: ScreenshotsImporter,\n attribute_importer: StringAttributeImporter,\n l10n_importer: L10NImporter,\n category_importer: CategoryImporter,\n author_importer: AuthorImporter,\n default_attribute_importer: DefaultAttributeImporter) -> None:\n super().__init__({\n 'release': release_importer,\n 'screenshots': screenshots_importer,\n 'user_docs': attribute_importer,\n 'admin_docs': attribute_importer,\n 'website': attribute_importer,\n 'discussion': attribute_importer,\n 'developer_docs': attribute_importer,\n 'issue_tracker': attribute_importer,\n 'certificate': attribute_importer,\n 'name': l10n_importer,\n 'summary': l10n_importer,\n 'description': l10n_importer,\n 'categories': category_importer,\n 'authors': author_importer\n }, {'id'})\n\n def _get_object(self, key: str, value: Any, obj: Any) -> Any:\n # only update app if newest or equal to newest release\n app, created = App.objects.get_or_create(pk=value['id'])\n return app\n\n def _before_import(self, key: str, value: Any, obj: Any) -> Tuple[Any,\n Any]:\n obj.last_release = timezone.now()\n\n if 'is_nightly' not in value['release']:\n value['release']['is_nightly'] = False\n if value['release']['is_nightly']:\n AppRelease.objects.filter(app__id=obj.id, is_nightly=True).delete()\n\n # only new releases update an app's data\n if self._should_update_everything(value):\n # clear all relations\n obj.screenshots.all().delete()\n obj.authors.all().delete()\n obj.categories.clear()\n for translation in obj.translations.all():\n translation.delete()\n else:\n value = {'id': value['id'], 'release': value['release']}\n\n return value, obj\n\n def _should_update_everything(self, value: Any) -> bool:\n releases = AppRelease.objects.filter(app__id=value['id'])\n\n # if its the first release it should always set the required initial\n # data\n if len(releases) == 0:\n return True\n\n current_version = value['release']['version']\n\n # we do not care about nightlies here so it's fine to just use a\n # normal semver\n uploaded_version = Version(current_version)\n is_prerelease = '-' in current_version\n is_nightly = value['release']['is_nightly']\n is_stable = not is_prerelease and not is_nightly\n\n def is_newer_version(release: Any) -> bool:\n return uploaded_version >= Version(release.version)\n\n # the main page should only be updated when stable and new releases\n # are uploaded\n is_latest_version = any_match(is_newer_version, releases)\n return is_latest_version and is_stable\n", "path": "nextcloudappstore/api/v1/release/importer.py"}]} | 3,934 | 337 |
gh_patches_debug_58686 | rasdani/github-patches | git_diff | lightly-ai__lightly-1177 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CUDA errors in NTXentLoss with gloo backend in multi-gpu training
I was wondering if the `gloo` distributed communication package for multi-gpu training is officially supported by lightly. It seems like e.g. NTXentLoss doesn't work with `gloo` (I'm using pytorch lightning): I get CUDA errors, even when setting `gather_distributed = False`.
I can fix the issue when using `gather_distributed = False` by replacing the line
https://github.com/lightly-ai/lightly/blob/master/lightly/loss/ntx_ent_loss.py#L164
by
```python
labels = labels + batch_size * (dist.rank() if gather_distributed else 0)
```
but then of course I can't use `gather_distributed = True` anymore.
Using the `nccl` backend, everything works fine, but `nccl` is not working well on some of our machines, so unfortunately I'm stuck with `gloo`. I think using `gloo` might be too much of an exception to fix the problem for `gather_distributed = True`, but maybe it'd help to just replace the line above and mention somewhere in the documentation that `gather_distributed` is not supported for `gloo`?
</issue>
<code>
[start of lightly/loss/ntx_ent_loss.py]
1 """ Contrastive Loss Functions """
2
3 # Copyright (c) 2020. Lightly AG and its affiliates.
4 # All Rights Reserved
5
6 import torch
7 from torch import nn
8
9 from lightly.loss.memory_bank import MemoryBankModule
10 from lightly.utils import dist
11
12
13 class NTXentLoss(MemoryBankModule):
14 """Implementation of the Contrastive Cross Entropy Loss.
15
16 This implementation follows the SimCLR[0] paper. If you enable the memory
17 bank by setting the `memory_bank_size` value > 0 the loss behaves like
18 the one described in the MoCo[1] paper.
19
20 - [0] SimCLR, 2020, https://arxiv.org/abs/2002.05709
21 - [1] MoCo, 2020, https://arxiv.org/abs/1911.05722
22
23 Attributes:
24 temperature:
25 Scale logits by the inverse of the temperature.
26 memory_bank_size:
27 Number of negative samples to store in the memory bank.
28 Use 0 for SimCLR. For MoCo we typically use numbers like 4096 or 65536.
29 gather_distributed:
30 If True then negatives from all gpus are gathered before the
31 loss calculation. This flag has no effect if memory_bank_size > 0.
32
33 Raises:
34 ValueError: If abs(temperature) < 1e-8 to prevent divide by zero.
35
36 Examples:
37
38 >>> # initialize loss function without memory bank
39 >>> loss_fn = NTXentLoss(memory_bank_size=0)
40 >>>
41 >>> # generate two random transforms of images
42 >>> t0 = transforms(images)
43 >>> t1 = transforms(images)
44 >>>
45 >>> # feed through SimCLR or MoCo model
46 >>> batch = torch.cat((t0, t1), dim=0)
47 >>> output = model(batch)
48 >>>
49 >>> # calculate loss
50 >>> loss = loss_fn(output)
51
52 """
53
54 def __init__(
55 self,
56 temperature: float = 0.5,
57 memory_bank_size: int = 0,
58 gather_distributed: bool = False,
59 ):
60 super(NTXentLoss, self).__init__(size=memory_bank_size)
61 self.temperature = temperature
62 self.gather_distributed = gather_distributed
63 self.cross_entropy = nn.CrossEntropyLoss(reduction="mean")
64 self.eps = 1e-8
65
66 if abs(self.temperature) < self.eps:
67 raise ValueError(
68 "Illegal temperature: abs({}) < 1e-8".format(self.temperature)
69 )
70
71 def forward(self, out0: torch.Tensor, out1: torch.Tensor):
72 """Forward pass through Contrastive Cross-Entropy Loss.
73
74 If used with a memory bank, the samples from the memory bank are used
75 as negative examples. Otherwise, within-batch samples are used as
76 negative samples.
77
78 Args:
79 out0:
80 Output projections of the first set of transformed images.
81 Shape: (batch_size, embedding_size)
82 out1:
83 Output projections of the second set of transformed images.
84 Shape: (batch_size, embedding_size)
85
86 Returns:
87 Contrastive Cross Entropy Loss value.
88
89 """
90
91 device = out0.device
92 batch_size, _ = out0.shape
93
94 # normalize the output to length 1
95 out0 = nn.functional.normalize(out0, dim=1)
96 out1 = nn.functional.normalize(out1, dim=1)
97
98 # ask memory bank for negative samples and extend it with out1 if
99 # out1 requires a gradient, otherwise keep the same vectors in the
100 # memory bank (this allows for keeping the memory bank constant e.g.
101 # for evaluating the loss on the test set)
102 # out1: shape: (batch_size, embedding_size)
103 # negatives: shape: (embedding_size, memory_bank_size)
104 out1, negatives = super(NTXentLoss, self).forward(
105 out1, update=out0.requires_grad
106 )
107
108 # We use the cosine similarity, which is a dot product (einsum) here,
109 # as all vectors are already normalized to unit length.
110 # Notation in einsum: n = batch_size, c = embedding_size and k = memory_bank_size.
111
112 if negatives is not None:
113 # use negatives from memory bank
114 negatives = negatives.to(device)
115
116 # sim_pos is of shape (batch_size, 1) and sim_pos[i] denotes the similarity
117 # of the i-th sample in the batch to its positive pair
118 sim_pos = torch.einsum("nc,nc->n", out0, out1).unsqueeze(-1)
119
120 # sim_neg is of shape (batch_size, memory_bank_size) and sim_neg[i,j] denotes the similarity
121 # of the i-th sample to the j-th negative sample
122 sim_neg = torch.einsum("nc,ck->nk", out0, negatives)
123
124 # set the labels to the first "class", i.e. sim_pos,
125 # so that it is maximized in relation to sim_neg
126 logits = torch.cat([sim_pos, sim_neg], dim=1) / self.temperature
127 labels = torch.zeros(logits.shape[0], device=device, dtype=torch.long)
128
129 else:
130 # user other samples from batch as negatives
131 # and create diagonal mask that only selects similarities between
132 # views of the same image
133 if self.gather_distributed and dist.world_size() > 1:
134 # gather hidden representations from other processes
135 out0_large = torch.cat(dist.gather(out0), 0)
136 out1_large = torch.cat(dist.gather(out1), 0)
137 diag_mask = dist.eye_rank(batch_size, device=out0.device)
138 else:
139 # single process
140 out0_large = out0
141 out1_large = out1
142 diag_mask = torch.eye(batch_size, device=out0.device, dtype=torch.bool)
143
144 # calculate similiarities
145 # here n = batch_size and m = batch_size * world_size
146 # the resulting vectors have shape (n, m)
147 logits_00 = torch.einsum("nc,mc->nm", out0, out0_large) / self.temperature
148 logits_01 = torch.einsum("nc,mc->nm", out0, out1_large) / self.temperature
149 logits_10 = torch.einsum("nc,mc->nm", out1, out0_large) / self.temperature
150 logits_11 = torch.einsum("nc,mc->nm", out1, out1_large) / self.temperature
151
152 # remove simliarities between same views of the same image
153 logits_00 = logits_00[~diag_mask].view(batch_size, -1)
154 logits_11 = logits_11[~diag_mask].view(batch_size, -1)
155
156 # concatenate logits
157 # the logits tensor in the end has shape (2*n, 2*m-1)
158 logits_0100 = torch.cat([logits_01, logits_00], dim=1)
159 logits_1011 = torch.cat([logits_10, logits_11], dim=1)
160 logits = torch.cat([logits_0100, logits_1011], dim=0)
161
162 # create labels
163 labels = torch.arange(batch_size, device=device, dtype=torch.long)
164 labels = labels + dist.rank() * batch_size
165 labels = labels.repeat(2)
166
167 loss = self.cross_entropy(logits, labels)
168
169 return loss
170
[end of lightly/loss/ntx_ent_loss.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lightly/loss/ntx_ent_loss.py b/lightly/loss/ntx_ent_loss.py
--- a/lightly/loss/ntx_ent_loss.py
+++ b/lightly/loss/ntx_ent_loss.py
@@ -161,7 +161,8 @@
# create labels
labels = torch.arange(batch_size, device=device, dtype=torch.long)
- labels = labels + dist.rank() * batch_size
+ if self.gather_distributed:
+ labels = labels + dist.rank() * batch_size
labels = labels.repeat(2)
loss = self.cross_entropy(logits, labels)
| {"golden_diff": "diff --git a/lightly/loss/ntx_ent_loss.py b/lightly/loss/ntx_ent_loss.py\n--- a/lightly/loss/ntx_ent_loss.py\n+++ b/lightly/loss/ntx_ent_loss.py\n@@ -161,7 +161,8 @@\n \n # create labels\n labels = torch.arange(batch_size, device=device, dtype=torch.long)\n- labels = labels + dist.rank() * batch_size\n+ if self.gather_distributed:\n+ labels = labels + dist.rank() * batch_size\n labels = labels.repeat(2)\n \n loss = self.cross_entropy(logits, labels)\n", "issue": "CUDA errors in NTXentLoss with gloo backend in multi-gpu training \nI was wondering if the `gloo` distributed communication package for multi-gpu training is officially supported by lightly. It seems like e.g. NTXentLoss doesn't work with `gloo` (I'm using pytorch lightning): I get CUDA errors, even when setting `gather_distributed = False`.\r\nI can fix the issue when using `gather_distributed = False` by replacing the line\r\nhttps://github.com/lightly-ai/lightly/blob/master/lightly/loss/ntx_ent_loss.py#L164\r\nby\r\n```python\r\nlabels = labels + batch_size * (dist.rank() if gather_distributed else 0)\r\n```\r\nbut then of course I can't use `gather_distributed = True` anymore. \r\n\r\nUsing the `nccl` backend, everything works fine, but `nccl` is not working well on some of our machines, so unfortunately I'm stuck with `gloo`. I think using `gloo` might be too much of an exception to fix the problem for `gather_distributed = True`, but maybe it'd help to just replace the line above and mention somewhere in the documentation that `gather_distributed` is not supported for `gloo`?\n", "before_files": [{"content": "\"\"\" Contrastive Loss Functions \"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport torch\nfrom torch import nn\n\nfrom lightly.loss.memory_bank import MemoryBankModule\nfrom lightly.utils import dist\n\n\nclass NTXentLoss(MemoryBankModule):\n \"\"\"Implementation of the Contrastive Cross Entropy Loss.\n\n This implementation follows the SimCLR[0] paper. If you enable the memory\n bank by setting the `memory_bank_size` value > 0 the loss behaves like\n the one described in the MoCo[1] paper.\n\n - [0] SimCLR, 2020, https://arxiv.org/abs/2002.05709\n - [1] MoCo, 2020, https://arxiv.org/abs/1911.05722\n\n Attributes:\n temperature:\n Scale logits by the inverse of the temperature.\n memory_bank_size:\n Number of negative samples to store in the memory bank.\n Use 0 for SimCLR. For MoCo we typically use numbers like 4096 or 65536.\n gather_distributed:\n If True then negatives from all gpus are gathered before the\n loss calculation. 
This flag has no effect if memory_bank_size > 0.\n\n Raises:\n ValueError: If abs(temperature) < 1e-8 to prevent divide by zero.\n\n Examples:\n\n >>> # initialize loss function without memory bank\n >>> loss_fn = NTXentLoss(memory_bank_size=0)\n >>>\n >>> # generate two random transforms of images\n >>> t0 = transforms(images)\n >>> t1 = transforms(images)\n >>>\n >>> # feed through SimCLR or MoCo model\n >>> batch = torch.cat((t0, t1), dim=0)\n >>> output = model(batch)\n >>>\n >>> # calculate loss\n >>> loss = loss_fn(output)\n\n \"\"\"\n\n def __init__(\n self,\n temperature: float = 0.5,\n memory_bank_size: int = 0,\n gather_distributed: bool = False,\n ):\n super(NTXentLoss, self).__init__(size=memory_bank_size)\n self.temperature = temperature\n self.gather_distributed = gather_distributed\n self.cross_entropy = nn.CrossEntropyLoss(reduction=\"mean\")\n self.eps = 1e-8\n\n if abs(self.temperature) < self.eps:\n raise ValueError(\n \"Illegal temperature: abs({}) < 1e-8\".format(self.temperature)\n )\n\n def forward(self, out0: torch.Tensor, out1: torch.Tensor):\n \"\"\"Forward pass through Contrastive Cross-Entropy Loss.\n\n If used with a memory bank, the samples from the memory bank are used\n as negative examples. Otherwise, within-batch samples are used as\n negative samples.\n\n Args:\n out0:\n Output projections of the first set of transformed images.\n Shape: (batch_size, embedding_size)\n out1:\n Output projections of the second set of transformed images.\n Shape: (batch_size, embedding_size)\n\n Returns:\n Contrastive Cross Entropy Loss value.\n\n \"\"\"\n\n device = out0.device\n batch_size, _ = out0.shape\n\n # normalize the output to length 1\n out0 = nn.functional.normalize(out0, dim=1)\n out1 = nn.functional.normalize(out1, dim=1)\n\n # ask memory bank for negative samples and extend it with out1 if\n # out1 requires a gradient, otherwise keep the same vectors in the\n # memory bank (this allows for keeping the memory bank constant e.g.\n # for evaluating the loss on the test set)\n # out1: shape: (batch_size, embedding_size)\n # negatives: shape: (embedding_size, memory_bank_size)\n out1, negatives = super(NTXentLoss, self).forward(\n out1, update=out0.requires_grad\n )\n\n # We use the cosine similarity, which is a dot product (einsum) here,\n # as all vectors are already normalized to unit length.\n # Notation in einsum: n = batch_size, c = embedding_size and k = memory_bank_size.\n\n if negatives is not None:\n # use negatives from memory bank\n negatives = negatives.to(device)\n\n # sim_pos is of shape (batch_size, 1) and sim_pos[i] denotes the similarity\n # of the i-th sample in the batch to its positive pair\n sim_pos = torch.einsum(\"nc,nc->n\", out0, out1).unsqueeze(-1)\n\n # sim_neg is of shape (batch_size, memory_bank_size) and sim_neg[i,j] denotes the similarity\n # of the i-th sample to the j-th negative sample\n sim_neg = torch.einsum(\"nc,ck->nk\", out0, negatives)\n\n # set the labels to the first \"class\", i.e. 
sim_pos,\n # so that it is maximized in relation to sim_neg\n logits = torch.cat([sim_pos, sim_neg], dim=1) / self.temperature\n labels = torch.zeros(logits.shape[0], device=device, dtype=torch.long)\n\n else:\n # user other samples from batch as negatives\n # and create diagonal mask that only selects similarities between\n # views of the same image\n if self.gather_distributed and dist.world_size() > 1:\n # gather hidden representations from other processes\n out0_large = torch.cat(dist.gather(out0), 0)\n out1_large = torch.cat(dist.gather(out1), 0)\n diag_mask = dist.eye_rank(batch_size, device=out0.device)\n else:\n # single process\n out0_large = out0\n out1_large = out1\n diag_mask = torch.eye(batch_size, device=out0.device, dtype=torch.bool)\n\n # calculate similiarities\n # here n = batch_size and m = batch_size * world_size\n # the resulting vectors have shape (n, m)\n logits_00 = torch.einsum(\"nc,mc->nm\", out0, out0_large) / self.temperature\n logits_01 = torch.einsum(\"nc,mc->nm\", out0, out1_large) / self.temperature\n logits_10 = torch.einsum(\"nc,mc->nm\", out1, out0_large) / self.temperature\n logits_11 = torch.einsum(\"nc,mc->nm\", out1, out1_large) / self.temperature\n\n # remove simliarities between same views of the same image\n logits_00 = logits_00[~diag_mask].view(batch_size, -1)\n logits_11 = logits_11[~diag_mask].view(batch_size, -1)\n\n # concatenate logits\n # the logits tensor in the end has shape (2*n, 2*m-1)\n logits_0100 = torch.cat([logits_01, logits_00], dim=1)\n logits_1011 = torch.cat([logits_10, logits_11], dim=1)\n logits = torch.cat([logits_0100, logits_1011], dim=0)\n\n # create labels\n labels = torch.arange(batch_size, device=device, dtype=torch.long)\n labels = labels + dist.rank() * batch_size\n labels = labels.repeat(2)\n\n loss = self.cross_entropy(logits, labels)\n\n return loss\n", "path": "lightly/loss/ntx_ent_loss.py"}]} | 2,886 | 141 |
gh_patches_debug_3999 | rasdani/github-patches | git_diff | mne-tools__mne-bids-pipeline-743 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG: Too many JSON files error
https://mne.discourse.group/t/mne-bids-pipeline-too-many-json-files-error/6436
> This is a MEG dataset which I converted to BIDS format using mne-bids. The json files which appear to cause the trouble are [_beh.json] sidecar files for the behavioural data I saved to the beh/ subfolder like so:
>
> |MNE-BIDS_data/
> |— README
> |— dataset_description.json
> |— participants.json
> |— participants.tsv
> |— sub-01/
> |------ sub-01_scans.tsv
> |------ beh/
> |--------- sub-01_task-main_run-01_beh.json
> |--------- sub-01_task-main_run-01_beh.tsv
</issue>
<code>
[start of mne_bids_pipeline/steps/init/_02_find_empty_room.py]
1 """Find empty-room data matches."""
2
3 from types import SimpleNamespace
4 from typing import Dict, Optional
5
6 from mne.utils import _pl
7 from mne_bids import BIDSPath
8
9 from ..._config_utils import (
10 get_datatype,
11 get_task,
12 get_sessions,
13 get_subjects,
14 get_runs,
15 )
16 from ..._io import _empty_room_match_path, _write_json
17 from ..._logging import gen_log_kwargs, logger
18 from ..._run import _update_for_splits, failsafe_run, save_logs
19
20
21 def get_input_fnames_find_empty_room(
22 *, subject: str, session: Optional[str], run: Optional[str], cfg: SimpleNamespace
23 ) -> Dict[str, BIDSPath]:
24 """Get paths of files required by filter_data function."""
25 bids_path_in = BIDSPath(
26 subject=subject,
27 run=run,
28 session=session,
29 task=cfg.task,
30 acquisition=cfg.acq,
31 recording=cfg.rec,
32 space=cfg.space,
33 datatype=cfg.datatype,
34 processing=cfg.proc,
35 root=cfg.bids_root,
36 check=False,
37 )
38 in_files: Dict[str, BIDSPath] = dict()
39 in_files[f"raw_run-{run}"] = bids_path_in
40 _update_for_splits(in_files, f"raw_run-{run}", single=True)
41 if hasattr(bids_path_in, "find_matching_sidecar"):
42 in_files["sidecar"] = (
43 bids_path_in.copy()
44 .update(datatype=None)
45 .find_matching_sidecar(extension=".json")
46 )
47 try:
48 fname = bids_path_in.find_empty_room(use_sidecar_only=True)
49 except Exception:
50 fname = None
51 if fname is None and hasattr(bids_path_in, "get_empty_room_candidates"):
52 for ci, path in enumerate(bids_path_in.get_empty_room_candidates()):
53 in_files[f"empty_room_candidate_{ci}"] = path
54 return in_files
55
56
57 @failsafe_run(
58 get_input_fnames=get_input_fnames_find_empty_room,
59 )
60 def find_empty_room(
61 *,
62 cfg: SimpleNamespace,
63 exec_params: SimpleNamespace,
64 subject: str,
65 session: Optional[str],
66 run: Optional[str],
67 in_files: Dict[str, BIDSPath],
68 ) -> Dict[str, BIDSPath]:
69 raw_path = in_files.pop(f"raw_run-{run}")
70 in_files.pop("sidecar", None)
71 try:
72 fname = raw_path.find_empty_room(use_sidecar_only=True)
73 except (FileNotFoundError, AssertionError, ValueError):
74 fname = ""
75 if fname is None:
76 # sidecar is very fast and checking all can be slow (seconds), so only
77 # log when actually looking through files
78 ending = "empty-room files"
79 if len(in_files): # MNE-BIDS < 0.12 missing get_empty_room_candidates
80 ending = f"{len(in_files)} empty-room file{_pl(in_files)}"
81 msg = f"Nearest-date matching {ending}"
82 logger.info(**gen_log_kwargs(message=msg))
83 try:
84 fname = raw_path.find_empty_room()
85 except (
86 ValueError, # non-MEG data
87 AssertionError, # MNE-BIDS check assert exists()
88 FileNotFoundError,
89 ): # MNE-BIDS PR-1080 exists()
90 fname = None
91 in_files.clear() # MNE-BIDS find_empty_room should have looked at all
92 elif fname == "":
93 fname = None # not downloaded, or EEG data
94 elif not fname.fpath.exists():
95 fname = None # path found by sidecar but does not exist
96 out_files = dict()
97 out_files["empty_room_match"] = _empty_room_match_path(raw_path, cfg)
98 _write_json(out_files["empty_room_match"], dict(fname=fname))
99 return out_files
100
101
102 def get_config(
103 *,
104 config,
105 ) -> SimpleNamespace:
106 cfg = SimpleNamespace(
107 proc=config.proc,
108 task=get_task(config),
109 datatype=get_datatype(config),
110 acq=config.acq,
111 rec=config.rec,
112 space=config.space,
113 bids_root=config.bids_root,
114 deriv_root=config.deriv_root,
115 )
116 return cfg
117
118
119 def main(*, config) -> None:
120 """Run find_empty_room."""
121 if not config.process_empty_room:
122 msg = "Skipping, process_empty_room is set to False …"
123 logger.info(**gen_log_kwargs(message=msg, emoji="skip"))
124 return
125 if get_datatype(config) != "meg":
126 msg = "Skipping, empty-room data only relevant for MEG …"
127 logger.info(**gen_log_kwargs(message=msg, emoji="skip"))
128 return
129 # This will be I/O bound if the sidecar is not complete, so let's not run
130 # in parallel.
131 logs = list()
132 for subject in get_subjects(config):
133 if config.use_maxwell_filter:
134 run = config.mf_reference_run
135 else:
136 run = get_runs(config=config, subject=subject)[0]
137 logs.append(
138 find_empty_room(
139 cfg=get_config(
140 config=config,
141 ),
142 exec_params=config.exec_params,
143 subject=subject,
144 session=get_sessions(config)[0],
145 run=run,
146 )
147 )
148 save_logs(config=config, logs=logs)
149
[end of mne_bids_pipeline/steps/init/_02_find_empty_room.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mne_bids_pipeline/steps/init/_02_find_empty_room.py b/mne_bids_pipeline/steps/init/_02_find_empty_room.py
--- a/mne_bids_pipeline/steps/init/_02_find_empty_room.py
+++ b/mne_bids_pipeline/steps/init/_02_find_empty_room.py
@@ -41,7 +41,7 @@
if hasattr(bids_path_in, "find_matching_sidecar"):
in_files["sidecar"] = (
bids_path_in.copy()
- .update(datatype=None)
+ .update(datatype=None, suffix="meg")
.find_matching_sidecar(extension=".json")
)
try:
| {"golden_diff": "diff --git a/mne_bids_pipeline/steps/init/_02_find_empty_room.py b/mne_bids_pipeline/steps/init/_02_find_empty_room.py\n--- a/mne_bids_pipeline/steps/init/_02_find_empty_room.py\n+++ b/mne_bids_pipeline/steps/init/_02_find_empty_room.py\n@@ -41,7 +41,7 @@\n if hasattr(bids_path_in, \"find_matching_sidecar\"):\n in_files[\"sidecar\"] = (\n bids_path_in.copy()\n- .update(datatype=None)\n+ .update(datatype=None, suffix=\"meg\")\n .find_matching_sidecar(extension=\".json\")\n )\n try:\n", "issue": "BUG: Too many JSON files error\nhttps://mne.discourse.group/t/mne-bids-pipeline-too-many-json-files-error/6436\r\n\r\n> This is a MEG dataset which I converted to BIDS format using mne-bids. The json files which appear to cause the trouble are [_beh.json] sidecar files for the behavioural data I saved to the beh/ subfolder like so:\r\n>\r\n> |MNE-BIDS_data/\r\n> |\u2014 README\r\n> |\u2014 dataset_description.json\r\n> |\u2014 participants.json\r\n> |\u2014 participants.tsv\r\n> |\u2014 sub-01/\r\n> |------ sub-01_scans.tsv\r\n> |------ beh/\r\n> |--------- sub-01_task-main_run-01_beh.json\r\n> |--------- sub-01_task-main_run-01_beh.tsv\n", "before_files": [{"content": "\"\"\"Find empty-room data matches.\"\"\"\n\nfrom types import SimpleNamespace\nfrom typing import Dict, Optional\n\nfrom mne.utils import _pl\nfrom mne_bids import BIDSPath\n\nfrom ..._config_utils import (\n get_datatype,\n get_task,\n get_sessions,\n get_subjects,\n get_runs,\n)\nfrom ..._io import _empty_room_match_path, _write_json\nfrom ..._logging import gen_log_kwargs, logger\nfrom ..._run import _update_for_splits, failsafe_run, save_logs\n\n\ndef get_input_fnames_find_empty_room(\n *, subject: str, session: Optional[str], run: Optional[str], cfg: SimpleNamespace\n) -> Dict[str, BIDSPath]:\n \"\"\"Get paths of files required by filter_data function.\"\"\"\n bids_path_in = BIDSPath(\n subject=subject,\n run=run,\n session=session,\n task=cfg.task,\n acquisition=cfg.acq,\n recording=cfg.rec,\n space=cfg.space,\n datatype=cfg.datatype,\n processing=cfg.proc,\n root=cfg.bids_root,\n check=False,\n )\n in_files: Dict[str, BIDSPath] = dict()\n in_files[f\"raw_run-{run}\"] = bids_path_in\n _update_for_splits(in_files, f\"raw_run-{run}\", single=True)\n if hasattr(bids_path_in, \"find_matching_sidecar\"):\n in_files[\"sidecar\"] = (\n bids_path_in.copy()\n .update(datatype=None)\n .find_matching_sidecar(extension=\".json\")\n )\n try:\n fname = bids_path_in.find_empty_room(use_sidecar_only=True)\n except Exception:\n fname = None\n if fname is None and hasattr(bids_path_in, \"get_empty_room_candidates\"):\n for ci, path in enumerate(bids_path_in.get_empty_room_candidates()):\n in_files[f\"empty_room_candidate_{ci}\"] = path\n return in_files\n\n\n@failsafe_run(\n get_input_fnames=get_input_fnames_find_empty_room,\n)\ndef find_empty_room(\n *,\n cfg: SimpleNamespace,\n exec_params: SimpleNamespace,\n subject: str,\n session: Optional[str],\n run: Optional[str],\n in_files: Dict[str, BIDSPath],\n) -> Dict[str, BIDSPath]:\n raw_path = in_files.pop(f\"raw_run-{run}\")\n in_files.pop(\"sidecar\", None)\n try:\n fname = raw_path.find_empty_room(use_sidecar_only=True)\n except (FileNotFoundError, AssertionError, ValueError):\n fname = \"\"\n if fname is None:\n # sidecar is very fast and checking all can be slow (seconds), so only\n # log when actually looking through files\n ending = \"empty-room files\"\n if len(in_files): # MNE-BIDS < 0.12 missing get_empty_room_candidates\n ending = f\"{len(in_files)} empty-room 
file{_pl(in_files)}\"\n msg = f\"Nearest-date matching {ending}\"\n logger.info(**gen_log_kwargs(message=msg))\n try:\n fname = raw_path.find_empty_room()\n except (\n ValueError, # non-MEG data\n AssertionError, # MNE-BIDS check assert exists()\n FileNotFoundError,\n ): # MNE-BIDS PR-1080 exists()\n fname = None\n in_files.clear() # MNE-BIDS find_empty_room should have looked at all\n elif fname == \"\":\n fname = None # not downloaded, or EEG data\n elif not fname.fpath.exists():\n fname = None # path found by sidecar but does not exist\n out_files = dict()\n out_files[\"empty_room_match\"] = _empty_room_match_path(raw_path, cfg)\n _write_json(out_files[\"empty_room_match\"], dict(fname=fname))\n return out_files\n\n\ndef get_config(\n *,\n config,\n) -> SimpleNamespace:\n cfg = SimpleNamespace(\n proc=config.proc,\n task=get_task(config),\n datatype=get_datatype(config),\n acq=config.acq,\n rec=config.rec,\n space=config.space,\n bids_root=config.bids_root,\n deriv_root=config.deriv_root,\n )\n return cfg\n\n\ndef main(*, config) -> None:\n \"\"\"Run find_empty_room.\"\"\"\n if not config.process_empty_room:\n msg = \"Skipping, process_empty_room is set to False \u2026\"\n logger.info(**gen_log_kwargs(message=msg, emoji=\"skip\"))\n return\n if get_datatype(config) != \"meg\":\n msg = \"Skipping, empty-room data only relevant for MEG \u2026\"\n logger.info(**gen_log_kwargs(message=msg, emoji=\"skip\"))\n return\n # This will be I/O bound if the sidecar is not complete, so let's not run\n # in parallel.\n logs = list()\n for subject in get_subjects(config):\n if config.use_maxwell_filter:\n run = config.mf_reference_run\n else:\n run = get_runs(config=config, subject=subject)[0]\n logs.append(\n find_empty_room(\n cfg=get_config(\n config=config,\n ),\n exec_params=config.exec_params,\n subject=subject,\n session=get_sessions(config)[0],\n run=run,\n )\n )\n save_logs(config=config, logs=logs)\n", "path": "mne_bids_pipeline/steps/init/_02_find_empty_room.py"}]} | 2,210 | 148 |
gh_patches_debug_14606 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-5301 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Version comparison has lost its sidebar
## Steps to reproduce 
- take a tutorial with several edits
- click on "compare versions"
- select two versions
**Observed behavior**
the sidebar does not appear on the version comparison page
**Desired behavior**
The sidebar is there.
</issue>
<code>
[start of zds/utils/templatetags/htmldiff.py]
1 from difflib import HtmlDiff
2 from django import template
3 from django.utils.html import format_html
4 from django.utils.safestring import mark_safe
5 from django.utils.translation import ugettext_lazy as _
6
7
8 register = template.Library()
9
10
11 @register.simple_tag
12 def htmldiff(string1, string2):
13
14 try:
15 txt1 = string1.decode('utf-8').splitlines()
16 # string1 is an empty SafeText from template
17 except AttributeError:
18 txt1 = string1.splitlines()
19
20 try:
21 txt2 = string2.decode('utf-8').splitlines()
22 except AttributeError:
23 txt2 = string2.splitlines()
24
25 diff = HtmlDiff(tabsize=4, wrapcolumn=80)
26 result = diff.make_table(txt1, txt2, context=True, numlines=2)
27
28 if 'No Differences Found' in result:
29 return format_html('<p>{}</p>', _('Pas de changements.'))
30 else:
31 return format_html('<div class="diff_delta">{}</div>', mark_safe(result))
32
[end of zds/utils/templatetags/htmldiff.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zds/utils/templatetags/htmldiff.py b/zds/utils/templatetags/htmldiff.py
--- a/zds/utils/templatetags/htmldiff.py
+++ b/zds/utils/templatetags/htmldiff.py
@@ -22,10 +22,12 @@
except AttributeError:
txt2 = string2.splitlines()
- diff = HtmlDiff(tabsize=4, wrapcolumn=80)
+ diff = HtmlDiff(tabsize=4)
result = diff.make_table(txt1, txt2, context=True, numlines=2)
if 'No Differences Found' in result:
return format_html('<p>{}</p>', _('Pas de changements.'))
else:
- return format_html('<div class="diff_delta">{}</div>', mark_safe(result))
+ # the diff.make_table() replaces all spaces by non-breakable ones, which prevent line breaks:
+        r = mark_safe(result.replace('<td nowrap="nowrap">', '<td>').replace('&nbsp;', ' '))
+ return format_html('<div class="diff_delta">{}</div>', r)
| {"golden_diff": "diff --git a/zds/utils/templatetags/htmldiff.py b/zds/utils/templatetags/htmldiff.py\n--- a/zds/utils/templatetags/htmldiff.py\n+++ b/zds/utils/templatetags/htmldiff.py\n@@ -22,10 +22,12 @@\n except AttributeError:\n txt2 = string2.splitlines()\n \n- diff = HtmlDiff(tabsize=4, wrapcolumn=80)\n+ diff = HtmlDiff(tabsize=4)\n result = diff.make_table(txt1, txt2, context=True, numlines=2)\n \n if 'No Differences Found' in result:\n return format_html('<p>{}</p>', _('Pas de changements.'))\n else:\n- return format_html('<div class=\"diff_delta\">{}</div>', mark_safe(result))\n+ # the diff.make_table() replaces all spaces by non-breakable ones, which prevent line breaks:\n+ r = mark_safe(result.replace('<td nowrap=\"nowrap\">', '<td>').replace(' ', ' '))\n+ return format_html('<div class=\"diff_delta\">{}</div>', r)\n", "issue": "La comparaison des versions a perdu sa sidebar\n## \u00c9tapes pour reproduire \r\n\r\n- prenez un tuto avec plusieurs \u00e9ditions\r\n- cliquer sur \"comparer les versions\"\r\n- s\u00e9lectionner deux versions\r\n\r\n**Comportement observ\u00e9**\r\n\r\nla sidebar n'appara\u00eet pas dans la page de comparaison des versions\r\n\r\n**Comportement d\u00e9sir\u00e9**\r\n\r\nLa sidebar est l\u00e0.\n", "before_files": [{"content": "from difflib import HtmlDiff\nfrom django import template\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\n\n\nregister = template.Library()\n\n\[email protected]_tag\ndef htmldiff(string1, string2):\n\n try:\n txt1 = string1.decode('utf-8').splitlines()\n # string1 is an empty SafeText from template\n except AttributeError:\n txt1 = string1.splitlines()\n\n try:\n txt2 = string2.decode('utf-8').splitlines()\n except AttributeError:\n txt2 = string2.splitlines()\n\n diff = HtmlDiff(tabsize=4, wrapcolumn=80)\n result = diff.make_table(txt1, txt2, context=True, numlines=2)\n\n if 'No Differences Found' in result:\n return format_html('<p>{}</p>', _('Pas de changements.'))\n else:\n return format_html('<div class=\"diff_delta\">{}</div>', mark_safe(result))\n", "path": "zds/utils/templatetags/htmldiff.py"}]} | 918 | 252 |
gh_patches_debug_19627 | rasdani/github-patches | git_diff | beeware__toga-1910 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Android system file browser cannot be invoked
### Describe the bug
I tried to invoke Android's system file browser UI (SAF) but got the following error message:
`No appropriate Activity found to handle this intent.`
### Steps to reproduce
Try following code:
```
from android.content import Intent
intent = Intent(Intent.ACTION_OPEN_DOCUMENT)
intent.addCategory(Intent.CATEGORY_OPENABLE)
intent.setType("*/*")
result = await self.app._impl.intent_result(intent)
```
### Expected behavior
The system file browser should open because under my Android 13, it is definitely there.
With the following code, the file browser is shown, but the app doesn't wait for the result:
```
intent = Intent(Intent.ACTION_OPEN_DOCUMENT)
intent.addCategory(Intent.CATEGORY_OPENABLE)
intent.setType("*/*")
result = await self.app._impl.native.startActivityForResult(intent, 1234)
```
I get the error back `object NoneType can't be used in 'await' expression`
### Screenshots
_No response_
### Environment
- Operating System: Android 13
- Python version: 3.11
- Software versions:
- Toga: 0.3.1dev93
### Logs
```
```
### Additional context
The same problem exists with Intent.ACTION_CREATE_DOCUMENT and this Intent also can be invoked directly with startActivityForResult
The problem stems from this code in app.py:
```
if not intent.resolveActivity(self.native.getPackageManager()):
raise RuntimeError("No appropriate Activity found to handle this intent.")
```
This may be a good way to find Activities of standard apps, but it seems to fail in the case of internal Activities like the system file browser.
It might be better to remove this code and instead catch and return exceptions that occur when invoking startActivityForResult unguarded
</issue>
<code>
[start of android/src/toga_android/app.py]
1 import asyncio
2
3 from rubicon.java import android_events
4
5 import toga
6 from toga.command import Group
7
8 from .libs.activity import IPythonApp, MainActivity
9 from .libs.android.graphics import Drawable
10 from .libs.android.view import Menu, MenuItem
11 from .window import Window
12
13 # `MainWindow` is defined here in `app.py`, not `window.py`, to mollify the test suite.
14 MainWindow = Window
15
16
17 class TogaApp(IPythonApp):
18 last_intent_requestcode = (
19 -1
20 ) # always increment before using it for invoking new Intents
21 running_intents = {} # dictionary for currently running Intents
22 menuitem_mapping = {} # dictionary for mapping menuitems to commands
23
24 def __init__(self, app):
25 super().__init__()
26 self._impl = app
27 MainActivity.setPythonApp(self)
28 print("Python app launched & stored in Android Activity class")
29
30 def onCreate(self):
31 print("Toga app: onCreate")
32
33 def onStart(self):
34 print("Toga app: onStart")
35
36 def onResume(self):
37 print("Toga app: onResume")
38
39 def onPause(self):
40 print("Toga app: onPause")
41
42 def onStop(self):
43 print("Toga app: onStop")
44
45 def onDestroy(self):
46 print("Toga app: onDestroy")
47
48 def onRestart(self):
49 print("Toga app: onRestart")
50
51 def onActivityResult(self, requestCode, resultCode, resultData):
52 """Callback method, called from MainActivity when an Intent ends.
53
54 :param int requestCode: The integer request code originally supplied to startActivityForResult(),
55 allowing you to identify who this result came from.
56 :param int resultCode: The integer result code returned by the child activity through its setResult().
57 :param Intent resultData: An Intent, which can return result data to the caller (various data can be attached
58 to Intent "extras").
59 """
60 print(
61 f"Toga app: onActivityResult, requestCode={requestCode}, resultData={resultData}"
62 )
63 try:
64 # remove Intent from the list of running Intents,
65 # and set the result of the intent.
66 result_future = self.running_intents.pop(requestCode)
67 result_future.set_result(
68 {"resultCode": resultCode, "resultData": resultData}
69 )
70 except KeyError:
71 print("No intent matching request code {requestCode}")
72
73 def onConfigurationChanged(self, new_config):
74 pass
75
76 def onOptionsItemSelected(self, menuitem):
77 consumed = False
78 try:
79 cmd = self.menuitem_mapping[menuitem.getItemId()]
80 consumed = True
81 if cmd.action is not None:
82 cmd.action(menuitem)
83 except KeyError:
84 print("menu item id not found in menuitem_mapping dictionary!")
85 return consumed
86
87 def onPrepareOptionsMenu(self, menu):
88 menu.clear()
89 itemid = 0
90 menulist = {} # dictionary with all menus
91 self.menuitem_mapping.clear()
92
93 # create option menu
94 for cmd in self._impl.interface.commands:
95 if cmd == toga.SECTION_BREAK or cmd == toga.GROUP_BREAK:
96 continue
97 if cmd in self._impl.interface.main_window.toolbar:
98 continue # do not show toolbar commands in the option menu (except when overflowing)
99
100 grouppath = cmd.group.path
101 if grouppath[0] != Group.COMMANDS:
102 # only the Commands group (and its subgroups) are supported
103 # other groups should eventually go into the navigation drawer
104 continue
105 if cmd.group.key in menulist:
106 menugroup = menulist[cmd.group.key]
107 else:
108 # create all missing submenus
109 parentmenu = menu
110 for group in grouppath:
111 groupkey = group.key
112 if groupkey in menulist:
113 menugroup = menulist[groupkey]
114 else:
115 if group.text == toga.Group.COMMANDS.text:
116 menulist[groupkey] = menu
117 menugroup = menu
118 else:
119 itemid += 1
120 order = Menu.NONE if group.order is None else group.order
121 menugroup = parentmenu.addSubMenu(
122 Menu.NONE, itemid, order, group.text
123 ) # groupId, itemId, order, title
124 menulist[groupkey] = menugroup
125 parentmenu = menugroup
126 # create menu item
127 itemid += 1
128 order = Menu.NONE if cmd.order is None else cmd.order
129 menuitem = menugroup.add(
130 Menu.NONE, itemid, order, cmd.text
131 ) # groupId, itemId, order, title
132 menuitem.setShowAsActionFlags(MenuItem.SHOW_AS_ACTION_NEVER)
133 menuitem.setEnabled(cmd.enabled)
134 self.menuitem_mapping[
135 itemid
136 ] = cmd # store itemid for use in onOptionsItemSelected
137
138 # create toolbar actions
139 if self._impl.interface.main_window:
140 for cmd in self._impl.interface.main_window.toolbar:
141 if cmd == toga.SECTION_BREAK or cmd == toga.GROUP_BREAK:
142 continue
143 itemid += 1
144 order = Menu.NONE if cmd.order is None else cmd.order
145 menuitem = menu.add(
146 Menu.NONE, itemid, order, cmd.text
147 ) # groupId, itemId, order, title
148 menuitem.setShowAsActionFlags(
149 MenuItem.SHOW_AS_ACTION_IF_ROOM
150 ) # toolbar button / item in options menu on overflow
151 menuitem.setEnabled(cmd.enabled)
152 if cmd.icon:
153 icon = Drawable.createFromPath(str(cmd.icon._impl.path))
154 if icon:
155 menuitem.setIcon(icon)
156 else:
157 print("Could not create icon: " + str(cmd.icon._impl.path))
158 self.menuitem_mapping[
159 itemid
160 ] = cmd # store itemid for use in onOptionsItemSelected
161
162 return True
163
164 @property
165 def native(self):
166 # We access `MainActivity.singletonThis` freshly each time, rather than
167 # storing a reference in `__init__()`, because it's not safe to use the
168 # same reference over time because `rubicon-java` creates a JNI local
169 # reference.
170 return MainActivity.singletonThis
171
172
173 class App:
174 def __init__(self, interface):
175 self.interface = interface
176 self.interface._impl = self
177 self._listener = None
178
179 self.loop = android_events.AndroidEventLoop()
180
181 @property
182 def native(self):
183 return self._listener.native if self._listener else None
184
185 def create(self):
186 # The `_listener` listens for activity event callbacks. For simplicity,
187 # the app's `.native` is the listener's native Java class.
188 self._listener = TogaApp(self)
189 # Call user code to populate the main window
190 self.interface.startup()
191
192 def open_document(self, fileURL):
193 print("Can't open document %s (yet)" % fileURL)
194
195 def main_loop(self):
196 # In order to support user asyncio code, start the Python/Android cooperative event loop.
197 self.loop.run_forever_cooperatively()
198
199 # On Android, Toga UI integrates automatically into the main Android event loop by virtue
200 # of the Android Activity system.
201 self.create()
202
203 def set_main_window(self, window):
204 pass
205
206 def show_about_dialog(self):
207 self.interface.factory.not_implemented("App.show_about_dialog()")
208
209 def exit(self):
210 pass
211
212 async def intent_result(self, intent):
213 """Calls an Intent and waits for its result.
214
215 A RuntimeError will be raised when the Intent cannot be invoked.
216
217 :param Intent intent: The Intent to call
218 :returns: A Dictionary containing "resultCode" (int) and "resultData" (Intent or None)
219 :rtype: dict
220 """
221 if not intent.resolveActivity(self.native.getPackageManager()):
222 raise RuntimeError("No appropriate Activity found to handle this intent.")
223 self._listener.last_intent_requestcode += 1
224 code = self._listener.last_intent_requestcode
225
226 result_future = asyncio.Future()
227 self._listener.running_intents[code] = result_future
228
229 self.native.startActivityForResult(intent, code)
230 await result_future
231 return result_future.result()
232
[end of android/src/toga_android/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/android/src/toga_android/app.py b/android/src/toga_android/app.py
--- a/android/src/toga_android/app.py
+++ b/android/src/toga_android/app.py
@@ -218,14 +218,15 @@
:returns: A Dictionary containing "resultCode" (int) and "resultData" (Intent or None)
:rtype: dict
"""
- if not intent.resolveActivity(self.native.getPackageManager()):
- raise RuntimeError("No appropriate Activity found to handle this intent.")
- self._listener.last_intent_requestcode += 1
- code = self._listener.last_intent_requestcode
+ try:
+ self._listener.last_intent_requestcode += 1
+ code = self._listener.last_intent_requestcode
- result_future = asyncio.Future()
- self._listener.running_intents[code] = result_future
+ result_future = asyncio.Future()
+ self._listener.running_intents[code] = result_future
- self.native.startActivityForResult(intent, code)
- await result_future
- return result_future.result()
+ self.native.startActivityForResult(intent, code)
+ await result_future
+ return result_future.result()
+ except AttributeError:
+ raise RuntimeError("No appropriate Activity found to handle this intent.")
| {"golden_diff": "diff --git a/android/src/toga_android/app.py b/android/src/toga_android/app.py\n--- a/android/src/toga_android/app.py\n+++ b/android/src/toga_android/app.py\n@@ -218,14 +218,15 @@\n :returns: A Dictionary containing \"resultCode\" (int) and \"resultData\" (Intent or None)\n :rtype: dict\n \"\"\"\n- if not intent.resolveActivity(self.native.getPackageManager()):\n- raise RuntimeError(\"No appropriate Activity found to handle this intent.\")\n- self._listener.last_intent_requestcode += 1\n- code = self._listener.last_intent_requestcode\n+ try:\n+ self._listener.last_intent_requestcode += 1\n+ code = self._listener.last_intent_requestcode\n \n- result_future = asyncio.Future()\n- self._listener.running_intents[code] = result_future\n+ result_future = asyncio.Future()\n+ self._listener.running_intents[code] = result_future\n \n- self.native.startActivityForResult(intent, code)\n- await result_future\n- return result_future.result()\n+ self.native.startActivityForResult(intent, code)\n+ await result_future\n+ return result_future.result()\n+ except AttributeError:\n+ raise RuntimeError(\"No appropriate Activity found to handle this intent.\")\n", "issue": "Android system file browser cannot be invoked\n### Describe the bug\r\n\r\nI tried to invoke Android's system file browser UI (SAF) but get following error message:\r\n`No appropriate Activity found to handle this intent.`\r\n\r\n\r\n### Steps to reproduce\r\n\r\nTry following code:\r\n```\r\nfrom android.content import Intent\r\n\r\nintent = Intent(Intent.ACTION_OPEN_DOCUMENT)\r\nintent.addCategory(Intent.CATEGORY_OPENABLE) \r\nintent.setType(\"*/*\")\r\nresult = await self.app._impl.intent_result(intent) \r\n```\r\n\r\n\r\n### Expected behavior\r\n\r\nThe system file browser should open because under my Android 13, it is definitely there.\r\n\r\nWith following code, the file browser is shown, but the app doesn't wait for the result:\r\n```\r\nintent = Intent(Intent.ACTION_OPEN_DOCUMENT)\r\nintent.addCategory(Intent.CATEGORY_OPENABLE) \r\nintent.setType(\"*/*\")\r\nresult = await self.app._impl.native.startActivityForResult(intent, 1234) \r\n```\r\nI get the error back `object NoneType can't be used in 'await' expression`\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\n- Operating System: Android 13\r\n- Python version: 3.11\r\n- Software versions:\r\n - Toga: 0.3.1dev93\r\n \r\n\r\n### Logs\r\n\r\n```\r\n\r\n```\r\n\r\n\r\n### Additional context\r\nThe same problem exists with Intent.ACTION_CREATE_DOCUMENT and this Intent also can be invoked directly with startActivityForResult\r\n\r\nThe problem stems from this code in app.py:\r\n\r\n```\r\nif not intent.resolveActivity(self.native.getPackageManager()):\r\n raise RuntimeError(\"No appropriate Activity found to handle this intent.\")\r\n```\r\nThis may be a good way to find Activities of standard apps, but it seems to fail in the case of internal Activities like the system file browser.\r\n\r\nIt might be better to remove this code and instead catch and return exceptions that occur when invoking startActivityForResult unguarded\n", "before_files": [{"content": "import asyncio\n\nfrom rubicon.java import android_events\n\nimport toga\nfrom toga.command import Group\n\nfrom .libs.activity import IPythonApp, MainActivity\nfrom .libs.android.graphics import Drawable\nfrom .libs.android.view import Menu, MenuItem\nfrom .window import Window\n\n# `MainWindow` is defined here in `app.py`, not `window.py`, to mollify the test suite.\nMainWindow = 
Window\n\n\nclass TogaApp(IPythonApp):\n last_intent_requestcode = (\n -1\n ) # always increment before using it for invoking new Intents\n running_intents = {} # dictionary for currently running Intents\n menuitem_mapping = {} # dictionary for mapping menuitems to commands\n\n def __init__(self, app):\n super().__init__()\n self._impl = app\n MainActivity.setPythonApp(self)\n print(\"Python app launched & stored in Android Activity class\")\n\n def onCreate(self):\n print(\"Toga app: onCreate\")\n\n def onStart(self):\n print(\"Toga app: onStart\")\n\n def onResume(self):\n print(\"Toga app: onResume\")\n\n def onPause(self):\n print(\"Toga app: onPause\")\n\n def onStop(self):\n print(\"Toga app: onStop\")\n\n def onDestroy(self):\n print(\"Toga app: onDestroy\")\n\n def onRestart(self):\n print(\"Toga app: onRestart\")\n\n def onActivityResult(self, requestCode, resultCode, resultData):\n \"\"\"Callback method, called from MainActivity when an Intent ends.\n\n :param int requestCode: The integer request code originally supplied to startActivityForResult(),\n allowing you to identify who this result came from.\n :param int resultCode: The integer result code returned by the child activity through its setResult().\n :param Intent resultData: An Intent, which can return result data to the caller (various data can be attached\n to Intent \"extras\").\n \"\"\"\n print(\n f\"Toga app: onActivityResult, requestCode={requestCode}, resultData={resultData}\"\n )\n try:\n # remove Intent from the list of running Intents,\n # and set the result of the intent.\n result_future = self.running_intents.pop(requestCode)\n result_future.set_result(\n {\"resultCode\": resultCode, \"resultData\": resultData}\n )\n except KeyError:\n print(\"No intent matching request code {requestCode}\")\n\n def onConfigurationChanged(self, new_config):\n pass\n\n def onOptionsItemSelected(self, menuitem):\n consumed = False\n try:\n cmd = self.menuitem_mapping[menuitem.getItemId()]\n consumed = True\n if cmd.action is not None:\n cmd.action(menuitem)\n except KeyError:\n print(\"menu item id not found in menuitem_mapping dictionary!\")\n return consumed\n\n def onPrepareOptionsMenu(self, menu):\n menu.clear()\n itemid = 0\n menulist = {} # dictionary with all menus\n self.menuitem_mapping.clear()\n\n # create option menu\n for cmd in self._impl.interface.commands:\n if cmd == toga.SECTION_BREAK or cmd == toga.GROUP_BREAK:\n continue\n if cmd in self._impl.interface.main_window.toolbar:\n continue # do not show toolbar commands in the option menu (except when overflowing)\n\n grouppath = cmd.group.path\n if grouppath[0] != Group.COMMANDS:\n # only the Commands group (and its subgroups) are supported\n # other groups should eventually go into the navigation drawer\n continue\n if cmd.group.key in menulist:\n menugroup = menulist[cmd.group.key]\n else:\n # create all missing submenus\n parentmenu = menu\n for group in grouppath:\n groupkey = group.key\n if groupkey in menulist:\n menugroup = menulist[groupkey]\n else:\n if group.text == toga.Group.COMMANDS.text:\n menulist[groupkey] = menu\n menugroup = menu\n else:\n itemid += 1\n order = Menu.NONE if group.order is None else group.order\n menugroup = parentmenu.addSubMenu(\n Menu.NONE, itemid, order, group.text\n ) # groupId, itemId, order, title\n menulist[groupkey] = menugroup\n parentmenu = menugroup\n # create menu item\n itemid += 1\n order = Menu.NONE if cmd.order is None else cmd.order\n menuitem = menugroup.add(\n Menu.NONE, itemid, order, cmd.text\n ) # groupId, 
itemId, order, title\n menuitem.setShowAsActionFlags(MenuItem.SHOW_AS_ACTION_NEVER)\n menuitem.setEnabled(cmd.enabled)\n self.menuitem_mapping[\n itemid\n ] = cmd # store itemid for use in onOptionsItemSelected\n\n # create toolbar actions\n if self._impl.interface.main_window:\n for cmd in self._impl.interface.main_window.toolbar:\n if cmd == toga.SECTION_BREAK or cmd == toga.GROUP_BREAK:\n continue\n itemid += 1\n order = Menu.NONE if cmd.order is None else cmd.order\n menuitem = menu.add(\n Menu.NONE, itemid, order, cmd.text\n ) # groupId, itemId, order, title\n menuitem.setShowAsActionFlags(\n MenuItem.SHOW_AS_ACTION_IF_ROOM\n ) # toolbar button / item in options menu on overflow\n menuitem.setEnabled(cmd.enabled)\n if cmd.icon:\n icon = Drawable.createFromPath(str(cmd.icon._impl.path))\n if icon:\n menuitem.setIcon(icon)\n else:\n print(\"Could not create icon: \" + str(cmd.icon._impl.path))\n self.menuitem_mapping[\n itemid\n ] = cmd # store itemid for use in onOptionsItemSelected\n\n return True\n\n @property\n def native(self):\n # We access `MainActivity.singletonThis` freshly each time, rather than\n # storing a reference in `__init__()`, because it's not safe to use the\n # same reference over time because `rubicon-java` creates a JNI local\n # reference.\n return MainActivity.singletonThis\n\n\nclass App:\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n self._listener = None\n\n self.loop = android_events.AndroidEventLoop()\n\n @property\n def native(self):\n return self._listener.native if self._listener else None\n\n def create(self):\n # The `_listener` listens for activity event callbacks. For simplicity,\n # the app's `.native` is the listener's native Java class.\n self._listener = TogaApp(self)\n # Call user code to populate the main window\n self.interface.startup()\n\n def open_document(self, fileURL):\n print(\"Can't open document %s (yet)\" % fileURL)\n\n def main_loop(self):\n # In order to support user asyncio code, start the Python/Android cooperative event loop.\n self.loop.run_forever_cooperatively()\n\n # On Android, Toga UI integrates automatically into the main Android event loop by virtue\n # of the Android Activity system.\n self.create()\n\n def set_main_window(self, window):\n pass\n\n def show_about_dialog(self):\n self.interface.factory.not_implemented(\"App.show_about_dialog()\")\n\n def exit(self):\n pass\n\n async def intent_result(self, intent):\n \"\"\"Calls an Intent and waits for its result.\n\n A RuntimeError will be raised when the Intent cannot be invoked.\n\n :param Intent intent: The Intent to call\n :returns: A Dictionary containing \"resultCode\" (int) and \"resultData\" (Intent or None)\n :rtype: dict\n \"\"\"\n if not intent.resolveActivity(self.native.getPackageManager()):\n raise RuntimeError(\"No appropriate Activity found to handle this intent.\")\n self._listener.last_intent_requestcode += 1\n code = self._listener.last_intent_requestcode\n\n result_future = asyncio.Future()\n self._listener.running_intents[code] = result_future\n\n self.native.startActivityForResult(intent, code)\n await result_future\n return result_future.result()\n", "path": "android/src/toga_android/app.py"}]} | 3,270 | 276 |
gh_patches_debug_19182 | rasdani/github-patches | git_diff | mne-tools__mne-python-6426 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Why do we have matplotlib < 3.1 in requirements.txt?
@massich can you try if 3.1 works? It would be nice to support the current matplotlib version.
</issue>
<code>
[start of mne/conftest.py]
1 # -*- coding: utf-8 -*-
2 # Author: Eric Larson <[email protected]>
3 #
4 # License: BSD (3-clause)
5
6 import os.path as op
7 import warnings
8 import pytest
9 # For some unknown reason, on Travis-xenial there are segfaults caused on
10 # the line pytest -> pdb.Pdb.__init__ -> "import readline". Forcing an
11 # import here seems to prevent them (!?). This suggests a potential problem
12 # with some other library stepping on memory where it shouldn't. It only
13 # seems to happen on the Linux runs that install Mayavi. Anectodally,
14 # @larsoner has had problems a couple of years ago where a mayavi import
15 # seemed to corrupt SciPy linalg function results (!), likely due to the
16 # associated VTK import, so this could be another manifestation of that.
17 try:
18 import readline # noqa
19 except Exception:
20 pass
21
22 import numpy as np
23 import mne
24 from mne.datasets import testing
25 from mne.fixes import _get_args
26
27 test_path = testing.data_path(download=False)
28 s_path = op.join(test_path, 'MEG', 'sample')
29 fname_evoked = op.join(s_path, 'sample_audvis_trunc-ave.fif')
30 fname_cov = op.join(s_path, 'sample_audvis_trunc-cov.fif')
31 fname_fwd = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
32
33
34 def pytest_configure(config):
35 """Configure pytest options."""
36 # Markers
37 for marker in ('slowtest', 'ultraslowtest'):
38 config.addinivalue_line('markers', marker)
39
40 # Fixtures
41 for fixture in ('matplotlib_config',):
42 config.addinivalue_line('usefixtures', fixture)
43
44 # Warnings
45 # - Once SciPy updates not to have non-integer and non-tuple errors (1.2.0)
46 # we should remove them from here.
47 # - This list should also be considered alongside reset_warnings in
48 # doc/conf.py.
49 warning_lines = """
50 error::
51 ignore::ImportWarning
52 ignore:the matrix subclass:PendingDeprecationWarning
53 ignore:numpy.dtype size changed:RuntimeWarning
54 ignore:.*HasTraits.trait_.*:DeprecationWarning
55 ignore:.*takes no parameters:DeprecationWarning
56 ignore:joblib not installed:RuntimeWarning
57 ignore:Using a non-tuple sequence for multidimensional indexing:FutureWarning
58 ignore:using a non-integer number instead of an integer will result in an error:DeprecationWarning
59 ignore:Importing from numpy.testing.decorators is deprecated:DeprecationWarning
60 ignore:np.loads is deprecated, use pickle.loads instead:DeprecationWarning
61 ignore:The oldnumeric module will be dropped:DeprecationWarning
62 ignore:Collection picker None could not be converted to float:UserWarning
63 ignore:covariance is not positive-semidefinite:RuntimeWarning
64 ignore:Can only plot ICA components:RuntimeWarning
65 ignore:Matplotlib is building the font cache using fc-list:UserWarning
66 ignore:Using or importing the ABCs from 'collections':DeprecationWarning
67 ignore:`formatargspec` is deprecated:DeprecationWarning
68 # This is only necessary until sklearn updates their wheels for NumPy 1.16
69 ignore:numpy.ufunc size changed:RuntimeWarning
70 ignore:.*mne-realtime.*:DeprecationWarning
71 """ # noqa: E501
72 for warning_line in warning_lines.split('\n'):
73 warning_line = warning_line.strip()
74 if warning_line and not warning_line.startswith('#'):
75 config.addinivalue_line('filterwarnings', warning_line)
76
77
78 @pytest.fixture(scope='session')
79 def matplotlib_config():
80 """Configure matplotlib for viz tests."""
81 import matplotlib
82 # "force" should not really be necessary but should not hurt
83 kwargs = dict()
84 if 'warn' in _get_args(matplotlib.use):
85 kwargs['warn'] = False
86 matplotlib.use('agg', force=True, **kwargs) # don't pop up windows
87 import matplotlib.pyplot as plt
88 assert plt.get_backend() == 'agg'
89 # overwrite some params that can horribly slow down tests that
90 # users might have changed locally (but should not otherwise affect
91 # functionality)
92 plt.ioff()
93 plt.rcParams['figure.dpi'] = 100
94 try:
95 from traits.etsconfig.api import ETSConfig
96 except Exception:
97 pass
98 else:
99 ETSConfig.toolkit = 'qt4'
100 try:
101 with warnings.catch_warnings(record=True): # traits
102 from mayavi import mlab
103 except Exception:
104 pass
105 else:
106 mlab.options.backend = 'test'
107
108
109 @pytest.fixture(scope='function', params=[testing._pytest_param()])
110 def evoked():
111 """Get evoked data."""
112 evoked = mne.read_evokeds(fname_evoked, condition='Left Auditory',
113 baseline=(None, 0))
114 evoked.crop(0, 0.2)
115 return evoked
116
117
118 @pytest.fixture(scope='function', params=[testing._pytest_param()])
119 def noise_cov():
120 """Get a noise cov from the testing dataset."""
121 return mne.read_cov(fname_cov)
122
123
124 @pytest.fixture(scope='function')
125 def bias_params_free(evoked, noise_cov):
126 """Provide inputs for free bias functions."""
127 fwd = mne.read_forward_solution(fname_fwd)
128 return _bias_params(evoked, noise_cov, fwd)
129
130
131 @pytest.fixture(scope='function')
132 def bias_params_fixed(evoked, noise_cov):
133 """Provide inputs for fixed bias functions."""
134 fwd = mne.read_forward_solution(fname_fwd)
135 fwd = mne.convert_forward_solution(fwd, force_fixed=True, surf_ori=True)
136 return _bias_params(evoked, noise_cov, fwd)
137
138
139 def _bias_params(evoked, noise_cov, fwd):
140 evoked.pick_types(meg=True, eeg=True, exclude=())
141 # restrict to limited set of verts (small src here) and one hemi for speed
142 vertices = [fwd['src'][0]['vertno'].copy(), []]
143 stc = mne.SourceEstimate(np.zeros((sum(len(v) for v in vertices), 1)),
144 vertices, 0., 1.)
145 fwd = mne.forward.restrict_forward_to_stc(fwd, stc)
146 assert fwd['sol']['row_names'] == noise_cov['names']
147 assert noise_cov['names'] == evoked.ch_names
148 evoked = mne.EvokedArray(fwd['sol']['data'].copy(), evoked.info)
149 data_cov = noise_cov.copy()
150 data_cov['data'] = np.dot(fwd['sol']['data'], fwd['sol']['data'].T)
151 assert data_cov['data'].shape[0] == len(noise_cov['names'])
152 want = np.arange(fwd['sol']['data'].shape[1])
153 if not mne.forward.is_fixed_orient(fwd):
154 want //= 3
155 return evoked, fwd, noise_cov, data_cov, want
156
157
158 @pytest.fixture(scope="module", params=[
159 "mayavi",
160 "pyvista",
161 ])
162 def backend_name(request):
163 """Get the backend name."""
164 yield request.param
165
166
167 @pytest.yield_fixture
168 def renderer(backend_name):
169 """Yield the 3D backends."""
170 from mne.viz.backends.renderer import _use_test_3d_backend
171 from mne.viz.backends.tests._utils import has_mayavi, has_pyvista
172 if backend_name == 'mayavi':
173 if not has_mayavi():
174 pytest.skip("Test skipped, requires mayavi.")
175 elif backend_name == 'pyvista':
176 if not has_pyvista():
177 pytest.skip("Test skipped, requires pyvista.")
178 with _use_test_3d_backend(backend_name):
179 from mne.viz.backends import renderer
180 yield renderer
181 renderer._close_all()
182
[end of mne/conftest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mne/conftest.py b/mne/conftest.py
--- a/mne/conftest.py
+++ b/mne/conftest.py
@@ -22,7 +22,6 @@
import numpy as np
import mne
from mne.datasets import testing
-from mne.fixes import _get_args
test_path = testing.data_path(download=False)
s_path = op.join(test_path, 'MEG', 'sample')
@@ -81,9 +80,8 @@
import matplotlib
# "force" should not really be necessary but should not hurt
kwargs = dict()
- if 'warn' in _get_args(matplotlib.use):
- kwargs['warn'] = False
- matplotlib.use('agg', force=True, **kwargs) # don't pop up windows
+ with warnings.catch_warnings(record=True): # ignore warning
+ matplotlib.use('agg', force=True, **kwargs) # don't pop up windows
import matplotlib.pyplot as plt
assert plt.get_backend() == 'agg'
# overwrite some params that can horribly slow down tests that
| {"golden_diff": "diff --git a/mne/conftest.py b/mne/conftest.py\n--- a/mne/conftest.py\n+++ b/mne/conftest.py\n@@ -22,7 +22,6 @@\n import numpy as np\n import mne\n from mne.datasets import testing\n-from mne.fixes import _get_args\n \n test_path = testing.data_path(download=False)\n s_path = op.join(test_path, 'MEG', 'sample')\n@@ -81,9 +80,8 @@\n import matplotlib\n # \"force\" should not really be necessary but should not hurt\n kwargs = dict()\n- if 'warn' in _get_args(matplotlib.use):\n- kwargs['warn'] = False\n- matplotlib.use('agg', force=True, **kwargs) # don't pop up windows\n+ with warnings.catch_warnings(record=True): # ignore warning\n+ matplotlib.use('agg', force=True, **kwargs) # don't pop up windows\n import matplotlib.pyplot as plt\n assert plt.get_backend() == 'agg'\n # overwrite some params that can horribly slow down tests that\n", "issue": "Why do we have matplotlib < 3.1 in requirements.txt?\n@massich can you try if 3.1 works? It would be nice to support the current matplotlib version.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Author: Eric Larson <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport warnings\nimport pytest\n# For some unknown reason, on Travis-xenial there are segfaults caused on\n# the line pytest -> pdb.Pdb.__init__ -> \"import readline\". Forcing an\n# import here seems to prevent them (!?). This suggests a potential problem\n# with some other library stepping on memory where it shouldn't. It only\n# seems to happen on the Linux runs that install Mayavi. Anectodally,\n# @larsoner has had problems a couple of years ago where a mayavi import\n# seemed to corrupt SciPy linalg function results (!), likely due to the\n# associated VTK import, so this could be another manifestation of that.\ntry:\n import readline # noqa\nexcept Exception:\n pass\n\nimport numpy as np\nimport mne\nfrom mne.datasets import testing\nfrom mne.fixes import _get_args\n\ntest_path = testing.data_path(download=False)\ns_path = op.join(test_path, 'MEG', 'sample')\nfname_evoked = op.join(s_path, 'sample_audvis_trunc-ave.fif')\nfname_cov = op.join(s_path, 'sample_audvis_trunc-cov.fif')\nfname_fwd = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')\n\n\ndef pytest_configure(config):\n \"\"\"Configure pytest options.\"\"\"\n # Markers\n for marker in ('slowtest', 'ultraslowtest'):\n config.addinivalue_line('markers', marker)\n\n # Fixtures\n for fixture in ('matplotlib_config',):\n config.addinivalue_line('usefixtures', fixture)\n\n # Warnings\n # - Once SciPy updates not to have non-integer and non-tuple errors (1.2.0)\n # we should remove them from here.\n # - This list should also be considered alongside reset_warnings in\n # doc/conf.py.\n warning_lines = \"\"\"\n error::\n ignore::ImportWarning\n ignore:the matrix subclass:PendingDeprecationWarning\n ignore:numpy.dtype size changed:RuntimeWarning\n ignore:.*HasTraits.trait_.*:DeprecationWarning\n ignore:.*takes no parameters:DeprecationWarning\n ignore:joblib not installed:RuntimeWarning\n ignore:Using a non-tuple sequence for multidimensional indexing:FutureWarning\n ignore:using a non-integer number instead of an integer will result in an error:DeprecationWarning\n ignore:Importing from numpy.testing.decorators is deprecated:DeprecationWarning\n ignore:np.loads is deprecated, use pickle.loads instead:DeprecationWarning\n ignore:The oldnumeric module will be dropped:DeprecationWarning\n ignore:Collection picker None could not be converted to float:UserWarning\n 
ignore:covariance is not positive-semidefinite:RuntimeWarning\n ignore:Can only plot ICA components:RuntimeWarning\n ignore:Matplotlib is building the font cache using fc-list:UserWarning\n ignore:Using or importing the ABCs from 'collections':DeprecationWarning\n ignore:`formatargspec` is deprecated:DeprecationWarning\n # This is only necessary until sklearn updates their wheels for NumPy 1.16\n ignore:numpy.ufunc size changed:RuntimeWarning\n ignore:.*mne-realtime.*:DeprecationWarning\n \"\"\" # noqa: E501\n for warning_line in warning_lines.split('\\n'):\n warning_line = warning_line.strip()\n if warning_line and not warning_line.startswith('#'):\n config.addinivalue_line('filterwarnings', warning_line)\n\n\[email protected](scope='session')\ndef matplotlib_config():\n \"\"\"Configure matplotlib for viz tests.\"\"\"\n import matplotlib\n # \"force\" should not really be necessary but should not hurt\n kwargs = dict()\n if 'warn' in _get_args(matplotlib.use):\n kwargs['warn'] = False\n matplotlib.use('agg', force=True, **kwargs) # don't pop up windows\n import matplotlib.pyplot as plt\n assert plt.get_backend() == 'agg'\n # overwrite some params that can horribly slow down tests that\n # users might have changed locally (but should not otherwise affect\n # functionality)\n plt.ioff()\n plt.rcParams['figure.dpi'] = 100\n try:\n from traits.etsconfig.api import ETSConfig\n except Exception:\n pass\n else:\n ETSConfig.toolkit = 'qt4'\n try:\n with warnings.catch_warnings(record=True): # traits\n from mayavi import mlab\n except Exception:\n pass\n else:\n mlab.options.backend = 'test'\n\n\[email protected](scope='function', params=[testing._pytest_param()])\ndef evoked():\n \"\"\"Get evoked data.\"\"\"\n evoked = mne.read_evokeds(fname_evoked, condition='Left Auditory',\n baseline=(None, 0))\n evoked.crop(0, 0.2)\n return evoked\n\n\[email protected](scope='function', params=[testing._pytest_param()])\ndef noise_cov():\n \"\"\"Get a noise cov from the testing dataset.\"\"\"\n return mne.read_cov(fname_cov)\n\n\[email protected](scope='function')\ndef bias_params_free(evoked, noise_cov):\n \"\"\"Provide inputs for free bias functions.\"\"\"\n fwd = mne.read_forward_solution(fname_fwd)\n return _bias_params(evoked, noise_cov, fwd)\n\n\[email protected](scope='function')\ndef bias_params_fixed(evoked, noise_cov):\n \"\"\"Provide inputs for fixed bias functions.\"\"\"\n fwd = mne.read_forward_solution(fname_fwd)\n fwd = mne.convert_forward_solution(fwd, force_fixed=True, surf_ori=True)\n return _bias_params(evoked, noise_cov, fwd)\n\n\ndef _bias_params(evoked, noise_cov, fwd):\n evoked.pick_types(meg=True, eeg=True, exclude=())\n # restrict to limited set of verts (small src here) and one hemi for speed\n vertices = [fwd['src'][0]['vertno'].copy(), []]\n stc = mne.SourceEstimate(np.zeros((sum(len(v) for v in vertices), 1)),\n vertices, 0., 1.)\n fwd = mne.forward.restrict_forward_to_stc(fwd, stc)\n assert fwd['sol']['row_names'] == noise_cov['names']\n assert noise_cov['names'] == evoked.ch_names\n evoked = mne.EvokedArray(fwd['sol']['data'].copy(), evoked.info)\n data_cov = noise_cov.copy()\n data_cov['data'] = np.dot(fwd['sol']['data'], fwd['sol']['data'].T)\n assert data_cov['data'].shape[0] == len(noise_cov['names'])\n want = np.arange(fwd['sol']['data'].shape[1])\n if not mne.forward.is_fixed_orient(fwd):\n want //= 3\n return evoked, fwd, noise_cov, data_cov, want\n\n\[email protected](scope=\"module\", params=[\n \"mayavi\",\n \"pyvista\",\n])\ndef backend_name(request):\n \"\"\"Get the 
backend name.\"\"\"\n yield request.param\n\n\[email protected]_fixture\ndef renderer(backend_name):\n \"\"\"Yield the 3D backends.\"\"\"\n from mne.viz.backends.renderer import _use_test_3d_backend\n from mne.viz.backends.tests._utils import has_mayavi, has_pyvista\n if backend_name == 'mayavi':\n if not has_mayavi():\n pytest.skip(\"Test skipped, requires mayavi.\")\n elif backend_name == 'pyvista':\n if not has_pyvista():\n pytest.skip(\"Test skipped, requires pyvista.\")\n with _use_test_3d_backend(backend_name):\n from mne.viz.backends import renderer\n yield renderer\n renderer._close_all()\n", "path": "mne/conftest.py"}]} | 2,763 | 243 |
gh_patches_debug_3292 | rasdani/github-patches | git_diff | getmoto__moto-2305 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
API Gateway generates invalid IDs
Generated IDs in API Gateway resources look like `A-Z601A-Z47201`. They contain `A-Z` instead of letters.
</issue>
<code>
[start of moto/apigateway/utils.py]
1 from __future__ import unicode_literals
2 import six
3 import random
4
5
6 def create_id():
7 size = 10
8 chars = list(range(10)) + ['A-Z']
9 return ''.join(six.text_type(random.choice(chars)) for x in range(size))
10
[end of moto/apigateway/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/moto/apigateway/utils.py b/moto/apigateway/utils.py
--- a/moto/apigateway/utils.py
+++ b/moto/apigateway/utils.py
@@ -1,9 +1,10 @@
from __future__ import unicode_literals
import six
import random
+import string
def create_id():
size = 10
- chars = list(range(10)) + ['A-Z']
+ chars = list(range(10)) + list(string.ascii_lowercase)
return ''.join(six.text_type(random.choice(chars)) for x in range(size))
| {"golden_diff": "diff --git a/moto/apigateway/utils.py b/moto/apigateway/utils.py\n--- a/moto/apigateway/utils.py\n+++ b/moto/apigateway/utils.py\n@@ -1,9 +1,10 @@\n from __future__ import unicode_literals\n import six\n import random\n+import string\n \n \n def create_id():\n size = 10\n- chars = list(range(10)) + ['A-Z']\n+ chars = list(range(10)) + list(string.ascii_lowercase)\n return ''.join(six.text_type(random.choice(chars)) for x in range(size))\n", "issue": "API Gateway generates invalid IDs\nGenerated IDs in API Gateway resources look like `A-Z601A-Z47201`. They contain `A-Z` instead of letters.\n", "before_files": [{"content": "from __future__ import unicode_literals\nimport six\nimport random\n\n\ndef create_id():\n size = 10\n chars = list(range(10)) + ['A-Z']\n return ''.join(six.text_type(random.choice(chars)) for x in range(size))\n", "path": "moto/apigateway/utils.py"}]} | 645 | 130 |
gh_patches_debug_28024 | rasdani/github-patches | git_diff | google__flax-1311 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
threading.Condition.notifyAll has been deprecated in favour of notify_all in Python 3.10
### Problem you have encountered:
`threading.Condition.notifyAll` has been deprecated in favour of `notify_all` in Python 3.10. Ref : python/cpython#25174
### What you expected to happen:
use `notify_all` in below places.
```
rg -t py -w 'currentThread|notifyAll|activeCount|isDaemon|setDaemon'
flax/training/prefetch_iterator.py
58: self._cond.notifyAll()
68: self._cond.notifyAll()
80: self._cond.notifyAll()
88: self._cond.notifyAll()
```
</issue>
<code>
[start of flax/training/prefetch_iterator.py]
1 # Copyright 2021 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Utility for constructing an iterator which prefetches data asynchronously.
16 """
17
18 import threading
19 import warnings
20
21
22 class PrefetchIterator:
23 """Wraps an iterator to provide async prefetching.
24
25 DEPRECATION WARNING:
26 TensorFlow datasets no longer require manual prefetching.
27
28 Previously this class was used to make data loading using TensorFlow datasets
29 more efficient. Now TF data handles prefetching with NumPy iterators
30 correctly.
31
32 Example::
33
34 tf_iter = dataset.as_numpy_iterator() # only loads data while calling next
35 tf_iter = PrefetchIterator(tf_iter) # prefetches data in the background
36
37 """
38
39 def __init__(self, data_iter, buffer_size=1):
40 """Construct a PrefetchIterator.
41
42 Args:
43 data_iter: the Iterator that should be prefetched.
44 buffer_size: how many items to prefetch (default: 1).
45 """
46 warnings.warn('PrefetchIterator is deprecated. Use the standard `tf.data`'
47 ' prefetch method instead', DeprecationWarning)
48
49 self._data_iter = data_iter
50 self.buffer_size = buffer_size
51 self._cond = threading.Condition()
52 self._buffer = []
53 self._active = True
54 self._thread = threading.Thread(target=self._prefetch_loop, daemon=True)
55 self._thread.start()
56 self._error = None
57
58 def __iter__(self):
59 return self
60
61 def __next__(self):
62 with self._cond:
63 self._cond.wait_for(lambda: self._buffer or not self._active)
64 if self._buffer:
65 item = self._buffer.pop(0)
66 self._cond.notifyAll()
67 return item
68 if self._error:
69 raise self._error # pylint: disable=raising-bad-type
70 assert not self._active
71 raise StopIteration()
72
73 def close(self):
74 with self._cond:
75 self._active = False
76 self._cond.notifyAll()
77
78 def _prefetch_loop(self):
79 """Prefetch loop that prefetches a tf dataset."""
80 def _predicate():
81 return len(self._buffer) < self.buffer_size or not self._active
82
83 while True:
84 try:
85 item = next(self._data_iter)
86 with self._cond:
87 self._buffer.append(item)
88 self._cond.notifyAll()
89 self._cond.wait_for(_predicate)
90 if not self._active:
91 return
92 except Exception as e: # pylint: disable=broad-except
93 with self._cond:
94 self._error = e
95 self._active = False
96 self._cond.notifyAll()
97 return
98
[end of flax/training/prefetch_iterator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flax/training/prefetch_iterator.py b/flax/training/prefetch_iterator.py
--- a/flax/training/prefetch_iterator.py
+++ b/flax/training/prefetch_iterator.py
@@ -55,7 +55,7 @@
self._cond.wait_for(lambda: self._buffer or not self._active)
if self._buffer:
item = self._buffer.pop(0)
- self._cond.notifyAll()
+ self._cond.notify_all()
return item
if self._error:
raise self._error # pylint: disable=raising-bad-type
@@ -65,7 +65,7 @@
def close(self):
with self._cond:
self._active = False
- self._cond.notifyAll()
+ self._cond.notify_all()
def _prefetch_loop(self):
"""Prefetch loop that prefetches a tf dataset."""
@@ -77,7 +77,7 @@
item = next(self._data_iter)
with self._cond:
self._buffer.append(item)
- self._cond.notifyAll()
+ self._cond.notify_all()
self._cond.wait_for(_predicate)
if not self._active:
return
@@ -85,5 +85,5 @@
with self._cond:
self._error = e
self._active = False
- self._cond.notifyAll()
+ self._cond.notify_all()
return
| {"golden_diff": "diff --git a/flax/training/prefetch_iterator.py b/flax/training/prefetch_iterator.py\n--- a/flax/training/prefetch_iterator.py\n+++ b/flax/training/prefetch_iterator.py\n@@ -55,7 +55,7 @@\n self._cond.wait_for(lambda: self._buffer or not self._active)\n if self._buffer:\n item = self._buffer.pop(0)\n- self._cond.notifyAll()\n+ self._cond.notify_all()\n return item\n if self._error:\n raise self._error # pylint: disable=raising-bad-type\n@@ -65,7 +65,7 @@\n def close(self):\n with self._cond:\n self._active = False\n- self._cond.notifyAll()\n+ self._cond.notify_all()\n \n def _prefetch_loop(self):\n \"\"\"Prefetch loop that prefetches a tf dataset.\"\"\"\n@@ -77,7 +77,7 @@\n item = next(self._data_iter)\n with self._cond:\n self._buffer.append(item)\n- self._cond.notifyAll()\n+ self._cond.notify_all()\n self._cond.wait_for(_predicate)\n if not self._active:\n return\n@@ -85,5 +85,5 @@\n with self._cond:\n self._error = e\n self._active = False\n- self._cond.notifyAll()\n+ self._cond.notify_all()\n return\n", "issue": " threading.Condition.notifyAll has been deprecated in favour of notify_all in Python 3.10\n### Problem you have encountered:\r\n\r\n `threading.Condition.notifyAll` has been deprecated in favour of `notify_all` in Python 3.10. Ref : python/cpython#25174\r\n\r\n### What you expected to happen:\r\n\r\nuse `notify_all` in below places.\r\n\r\n```\r\nrg -t py -w 'currentThread|notifyAll|activeCount|isDaemon|setDaemon' \r\nflax/training/prefetch_iterator.py\r\n58: self._cond.notifyAll()\r\n68: self._cond.notifyAll()\r\n80: self._cond.notifyAll()\r\n88: self._cond.notifyAll()\r\n```\n", "before_files": [{"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility for constructing an iterator which prefetches data asynchronously.\n\"\"\"\n\nimport threading\nimport warnings\n\n\nclass PrefetchIterator:\n \"\"\"Wraps an iterator to provide async prefetching.\n\n DEPRECATION WARNING:\n TensorFlow datasets no longer require manual prefetching.\n\n Previously this class was used to make data loading using TensorFlow datasets\n more efficient. Now TF data handles prefetching with NumPy iterators\n correctly.\n\n Example::\n\n tf_iter = dataset.as_numpy_iterator() # only loads data while calling next\n tf_iter = PrefetchIterator(tf_iter) # prefetches data in the background\n\n \"\"\"\n\n def __init__(self, data_iter, buffer_size=1):\n \"\"\"Construct a PrefetchIterator.\n\n Args:\n data_iter: the Iterator that should be prefetched.\n buffer_size: how many items to prefetch (default: 1).\n \"\"\"\n warnings.warn('PrefetchIterator is deprecated. 
Use the standard `tf.data`'\n ' prefetch method instead', DeprecationWarning)\n\n self._data_iter = data_iter\n self.buffer_size = buffer_size\n self._cond = threading.Condition()\n self._buffer = []\n self._active = True\n self._thread = threading.Thread(target=self._prefetch_loop, daemon=True)\n self._thread.start()\n self._error = None\n\n def __iter__(self):\n return self\n\n def __next__(self):\n with self._cond:\n self._cond.wait_for(lambda: self._buffer or not self._active)\n if self._buffer:\n item = self._buffer.pop(0)\n self._cond.notifyAll()\n return item\n if self._error:\n raise self._error # pylint: disable=raising-bad-type\n assert not self._active\n raise StopIteration()\n\n def close(self):\n with self._cond:\n self._active = False\n self._cond.notifyAll()\n\n def _prefetch_loop(self):\n \"\"\"Prefetch loop that prefetches a tf dataset.\"\"\"\n def _predicate():\n return len(self._buffer) < self.buffer_size or not self._active\n\n while True:\n try:\n item = next(self._data_iter)\n with self._cond:\n self._buffer.append(item)\n self._cond.notifyAll()\n self._cond.wait_for(_predicate)\n if not self._active:\n return\n except Exception as e: # pylint: disable=broad-except\n with self._cond:\n self._error = e\n self._active = False\n self._cond.notifyAll()\n return\n", "path": "flax/training/prefetch_iterator.py"}]} | 1,583 | 322 |
gh_patches_debug_823 | rasdani/github-patches | git_diff | angr__angr-1303 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cachetools broke their API
There's a new major version of cachetools (providing LRUCache), 3.0.0. This has caused everything to break. I have pinned our version to `cachetools<3` for the time being, but we should migrate.
My guess is that this is because we were using the `missing` argument to LRUCache (in claripy, specifically), and I am fairly sure the intended replacement is to [implement the `__missing__` method](https://cachetools.readthedocs.io/en/latest/#extending-cache-classes). Unsure if there are more implications, which is why this issue is open under angr instead of claripy.
</issue>
<code>
[start of setup.py]
1 # pylint: disable=no-name-in-module,import-error,unused-variable
2 import os
3 import sys
4 import subprocess
5 import pkg_resources
6 import shutil
7 import platform
8
9 if bytes is str:
10 raise Exception("""
11
12 =-=-=-=-=-=-=-=-=-=-=-=-= WELCOME TO THE FUTURE! =-=-=-=-=-=-=-=-=-=-=-=-=-=
13
14 angr has transitioned to python 3. Due to the small size of the team behind it,
15 we can't reasonably maintain compatibility between both python 2 and python 3.
16 If you want to continue using the most recent version of angr (you definitely
17 want that, trust us) you should upgrade to python 3. It's like getting your
18 vaccinations. It hurts a little bit initially but in the end it's worth it.
19
20 If you are staying on python 2 and would like to make sure you don't get
21 incompatible versions, make sure your pip is at least version 9.0, and it will
22 use our metadata to implicitly avoid them.
23
24 For more information, see here: https://docs.angr.io/MIGRATION.html
25
26 Good luck!
27 """)
28
29 try:
30 from setuptools import setup
31 from setuptools import find_packages
32 packages = find_packages()
33 except ImportError:
34 from distutils.core import setup
35 packages = [x.strip('./').replace('/','.') for x in os.popen('find -name "__init__.py" | xargs -n1 dirname').read().strip().split('\n')]
36
37 from distutils.util import get_platform
38 from distutils.errors import LibError
39 from distutils.command.build import build as _build
40
41 if sys.platform == 'darwin':
42 library_file = "angr_native.dylib"
43 elif sys.platform in ('win32', 'cygwin'):
44 library_file = "angr_native.dll"
45 else:
46 library_file = "angr_native.so"
47
48 def _build_native():
49 try:
50 import unicorn
51 import pyvex
52 except ImportError:
53 raise LibError("You must install unicorn and pyvex before building angr")
54
55 env = os.environ.copy()
56 env_data = (('UNICORN_INCLUDE_PATH', 'unicorn', 'include'),
57 ('UNICORN_LIB_PATH', 'unicorn', 'lib'),
58 ('UNICORN_LIB_FILE', 'unicorn', 'lib\\unicorn.lib'),
59 ('PYVEX_INCLUDE_PATH', 'pyvex', 'include'),
60 ('PYVEX_LIB_PATH', 'pyvex', 'lib'),
61 ('PYVEX_LIB_FILE', 'pyvex', 'lib\\pyvex.lib'))
62 for var, pkg, fnm in env_data:
63 try:
64 env[var] = pkg_resources.resource_filename(pkg, fnm)
65 except KeyError:
66 pass
67
68 cmd1 = ['nmake', '/f', 'Makefile-win']
69 cmd2 = ['make']
70 for cmd in (cmd1, cmd2):
71 try:
72 if subprocess.call(cmd, cwd='native', env=env) != 0:
73 raise LibError('Unable to build angr_native')
74 break
75 except OSError:
76 continue
77 else:
78 raise LibError('Unable to build angr_native')
79
80 shutil.rmtree('angr/lib', ignore_errors=True)
81 os.mkdir('angr/lib')
82 shutil.copy(os.path.join('native', library_file), 'angr/lib')
83
84 class build(_build):
85 def run(self, *args):
86 self.execute(_build_native, (), msg='Building angr_native')
87 _build.run(self, *args)
88
89 cmdclass = {
90 'build': build,
91 }
92
93 try:
94 from setuptools.command.develop import develop as _develop
95 class develop(_develop):
96 def run(self, *args):
97 self.execute(_build_native, (), msg='Building angr_native')
98 _develop.run(self, *args)
99
100 cmdclass['develop'] = develop
101 except ImportError:
102 pass
103
104 if 'bdist_wheel' in sys.argv and '--plat-name' not in sys.argv:
105 sys.argv.append('--plat-name')
106 name = get_platform()
107 if 'linux' in name:
108 # linux_* platform tags are disallowed because the python ecosystem is fubar
109 # linux builds should be built in the centos 5 vm for maximum compatibility
110 sys.argv.append('manylinux1_' + platform.machine())
111 else:
112 # https://www.python.org/dev/peps/pep-0425/
113 sys.argv.append(name.replace('.', '_').replace('-', '_'))
114
115 setup(
116 name='angr',
117 version='8.18.10.25',
118 python_requires='>=3.5',
119 description='A multi-architecture binary analysis toolkit, with the ability to perform dynamic symbolic execution and various static analyses on binaries',
120 url='https://github.com/angr/angr',
121 packages=packages,
122 install_requires=[
123 'ana',
124 'sortedcontainers',
125 'cachetools<3',
126 'capstone>=3.0.5rc2',
127 'cooldict',
128 'dpkt',
129 'futures; python_version == "2.7"',
130 'mulpyplexer',
131 'networkx>=2.0',
132 'progressbar',
133 'rpyc',
134 'cffi>=1.7.0',
135 'unicorn',
136 'archinfo==8.18.10.25',
137 'claripy==8.18.10.25',
138 'cle==8.18.10.25',
139 'pyvex==8.18.10.25',
140 'ailment==8.18.10.25',
141 'GitPython',
142 'pycparser>=2.18',
143 'itanium_demangler',
144 ],
145 setup_requires=['unicorn', 'pyvex'],
146 cmdclass=cmdclass,
147 include_package_data=True,
148 package_data={
149 'angr': ['lib/*']
150 }
151 )
152
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -122,7 +122,7 @@
install_requires=[
'ana',
'sortedcontainers',
- 'cachetools<3',
+ 'cachetools',
'capstone>=3.0.5rc2',
'cooldict',
'dpkt',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -122,7 +122,7 @@\n install_requires=[\n 'ana',\n 'sortedcontainers',\n- 'cachetools<3',\n+ 'cachetools',\n 'capstone>=3.0.5rc2',\n 'cooldict',\n 'dpkt',\n", "issue": "Cachetools broke their API\nThere's a new major version of cachetools (providing LRUCache), 3.0.0. This has caused everything to break. I have pinned our version to `cachetools<3` for the time being, but we should migrate.\r\n\r\nMy guess is that this is because we were using the `missing` argument to LRUCache (in claripy, specifically), and I am fairly sure the intended replacement is to [implement the `__missing__` method](https://cachetools.readthedocs.io/en/latest/#extending-cache-classes). Unsure if there are more implications, which is why this issue is open under angr instead of claripy.\n", "before_files": [{"content": "# pylint: disable=no-name-in-module,import-error,unused-variable\nimport os\nimport sys\nimport subprocess\nimport pkg_resources\nimport shutil\nimport platform\n\nif bytes is str:\n raise Exception(\"\"\"\n\n=-=-=-=-=-=-=-=-=-=-=-=-= WELCOME TO THE FUTURE! =-=-=-=-=-=-=-=-=-=-=-=-=-=\n\nangr has transitioned to python 3. Due to the small size of the team behind it,\nwe can't reasonably maintain compatibility between both python 2 and python 3.\nIf you want to continue using the most recent version of angr (you definitely\nwant that, trust us) you should upgrade to python 3. It's like getting your\nvaccinations. It hurts a little bit initially but in the end it's worth it.\n\nIf you are staying on python 2 and would like to make sure you don't get\nincompatible versions, make sure your pip is at least version 9.0, and it will\nuse our metadata to implicitly avoid them.\n\nFor more information, see here: https://docs.angr.io/MIGRATION.html\n\nGood luck!\n\"\"\")\n\ntry:\n from setuptools import setup\n from setuptools import find_packages\n packages = find_packages()\nexcept ImportError:\n from distutils.core import setup\n packages = [x.strip('./').replace('/','.') for x in os.popen('find -name \"__init__.py\" | xargs -n1 dirname').read().strip().split('\\n')]\n\nfrom distutils.util import get_platform\nfrom distutils.errors import LibError\nfrom distutils.command.build import build as _build\n\nif sys.platform == 'darwin':\n library_file = \"angr_native.dylib\"\nelif sys.platform in ('win32', 'cygwin'):\n library_file = \"angr_native.dll\"\nelse:\n library_file = \"angr_native.so\"\n\ndef _build_native():\n try:\n import unicorn\n import pyvex\n except ImportError:\n raise LibError(\"You must install unicorn and pyvex before building angr\")\n\n env = os.environ.copy()\n env_data = (('UNICORN_INCLUDE_PATH', 'unicorn', 'include'),\n ('UNICORN_LIB_PATH', 'unicorn', 'lib'),\n ('UNICORN_LIB_FILE', 'unicorn', 'lib\\\\unicorn.lib'),\n ('PYVEX_INCLUDE_PATH', 'pyvex', 'include'),\n ('PYVEX_LIB_PATH', 'pyvex', 'lib'),\n ('PYVEX_LIB_FILE', 'pyvex', 'lib\\\\pyvex.lib'))\n for var, pkg, fnm in env_data:\n try:\n env[var] = pkg_resources.resource_filename(pkg, fnm)\n except KeyError:\n pass\n\n cmd1 = ['nmake', '/f', 'Makefile-win']\n cmd2 = ['make']\n for cmd in (cmd1, cmd2):\n try:\n if subprocess.call(cmd, cwd='native', env=env) != 0:\n raise LibError('Unable to build angr_native')\n break\n except OSError:\n continue\n else:\n raise LibError('Unable to build angr_native')\n\n shutil.rmtree('angr/lib', ignore_errors=True)\n os.mkdir('angr/lib')\n shutil.copy(os.path.join('native', library_file), 'angr/lib')\n\nclass 
build(_build):\n def run(self, *args):\n self.execute(_build_native, (), msg='Building angr_native')\n _build.run(self, *args)\n\ncmdclass = {\n 'build': build,\n}\n\ntry:\n from setuptools.command.develop import develop as _develop\n class develop(_develop):\n def run(self, *args):\n self.execute(_build_native, (), msg='Building angr_native')\n _develop.run(self, *args)\n\n cmdclass['develop'] = develop\nexcept ImportError:\n pass\n\nif 'bdist_wheel' in sys.argv and '--plat-name' not in sys.argv:\n sys.argv.append('--plat-name')\n name = get_platform()\n if 'linux' in name:\n # linux_* platform tags are disallowed because the python ecosystem is fubar\n # linux builds should be built in the centos 5 vm for maximum compatibility\n sys.argv.append('manylinux1_' + platform.machine())\n else:\n # https://www.python.org/dev/peps/pep-0425/\n sys.argv.append(name.replace('.', '_').replace('-', '_'))\n\nsetup(\n name='angr',\n version='8.18.10.25',\n python_requires='>=3.5',\n description='A multi-architecture binary analysis toolkit, with the ability to perform dynamic symbolic execution and various static analyses on binaries',\n url='https://github.com/angr/angr',\n packages=packages,\n install_requires=[\n 'ana',\n 'sortedcontainers',\n 'cachetools<3',\n 'capstone>=3.0.5rc2',\n 'cooldict',\n 'dpkt',\n 'futures; python_version == \"2.7\"',\n 'mulpyplexer',\n 'networkx>=2.0',\n 'progressbar',\n 'rpyc',\n 'cffi>=1.7.0',\n 'unicorn',\n 'archinfo==8.18.10.25',\n 'claripy==8.18.10.25',\n 'cle==8.18.10.25',\n 'pyvex==8.18.10.25',\n 'ailment==8.18.10.25',\n 'GitPython',\n 'pycparser>=2.18',\n 'itanium_demangler',\n ],\n setup_requires=['unicorn', 'pyvex'],\n cmdclass=cmdclass,\n include_package_data=True,\n package_data={\n 'angr': ['lib/*']\n }\n)\n", "path": "setup.py"}]} | 2,301 | 89 |
gh_patches_debug_11730 | rasdani/github-patches | git_diff | ckan__ckan-7077 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Search indexing logic called twice after update or create dataset
**CKAN version**
2.10
**Describe the bug**
When updating or creating a dataset, we are indexing the dataset twice in a row, ie the [`index_package()`](https://github.com/ckan/ckan/blob/9f1b5cfaff8c135b589e2ea0275f1286c2e02711/ckan/lib/search/index.py#L108) function gets called twice during the same operation (and of course any `IPackageController.before_index()` hook gets called twice as well.
The root cause is the the obscure code run in the [`DomainObjectModificationExtension`](https://github.com/ckan/ckan/blob/9f1b5cfaff8c135b589e2ea0275f1286c2e02711/ckan/model/modification.py#L27), which considers the same Package object both new and changed at the same time, and fires two separate notification events.
</issue>
<code>
[start of ckan/model/modification.py]
1 # encoding: utf-8
2
3 import logging
4 from typing import Any
5
6 from ckan.lib.search import SearchIndexError
7
8 import ckan.plugins as plugins
9 import ckan.model as model
10
11 log = logging.getLogger(__name__)
12
13 __all__ = ['DomainObjectModificationExtension']
14
15
16 class DomainObjectModificationExtension(plugins.SingletonPlugin):
17 """
18 Notify observers about domain object modifications before commit.
19
20 Observers are other plugins implementing the IDomainObjectModification
21 interface.
22 """
23
24 def before_commit(self, session: Any):
25 self.notify_observers(session, self.notify)
26
27 def notify_observers(self, session: Any, method: Any):
28 session.flush()
29 if not hasattr(session, '_object_cache'):
30 return
31
32 obj_cache = session._object_cache
33 new = obj_cache['new']
34 changed = obj_cache['changed']
35 deleted = obj_cache['deleted']
36
37 for obj in set(new):
38 if isinstance(obj, (model.Package, model.Resource)):
39 method(obj, model.DomainObjectOperation.new)
40 for obj in set(deleted):
41 if isinstance(obj, (model.Package, model.Resource)):
42 method(obj, model.DomainObjectOperation.deleted)
43 for obj in set(changed):
44 if isinstance(obj, model.Resource):
45 method(obj, model.DomainObjectOperation.changed)
46 if getattr(obj, 'url_changed', False):
47 for item in plugins.PluginImplementations(plugins.IResourceUrlChange):
48 item.notify(obj)
49
50 changed_pkgs = set(obj for obj in changed
51 if isinstance(obj, model.Package))
52
53 for obj in new | changed | deleted:
54 if not isinstance(obj, model.Package):
55 try:
56 changed_pkgs.update(obj.related_packages())
57 except AttributeError:
58 continue
59
60 for obj in changed_pkgs:
61 method(obj, model.DomainObjectOperation.changed)
62
63 def notify(self, entity: Any, operation: Any):
64 for observer in plugins.PluginImplementations(
65 plugins.IDomainObjectModification):
66 try:
67 observer.notify(entity, operation)
68 except SearchIndexError as search_error:
69 log.exception(search_error)
70 # Reraise, since it's pretty crucial to ckan if it can't index
71 # a dataset
72 raise
73 except Exception as ex:
74 log.exception(ex)
75 # Don't reraise other exceptions since they are generally of
76 # secondary importance so shouldn't disrupt the commit.
77
[end of ckan/model/modification.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckan/model/modification.py b/ckan/model/modification.py
--- a/ckan/model/modification.py
+++ b/ckan/model/modification.py
@@ -47,8 +47,12 @@
for item in plugins.PluginImplementations(plugins.IResourceUrlChange):
item.notify(obj)
- changed_pkgs = set(obj for obj in changed
- if isinstance(obj, model.Package))
+
+ changed_pkgs = set()
+ new_pkg_ids = [obj.id for obj in new if isinstance(obj, model.Package)]
+ for obj in changed:
+ if isinstance(obj, model.Package) and obj.id not in new_pkg_ids:
+ changed_pkgs.add(obj)
for obj in new | changed | deleted:
if not isinstance(obj, model.Package):
| {"golden_diff": "diff --git a/ckan/model/modification.py b/ckan/model/modification.py\n--- a/ckan/model/modification.py\n+++ b/ckan/model/modification.py\n@@ -47,8 +47,12 @@\n for item in plugins.PluginImplementations(plugins.IResourceUrlChange):\n item.notify(obj)\n \n- changed_pkgs = set(obj for obj in changed\n- if isinstance(obj, model.Package))\n+\n+ changed_pkgs = set()\n+ new_pkg_ids = [obj.id for obj in new if isinstance(obj, model.Package)]\n+ for obj in changed:\n+ if isinstance(obj, model.Package) and obj.id not in new_pkg_ids:\n+ changed_pkgs.add(obj)\n \n for obj in new | changed | deleted:\n if not isinstance(obj, model.Package):\n", "issue": "Search indexing logic called twice after update or create dataset\n**CKAN version**\r\n2.10\r\n\r\n\r\n**Describe the bug**\r\n\r\nWhen updating or creating a dataset, we are indexing the dataset twice in a row, ie the [`index_package()`](https://github.com/ckan/ckan/blob/9f1b5cfaff8c135b589e2ea0275f1286c2e02711/ckan/lib/search/index.py#L108) function gets called twice during the same operation (and of course any `IPackageController.before_index()` hook gets called twice as well.\r\n\r\nThe root cause is the the obscure code run in the [`DomainObjectModificationExtension`](https://github.com/ckan/ckan/blob/9f1b5cfaff8c135b589e2ea0275f1286c2e02711/ckan/model/modification.py#L27), which considers the same Package object both new and changed at the same time, and fires two separate notification events.\n", "before_files": [{"content": "# encoding: utf-8\n\nimport logging\nfrom typing import Any\n\nfrom ckan.lib.search import SearchIndexError\n\nimport ckan.plugins as plugins\nimport ckan.model as model\n\nlog = logging.getLogger(__name__)\n\n__all__ = ['DomainObjectModificationExtension']\n\n\nclass DomainObjectModificationExtension(plugins.SingletonPlugin):\n \"\"\"\n Notify observers about domain object modifications before commit.\n\n Observers are other plugins implementing the IDomainObjectModification\n interface.\n \"\"\"\n\n def before_commit(self, session: Any):\n self.notify_observers(session, self.notify)\n\n def notify_observers(self, session: Any, method: Any):\n session.flush()\n if not hasattr(session, '_object_cache'):\n return\n\n obj_cache = session._object_cache\n new = obj_cache['new']\n changed = obj_cache['changed']\n deleted = obj_cache['deleted']\n\n for obj in set(new):\n if isinstance(obj, (model.Package, model.Resource)):\n method(obj, model.DomainObjectOperation.new)\n for obj in set(deleted):\n if isinstance(obj, (model.Package, model.Resource)):\n method(obj, model.DomainObjectOperation.deleted)\n for obj in set(changed):\n if isinstance(obj, model.Resource):\n method(obj, model.DomainObjectOperation.changed)\n if getattr(obj, 'url_changed', False):\n for item in plugins.PluginImplementations(plugins.IResourceUrlChange):\n item.notify(obj)\n\n changed_pkgs = set(obj for obj in changed\n if isinstance(obj, model.Package))\n\n for obj in new | changed | deleted:\n if not isinstance(obj, model.Package):\n try:\n changed_pkgs.update(obj.related_packages())\n except AttributeError:\n continue\n\n for obj in changed_pkgs:\n method(obj, model.DomainObjectOperation.changed)\n\n def notify(self, entity: Any, operation: Any):\n for observer in plugins.PluginImplementations(\n plugins.IDomainObjectModification):\n try:\n observer.notify(entity, operation)\n except SearchIndexError as search_error:\n log.exception(search_error)\n # Reraise, since it's pretty crucial to ckan if it can't index\n # a dataset\n raise\n 
except Exception as ex:\n log.exception(ex)\n # Don't reraise other exceptions since they are generally of\n # secondary importance so shouldn't disrupt the commit.\n", "path": "ckan/model/modification.py"}]} | 1,428 | 178 |
gh_patches_debug_21020 | rasdani/github-patches | git_diff | deepchecks__deepchecks-1106 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Warning has been seen to appear below OK checks in suite condition table
**Describe the bug**
In a certain case, the condition table has been seen to have a warning appear blow the OK checks
**Expected behavior**
Condition order in the table should be:
Fail with Display
Fail without Display
Warning with Display
Warning without Display
Pass with Display
Pass without Display
Other then that order should conform to order of checks in the check_result.
**Screenshots**


</issue>
<code>
[start of deepchecks/core/display_pandas.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Handle displays of pandas objects."""
12 from deepchecks.utils.strings import get_docs_summary, get_ellipsis
13
14 from typing import List, Union, TYPE_CHECKING
15 import warnings
16
17 import pandas as pd
18 from pandas.io.formats.style import Styler
19
20
21 if TYPE_CHECKING:
22 from deepchecks.core.check_result import CheckResult
23
24
25 __all__ = ['dataframe_to_html', 'get_conditions_table']
26
27
28 def dataframe_to_html(df: Union[pd.DataFrame, Styler]):
29 """Convert dataframe to html.
30
31 Parameters
32 ----------
33 df: Union[pd.DataFrame, Styler]
34 Dataframe to convert to html
35
36 Returns
37 -------
38 pd.DataFrame:
39 dataframe with styling
40 """
41 try:
42 if isinstance(df, pd.DataFrame):
43 df_styler = df.style
44 else:
45 df_styler = df
46 # Using deprecated pandas method so hiding the warning
47 with warnings.catch_warnings():
48 warnings.simplefilter(action='ignore', category=FutureWarning)
49 df_styler.set_precision(2)
50
51 table_css_props = [
52 ('text-align', 'left'), # Align everything to the left
53 ('white-space', 'pre-wrap') # Define how to handle white space characters (like \n)
54 ]
55 df_styler.set_table_styles([dict(selector='table,thead,tbody,th,td', props=table_css_props)])
56 return df_styler.render()
57 # Because of MLC-154. Dataframe with Multi-index or non unique indices does not have a style
58 # attribute, hence we need to display as a regular pd html format.
59 except ValueError:
60 return df.to_html()
61
62
63 def get_conditions_table(check_results: Union['CheckResult', List['CheckResult']],
64 unique_id: str = None, max_info_len: int = 3000, icon_html: bool = True) -> Styler:
65 """Return the conditions table as DataFrame.
66
67 Parameters
68 ----------
69 check_results : Union['CheckResult', List['CheckResult']]
70 check results to show conditions of.
71 unique_id : str
72 the unique id to append for the check names to create links (won't create links if None/empty).
73 max_info_len : int
74 max length of the additional info.
75 icon_html : bool , default: True
76 if to show the html condition result icon or the enum
77 Returns
78 -------
79 pd.Dataframe:
80 the condition table.
81 """
82 if not isinstance(check_results, List):
83 show_check_column = False
84 check_results = [check_results]
85 else:
86 show_check_column = True
87
88 table = []
89 for check_result in check_results:
90 for cond_result in check_result.conditions_results:
91 sort_value = cond_result.priority
92 icon = cond_result.get_icon() if icon_html else cond_result.category.value
93 check_header = check_result.get_header()
94 if unique_id and check_result.have_display():
95 check_id = f'{check_result.check.__class__.__name__}_{unique_id}'
96 link = f'<a href=#{check_id}>{check_header}</a>'
97 else:
98 link = check_header
99 sort_value = 1 if sort_value == 1 else 5 # if it failed but has no display still show on top
100 table.append([icon, link, cond_result.name,
101 cond_result.details, sort_value])
102
103 conditions_table = pd.DataFrame(data=table,
104 columns=['Status', 'Check', 'Condition', 'More Info', 'sort'])
105 conditions_table.sort_values(by=['sort'], inplace=True)
106 conditions_table.drop('sort', axis=1, inplace=True)
107 if show_check_column is False:
108 conditions_table.drop('Check', axis=1, inplace=True)
109 conditions_table['More Info'] = conditions_table['More Info'].map(lambda x: get_ellipsis(x, max_info_len))
110 with warnings.catch_warnings():
111 warnings.simplefilter(action='ignore', category=FutureWarning)
112 return conditions_table.style.hide_index()
113
114
115 def get_result_navigation_display(check_results: List['CheckResult'], unique_id: str):
116 """Display the results as a table with links for navigation.
117
118 Parameters
119 ----------
120 check_results : Union['CheckResult', List['CheckResult']]
121 check results to show navigation for.
122 unique_id : str
123 the unique id to append for the check names to create links.
124 Returns
125 -------
126 str
127 html representation of the navigation table.
128 """
129 table = []
130 for check_result in check_results:
131 if check_result.have_display():
132 check_header = check_result.get_header()
133 check_id = f'{check_result.check.__class__.__name__}_{unique_id}'
134 link = f'<a href=#{check_id}>{check_header}</a>'
135 summary = get_docs_summary(check_result.check)
136 table.append([link, summary])
137
138 nav_table = pd.DataFrame(data=table,
139 columns=['Check', 'Summary'])
140 with warnings.catch_warnings():
141 warnings.simplefilter(action='ignore', category=FutureWarning)
142 return dataframe_to_html(nav_table.style.hide_index())
143
[end of deepchecks/core/display_pandas.py]
[start of deepchecks/utils/typing.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Type definitions."""
12 # pylint: disable=invalid-hash-returned,invalid-name
13 from typing_extensions import Protocol, runtime_checkable
14 from typing import List
15
16
17 __all__ = ['Hashable', 'BasicModel', 'ClassificationModel']
18
19
20 @runtime_checkable
21 class Hashable(Protocol):
22 """Trait for any hashable type that also defines comparison operators."""
23
24 def __hash__(self) -> int: # noqa: D105
25 ...
26
27 def __le__(self, __value) -> bool: # noqa: D105
28 ...
29
30 def __lt__(self, __value) -> bool: # noqa: D105
31 ...
32
33 def __ge__(self, __value) -> bool: # noqa: D105
34 ...
35
36 def __gt__(self, __value) -> bool: # noqa: D105
37 ...
38
39 def __eq__(self, __value) -> bool: # noqa: D105
40 ...
41
42
43 @runtime_checkable
44 class BasicModel(Protocol):
45 """Traits of a model that are necessary for deepchecks."""
46
47 def predict(self, X) -> List[Hashable]:
48 """Predict on given X."""
49 ...
50
51
52 @runtime_checkable
53 class ClassificationModel(BasicModel, Protocol):
54 """Traits of a classification model that are used by deepchecks."""
55
56 def predict_proba(self, X) -> List[Hashable]:
57 """Predict probabilities on given X."""
58 ...
59
[end of deepchecks/utils/typing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/deepchecks/core/display_pandas.py b/deepchecks/core/display_pandas.py
--- a/deepchecks/core/display_pandas.py
+++ b/deepchecks/core/display_pandas.py
@@ -96,7 +96,8 @@
link = f'<a href=#{check_id}>{check_header}</a>'
else:
link = check_header
- sort_value = 1 if sort_value == 1 else 5 # if it failed but has no display still show on top
+ # if it has no display show on bottom for the category (lower priority)
+ sort_value += 0.1
table.append([icon, link, cond_result.name,
cond_result.details, sort_value])
diff --git a/deepchecks/utils/typing.py b/deepchecks/utils/typing.py
--- a/deepchecks/utils/typing.py
+++ b/deepchecks/utils/typing.py
@@ -46,7 +46,6 @@
def predict(self, X) -> List[Hashable]:
"""Predict on given X."""
- ...
@runtime_checkable
@@ -55,4 +54,3 @@
def predict_proba(self, X) -> List[Hashable]:
"""Predict probabilities on given X."""
- ...
| {"golden_diff": "diff --git a/deepchecks/core/display_pandas.py b/deepchecks/core/display_pandas.py\n--- a/deepchecks/core/display_pandas.py\n+++ b/deepchecks/core/display_pandas.py\n@@ -96,7 +96,8 @@\n link = f'<a href=#{check_id}>{check_header}</a>'\n else:\n link = check_header\n- sort_value = 1 if sort_value == 1 else 5 # if it failed but has no display still show on top\n+ # if it has no display show on bottom for the category (lower priority)\n+ sort_value += 0.1\n table.append([icon, link, cond_result.name,\n cond_result.details, sort_value])\n \ndiff --git a/deepchecks/utils/typing.py b/deepchecks/utils/typing.py\n--- a/deepchecks/utils/typing.py\n+++ b/deepchecks/utils/typing.py\n@@ -46,7 +46,6 @@\n \n def predict(self, X) -> List[Hashable]:\n \"\"\"Predict on given X.\"\"\"\n- ...\n \n \n @runtime_checkable\n@@ -55,4 +54,3 @@\n \n def predict_proba(self, X) -> List[Hashable]:\n \"\"\"Predict probabilities on given X.\"\"\"\n- ...\n", "issue": "[BUG] Warning has been seen to appear below OK checks in suite condition table\n**Describe the bug**\r\nIn a certain case, the condition table has been seen to have a warning appear blow the OK checks\r\n\r\n**Expected behavior**\r\nCondition order in the table should be:\r\nFail with Display\r\nFail without Display\r\nWarning with Display\r\nWarning without Display\r\nPass with Display\r\nPass without Display\r\n\r\nOther then that order should conform to order of checks in the check_result. \r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Handle displays of pandas objects.\"\"\"\nfrom deepchecks.utils.strings import get_docs_summary, get_ellipsis\n\nfrom typing import List, Union, TYPE_CHECKING\nimport warnings\n\nimport pandas as pd\nfrom pandas.io.formats.style import Styler\n\n\nif TYPE_CHECKING:\n from deepchecks.core.check_result import CheckResult\n\n\n__all__ = ['dataframe_to_html', 'get_conditions_table']\n\n\ndef dataframe_to_html(df: Union[pd.DataFrame, Styler]):\n \"\"\"Convert dataframe to html.\n\n Parameters\n ----------\n df: Union[pd.DataFrame, Styler]\n Dataframe to convert to html\n\n Returns\n -------\n pd.DataFrame:\n dataframe with styling\n \"\"\"\n try:\n if isinstance(df, pd.DataFrame):\n df_styler = df.style\n else:\n df_styler = df\n # Using deprecated pandas method so hiding the warning\n with warnings.catch_warnings():\n warnings.simplefilter(action='ignore', category=FutureWarning)\n df_styler.set_precision(2)\n\n table_css_props = [\n ('text-align', 'left'), # Align everything to the left\n ('white-space', 'pre-wrap') # Define how to handle white space characters (like \\n)\n ]\n df_styler.set_table_styles([dict(selector='table,thead,tbody,th,td', props=table_css_props)])\n return df_styler.render()\n # Because of MLC-154. 
Dataframe with Multi-index or non unique indices does not have a style\n # attribute, hence we need to display as a regular pd html format.\n except ValueError:\n return df.to_html()\n\n\ndef get_conditions_table(check_results: Union['CheckResult', List['CheckResult']],\n unique_id: str = None, max_info_len: int = 3000, icon_html: bool = True) -> Styler:\n \"\"\"Return the conditions table as DataFrame.\n\n Parameters\n ----------\n check_results : Union['CheckResult', List['CheckResult']]\n check results to show conditions of.\n unique_id : str\n the unique id to append for the check names to create links (won't create links if None/empty).\n max_info_len : int\n max length of the additional info.\n icon_html : bool , default: True\n if to show the html condition result icon or the enum\n Returns\n -------\n pd.Dataframe:\n the condition table.\n \"\"\"\n if not isinstance(check_results, List):\n show_check_column = False\n check_results = [check_results]\n else:\n show_check_column = True\n\n table = []\n for check_result in check_results:\n for cond_result in check_result.conditions_results:\n sort_value = cond_result.priority\n icon = cond_result.get_icon() if icon_html else cond_result.category.value\n check_header = check_result.get_header()\n if unique_id and check_result.have_display():\n check_id = f'{check_result.check.__class__.__name__}_{unique_id}'\n link = f'<a href=#{check_id}>{check_header}</a>'\n else:\n link = check_header\n sort_value = 1 if sort_value == 1 else 5 # if it failed but has no display still show on top\n table.append([icon, link, cond_result.name,\n cond_result.details, sort_value])\n\n conditions_table = pd.DataFrame(data=table,\n columns=['Status', 'Check', 'Condition', 'More Info', 'sort'])\n conditions_table.sort_values(by=['sort'], inplace=True)\n conditions_table.drop('sort', axis=1, inplace=True)\n if show_check_column is False:\n conditions_table.drop('Check', axis=1, inplace=True)\n conditions_table['More Info'] = conditions_table['More Info'].map(lambda x: get_ellipsis(x, max_info_len))\n with warnings.catch_warnings():\n warnings.simplefilter(action='ignore', category=FutureWarning)\n return conditions_table.style.hide_index()\n\n\ndef get_result_navigation_display(check_results: List['CheckResult'], unique_id: str):\n \"\"\"Display the results as a table with links for navigation.\n\n Parameters\n ----------\n check_results : Union['CheckResult', List['CheckResult']]\n check results to show navigation for.\n unique_id : str\n the unique id to append for the check names to create links.\n Returns\n -------\n str\n html representation of the navigation table.\n \"\"\"\n table = []\n for check_result in check_results:\n if check_result.have_display():\n check_header = check_result.get_header()\n check_id = f'{check_result.check.__class__.__name__}_{unique_id}'\n link = f'<a href=#{check_id}>{check_header}</a>'\n summary = get_docs_summary(check_result.check)\n table.append([link, summary])\n\n nav_table = pd.DataFrame(data=table,\n columns=['Check', 'Summary'])\n with warnings.catch_warnings():\n warnings.simplefilter(action='ignore', category=FutureWarning)\n return dataframe_to_html(nav_table.style.hide_index())\n", "path": "deepchecks/core/display_pandas.py"}, {"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License 
(version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Type definitions.\"\"\"\n# pylint: disable=invalid-hash-returned,invalid-name\nfrom typing_extensions import Protocol, runtime_checkable\nfrom typing import List\n\n\n__all__ = ['Hashable', 'BasicModel', 'ClassificationModel']\n\n\n@runtime_checkable\nclass Hashable(Protocol):\n \"\"\"Trait for any hashable type that also defines comparison operators.\"\"\"\n\n def __hash__(self) -> int: # noqa: D105\n ...\n\n def __le__(self, __value) -> bool: # noqa: D105\n ...\n\n def __lt__(self, __value) -> bool: # noqa: D105\n ...\n\n def __ge__(self, __value) -> bool: # noqa: D105\n ...\n\n def __gt__(self, __value) -> bool: # noqa: D105\n ...\n\n def __eq__(self, __value) -> bool: # noqa: D105\n ...\n\n\n@runtime_checkable\nclass BasicModel(Protocol):\n \"\"\"Traits of a model that are necessary for deepchecks.\"\"\"\n\n def predict(self, X) -> List[Hashable]:\n \"\"\"Predict on given X.\"\"\"\n ...\n\n\n@runtime_checkable\nclass ClassificationModel(BasicModel, Protocol):\n \"\"\"Traits of a classification model that are used by deepchecks.\"\"\"\n\n def predict_proba(self, X) -> List[Hashable]:\n \"\"\"Predict probabilities on given X.\"\"\"\n ...\n", "path": "deepchecks/utils/typing.py"}]} | 2,822 | 280 |
gh_patches_debug_36676 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-2767 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py]
1 from typing import Optional
2
3 import torch
4 import torch.distributed as dist
5 import torch.nn as nn
6 import torch.optim as optim
7
8 import colossalai
9 from colossalai.nn.optimizer import CPUAdam, HybridAdam
10 from colossalai.nn.parallel import zero_model_wrapper, zero_optim_wrapper
11 from colossalai.tensor import ProcessGroup, ShardSpec
12 from colossalai.utils import get_current_device
13 from colossalai.utils.model.colo_init_context import ColoInitContext
14
15 from .ddp import DDPStrategy
16
17
18 class ColossalAIStrategy(DDPStrategy):
19 """
20 The strategy for training with ColossalAI.
21
22 Args:
23 stage(int): The stage to use in ZeRO. Choose in (1, 2, 3)
24 seed(int): The seed for the random number generator.
25 shard_init(bool): Whether to shard the model parameters during initialization. Only for ZeRO-3.
26 placement_policy(str): The placement policy for gemini. Choose in ('cpu', 'cuda')
27 If it is “cpu”, parameters, gradients and optimizer states will be offloaded to CPU,
28 If it is “cuda”, they will not be offloaded, which means max CUDA memory will be used. It is the fastest.
29 pin_memory(bool): Whether to pin the memory for the data loader. Only for ZeRO-3.
30 force_outputs_fp32(bool): Whether to force the outputs to be fp32. Only for ZeRO-3.
31 search_range_mb(int): The search range in MB for the chunk size. Only for ZeRO-3.
32 hidden_dim(optional, int): The hidden dimension for the gemini. Only for ZeRO-3.
33 min_chunk_size_mb(float): The minimum chunk size in MB. Only for ZeRO-3.
34 gpu_margin_mem_ratio(float): The margin memory ratio for the GPU. Only for ZeRO-3.
35         reduce_bucket_size(int): The reduce bucket size in bytes. Only for ZeRO-1 and ZeRO-2.
36 overlap_communication(bool): Whether to overlap communication and computation. Only for ZeRO-1 and ZeRO-2.
37 initial_scale(float): The initial scale for the optimizer.
38 growth_factor(float): The growth factor for the optimizer.
39 backoff_factor(float): The backoff factor for the optimizer.
40 growth_interval(int): The growth interval for the optimizer.
41 hysteresis(int): The hysteresis for the optimizer.
42 min_scale(float): The minimum scale for the optimizer.
43 max_scale(float): The maximum scale for the optimizer.
44 max_norm(float): The maximum norm for the optimizer.
45 norm_type(float): The norm type for the optimizer.
46
47 """
48
49 def __init__(
50 self,
51 stage: int = 3,
52 seed: int = 42,
53 shard_init: bool = True, # only for stage 3
54 placement_policy: str = 'cuda',
55 pin_memory: bool = True, # only for stage 3
56 force_outputs_fp32: bool = False, # only for stage 3
57 search_range_mb: int = 32, # only for stage 3
58 hidden_dim: Optional[int] = None, # only for stage 3
59 min_chunk_size_mb: float = 32, # only for stage 3
60 gpu_margin_mem_ratio: float = 0.0, # only for stage 3
61 reduce_bucket_size: int = 12 * 1024**2, # only for stage 1&2
62 overlap_communication: bool = True, # only for stage 1&2
63 initial_scale: float = 2**16,
64 growth_factor: float = 2,
65 backoff_factor: float = 0.5,
66 growth_interval: int = 1000,
67 hysteresis: int = 2,
68 min_scale: float = 1,
69 max_scale: float = 2**32,
70 max_norm: float = 0.0,
71 norm_type: float = 2.0) -> None:
72 super().__init__(seed)
73 assert placement_policy in ('cpu', 'cuda'), f'Unsupported placement policy "{placement_policy}"'
74 self.stage = stage
75 self.shard_init = shard_init
76 self.gemini_config = dict(device=get_current_device(),
77 placement_policy=placement_policy,
78 pin_memory=pin_memory,
79 force_outputs_fp32=force_outputs_fp32,
80 strict_ddp_mode=shard_init,
81 search_range_mb=search_range_mb,
82 hidden_dim=hidden_dim,
83 min_chunk_size_mb=min_chunk_size_mb)
84 if stage == 3:
85 self.zero_optim_config = dict(gpu_margin_mem_ratio=gpu_margin_mem_ratio)
86 else:
87 self.zero_optim_config = dict(reduce_bucket_size=reduce_bucket_size,
88 overlap_communication=overlap_communication,
89 cpu_offload=(placement_policy == 'cpu'))
90 self.optim_kwargs = dict(initial_scale=initial_scale,
91 growth_factor=growth_factor,
92 backoff_factor=backoff_factor,
93 growth_interval=growth_interval,
94 hysteresis=hysteresis,
95 min_scale=min_scale,
96 max_scale=max_scale,
97 max_norm=max_norm,
98 norm_type=norm_type)
99
100 def setup_distributed(self) -> None:
101 colossalai.launch_from_torch({}, seed=self.seed)
102
103 def model_init_context(self):
104 if self.stage == 3:
105 world_size = dist.get_world_size()
106 shard_pg = ProcessGroup(tp_degree=world_size) if self.shard_init else None
107 default_dist_spec = ShardSpec([-1], [world_size]) if self.shard_init else None
108 return ColoInitContext(device=get_current_device(),
109 dtype=torch.half,
110 default_pg=shard_pg,
111 default_dist_spec=default_dist_spec)
112 return super().model_init_context()
113
114 def setup_model(self, model: nn.Module) -> nn.Module:
115 return zero_model_wrapper(model, zero_stage=self.stage, gemini_config=self.gemini_config)
116
117 def setup_optimizer(self, optimizer: optim.Optimizer, model: nn.Module) -> optim.Optimizer:
118 assert isinstance(optimizer, (CPUAdam, HybridAdam)), f'Unsupported optimizer {type(optimizer)}'
119 return zero_optim_wrapper(model, optimizer, optim_config=self.zero_optim_config, **self.optim_kwargs)
120
121 def backward(self, loss: torch.Tensor, model: nn.Module, optimizer: optim.Optimizer, **kwargs) -> None:
122 optimizer.backward(loss)
123
124 def optimizer_step(self, optimizer: optim.Optimizer, **kwargs) -> None:
125 optimizer.step()
126
[end of applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py]
</code>
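The class docstring above is effectively a configuration reference, so a short usage sketch helps map it onto the constructor. The import path and the `build_model()` factory below are assumptions made for illustration, and the argument values are examples rather than recommendations.

```python
# Usage sketch only; import path and build_model() are assumed for illustration.
from chatgpt.trainer.strategies import ColossalAIStrategy  # assumed export path

strategy = ColossalAIStrategy(
    stage=3,                 # ZeRO-3: shard parameters, gradients and optimizer states
    placement_policy='cpu',  # offload to CPU, trading speed for GPU memory
    pin_memory=True,
    gpu_margin_mem_ratio=0.0,
)

strategy.setup_distributed()          # wraps colossalai.launch_from_torch
with strategy.model_init_context():   # ColoInitContext when stage == 3
    model = build_model()             # hypothetical model factory
model = strategy.setup_model(model)   # zero_model_wrapper under the hood
```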
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py b/applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py
--- a/applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py
+++ b/applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py
@@ -1,3 +1,4 @@
+import warnings
from typing import Optional
import torch
@@ -23,6 +24,7 @@
stage(int): The stage to use in ZeRO. Choose in (1, 2, 3)
seed(int): The seed for the random number generator.
shard_init(bool): Whether to shard the model parameters during initialization. Only for ZeRO-3.
+ This is not compativle with `from_pretrained()`. We temporarily disable this and will support it in the future.
placement_policy(str): The placement policy for gemini. Choose in ('cpu', 'cuda')
If it is “cpu”, parameters, gradients and optimizer states will be offloaded to CPU,
If it is “cuda”, they will not be offloaded, which means max CUDA memory will be used. It is the fastest.
@@ -50,7 +52,7 @@
self,
stage: int = 3,
seed: int = 42,
- shard_init: bool = True, # only for stage 3
+ shard_init: bool = False, # only for stage 3
placement_policy: str = 'cuda',
pin_memory: bool = True, # only for stage 3
force_outputs_fp32: bool = False, # only for stage 3
@@ -72,6 +74,10 @@
super().__init__(seed)
assert placement_policy in ('cpu', 'cuda'), f'Unsupported placement policy "{placement_policy}"'
self.stage = stage
+ # TODO(ver217): support shard_init when using from_pretrained()
+ if shard_init:
+ warnings.warn(f'Shard init is not supported yet. Ignore.')
+ shard_init = False
self.shard_init = shard_init
self.gemini_config = dict(device=get_current_device(),
placement_policy=placement_policy,
| {"golden_diff": "diff --git a/applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py b/applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py\n--- a/applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py\n+++ b/applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py\n@@ -1,3 +1,4 @@\n+import warnings\n from typing import Optional\n \n import torch\n@@ -23,6 +24,7 @@\n stage(int): The stage to use in ZeRO. Choose in (1, 2, 3)\n seed(int): The seed for the random number generator.\n shard_init(bool): Whether to shard the model parameters during initialization. Only for ZeRO-3.\n+ This is not compativle with `from_pretrained()`. We temporarily disable this and will support it in the future.\n placement_policy(str): The placement policy for gemini. Choose in ('cpu', 'cuda')\n If it is \u201ccpu\u201d, parameters, gradients and optimizer states will be offloaded to CPU,\n If it is \u201ccuda\u201d, they will not be offloaded, which means max CUDA memory will be used. It is the fastest.\n@@ -50,7 +52,7 @@\n self,\n stage: int = 3,\n seed: int = 42,\n- shard_init: bool = True, # only for stage 3\n+ shard_init: bool = False, # only for stage 3\n placement_policy: str = 'cuda',\n pin_memory: bool = True, # only for stage 3\n force_outputs_fp32: bool = False, # only for stage 3\n@@ -72,6 +74,10 @@\n super().__init__(seed)\n assert placement_policy in ('cpu', 'cuda'), f'Unsupported placement policy \"{placement_policy}\"'\n self.stage = stage\n+ # TODO(ver217): support shard_init when using from_pretrained()\n+ if shard_init:\n+ warnings.warn(f'Shard init is not supported yet. Ignore.')\n+ shard_init = False\n self.shard_init = shard_init\n self.gemini_config = dict(device=get_current_device(),\n placement_policy=placement_policy,\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from typing import Optional\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport colossalai\nfrom colossalai.nn.optimizer import CPUAdam, HybridAdam\nfrom colossalai.nn.parallel import zero_model_wrapper, zero_optim_wrapper\nfrom colossalai.tensor import ProcessGroup, ShardSpec\nfrom colossalai.utils import get_current_device\nfrom colossalai.utils.model.colo_init_context import ColoInitContext\n\nfrom .ddp import DDPStrategy\n\n\nclass ColossalAIStrategy(DDPStrategy):\n \"\"\"\n The strategy for training with ColossalAI.\n\n Args:\n stage(int): The stage to use in ZeRO. Choose in (1, 2, 3)\n seed(int): The seed for the random number generator.\n shard_init(bool): Whether to shard the model parameters during initialization. Only for ZeRO-3.\n placement_policy(str): The placement policy for gemini. Choose in ('cpu', 'cuda')\n If it is \u201ccpu\u201d, parameters, gradients and optimizer states will be offloaded to CPU,\n If it is \u201ccuda\u201d, they will not be offloaded, which means max CUDA memory will be used. It is the fastest.\n pin_memory(bool): Whether to pin the memory for the data loader. Only for ZeRO-3.\n force_outputs_fp32(bool): Whether to force the outputs to be fp32. Only for ZeRO-3.\n search_range_mb(int): The search range in MB for the chunk size. Only for ZeRO-3.\n hidden_dim(optional, int): The hidden dimension for the gemini. Only for ZeRO-3.\n min_chunk_size_mb(float): The minimum chunk size in MB. Only for ZeRO-3.\n gpu_margin_mem_ratio(float): The margin memory ratio for the GPU. 
Only for ZeRO-3.\n reduce_bugket_size(int): The reduce bucket size in bytes. Only for ZeRO-1 and ZeRO-2.\n overlap_communication(bool): Whether to overlap communication and computation. Only for ZeRO-1 and ZeRO-2.\n initial_scale(float): The initial scale for the optimizer.\n growth_factor(float): The growth factor for the optimizer.\n backoff_factor(float): The backoff factor for the optimizer.\n growth_interval(int): The growth interval for the optimizer.\n hysteresis(int): The hysteresis for the optimizer.\n min_scale(float): The minimum scale for the optimizer.\n max_scale(float): The maximum scale for the optimizer.\n max_norm(float): The maximum norm for the optimizer.\n norm_type(float): The norm type for the optimizer.\n\n \"\"\"\n\n def __init__(\n self,\n stage: int = 3,\n seed: int = 42,\n shard_init: bool = True, # only for stage 3\n placement_policy: str = 'cuda',\n pin_memory: bool = True, # only for stage 3\n force_outputs_fp32: bool = False, # only for stage 3\n search_range_mb: int = 32, # only for stage 3\n hidden_dim: Optional[int] = None, # only for stage 3\n min_chunk_size_mb: float = 32, # only for stage 3\n gpu_margin_mem_ratio: float = 0.0, # only for stage 3\n reduce_bucket_size: int = 12 * 1024**2, # only for stage 1&2\n overlap_communication: bool = True, # only for stage 1&2\n initial_scale: float = 2**16,\n growth_factor: float = 2,\n backoff_factor: float = 0.5,\n growth_interval: int = 1000,\n hysteresis: int = 2,\n min_scale: float = 1,\n max_scale: float = 2**32,\n max_norm: float = 0.0,\n norm_type: float = 2.0) -> None:\n super().__init__(seed)\n assert placement_policy in ('cpu', 'cuda'), f'Unsupported placement policy \"{placement_policy}\"'\n self.stage = stage\n self.shard_init = shard_init\n self.gemini_config = dict(device=get_current_device(),\n placement_policy=placement_policy,\n pin_memory=pin_memory,\n force_outputs_fp32=force_outputs_fp32,\n strict_ddp_mode=shard_init,\n search_range_mb=search_range_mb,\n hidden_dim=hidden_dim,\n min_chunk_size_mb=min_chunk_size_mb)\n if stage == 3:\n self.zero_optim_config = dict(gpu_margin_mem_ratio=gpu_margin_mem_ratio)\n else:\n self.zero_optim_config = dict(reduce_bucket_size=reduce_bucket_size,\n overlap_communication=overlap_communication,\n cpu_offload=(placement_policy == 'cpu'))\n self.optim_kwargs = dict(initial_scale=initial_scale,\n growth_factor=growth_factor,\n backoff_factor=backoff_factor,\n growth_interval=growth_interval,\n hysteresis=hysteresis,\n min_scale=min_scale,\n max_scale=max_scale,\n max_norm=max_norm,\n norm_type=norm_type)\n\n def setup_distributed(self) -> None:\n colossalai.launch_from_torch({}, seed=self.seed)\n\n def model_init_context(self):\n if self.stage == 3:\n world_size = dist.get_world_size()\n shard_pg = ProcessGroup(tp_degree=world_size) if self.shard_init else None\n default_dist_spec = ShardSpec([-1], [world_size]) if self.shard_init else None\n return ColoInitContext(device=get_current_device(),\n dtype=torch.half,\n default_pg=shard_pg,\n default_dist_spec=default_dist_spec)\n return super().model_init_context()\n\n def setup_model(self, model: nn.Module) -> nn.Module:\n return zero_model_wrapper(model, zero_stage=self.stage, gemini_config=self.gemini_config)\n\n def setup_optimizer(self, optimizer: optim.Optimizer, model: nn.Module) -> optim.Optimizer:\n assert isinstance(optimizer, (CPUAdam, HybridAdam)), f'Unsupported optimizer {type(optimizer)}'\n return zero_optim_wrapper(model, optimizer, optim_config=self.zero_optim_config, **self.optim_kwargs)\n\n def 
backward(self, loss: torch.Tensor, model: nn.Module, optimizer: optim.Optimizer, **kwargs) -> None:\n optimizer.backward(loss)\n\n def optimizer_step(self, optimizer: optim.Optimizer, **kwargs) -> None:\n optimizer.step()\n", "path": "applications/ChatGPT/chatgpt/trainer/strategies/colossalai.py"}]} | 2,278 | 505 |
gh_patches_debug_2629 | rasdani/github-patches | git_diff | svthalia__concrexit-1880 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ImproperlyConfigured: Field name `language` is not valid for model `Profile`.
Sentry Issue: [CONCREXIT-8J](https://sentry.io/organizations/thalia/issues/2580014551/?referrer=github_integration)
```
ImproperlyConfigured: Field name `language` is not valid for model `Profile`.
(14 additional frame(s) were not displayed)
...
File "django/utils/functional.py", line 48, in __get__
res = instance.__dict__[self.name] = self.func(instance)
File "rest_framework/serializers.py", line 349, in fields
for key, value in self.get_fields().items():
File "rest_framework/serializers.py", line 1053, in get_fields
field_class, field_kwargs = self.build_field(
File "rest_framework/serializers.py", line 1199, in build_field
return self.build_unknown_field(field_name, model_class)
File "rest_framework/serializers.py", line 1317, in build_unknown_field
raise ImproperlyConfigured(
```
</issue>
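The traceback is Django REST Framework's standard complaint when a `ModelSerializer`'s `Meta.fields` names something that is neither a model field nor a declared serializer field. The sketch below reproduces the failure mode with stand-in model and field definitions; it is not concrexit's code and needs a configured Django project with `rest_framework` installed to actually run.

```python
# Stand-in sketch of the failure mode, not concrexit's code.
from django.db import models
from rest_framework import serializers


class Profile(models.Model):
    nickname = models.CharField(max_length=100)


class ProfileEditSerializer(serializers.ModelSerializer):
    class Meta:
        model = Profile
        # "language" is not a field on Profile, so evaluating serializer.fields
        # (which happens while rendering a response) raises:
        #   ImproperlyConfigured: Field name `language` is not valid for model `Profile`.
        fields = ("nickname", "language")
```

The usual remedies are dropping the stale name from `Meta.fields` or declaring a matching serializer field; the diff for this issue takes the first route.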
<code>
[start of website/members/api/v1/serializers.py]
1 """DRF serializers defined by the members package."""
2 from django.templatetags.static import static
3 from rest_framework import serializers
4
5 from members.models import Member, Profile
6 from members.services import member_achievements, member_societies
7 from thaliawebsite.api.services import create_image_thumbnail_dict
8
9
10 class ProfileRetrieveSerializer(serializers.ModelSerializer):
11 """Serializer that renders a member profile."""
12
13 class Meta:
14 model = Profile
15 fields = (
16 "pk",
17 "display_name",
18 "avatar",
19 "profile_description",
20 "birthday",
21 "starting_year",
22 "programme",
23 "website",
24 "membership_type",
25 "achievements",
26 "societies",
27 )
28
29 pk = serializers.SerializerMethodField("_pk")
30 avatar = serializers.SerializerMethodField("_avatar")
31 birthday = serializers.SerializerMethodField("_birthday")
32 membership_type = serializers.SerializerMethodField("_membership_type")
33 achievements = serializers.SerializerMethodField("_achievements")
34 societies = serializers.SerializerMethodField("_societies")
35
36 def _pk(self, instance):
37 return instance.user.pk
38
39 def _birthday(self, instance):
40 if instance.show_birthday:
41 return instance.birthday
42 return None
43
44 def _membership_type(self, instance):
45 membership = instance.user.current_membership
46 if membership:
47 return membership.type
48 return None
49
50 def _achievements(self, instance):
51 return member_achievements(instance.user)
52
53 def _societies(self, instance):
54 return member_societies(instance.user)
55
56 def _avatar(self, instance):
57 placeholder = self.context["request"].build_absolute_uri(
58 static("members/images/default-avatar.jpg")
59 )
60 file = None
61 if instance.photo:
62 file = instance.photo
63 return create_image_thumbnail_dict(
64 self.context["request"], file, placeholder=placeholder, size_large="800x800"
65 )
66
67
68 class MemberListSerializer(serializers.ModelSerializer):
69 """Serializer that renders a list of members."""
70
71 class Meta:
72 model = Member
73 fields = ("pk", "starting_year", "display_name", "membership_type", "avatar")
74
75 display_name = serializers.SerializerMethodField("_display_name")
76 starting_year = serializers.SerializerMethodField("_starting_year")
77 avatar = serializers.SerializerMethodField("_avatar")
78 membership_type = serializers.SerializerMethodField("_membership_type")
79
80 def _display_name(self, instance):
81 return instance.profile.display_name()
82
83 def _starting_year(self, instance):
84 return instance.profile.starting_year
85
86 def _avatar(self, instance):
87 placeholder = self.context["request"].build_absolute_uri(
88 static("members/images/default-avatar.jpg")
89 )
90 file = None
91 if instance.profile.photo:
92 file = instance.profile.photo
93 return create_image_thumbnail_dict(
94 self.context["request"], file, placeholder=placeholder, size_large="800x800"
95 )
96
97 def _membership_type(self, instance):
98 membership = instance.current_membership
99 if membership:
100 return membership.type
101 return None
102
103
104 class ProfileEditSerializer(serializers.ModelSerializer):
105 """Serializer that renders a profile to be edited."""
106
107 class Meta:
108 model = Profile
109 fields = (
110 "pk",
111 "email",
112 "first_name",
113 "last_name",
114 "address_street",
115 "address_street2",
116 "address_postal_code",
117 "address_city",
118 "address_country",
119 "phone_number",
120 "show_birthday",
121 "website",
122 "photo",
123 "emergency_contact",
124 "emergency_contact_phone_number",
125 "profile_description",
126 "nickname",
127 "display_name_preference",
128 "language",
129 "receive_optin",
130 "receive_newsletter",
131 "receive_magazine",
132 "display_name",
133 "avatar",
134 "birthday",
135 "starting_year",
136 "programme",
137 "membership_type",
138 "achievements",
139 "societies",
140 )
141
142 read_only_fields = ("display_name", "starting_year", "programme", "birthday")
143
144 pk = serializers.SerializerMethodField("_pk")
145 email = serializers.SerializerMethodField("_email")
146 first_name = serializers.SerializerMethodField("_first_name")
147 last_name = serializers.SerializerMethodField("_last_name")
148 avatar = serializers.SerializerMethodField("_avatar")
149 membership_type = serializers.SerializerMethodField("_membership_type")
150 achievements = serializers.SerializerMethodField("_achievements")
151 societies = serializers.SerializerMethodField("_societies")
152
153 def _pk(self, instance):
154 return instance.user.pk
155
156 def _email(self, instance):
157 return instance.user.email
158
159 def _first_name(self, instance):
160 return instance.user.first_name
161
162 def _last_name(self, instance):
163 return instance.user.last_name
164
165 def _membership_type(self, instance):
166 membership = instance.user.current_membership
167 if membership:
168 return membership.type
169 return None
170
171 def _achievements(self, instance):
172 return member_achievements(instance.user)
173
174 def _societies(self, instance):
175 return member_societies(instance.user)
176
177 def _avatar(self, instance):
178 placeholder = self.context["request"].build_absolute_uri(
179 static("members/images/default-avatar.jpg")
180 )
181 file = None
182 if instance.photo:
183 file = instance.photo
184 return create_image_thumbnail_dict(
185 self.context["request"], file, placeholder=placeholder, size_large="800x800"
186 )
187
[end of website/members/api/v1/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/members/api/v1/serializers.py b/website/members/api/v1/serializers.py
--- a/website/members/api/v1/serializers.py
+++ b/website/members/api/v1/serializers.py
@@ -125,7 +125,6 @@
"profile_description",
"nickname",
"display_name_preference",
- "language",
"receive_optin",
"receive_newsletter",
"receive_magazine",
| {"golden_diff": "diff --git a/website/members/api/v1/serializers.py b/website/members/api/v1/serializers.py\n--- a/website/members/api/v1/serializers.py\n+++ b/website/members/api/v1/serializers.py\n@@ -125,7 +125,6 @@\n \"profile_description\",\n \"nickname\",\n \"display_name_preference\",\n- \"language\",\n \"receive_optin\",\n \"receive_newsletter\",\n \"receive_magazine\",\n", "issue": "ImproperlyConfigured: Field name `language` is not valid for model `Profile`.\nSentry Issue: [CONCREXIT-8J](https://sentry.io/organizations/thalia/issues/2580014551/?referrer=github_integration)\n\n```\nImproperlyConfigured: Field name `language` is not valid for model `Profile`.\n(14 additional frame(s) were not displayed)\n...\n File \"django/utils/functional.py\", line 48, in __get__\n res = instance.__dict__[self.name] = self.func(instance)\n File \"rest_framework/serializers.py\", line 349, in fields\n for key, value in self.get_fields().items():\n File \"rest_framework/serializers.py\", line 1053, in get_fields\n field_class, field_kwargs = self.build_field(\n File \"rest_framework/serializers.py\", line 1199, in build_field\n return self.build_unknown_field(field_name, model_class)\n File \"rest_framework/serializers.py\", line 1317, in build_unknown_field\n raise ImproperlyConfigured(\n```\n", "before_files": [{"content": "\"\"\"DRF serializers defined by the members package.\"\"\"\nfrom django.templatetags.static import static\nfrom rest_framework import serializers\n\nfrom members.models import Member, Profile\nfrom members.services import member_achievements, member_societies\nfrom thaliawebsite.api.services import create_image_thumbnail_dict\n\n\nclass ProfileRetrieveSerializer(serializers.ModelSerializer):\n \"\"\"Serializer that renders a member profile.\"\"\"\n\n class Meta:\n model = Profile\n fields = (\n \"pk\",\n \"display_name\",\n \"avatar\",\n \"profile_description\",\n \"birthday\",\n \"starting_year\",\n \"programme\",\n \"website\",\n \"membership_type\",\n \"achievements\",\n \"societies\",\n )\n\n pk = serializers.SerializerMethodField(\"_pk\")\n avatar = serializers.SerializerMethodField(\"_avatar\")\n birthday = serializers.SerializerMethodField(\"_birthday\")\n membership_type = serializers.SerializerMethodField(\"_membership_type\")\n achievements = serializers.SerializerMethodField(\"_achievements\")\n societies = serializers.SerializerMethodField(\"_societies\")\n\n def _pk(self, instance):\n return instance.user.pk\n\n def _birthday(self, instance):\n if instance.show_birthday:\n return instance.birthday\n return None\n\n def _membership_type(self, instance):\n membership = instance.user.current_membership\n if membership:\n return membership.type\n return None\n\n def _achievements(self, instance):\n return member_achievements(instance.user)\n\n def _societies(self, instance):\n return member_societies(instance.user)\n\n def _avatar(self, instance):\n placeholder = self.context[\"request\"].build_absolute_uri(\n static(\"members/images/default-avatar.jpg\")\n )\n file = None\n if instance.photo:\n file = instance.photo\n return create_image_thumbnail_dict(\n self.context[\"request\"], file, placeholder=placeholder, size_large=\"800x800\"\n )\n\n\nclass MemberListSerializer(serializers.ModelSerializer):\n \"\"\"Serializer that renders a list of members.\"\"\"\n\n class Meta:\n model = Member\n fields = (\"pk\", \"starting_year\", \"display_name\", \"membership_type\", \"avatar\")\n\n display_name = serializers.SerializerMethodField(\"_display_name\")\n starting_year 
= serializers.SerializerMethodField(\"_starting_year\")\n avatar = serializers.SerializerMethodField(\"_avatar\")\n membership_type = serializers.SerializerMethodField(\"_membership_type\")\n\n def _display_name(self, instance):\n return instance.profile.display_name()\n\n def _starting_year(self, instance):\n return instance.profile.starting_year\n\n def _avatar(self, instance):\n placeholder = self.context[\"request\"].build_absolute_uri(\n static(\"members/images/default-avatar.jpg\")\n )\n file = None\n if instance.profile.photo:\n file = instance.profile.photo\n return create_image_thumbnail_dict(\n self.context[\"request\"], file, placeholder=placeholder, size_large=\"800x800\"\n )\n\n def _membership_type(self, instance):\n membership = instance.current_membership\n if membership:\n return membership.type\n return None\n\n\nclass ProfileEditSerializer(serializers.ModelSerializer):\n \"\"\"Serializer that renders a profile to be edited.\"\"\"\n\n class Meta:\n model = Profile\n fields = (\n \"pk\",\n \"email\",\n \"first_name\",\n \"last_name\",\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"phone_number\",\n \"show_birthday\",\n \"website\",\n \"photo\",\n \"emergency_contact\",\n \"emergency_contact_phone_number\",\n \"profile_description\",\n \"nickname\",\n \"display_name_preference\",\n \"language\",\n \"receive_optin\",\n \"receive_newsletter\",\n \"receive_magazine\",\n \"display_name\",\n \"avatar\",\n \"birthday\",\n \"starting_year\",\n \"programme\",\n \"membership_type\",\n \"achievements\",\n \"societies\",\n )\n\n read_only_fields = (\"display_name\", \"starting_year\", \"programme\", \"birthday\")\n\n pk = serializers.SerializerMethodField(\"_pk\")\n email = serializers.SerializerMethodField(\"_email\")\n first_name = serializers.SerializerMethodField(\"_first_name\")\n last_name = serializers.SerializerMethodField(\"_last_name\")\n avatar = serializers.SerializerMethodField(\"_avatar\")\n membership_type = serializers.SerializerMethodField(\"_membership_type\")\n achievements = serializers.SerializerMethodField(\"_achievements\")\n societies = serializers.SerializerMethodField(\"_societies\")\n\n def _pk(self, instance):\n return instance.user.pk\n\n def _email(self, instance):\n return instance.user.email\n\n def _first_name(self, instance):\n return instance.user.first_name\n\n def _last_name(self, instance):\n return instance.user.last_name\n\n def _membership_type(self, instance):\n membership = instance.user.current_membership\n if membership:\n return membership.type\n return None\n\n def _achievements(self, instance):\n return member_achievements(instance.user)\n\n def _societies(self, instance):\n return member_societies(instance.user)\n\n def _avatar(self, instance):\n placeholder = self.context[\"request\"].build_absolute_uri(\n static(\"members/images/default-avatar.jpg\")\n )\n file = None\n if instance.photo:\n file = instance.photo\n return create_image_thumbnail_dict(\n self.context[\"request\"], file, placeholder=placeholder, size_large=\"800x800\"\n )\n", "path": "website/members/api/v1/serializers.py"}]} | 2,415 | 108 |
gh_patches_debug_50780 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-3745 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Explicitly depend on setuptools
Context: #3295
We should explicitly depend on a minimum version of setuptools to get around problems installing our packages if setuptools is too old.
</issue>
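Concretely, the ask is a `setuptools` entry with a version floor in `install_requires`. The sketch below mirrors the `34.0.0` floor used in the diff for this issue; treat the exact minimum as a project decision rather than a fixed requirement.

```python
# Sketch of the intended change to core/setup.py.
REQUIREMENTS = [
    'googleapis-common-protos >= 1.3.4',
    'protobuf >= 3.0.0',
    'google-auth >= 0.4.0, < 2.0.0dev',
    'requests >= 2.4.0, < 3.0.0dev',
    'setuptools >= 34.0.0',  # explicit minimum version, per the issue above
    'six',
    'tenacity >= 4.0.0, <5.0.0dev'
]
```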
<code>
[start of core/setup.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16
17 from setuptools import find_packages
18 from setuptools import setup
19
20
21 PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
22
23 with open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj:
24 README = file_obj.read()
25
26 # NOTE: This is duplicated throughout and we should try to
27 # consolidate.
28 SETUP_BASE = {
29 'author': 'Google Cloud Platform',
30 'author_email': '[email protected]',
31 'scripts': [],
32 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',
33 'license': 'Apache 2.0',
34 'platforms': 'Posix; MacOS X; Windows',
35 'include_package_data': True,
36 'zip_safe': False,
37 'classifiers': [
38 'Development Status :: 4 - Beta',
39 'Intended Audience :: Developers',
40 'License :: OSI Approved :: Apache Software License',
41 'Operating System :: OS Independent',
42 'Programming Language :: Python :: 2',
43 'Programming Language :: Python :: 2.7',
44 'Programming Language :: Python :: 3',
45 'Programming Language :: Python :: 3.4',
46 'Programming Language :: Python :: 3.5',
47 'Programming Language :: Python :: 3.6',
48 'Topic :: Internet',
49 ],
50 }
51
52
53 REQUIREMENTS = [
54 'googleapis-common-protos >= 1.3.4',
55 'protobuf >= 3.0.0',
56 'google-auth >= 0.4.0, < 2.0.0dev',
57 'requests >= 2.4.0, < 3.0.0dev',
58 'six',
59 'tenacity >= 4.0.0, <5.0.0dev'
60 ]
61
62 setup(
63 name='google-cloud-core',
64 version='0.26.0',
65 description='API Client library for Google Cloud: Core Helpers',
66 long_description=README,
67 namespace_packages=[
68 'google',
69 'google.cloud',
70 'google.api',
71 ],
72 packages=find_packages(exclude=('tests*',)),
73 install_requires=REQUIREMENTS,
74 **SETUP_BASE
75 )
76
[end of core/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -55,6 +55,7 @@
'protobuf >= 3.0.0',
'google-auth >= 0.4.0, < 2.0.0dev',
'requests >= 2.4.0, < 3.0.0dev',
+ 'setuptools >= 34.0.0',
'six',
'tenacity >= 4.0.0, <5.0.0dev'
]
| {"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -55,6 +55,7 @@\n 'protobuf >= 3.0.0',\n 'google-auth >= 0.4.0, < 2.0.0dev',\n 'requests >= 2.4.0, < 3.0.0dev',\n+ 'setuptools >= 34.0.0',\n 'six',\n 'tenacity >= 4.0.0, <5.0.0dev'\n ]\n", "issue": "Explicitly depend on setuptools\nContext: #3295\r\n\r\nWe should explicitly depend on a minimum version of setuptools to get around problems installing our packages if setuptools is too old.\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nPACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj:\n README = file_obj.read()\n\n# NOTE: This is duplicated throughout and we should try to\n# consolidate.\nSETUP_BASE = {\n 'author': 'Google Cloud Platform',\n 'author_email': '[email protected]',\n 'scripts': [],\n 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',\n 'license': 'Apache 2.0',\n 'platforms': 'Posix; MacOS X; Windows',\n 'include_package_data': True,\n 'zip_safe': False,\n 'classifiers': [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet',\n ],\n}\n\n\nREQUIREMENTS = [\n 'googleapis-common-protos >= 1.3.4',\n 'protobuf >= 3.0.0',\n 'google-auth >= 0.4.0, < 2.0.0dev',\n 'requests >= 2.4.0, < 3.0.0dev',\n 'six',\n 'tenacity >= 4.0.0, <5.0.0dev'\n]\n\nsetup(\n name='google-cloud-core',\n version='0.26.0',\n description='API Client library for Google Cloud: Core Helpers',\n long_description=README,\n namespace_packages=[\n 'google',\n 'google.cloud',\n 'google.api',\n ],\n packages=find_packages(exclude=('tests*',)),\n install_requires=REQUIREMENTS,\n **SETUP_BASE\n)\n", "path": "core/setup.py"}]} | 1,308 | 127 |
gh_patches_debug_16165 | rasdani/github-patches | git_diff | mdn__kuma-6240 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Viewing profile of user with Persona account generates an ISE
**Summary**
It seems that viewing the profile of a user who has a Persona social account triggers an ISE.
**Steps To Reproduce (STR)**
1. Visit https://developer.allizom.org/en-US/profiles/sheppy
2. It will generate a 500 error
3. Visit https://developer.allizom.org/en-US/profiles/escattone
4. Displays fine
**Actual behavior**
Returns 500 (internal server error)
**Expected behavior**
Successful display of the user's profile page
**Additional context**
This seems to be related to one or more of the account PRs that were merged prior to deploying to stage (a defensive sketch of one possible failure mode follows the issue).
</issue>
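One plausible failure mode, and it is only an assumption rather than something the issue confirms, is that the profile page's avatar lookup iterates the user's social accounts and asks allauth for provider details, which fails for legacy `persona` accounts whose provider is no longer registered. A defensive sketch of `get_avatar_url` along those lines is shown below; it is not necessarily the fix the project shipped.

```python
# Defensive sketch only; assumes the 500 comes from social accounts whose provider
# (e.g. the retired "persona" provider) is no longer registered with django-allauth.
from allauth.socialaccount import providers
from django.conf import settings


def get_avatar_url(user):
    registered = {p.id for p in providers.registry.get_list()}
    for account in user.socialaccount_set.order_by('date_joined'):
        if account.provider not in registered:
            continue  # skip accounts we can no longer resolve, e.g. persona
        avatar_url = account.get_avatar_url()
        if avatar_url:
            return avatar_url
    return settings.DEFAULT_AVATAR
```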
<code>
[start of kuma/users/templatetags/jinja_helpers.py]
1 from allauth.account.utils import user_display
2 from allauth.socialaccount import providers
3 from allauth.socialaccount.templatetags.socialaccount import get_providers
4 from allauth.utils import get_request_param
5 from django.conf import settings
6 from django.contrib import admin
7 from django.utils.translation import ugettext
8 from django_jinja import library
9 from honeypot.templatetags.honeypot import render_honeypot_field
10 from jinja2 import contextfunction, escape, Markup
11
12 from kuma.core.templatetags.jinja_helpers import datetimeformat
13 from kuma.core.urlresolvers import reverse
14
15 from ..models import User
16
17
18 @library.global_function
19 def get_avatar_url(user):
20 """
21 Get the user's avatar URL for the first social account provider that has one.
22 Assumes that the user is not anonymous or None.
23 If the user has no avatar for any provider, returns the default avatar.
24 """
25 for account in user.socialaccount_set.order_by('date_joined'):
26 avatar_url = account.get_avatar_url()
27 if avatar_url:
28 return avatar_url
29 return settings.DEFAULT_AVATAR
30
31
32 @library.global_function
33 @contextfunction
34 def ban_links(context, ban_user, banner_user):
35 """Returns a link to ban a user"""
36 links = ''
37 if ban_user.id != banner_user.id and banner_user.has_perm('users.add_userban'):
38 active_ban = ban_user.active_ban
39 url_ban_cleanup = reverse('users.ban_user_and_cleanup',
40 kwargs={'username': ban_user.username})
41 if active_ban:
42 url = reverse('admin:users_userban_change', args=(active_ban.id,))
43 title = ugettext('Banned on %(ban_date)s by %(ban_admin)s.') % {
44 'ban_date': datetimeformat(context, active_ban.date,
45 format='date', output='json'),
46 'ban_admin': active_ban.by,
47 }
48 link = ('<a id="ban_link" href="%s" class="button ban-link" title="%s">%s'
49 '<i aria-hidden="true" class="icon-ban"></i></a>'
50 % (url, title, ugettext('Banned')))
51 link_cleanup = ('<a id="cleanup_link" href="%s" class="button negative ban-link">%s'
52 '<i aria-hidden="true" class="icon-ban"></i></a>'
53 % (url_ban_cleanup, ugettext('Clean Up Revisions')))
54 else:
55 url = reverse('users.ban_user', kwargs={'username': ban_user.username})
56 link = ('<a id="ban_link" href="%s" class="button negative ban-link">%s'
57 '<i aria-hidden="true" class="icon-ban"></i></a>'
58 % (url, ugettext('Ban User')))
59 link_cleanup = ('<a id="cleanup_link" href="%s" class="button negative ban-link">%s'
60 '<i aria-hidden="true" class="icon-ban"></i></a>'
61 % (url_ban_cleanup, ugettext('Ban User & Clean Up')))
62 links = link_cleanup + ' ' + link
63 return Markup(links)
64
65
66 @library.global_function
67 def admin_link(user):
68 """Returns a link to admin a user"""
69 url = reverse('admin:users_user_change', args=(user.id,),
70 current_app=admin.site.name)
71 link = ('<a href="%s" class="button neutral">%s'
72 '<i aria-hidden="true" class="icon-wrench"></i></a>' %
73 (url, ugettext('Admin')))
74 return Markup(link)
75
76
77 @library.filter
78 def public_email(email):
79 """Email address -> publicly displayable email."""
80 return Markup('<span class="email">%s</span>' % unicode_to_html(email))
81
82
83 def unicode_to_html(text):
84 """Turns all unicode into html entities, e.g. E -> E."""
85 return ''.join(['&#%s;' % ord(i) for i in text])
86
87
88 @library.global_function
89 def user_list(users):
90 """Turn a list of users into a list of links to their profiles."""
91 link = '<a href="%s">%s</a>'
92 list = ', '.join([link % (escape(u.get_absolute_url()), escape(u.username))
93 for u in users])
94 return Markup(list)
95
96
97 # Returns a string representation of a user
98 library.global_function(user_display)
99
100 # Returns a list of social authentication providers.
101 library.global_function(get_providers)
102
103
104 @library.global_function
105 @contextfunction
106 def provider_login_url(context, provider_id, **params):
107 """
108 {{ provider_login_url("github", next="/some/url") }}
109 """
110 request = context['request']
111 provider = providers.registry.by_id(provider_id)
112 auth_params = params.get('auth_params', None)
113 scope = params.get('scope', None)
114 process = params.get('process', None)
115 if scope == '':
116 del params['scope']
117 if auth_params == '':
118 del params['auth_params']
119 if 'next' not in params:
120 next = get_request_param(request, 'next')
121 if next:
122 params['next'] = next
123 elif process == 'redirect':
124 params['next'] = request.get_full_path()
125 else:
126 if not params['next']:
127 del params['next']
128 # get the login url and append params as url parameters
129 return Markup(provider.get_login_url(request, **params))
130
131
132 @library.global_function
133 @contextfunction
134 def providers_media_js(context):
135 """
136 {{ providers_media_js() }}
137 """
138 request = context['request']
139 return Markup('\n'.join([p.media_js(request)
140 for p in providers.registry.get_list()]))
141
142
143 @library.global_function
144 def social_accounts(user):
145 """
146 {% set accounts = social_accounts(user) %}
147
148 Then:
149 {{ accounts.twitter }} -- a list of connected Twitter accounts
150 {{ accounts.twitter.0 }} -- the first Twitter account
151 {% if accounts %} -- if there is at least one social account
152 """
153 accounts = {}
154 if not user.is_authenticated:
155 return accounts
156 for account in user.socialaccount_set.all().iterator():
157 providers = accounts.setdefault(account.provider, [])
158 providers.append(account)
159 return accounts
160
161
162 @library.global_function
163 @library.render_with('honeypot/honeypot_field.html')
164 def honeypot_field(field_name=None):
165 return render_honeypot_field(field_name)
166
167
168 @library.global_function
169 def get_primary_email(email_addresses):
170 for email in email_addresses:
171 if email_addresses[email].get('primary'):
172 return email
173
174
175 @library.global_function
176 def is_username_taken(username):
177 """
178 Returns True if a user with the given username exists (case-insentive),
179 otherwise False.
180 """
181 return User.objects.filter(username=username).exists()
182
[end of kuma/users/templatetags/jinja_helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kuma/users/templatetags/jinja_helpers.py b/kuma/users/templatetags/jinja_helpers.py
--- a/kuma/users/templatetags/jinja_helpers.py
+++ b/kuma/users/templatetags/jinja_helpers.py
@@ -18,11 +18,14 @@
@library.global_function
def get_avatar_url(user):
"""
- Get the user's avatar URL for the first social account provider that has one.
- Assumes that the user is not anonymous or None.
- If the user has no avatar for any provider, returns the default avatar.
+ Get the avatar URL of the user's first-joined social account that has one,
+ excluding all Persona social accounts. Assumes that the user is not None or
+ anonymous. If the user has no social account with an avatar, returns the
+ default avatar URL.
"""
- for account in user.socialaccount_set.order_by('date_joined'):
+ for account in (user.socialaccount_set
+ .exclude(provider='persona')
+ .order_by('date_joined')):
avatar_url = account.get_avatar_url()
if avatar_url:
return avatar_url
| {"golden_diff": "diff --git a/kuma/users/templatetags/jinja_helpers.py b/kuma/users/templatetags/jinja_helpers.py\n--- a/kuma/users/templatetags/jinja_helpers.py\n+++ b/kuma/users/templatetags/jinja_helpers.py\n@@ -18,11 +18,14 @@\n @library.global_function\n def get_avatar_url(user):\n \"\"\"\n- Get the user's avatar URL for the first social account provider that has one.\n- Assumes that the user is not anonymous or None.\n- If the user has no avatar for any provider, returns the default avatar.\n+ Get the avatar URL of the user's first-joined social account that has one,\n+ excluding all Persona social accounts. Assumes that the user is not None or\n+ anonymous. If the user has no social account with an avatar, returns the\n+ default avatar URL.\n \"\"\"\n- for account in user.socialaccount_set.order_by('date_joined'):\n+ for account in (user.socialaccount_set\n+ .exclude(provider='persona')\n+ .order_by('date_joined')):\n avatar_url = account.get_avatar_url()\n if avatar_url:\n return avatar_url\n", "issue": "Viewing profile of user with Persona account generates an ISE\n**Summary**\r\nIt seems that viewing the profile of a user who has a Persona social account triggers an ISE.\r\n\r\n**Steps To Reproduce (STR)**\r\n1. Visit https://developer.allizom.org/en-US/profiles/sheppy\r\n2. It will generate a 500 error\r\n3. Visit https://developer.allizom.org/en-US/profiles/escattone\r\n4. Displays fine\r\n\r\n**Actual behavior**\r\nReturns 500 (internal server error)\r\n\r\n**Expected behavior**\r\nSuccess display of user's profile page\r\n\r\n**Additional context**\r\nThis seems to be related to one or more of the account PR's that were merged prior to deploying to stage.\r\n\n", "before_files": [{"content": "from allauth.account.utils import user_display\nfrom allauth.socialaccount import providers\nfrom allauth.socialaccount.templatetags.socialaccount import get_providers\nfrom allauth.utils import get_request_param\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.utils.translation import ugettext\nfrom django_jinja import library\nfrom honeypot.templatetags.honeypot import render_honeypot_field\nfrom jinja2 import contextfunction, escape, Markup\n\nfrom kuma.core.templatetags.jinja_helpers import datetimeformat\nfrom kuma.core.urlresolvers import reverse\n\nfrom ..models import User\n\n\[email protected]_function\ndef get_avatar_url(user):\n \"\"\"\n Get the user's avatar URL for the first social account provider that has one.\n Assumes that the user is not anonymous or None.\n If the user has no avatar for any provider, returns the default avatar.\n \"\"\"\n for account in user.socialaccount_set.order_by('date_joined'):\n avatar_url = account.get_avatar_url()\n if avatar_url:\n return avatar_url\n return settings.DEFAULT_AVATAR\n\n\[email protected]_function\n@contextfunction\ndef ban_links(context, ban_user, banner_user):\n \"\"\"Returns a link to ban a user\"\"\"\n links = ''\n if ban_user.id != banner_user.id and banner_user.has_perm('users.add_userban'):\n active_ban = ban_user.active_ban\n url_ban_cleanup = reverse('users.ban_user_and_cleanup',\n kwargs={'username': ban_user.username})\n if active_ban:\n url = reverse('admin:users_userban_change', args=(active_ban.id,))\n title = ugettext('Banned on %(ban_date)s by %(ban_admin)s.') % {\n 'ban_date': datetimeformat(context, active_ban.date,\n format='date', output='json'),\n 'ban_admin': active_ban.by,\n }\n link = ('<a id=\"ban_link\" href=\"%s\" class=\"button ban-link\" title=\"%s\">%s'\n '<i 
aria-hidden=\"true\" class=\"icon-ban\"></i></a>'\n % (url, title, ugettext('Banned')))\n link_cleanup = ('<a id=\"cleanup_link\" href=\"%s\" class=\"button negative ban-link\">%s'\n '<i aria-hidden=\"true\" class=\"icon-ban\"></i></a>'\n % (url_ban_cleanup, ugettext('Clean Up Revisions')))\n else:\n url = reverse('users.ban_user', kwargs={'username': ban_user.username})\n link = ('<a id=\"ban_link\" href=\"%s\" class=\"button negative ban-link\">%s'\n '<i aria-hidden=\"true\" class=\"icon-ban\"></i></a>'\n % (url, ugettext('Ban User')))\n link_cleanup = ('<a id=\"cleanup_link\" href=\"%s\" class=\"button negative ban-link\">%s'\n '<i aria-hidden=\"true\" class=\"icon-ban\"></i></a>'\n % (url_ban_cleanup, ugettext('Ban User & Clean Up')))\n links = link_cleanup + ' ' + link\n return Markup(links)\n\n\[email protected]_function\ndef admin_link(user):\n \"\"\"Returns a link to admin a user\"\"\"\n url = reverse('admin:users_user_change', args=(user.id,),\n current_app=admin.site.name)\n link = ('<a href=\"%s\" class=\"button neutral\">%s'\n '<i aria-hidden=\"true\" class=\"icon-wrench\"></i></a>' %\n (url, ugettext('Admin')))\n return Markup(link)\n\n\[email protected]\ndef public_email(email):\n \"\"\"Email address -> publicly displayable email.\"\"\"\n return Markup('<span class=\"email\">%s</span>' % unicode_to_html(email))\n\n\ndef unicode_to_html(text):\n \"\"\"Turns all unicode into html entities, e.g. E -> E.\"\"\"\n return ''.join(['&#%s;' % ord(i) for i in text])\n\n\[email protected]_function\ndef user_list(users):\n \"\"\"Turn a list of users into a list of links to their profiles.\"\"\"\n link = '<a href=\"%s\">%s</a>'\n list = ', '.join([link % (escape(u.get_absolute_url()), escape(u.username))\n for u in users])\n return Markup(list)\n\n\n# Returns a string representation of a user\nlibrary.global_function(user_display)\n\n# Returns a list of social authentication providers.\nlibrary.global_function(get_providers)\n\n\[email protected]_function\n@contextfunction\ndef provider_login_url(context, provider_id, **params):\n \"\"\"\n {{ provider_login_url(\"github\", next=\"/some/url\") }}\n \"\"\"\n request = context['request']\n provider = providers.registry.by_id(provider_id)\n auth_params = params.get('auth_params', None)\n scope = params.get('scope', None)\n process = params.get('process', None)\n if scope == '':\n del params['scope']\n if auth_params == '':\n del params['auth_params']\n if 'next' not in params:\n next = get_request_param(request, 'next')\n if next:\n params['next'] = next\n elif process == 'redirect':\n params['next'] = request.get_full_path()\n else:\n if not params['next']:\n del params['next']\n # get the login url and append params as url parameters\n return Markup(provider.get_login_url(request, **params))\n\n\[email protected]_function\n@contextfunction\ndef providers_media_js(context):\n \"\"\"\n {{ providers_media_js() }}\n \"\"\"\n request = context['request']\n return Markup('\\n'.join([p.media_js(request)\n for p in providers.registry.get_list()]))\n\n\[email protected]_function\ndef social_accounts(user):\n \"\"\"\n {% set accounts = social_accounts(user) %}\n\n Then:\n {{ accounts.twitter }} -- a list of connected Twitter accounts\n {{ accounts.twitter.0 }} -- the first Twitter account\n {% if accounts %} -- if there is at least one social account\n \"\"\"\n accounts = {}\n if not user.is_authenticated:\n return accounts\n for account in user.socialaccount_set.all().iterator():\n providers = accounts.setdefault(account.provider, [])\n 
providers.append(account)\n return accounts\n\n\[email protected]_function\[email protected]_with('honeypot/honeypot_field.html')\ndef honeypot_field(field_name=None):\n return render_honeypot_field(field_name)\n\n\[email protected]_function\ndef get_primary_email(email_addresses):\n for email in email_addresses:\n if email_addresses[email].get('primary'):\n return email\n\n\[email protected]_function\ndef is_username_taken(username):\n \"\"\"\n Returns True if a user with the given username exists (case-insentive),\n otherwise False.\n \"\"\"\n return User.objects.filter(username=username).exists()\n", "path": "kuma/users/templatetags/jinja_helpers.py"}]} | 2,610 | 260 |
gh_patches_debug_28051 | rasdani/github-patches | git_diff | cupy__cupy-5226 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove cupyx.allow_synchronize?
`cupyx.allow_synchronize` (#2808) was introduced originally for both unit tests and users who want to notice device synchronization in their own code.
Application in unit tests was dismissed (#2893) because many more tests were synchronous than I had expected. 
Now I doubt the usefulness of this feature for users as well.
It's fundamentally impossible to eliminate false positives and/or false negatives.
If we took a policy for zero false positives, there would be too many false negatives which I think would make this feature useless.
For example, the documentation of [cudaMemcpy](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__MEMORY.html#group__CUDART__MEMORY_1gc263dbe6574220cc776b45438fc351e8) says *"This function exhibits synchronous behavior for most use cases"*. If we took this policy, we wouldn't be able to consider this function synchronous, because no condition is mentioned that would make this function asynchronous.
If we took a policy to allow some false positives, this feature wouldn't be used to detect unexpected synchronization in users' code.
</issue>
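
For readers who have not used it, the feature under discussion is a thread-local context manager; a usage sketch based on the `syncdetect` module below (the pipeline function is hypothetical user code):

```python
import cupyx

# Forbid device synchronization in the current thread for the duration of
# the block. If CuPy detects a synchronizing call inside it, a
# cupyx.DeviceSynchronized error is raised -- subject to the false
# positives/negatives discussed above.
with cupyx.allow_synchronize(False):
    run_hopefully_async_pipeline()  # hypothetical user function
```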
<code>
[start of cupy/_core/syncdetect.py]
1 import contextlib
2 import threading
3
4 from cupy import _util
5
6
7 _thread_local = threading.local()
8
9
10 class DeviceSynchronized(RuntimeError):
11 """Raised when device synchronization is detected while disallowed.
12
13 .. seealso:: :func:`cupyx.allow_synchronize`
14
15 """
16
17 def __init__(self, message=None):
18 if message is None:
19 message = 'Device synchronization was detected while disallowed.'
20 super().__init__(message)
21
22
23 def _is_allowed():
24 # Returns whether device synchronization is allowed in the current thread.
25 try:
26 return _thread_local.allowed
27 except AttributeError:
28 _thread_local.allowed = True
29 return True
30
31
32 def _declare_synchronize():
33 # Raises DeviceSynchronized if device synchronization is disallowed in
34 # the current thread.
35 if not _is_allowed():
36 raise DeviceSynchronized()
37
38
39 @contextlib.contextmanager
40 def allow_synchronize(allow):
41 """Allows or disallows device synchronization temporarily in the current \
42 thread.
43
44 If device synchronization is detected, :class:`cupyx.DeviceSynchronized`
45 will be raised.
46
47 Note that there can be false negatives and positives.
48 Device synchronization outside CuPy will not be detected.
49 """
50 _util.experimental('cupyx.allow_synchronize')
51 old = _is_allowed()
52 _thread_local.allowed = allow
53 try:
54 yield
55 finally:
56 _thread_local.allowed = old
57
[end of cupy/_core/syncdetect.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cupy/_core/syncdetect.py b/cupy/_core/syncdetect.py
--- a/cupy/_core/syncdetect.py
+++ b/cupy/_core/syncdetect.py
@@ -1,7 +1,6 @@
import contextlib
import threading
-
-from cupy import _util
+import warnings
_thread_local = threading.local()
@@ -10,6 +9,11 @@
class DeviceSynchronized(RuntimeError):
"""Raised when device synchronization is detected while disallowed.
+ .. warning::
+
+ This API has been deprecated in CuPy v10 and will be removed in future
+ releases.
+
.. seealso:: :func:`cupyx.allow_synchronize`
"""
@@ -41,13 +45,21 @@
"""Allows or disallows device synchronization temporarily in the current \
thread.
+ .. warning::
+
+ This API has been deprecated in CuPy v10 and will be removed in future
+ releases.
+
If device synchronization is detected, :class:`cupyx.DeviceSynchronized`
will be raised.
Note that there can be false negatives and positives.
Device synchronization outside CuPy will not be detected.
"""
- _util.experimental('cupyx.allow_synchronize')
+ warnings.warn(
+ 'cupyx.allow_synchronize will be removed in future releases as it '
+ 'is not possible to reliably detect synchronizations.')
+
old = _is_allowed()
_thread_local.allowed = allow
try:
| {"golden_diff": "diff --git a/cupy/_core/syncdetect.py b/cupy/_core/syncdetect.py\n--- a/cupy/_core/syncdetect.py\n+++ b/cupy/_core/syncdetect.py\n@@ -1,7 +1,6 @@\n import contextlib\n import threading\n-\n-from cupy import _util\n+import warnings\n \n \n _thread_local = threading.local()\n@@ -10,6 +9,11 @@\n class DeviceSynchronized(RuntimeError):\n \"\"\"Raised when device synchronization is detected while disallowed.\n \n+ .. warning::\n+\n+ This API has been deprecated in CuPy v10 and will be removed in future\n+ releases.\n+\n .. seealso:: :func:`cupyx.allow_synchronize`\n \n \"\"\"\n@@ -41,13 +45,21 @@\n \"\"\"Allows or disallows device synchronization temporarily in the current \\\n thread.\n \n+ .. warning::\n+\n+ This API has been deprecated in CuPy v10 and will be removed in future\n+ releases.\n+\n If device synchronization is detected, :class:`cupyx.DeviceSynchronized`\n will be raised.\n \n Note that there can be false negatives and positives.\n Device synchronization outside CuPy will not be detected.\n \"\"\"\n- _util.experimental('cupyx.allow_synchronize')\n+ warnings.warn(\n+ 'cupyx.allow_synchronize will be removed in future releases as it '\n+ 'is not possible to reliably detect synchronizations.')\n+\n old = _is_allowed()\n _thread_local.allowed = allow\n try:\n", "issue": "Remove cupyx.allow_synchronize?\n`cupyx.allow_synchronize` (#2808) was introduced originally for both unit tests and users who want to notice device synchronization in their own code.\r\n\r\nApplication in uint tests was dissmissed (#2893) because much more tests were synchronous than I had expected. \r\n\r\nNow I doubt the usefulness of this feature for users as well.\r\nIt's fundamentally impossible to eliminate false positives and/or false negatives.\r\n\r\nIf we took a policy for zero false positives, there would be too many false negatives which I think would make this feature useless.\r\nFor example, the documentation of [cudaMemcpy](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__MEMORY.html#group__CUDART__MEMORY_1gc263dbe6574220cc776b45438fc351e8) says *\"This function exhibits synchronous behavior for most use cases\"*. If we took this policy, we wouldn't be able to consider this function synchronous, because no condition is mentioned that would make this function asynchronous.\r\n\r\nIf we took a policy to allow some false positives, this feature wouldn't be used to detect unexpected synchronization in users' code.\n", "before_files": [{"content": "import contextlib\nimport threading\n\nfrom cupy import _util\n\n\n_thread_local = threading.local()\n\n\nclass DeviceSynchronized(RuntimeError):\n \"\"\"Raised when device synchronization is detected while disallowed.\n\n .. 
seealso:: :func:`cupyx.allow_synchronize`\n\n \"\"\"\n\n def __init__(self, message=None):\n if message is None:\n message = 'Device synchronization was detected while disallowed.'\n super().__init__(message)\n\n\ndef _is_allowed():\n # Returns whether device synchronization is allowed in the current thread.\n try:\n return _thread_local.allowed\n except AttributeError:\n _thread_local.allowed = True\n return True\n\n\ndef _declare_synchronize():\n # Raises DeviceSynchronized if device synchronization is disallowed in\n # the current thread.\n if not _is_allowed():\n raise DeviceSynchronized()\n\n\[email protected]\ndef allow_synchronize(allow):\n \"\"\"Allows or disallows device synchronization temporarily in the current \\\nthread.\n\n If device synchronization is detected, :class:`cupyx.DeviceSynchronized`\n will be raised.\n\n Note that there can be false negatives and positives.\n Device synchronization outside CuPy will not be detected.\n \"\"\"\n _util.experimental('cupyx.allow_synchronize')\n old = _is_allowed()\n _thread_local.allowed = allow\n try:\n yield\n finally:\n _thread_local.allowed = old\n", "path": "cupy/_core/syncdetect.py"}]} | 1,207 | 333 |
gh_patches_debug_33243 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-760 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'testing/constraints-*.txt' not used
From #757.
In [`noxfile.py`](https://github.com/googleapis/google-auth-library-python/blob/a9234423cb2b69068fc0d30a5a0ee86a599ab8b7/noxfile.py#L87-L89), the constraints are not being used, which is why the one populated file, [`testing/constraints-3.6.txt`](https://github.com/googleapis/google-auth-library-python/blob/master/testing/constraints-3.6.txt), is in an invalid state (multiple entries for `rsa`, maybe more issues).
</issue>
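
For context, the usual way to wire these files into nox is to hand the matching `testing/constraints-<python>.txt` to `pip` via its `-c` flag at install time. A rough sketch of one session doing that (the dependency list is abbreviated and the exact wiring is illustrative, not the confirmed fix):

```python
import pathlib

import nox

CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
TEST_DEPENDENCIES = ["pytest", "mock"]  # abbreviated; see the full list in noxfile.py below


@nox.session(python=["3.6", "3.7", "3.8", "3.9"])
def unit(session):
    constraints_path = str(
        CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
    )
    # Apply the per-Python constraints to every install in this session.
    session.install("-c", constraints_path, *TEST_DEPENDENCIES)
    session.install("-c", constraints_path, ".")
    session.run("pytest", "tests")
```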
<code>
[start of noxfile.py]
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import shutil
16 import os
17 import nox
18
19 TEST_DEPENDENCIES = [
20 "flask",
21 "freezegun",
22 "mock",
23 "oauth2client",
24 "pyopenssl",
25 "pytest",
26 "pytest-cov",
27 "pytest-localserver",
28 "pyu2f",
29 "requests",
30 "urllib3",
31 "cryptography",
32 "responses",
33 "grpcio",
34 ]
35
36 ASYNC_DEPENDENCIES = [
37 "pytest-asyncio",
38 "aioresponses",
39 "asynctest",
40 "aiohttp!=3.7.4.post0",
41 ]
42
43 BLACK_VERSION = "black==19.3b0"
44 BLACK_PATHS = [
45 "google",
46 "tests",
47 "tests_async",
48 "noxfile.py",
49 "setup.py",
50 "docs/conf.py",
51 ]
52
53
54 @nox.session(python="3.7")
55 def lint(session):
56 session.install("flake8", "flake8-import-order", "docutils", BLACK_VERSION)
57 session.install(".")
58 session.run("black", "--check", *BLACK_PATHS)
59 session.run(
60 "flake8",
61 "--import-order-style=google",
62 "--application-import-names=google,tests,system_tests",
63 "google",
64 "tests",
65 "tests_async",
66 )
67 session.run(
68 "python", "setup.py", "check", "--metadata", "--restructuredtext", "--strict"
69 )
70
71
72 @nox.session(python="3.8")
73 def blacken(session):
74 """Run black.
75 Format code to uniform standard.
76 The Python version should be consistent with what is
77 supplied in the Python Owlbot postprocessor.
78
79 https://github.com/googleapis/synthtool/blob/master/docker/owlbot/python/Dockerfile
80 """
81 session.install(BLACK_VERSION)
82 session.run("black", *BLACK_PATHS)
83
84
85 @nox.session(python=["3.6", "3.7", "3.8", "3.9"])
86 def unit(session):
87 session.install(*TEST_DEPENDENCIES)
88 session.install(*(ASYNC_DEPENDENCIES))
89 session.install(".")
90 session.run(
91 "pytest",
92 f"--junitxml=unit_{session.python}_sponge_log.xml",
93 "--cov=google.auth",
94 "--cov=google.oauth2",
95 "--cov=tests",
96 "tests",
97 "tests_async",
98 )
99
100
101 @nox.session(python=["2.7"])
102 def unit_prev_versions(session):
103 session.install(".")
104 session.install(*TEST_DEPENDENCIES)
105 session.run(
106 "pytest",
107 f"--junitxml=unit_{session.python}_sponge_log.xml",
108 "--cov=google.auth",
109 "--cov=google.oauth2",
110 "--cov=tests",
111 "tests",
112 )
113
114
115 @nox.session(python="3.7")
116 def cover(session):
117 session.install(*TEST_DEPENDENCIES)
118 session.install(*(ASYNC_DEPENDENCIES))
119 session.install(".")
120 session.run(
121 "pytest",
122 "--cov=google.auth",
123 "--cov=google.oauth2",
124 "--cov=tests",
125 "--cov=tests_async",
126 "--cov-report=",
127 "tests",
128 "tests_async",
129 )
130 session.run("coverage", "report", "--show-missing", "--fail-under=100")
131
132
133 @nox.session(python="3.7")
134 def docgen(session):
135 session.env["SPHINX_APIDOC_OPTIONS"] = "members,inherited-members,show-inheritance"
136 session.install(*TEST_DEPENDENCIES)
137 session.install("sphinx")
138 session.install(".")
139 session.run("rm", "-r", "docs/reference")
140 session.run(
141 "sphinx-apidoc",
142 "--output-dir",
143 "docs/reference",
144 "--separate",
145 "--module-first",
146 "google",
147 )
148
149
150 @nox.session(python="3.7")
151 def docs(session):
152 """Build the docs for this library."""
153
154 session.install("-e", ".[aiohttp]")
155 session.install(
156 "sphinx<3.0.0", "alabaster", "recommonmark", "sphinx-docstring-typing"
157 )
158
159 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
160 session.run(
161 "sphinx-build",
162 "-T", # show full traceback on exception
163 "-W", # warnings as errors
164 "-N", # no colors
165 "-b",
166 "html",
167 "-d",
168 os.path.join("docs", "_build", "doctrees", ""),
169 os.path.join("docs", ""),
170 os.path.join("docs", "_build", "html", ""),
171 )
172
173
174 @nox.session(python="pypy")
175 def pypy(session):
176 session.install(*TEST_DEPENDENCIES)
177 session.install(*ASYNC_DEPENDENCIES)
178 session.install(".")
179 session.run(
180 "pytest",
181 f"--junitxml=unit_{session.python}_sponge_log.xml",
182 "--cov=google.auth",
183 "--cov=google.oauth2",
184 "--cov=tests",
185 "tests",
186 "tests_async",
187 )
188
[end of noxfile.py]
[start of setup.py]
1 # Copyright 2014 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 from setuptools import find_packages
19 from setuptools import setup
20
21
22 DEPENDENCIES = (
23 "cachetools>=2.0.0,<5.0",
24 "pyasn1-modules>=0.2.1",
25 # rsa==4.5 is the last version to support 2.7
26 # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233
27 'rsa<4.6; python_version < "3.6"',
28 'rsa>=3.1.4,<5; python_version >= "3.6"',
29 "setuptools>=40.3.0",
30 "six>=1.9.0",
31 )
32
33 extras = {
34 "aiohttp": [
35 "aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'",
36 "requests >= 2.18.0, < 3.0.0dev",
37 "urllib3 >= 1.0.0, <2.0.0dev",
38 ],
39 "pyopenssl": "pyopenssl>=20.0.0",
40 "reauth": "pyu2f>=0.1.5",
41 }
42
43 with io.open("README.rst", "r") as fh:
44 long_description = fh.read()
45
46 package_root = os.path.abspath(os.path.dirname(__file__))
47
48 version = {}
49 with open(os.path.join(package_root, "google/auth/version.py")) as fp:
50 exec(fp.read(), version)
51 version = version["__version__"]
52
53 setup(
54 name="google-auth",
55 version=version,
56 author="Google Cloud Platform",
57 author_email="[email protected]",
58 description="Google Authentication Library",
59 long_description=long_description,
60 url="https://github.com/googleapis/google-auth-library-python",
61 packages=find_packages(exclude=("tests*", "system_tests*")),
62 namespace_packages=("google",),
63 install_requires=DEPENDENCIES,
64 extras_require=extras,
65 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*",
66 license="Apache 2.0",
67 keywords="google auth oauth client",
68 classifiers=[
69 "Programming Language :: Python :: 2",
70 "Programming Language :: Python :: 2.7",
71 "Programming Language :: Python :: 3",
72 "Programming Language :: Python :: 3.6",
73 "Programming Language :: Python :: 3.7",
74 "Programming Language :: Python :: 3.8",
75 "Programming Language :: Python :: 3.9",
76 "Development Status :: 5 - Production/Stable",
77 "Intended Audience :: Developers",
78 "License :: OSI Approved :: Apache Software License",
79 "Operating System :: POSIX",
80 "Operating System :: Microsoft :: Windows",
81 "Operating System :: MacOS :: MacOS X",
82 "Operating System :: OS Independent",
83 "Topic :: Internet :: WWW/HTTP",
84 ],
85 )
86
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -12,10 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import shutil
import os
+import pathlib
+import shutil
+
import nox
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+
TEST_DEPENDENCIES = [
"flask",
"freezegun",
@@ -84,15 +88,20 @@
@nox.session(python=["3.6", "3.7", "3.8", "3.9"])
def unit(session):
- session.install(*TEST_DEPENDENCIES)
- session.install(*(ASYNC_DEPENDENCIES))
- session.install(".")
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
+ add_constraints = ["-c", constraints_path]
+ session.install(*(TEST_DEPENDENCIES + add_constraints))
+ session.install(*(ASYNC_DEPENDENCIES + add_constraints))
+ session.install(".", *add_constraints)
session.run(
"pytest",
f"--junitxml=unit_{session.python}_sponge_log.xml",
"--cov=google.auth",
"--cov=google.oauth2",
"--cov=tests",
+ "--cov-report=term-missing",
"tests",
"tests_async",
)
@@ -123,7 +132,7 @@
"--cov=google.oauth2",
"--cov=tests",
"--cov=tests_async",
- "--cov-report=",
+ "--cov-report=term-missing",
"tests",
"tests_async",
)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,8 +33,7 @@
extras = {
"aiohttp": [
"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'",
- "requests >= 2.18.0, < 3.0.0dev",
- "urllib3 >= 1.0.0, <2.0.0dev",
+ "requests >= 2.20.0, < 3.0.0dev",
],
"pyopenssl": "pyopenssl>=20.0.0",
"reauth": "pyu2f>=0.1.5",
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -12,10 +12,14 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import shutil\n import os\n+import pathlib\n+import shutil\n+\n import nox\n \n+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()\n+\n TEST_DEPENDENCIES = [\n \"flask\",\n \"freezegun\",\n@@ -84,15 +88,20 @@\n \n @nox.session(python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\"])\n def unit(session):\n- session.install(*TEST_DEPENDENCIES)\n- session.install(*(ASYNC_DEPENDENCIES))\n- session.install(\".\")\n+ constraints_path = str(\n+ CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n+ )\n+ add_constraints = [\"-c\", constraints_path]\n+ session.install(*(TEST_DEPENDENCIES + add_constraints))\n+ session.install(*(ASYNC_DEPENDENCIES + add_constraints))\n+ session.install(\".\", *add_constraints)\n session.run(\n \"pytest\",\n f\"--junitxml=unit_{session.python}_sponge_log.xml\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n+ \"--cov-report=term-missing\",\n \"tests\",\n \"tests_async\",\n )\n@@ -123,7 +132,7 @@\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"--cov=tests_async\",\n- \"--cov-report=\",\n+ \"--cov-report=term-missing\",\n \"tests\",\n \"tests_async\",\n )\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,8 +33,7 @@\n extras = {\n \"aiohttp\": [\n \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\",\n- \"requests >= 2.18.0, < 3.0.0dev\",\n- \"urllib3 >= 1.0.0, <2.0.0dev\",\n+ \"requests >= 2.20.0, < 3.0.0dev\",\n ],\n \"pyopenssl\": \"pyopenssl>=20.0.0\",\n \"reauth\": \"pyu2f>=0.1.5\",\n", "issue": "'testing/constraints-*.txt' not used\nFrom #757.\r\n\r\nIn [`noxfile.py`](https://github.com/googleapis/google-auth-library-python/blob/a9234423cb2b69068fc0d30a5a0ee86a599ab8b7/noxfile.py#L87-L89), the constraints are not being used, which is why the one populated file, [`testing/constraints-3.6.txt](https://github.com/googleapis/google-auth-library-python/blob/master/testing/constraints-3.6.txt) is in an invalid state (multiple entries for `rsa`, maybe more issues).\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport shutil\nimport os\nimport nox\n\nTEST_DEPENDENCIES = [\n \"flask\",\n \"freezegun\",\n \"mock\",\n \"oauth2client\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-localserver\",\n \"pyu2f\",\n \"requests\",\n \"urllib3\",\n \"cryptography\",\n \"responses\",\n \"grpcio\",\n]\n\nASYNC_DEPENDENCIES = [\n \"pytest-asyncio\",\n \"aioresponses\",\n \"asynctest\",\n \"aiohttp!=3.7.4.post0\",\n]\n\nBLACK_VERSION = \"black==19.3b0\"\nBLACK_PATHS = [\n \"google\",\n \"tests\",\n \"tests_async\",\n \"noxfile.py\",\n \"setup.py\",\n \"docs/conf.py\",\n]\n\n\[email protected](python=\"3.7\")\ndef lint(session):\n session.install(\"flake8\", \"flake8-import-order\", \"docutils\", BLACK_VERSION)\n 
session.install(\".\")\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n session.run(\n \"flake8\",\n \"--import-order-style=google\",\n \"--application-import-names=google,tests,system_tests\",\n \"google\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\n \"python\", \"setup.py\", \"check\", \"--metadata\", \"--restructuredtext\", \"--strict\"\n )\n\n\[email protected](python=\"3.8\")\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n The Python version should be consistent with what is\n supplied in the Python Owlbot postprocessor.\n\n https://github.com/googleapis/synthtool/blob/master/docker/owlbot/python/Dockerfile\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\"])\ndef unit(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n f\"--junitxml=unit_{session.python}_sponge_log.xml\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n\n\[email protected](python=[\"2.7\"])\ndef unit_prev_versions(session):\n session.install(\".\")\n session.install(*TEST_DEPENDENCIES)\n session.run(\n \"pytest\",\n f\"--junitxml=unit_{session.python}_sponge_log.xml\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n )\n\n\[email protected](python=\"3.7\")\ndef cover(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*(ASYNC_DEPENDENCIES))\n session.install(\".\")\n session.run(\n \"pytest\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"--cov=tests_async\",\n \"--cov-report=\",\n \"tests\",\n \"tests_async\",\n )\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n\n\[email protected](python=\"3.7\")\ndef docgen(session):\n session.env[\"SPHINX_APIDOC_OPTIONS\"] = \"members,inherited-members,show-inheritance\"\n session.install(*TEST_DEPENDENCIES)\n session.install(\"sphinx\")\n session.install(\".\")\n session.run(\"rm\", \"-r\", \"docs/reference\")\n session.run(\n \"sphinx-apidoc\",\n \"--output-dir\",\n \"docs/reference\",\n \"--separate\",\n \"--module-first\",\n \"google\",\n )\n\n\[email protected](python=\"3.7\")\ndef docs(session):\n \"\"\"Build the docs for this library.\"\"\"\n\n session.install(\"-e\", \".[aiohttp]\")\n session.install(\n \"sphinx<3.0.0\", \"alabaster\", \"recommonmark\", \"sphinx-docstring-typing\"\n )\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-T\", # show full traceback on exception\n \"-W\", # warnings as errors\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n\n\[email protected](python=\"pypy\")\ndef pypy(session):\n session.install(*TEST_DEPENDENCIES)\n session.install(*ASYNC_DEPENDENCIES)\n session.install(\".\")\n session.run(\n \"pytest\",\n f\"--junitxml=unit_{session.python}_sponge_log.xml\",\n \"--cov=google.auth\",\n \"--cov=google.oauth2\",\n \"--cov=tests\",\n \"tests\",\n \"tests_async\",\n )\n", "path": "noxfile.py"}, {"content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n \"cachetools>=2.0.0,<5.0\",\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n 'rsa<4.6; python_version < \"3.6\"',\n 'rsa>=3.1.4,<5; python_version >= \"3.6\"',\n \"setuptools>=40.3.0\",\n \"six>=1.9.0\",\n)\n\nextras = {\n \"aiohttp\": [\n \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\",\n \"requests >= 2.18.0, < 3.0.0dev\",\n \"urllib3 >= 1.0.0, <2.0.0dev\",\n ],\n \"pyopenssl\": \"pyopenssl>=20.0.0\",\n \"reauth\": \"pyu2f>=0.1.5\",\n}\n\nwith io.open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nversion = {}\nwith open(os.path.join(package_root, \"google/auth/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\nsetup(\n name=\"google-auth\",\n version=version,\n author=\"Google Cloud Platform\",\n author_email=\"[email protected]\",\n description=\"Google Authentication Library\",\n long_description=long_description,\n url=\"https://github.com/googleapis/google-auth-library-python\",\n packages=find_packages(exclude=(\"tests*\", \"system_tests*\")),\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]} | 3,335 | 556 |
gh_patches_debug_27897 | rasdani/github-patches | git_diff | freqtrade__freqtrade-4144 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Possibly slightly wrong informative pair merging
When merging a longer timeframe into a shorter one, the timedelta is added to the date:
https://github.com/freqtrade/freqtrade/blob/f320cb0d7a12e064018967cf049dd6719fff8ddb/freqtrade/strategy/strategy_helper.py#L35
Then it merges into the shorter timeframe using these dates:
https://github.com/freqtrade/freqtrade/blob/f320cb0d7a12e064018967cf049dd6719fff8ddb/freqtrade/strategy/strategy_helper.py#L42-L43
So let's say we are merging a `1h` timeframe into a `5m` timeframe and there is a signal `True` at `00:00` on the `1h` timeseries.
With this merge, the signal will appear in the `5m` timeseries also at `00:00`. However, the `00:00` candle for the `5m` timeframe is only received at `00:05`, that is, five minutes later than the time you actually received the `1h` candle; it should have been attached to the `5m` candle with date `23:55`.
So after merging, the values should either be shifted backward (`shift(-1)`), or the merge dates should be reduced by one unit of the timedelta of the shorter timeframe:
```python
informative['date_merge'] = informative["date"] + pd.to_timedelta(minutes_inf, 'm') - pd.to_timedelta(minutes, 'm')
```
</issue>
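
To make the off-by-one concrete with a small worked example (dates are candle *open* times, so a candle's data is only available once it closes): the `1h` candle dated `23:00` closes at `00:00`, which is exactly when the `5m` candle dated `23:55` closes — that is the earliest `5m` row that may safely carry the `1h` values, not the row dated `00:00`. A quick pandas sketch of the date arithmetic:

```python
import pandas as pd

minutes, minutes_inf = 5, 60                    # 5m base timeframe, 1h informative
date_1h = pd.Timestamp("2021-01-01 23:00")      # 1h candle open; it closes at 00:00

# Current behaviour: key = open date + one informative candle.
current_key = date_1h + pd.to_timedelta(minutes_inf, "m")  # 00:00 -> that 5m row closes at 00:05
# Suggested fix: also subtract one base candle.
fixed_key = current_key - pd.to_timedelta(minutes, "m")    # 23:55 -> that 5m row closes at 00:00

print(current_key, fixed_key)
```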
<code>
[start of freqtrade/strategy/strategy_helper.py]
1 import pandas as pd
2
3 from freqtrade.exchange import timeframe_to_minutes
4
5
6 def merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame,
7 timeframe: str, timeframe_inf: str, ffill: bool = True) -> pd.DataFrame:
8 """
9 Correctly merge informative samples to the original dataframe, avoiding lookahead bias.
10
11 Since dates are candle open dates, merging a 15m candle that starts at 15:00, and a
12 1h candle that starts at 15:00 will result in all candles to know the close at 16:00
13 which they should not know.
14
15 Moves the date of the informative pair by 1 time interval forward.
16 This way, the 14:00 1h candle is merged to 15:00 15m candle, since the 14:00 1h candle is the
17 last candle that's closed at 15:00, 15:15, 15:30 or 15:45.
18
19 Assuming inf_tf = '1d' - then the resulting columns will be:
20 date_1d, open_1d, high_1d, low_1d, close_1d, rsi_1d
21
22 :param dataframe: Original dataframe
23 :param informative: Informative pair, most likely loaded via dp.get_pair_dataframe
24 :param timeframe: Timeframe of the original pair sample.
25 :param timeframe_inf: Timeframe of the informative pair sample.
26 :param ffill: Forwardfill missing values - optional but usually required
27 """
28
29 minutes_inf = timeframe_to_minutes(timeframe_inf)
30 minutes = timeframe_to_minutes(timeframe)
31 if minutes >= minutes_inf:
32 # No need to forwardshift if the timeframes are identical
33 informative['date_merge'] = informative["date"]
34 else:
35 informative['date_merge'] = informative["date"] + pd.to_timedelta(minutes_inf, 'm')
36
37 # Rename columns to be unique
38 informative.columns = [f"{col}_{timeframe_inf}" for col in informative.columns]
39
40 # Combine the 2 dataframes
41 # all indicators on the informative sample MUST be calculated before this point
42 dataframe = pd.merge(dataframe, informative, left_on='date',
43 right_on=f'date_merge_{timeframe_inf}', how='left')
44 dataframe = dataframe.drop(f'date_merge_{timeframe_inf}', axis=1)
45
46 if ffill:
47 dataframe = dataframe.ffill()
48
49 return dataframe
50
[end of freqtrade/strategy/strategy_helper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/freqtrade/strategy/strategy_helper.py b/freqtrade/strategy/strategy_helper.py
--- a/freqtrade/strategy/strategy_helper.py
+++ b/freqtrade/strategy/strategy_helper.py
@@ -24,15 +24,24 @@
:param timeframe: Timeframe of the original pair sample.
:param timeframe_inf: Timeframe of the informative pair sample.
:param ffill: Forwardfill missing values - optional but usually required
+ :return: Merged dataframe
+ :raise: ValueError if the secondary timeframe is shorter than the dataframe timeframe
"""
minutes_inf = timeframe_to_minutes(timeframe_inf)
minutes = timeframe_to_minutes(timeframe)
- if minutes >= minutes_inf:
+ if minutes == minutes_inf:
# No need to forwardshift if the timeframes are identical
informative['date_merge'] = informative["date"]
+ elif minutes < minutes_inf:
+ # Subtract "small" timeframe so merging is not delayed by 1 small candle
+ # Detailed explanation in https://github.com/freqtrade/freqtrade/issues/4073
+ informative['date_merge'] = (
+ informative["date"] + pd.to_timedelta(minutes_inf, 'm') - pd.to_timedelta(minutes, 'm')
+ )
else:
- informative['date_merge'] = informative["date"] + pd.to_timedelta(minutes_inf, 'm')
+ raise ValueError("Tried to merge a faster timeframe to a slower timeframe."
+ "This would create new rows, and can throw off your regular indicators.")
# Rename columns to be unique
informative.columns = [f"{col}_{timeframe_inf}" for col in informative.columns]
| {"golden_diff": "diff --git a/freqtrade/strategy/strategy_helper.py b/freqtrade/strategy/strategy_helper.py\n--- a/freqtrade/strategy/strategy_helper.py\n+++ b/freqtrade/strategy/strategy_helper.py\n@@ -24,15 +24,24 @@\n :param timeframe: Timeframe of the original pair sample.\n :param timeframe_inf: Timeframe of the informative pair sample.\n :param ffill: Forwardfill missing values - optional but usually required\n+ :return: Merged dataframe\n+ :raise: ValueError if the secondary timeframe is shorter than the dataframe timeframe\n \"\"\"\n \n minutes_inf = timeframe_to_minutes(timeframe_inf)\n minutes = timeframe_to_minutes(timeframe)\n- if minutes >= minutes_inf:\n+ if minutes == minutes_inf:\n # No need to forwardshift if the timeframes are identical\n informative['date_merge'] = informative[\"date\"]\n+ elif minutes < minutes_inf:\n+ # Subtract \"small\" timeframe so merging is not delayed by 1 small candle\n+ # Detailed explanation in https://github.com/freqtrade/freqtrade/issues/4073\n+ informative['date_merge'] = (\n+ informative[\"date\"] + pd.to_timedelta(minutes_inf, 'm') - pd.to_timedelta(minutes, 'm')\n+ )\n else:\n- informative['date_merge'] = informative[\"date\"] + pd.to_timedelta(minutes_inf, 'm')\n+ raise ValueError(\"Tried to merge a faster timeframe to a slower timeframe.\"\n+ \"This would create new rows, and can throw off your regular indicators.\")\n \n # Rename columns to be unique\n informative.columns = [f\"{col}_{timeframe_inf}\" for col in informative.columns]\n", "issue": "Possibly slightly wrong informative pair merging\nWhen merging longer timeframe to a shorter one, the timedelta is added to the date:\r\nhttps://github.com/freqtrade/freqtrade/blob/f320cb0d7a12e064018967cf049dd6719fff8ddb/freqtrade/strategy/strategy_helper.py#L35\r\n\r\nthen it merges to the shorter timeframe with these dates..\r\nhttps://github.com/freqtrade/freqtrade/blob/f320cb0d7a12e064018967cf049dd6719fff8ddb/freqtrade/strategy/strategy_helper.py#L42-L43\r\n\r\nSo lets say we are merging a `1h` timeframe to a `5m` timeframe and there is a signal `True` at `00:00` on the `1h` timeseries. \r\nWith this merge, the signal will appear in the `5m` timeseries also at `00:00`. However the `00:00` candle for the `5m` timeframe is received at `00:05`, that is five minutes later than the time you actually received the `1h` candle, which should have been received at the candle with date `11:55`. 
\r\nSo after merging, the values should be shifted backward (`shift(-1)`)..or the merging dates should be reduced by one unit of timedelta of the shorter timeframe..\r\n```python\r\ninformative['date_merge'] = informative[\"date\"] + pd.to_timedelta(minutes_inf, 'm') - pd.to_timedelta(minutes, 'm')\r\n```\n", "before_files": [{"content": "import pandas as pd\n\nfrom freqtrade.exchange import timeframe_to_minutes\n\n\ndef merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame,\n timeframe: str, timeframe_inf: str, ffill: bool = True) -> pd.DataFrame:\n \"\"\"\n Correctly merge informative samples to the original dataframe, avoiding lookahead bias.\n\n Since dates are candle open dates, merging a 15m candle that starts at 15:00, and a\n 1h candle that starts at 15:00 will result in all candles to know the close at 16:00\n which they should not know.\n\n Moves the date of the informative pair by 1 time interval forward.\n This way, the 14:00 1h candle is merged to 15:00 15m candle, since the 14:00 1h candle is the\n last candle that's closed at 15:00, 15:15, 15:30 or 15:45.\n\n Assuming inf_tf = '1d' - then the resulting columns will be:\n date_1d, open_1d, high_1d, low_1d, close_1d, rsi_1d\n\n :param dataframe: Original dataframe\n :param informative: Informative pair, most likely loaded via dp.get_pair_dataframe\n :param timeframe: Timeframe of the original pair sample.\n :param timeframe_inf: Timeframe of the informative pair sample.\n :param ffill: Forwardfill missing values - optional but usually required\n \"\"\"\n\n minutes_inf = timeframe_to_minutes(timeframe_inf)\n minutes = timeframe_to_minutes(timeframe)\n if minutes >= minutes_inf:\n # No need to forwardshift if the timeframes are identical\n informative['date_merge'] = informative[\"date\"]\n else:\n informative['date_merge'] = informative[\"date\"] + pd.to_timedelta(minutes_inf, 'm')\n\n # Rename columns to be unique\n informative.columns = [f\"{col}_{timeframe_inf}\" for col in informative.columns]\n\n # Combine the 2 dataframes\n # all indicators on the informative sample MUST be calculated before this point\n dataframe = pd.merge(dataframe, informative, left_on='date',\n right_on=f'date_merge_{timeframe_inf}', how='left')\n dataframe = dataframe.drop(f'date_merge_{timeframe_inf}', axis=1)\n\n if ffill:\n dataframe = dataframe.ffill()\n\n return dataframe\n", "path": "freqtrade/strategy/strategy_helper.py"}]} | 1,548 | 372 |
gh_patches_debug_13394 | rasdani/github-patches | git_diff | ManimCommunity__manim-1923 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Command line help text is cut off
## Enhancement proposal
When running `manim --help`, the following output is returned:
```
Manim Community v0.9.0
Usage: manim [OPTIONS] COMMAND [ARGS]...
Animation engine for explanatory math videos
Options:
--version Show version and exit.
--help Show this message and exit.
Commands:
render* Render SCENE(S) from the input FILE.
cfg Manages Manim configuration files.
init Sets up a project in current working directory with default...
new Create a new project or insert a new scene.
plugins Manages Manim plugins.
Made with <3 by Manim Community developers.
```
As you can see, the help text for `init` is cut off, and does not provide sufficient information about what that command does.
## Additional comments
<!-- Add further context that you think might be relevant. -->
</issue>
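
Click builds the one-line summary shown in the command list from the command's help text and truncates it to a fixed width, which is what produces the trailing `...`. Passing an explicit `short_help` to `@click.command` is the usual way to control that summary. A minimal sketch (the exact wording is illustrative, not taken from the repo):

```python
import click


@click.command(
    short_help="Sets up a new project in the current working directory with default settings.",
)
def init():
    """Sets up a new project in the current working directory with default settings.

    It copies files from the templates directory into the current working dir.
    """
```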
<code>
[start of manim/cli/init/commands.py]
1 """Manim's init subcommand.
2
3 Manim's init subcommand is accessed in the command-line interface via ``manim
4 init``. Here you can specify options, subcommands, and subgroups for the init
5 group.
6
7 """
8 from pathlib import Path
9
10 import click
11
12 from ...constants import CONTEXT_SETTINGS, EPILOG
13 from ...utils.file_ops import copy_template_files
14
15
16 @click.command(
17 context_settings=CONTEXT_SETTINGS,
18 epilog=EPILOG,
19 )
20 def init():
21 """Sets up a project in current working directory with default settings.
22
23 It copies files from templates directory and pastes them in the current working dir.
24
25 The new project is set up with default settings.
26 """
27 cfg = Path("manim.cfg")
28 if cfg.exists():
29 raise FileExistsError(f"\t{cfg} exists\n")
30 else:
31 copy_template_files()
32
[end of manim/cli/init/commands.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/manim/cli/init/commands.py b/manim/cli/init/commands.py
--- a/manim/cli/init/commands.py
+++ b/manim/cli/init/commands.py
@@ -16,13 +16,14 @@
@click.command(
context_settings=CONTEXT_SETTINGS,
epilog=EPILOG,
+ short_help="""Sets up a new project in current working directory with default settings.\n
+It copies files from templates directory and pastes them in the current working dir.
+""",
)
def init():
- """Sets up a project in current working directory with default settings.
+ """Sets up a new project in current working directory with default settings.
It copies files from templates directory and pastes them in the current working dir.
-
- The new project is set up with default settings.
"""
cfg = Path("manim.cfg")
if cfg.exists():
| {"golden_diff": "diff --git a/manim/cli/init/commands.py b/manim/cli/init/commands.py\n--- a/manim/cli/init/commands.py\n+++ b/manim/cli/init/commands.py\n@@ -16,13 +16,14 @@\n @click.command(\n context_settings=CONTEXT_SETTINGS,\n epilog=EPILOG,\n+ short_help=\"\"\"Sets up a new project in current working directory with default settings.\\n\n+It copies files from templates directory and pastes them in the current working dir.\n+\"\"\",\n )\n def init():\n- \"\"\"Sets up a project in current working directory with default settings.\n+ \"\"\"Sets up a new project in current working directory with default settings.\n \n It copies files from templates directory and pastes them in the current working dir.\n-\n- The new project is set up with default settings.\n \"\"\"\n cfg = Path(\"manim.cfg\")\n if cfg.exists():\n", "issue": "Command line help text is cut off\n## Enhancement proposal\r\nWhen running `manim --help`, the following output is returned:\r\n\r\n```\r\nManim Community v0.9.0\r\n\r\nUsage: manim [OPTIONS] COMMAND [ARGS]...\r\n\r\n Animation engine for explanatory math videos\r\n\r\nOptions:\r\n --version Show version and exit.\r\n --help Show this message and exit.\r\n\r\nCommands:\r\n render* Render SCENE(S) from the input FILE.\r\n cfg Manages Manim configuration files.\r\n init Sets up a project in current working directory with default...\r\n new Create a new project or insert a new scene.\r\n plugins Manages Manim plugins.\r\n\r\n Made with <3 by Manim Community developers.\r\n```\r\n\r\nAs you can see, the help text for `init` is cut off, and does not provide sufficient information about what that command does.\r\n## Additional comments\r\n<!-- Add further context that you think might be relevant. -->\r\n\n", "before_files": [{"content": "\"\"\"Manim's init subcommand.\n\nManim's init subcommand is accessed in the command-line interface via ``manim\ninit``. Here you can specify options, subcommands, and subgroups for the init\ngroup.\n\n\"\"\"\nfrom pathlib import Path\n\nimport click\n\nfrom ...constants import CONTEXT_SETTINGS, EPILOG\nfrom ...utils.file_ops import copy_template_files\n\n\[email protected](\n context_settings=CONTEXT_SETTINGS,\n epilog=EPILOG,\n)\ndef init():\n \"\"\"Sets up a project in current working directory with default settings.\n\n It copies files from templates directory and pastes them in the current working dir.\n\n The new project is set up with default settings.\n \"\"\"\n cfg = Path(\"manim.cfg\")\n if cfg.exists():\n raise FileExistsError(f\"\\t{cfg} exists\\n\")\n else:\n copy_template_files()\n", "path": "manim/cli/init/commands.py"}]} | 982 | 193 |
gh_patches_debug_10626 | rasdani/github-patches | git_diff | spotify__luigi-3216 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
using default_scheduler_url as a mounting point (not root `http://address/mount`) behind proxy not working
<!---
We use GitHub issues mainly for tracking bugs and feature requests.
Questions for how to use luigi can be sent to the mailing list.
Currently, there are no strict procedures or guidelines for submitting issues.
In short, please just use common sense.
Common sense includes this at bare-minimum:
* search for similar issues posted before creating a new issue.
* Use markdown to format all code/logs. Issues which are hard to read
when rendered on GitHub might be closed with a friendly reminder of this.
* If applicable, reading relevant parts of the documentation.
Also, add steps to reproduce the bug, if applicable. Sample code would be nice too :)
For more information on how to submit valuable contributions,
see https://opensource.guide/how-to-contribute/#how-to-submit-a-contribution
-->
I need to mount `luigi` behind a nginx, for example luigi at `http://address/mount`.
For that I configure:
```ini
[core]
default_scheduler_url=http://address/mount
....
```
GUI is ok and works but, CLI not due to url resolution. it happens there https://github.com/spotify/luigi/blob/c13566418c92de3e4d8d33ead4e7c936511afae1/luigi/rpc.py#L54
To understand what happened:
```python
parsed=urlparse('http://address/mount')
url='/api/add_task'
urljoin(parsed.geturl(), url)
# ==> give 'http://address/api/add_task'
# expected http://address/mount/api/add_task
```
What I must do for working - slash at the end of mount point, no slash for url -:
```python
parsed=urlparse('http://address/mount/')
url='api/add_task'
urljoin(parsed.geturl(), url)
# ==> http://address/mount/api/add_task
```
</issue>
<code>
[start of luigi/rpc.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2012-2015 Spotify AB
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 #
17 """
18 Implementation of the REST interface between the workers and the server.
19 rpc.py implements the client side of it, server.py implements the server side.
20 See :doc:`/central_scheduler` for more info.
21 """
22 import abc
23 import os
24 import json
25 import logging
26 import socket
27 import base64
28
29 from urllib.parse import urljoin, urlencode, urlparse
30 from urllib.request import urlopen, Request
31 from urllib.error import URLError
32
33 from tenacity import Retrying, wait_fixed, stop_after_attempt
34 from luigi import configuration
35 from luigi.scheduler import RPC_METHODS
36
37 HAS_UNIX_SOCKET = True
38 HAS_REQUESTS = True
39
40
41 try:
42 import requests_unixsocket as requests
43 except ImportError:
44 HAS_UNIX_SOCKET = False
45 try:
46 import requests
47 except ImportError:
48 HAS_REQUESTS = False
49
50
51 logger = logging.getLogger('luigi-interface') # TODO: 'interface'?
52
53
54 def _urljoin(base, url):
55 """
56 Join relative URLs to base URLs like urllib.parse.urljoin but support
57 arbitrary URIs (esp. 'http+unix://').
58 """
59 parsed = urlparse(base)
60 scheme = parsed.scheme
61 return urlparse(
62 urljoin(parsed._replace(scheme='http').geturl(), url)
63 )._replace(scheme=scheme).geturl()
64
65
66 class RPCError(Exception):
67
68 def __init__(self, message, sub_exception=None):
69 super(RPCError, self).__init__(message)
70 self.sub_exception = sub_exception
71
72
73 class _FetcherInterface(metaclass=abc.ABCMeta):
74 @abc.abstractmethod
75 def fetch(self, full_url, body, timeout):
76 pass
77
78 @abc.abstractmethod
79 def close(self):
80 pass
81
82
83 class URLLibFetcher(_FetcherInterface):
84 raises = (URLError, socket.timeout)
85
86 def _create_request(self, full_url, body=None):
87 # when full_url contains basic auth info, extract it and set the Authorization header
88 url = urlparse(full_url)
89 if url.username:
90 # base64 encoding of username:password
91 auth = base64.b64encode('{}:{}'.format(url.username, url.password or '').encode('utf-8'))
92 auth = auth.decode('utf-8')
93 # update full_url and create a request object with the auth header set
94 full_url = url._replace(netloc=url.netloc.split('@', 1)[-1]).geturl()
95 req = Request(full_url)
96 req.add_header('Authorization', 'Basic {}'.format(auth))
97 else:
98 req = Request(full_url)
99
100 # add the request body
101 if body:
102 req.data = urlencode(body).encode('utf-8')
103
104 return req
105
106 def fetch(self, full_url, body, timeout):
107 req = self._create_request(full_url, body=body)
108 return urlopen(req, timeout=timeout).read().decode('utf-8')
109
110 def close(self):
111 pass
112
113
114 class RequestsFetcher(_FetcherInterface):
115 def __init__(self):
116 from requests import exceptions as requests_exceptions
117 self.raises = requests_exceptions.RequestException
118 self.session = requests.Session()
119 self.process_id = os.getpid()
120
121 def check_pid(self):
122 # if the process id change changed from when the session was created
123 # a new session needs to be setup since requests isn't multiprocessing safe.
124 if os.getpid() != self.process_id:
125 self.session = requests.Session()
126 self.process_id = os.getpid()
127
128 def fetch(self, full_url, body, timeout):
129 self.check_pid()
130 resp = self.session.post(full_url, data=body, timeout=timeout)
131 resp.raise_for_status()
132 return resp.text
133
134 def close(self):
135 self.session.close()
136
137
138 class RemoteScheduler:
139 """
140 Scheduler proxy object. Talks to a RemoteSchedulerResponder.
141 """
142
143 def __init__(self, url='http://localhost:8082/', connect_timeout=None):
144 assert not url.startswith('http+unix://') or HAS_UNIX_SOCKET, (
145 'You need to install requests-unixsocket for Unix socket support.'
146 )
147
148 self._url = url.rstrip('/')
149 config = configuration.get_config()
150
151 if connect_timeout is None:
152 connect_timeout = config.getfloat('core', 'rpc-connect-timeout', 10.0)
153 self._connect_timeout = connect_timeout
154
155 self._rpc_retry_attempts = config.getint('core', 'rpc-retry-attempts', 3)
156 self._rpc_retry_wait = config.getint('core', 'rpc-retry-wait', 30)
157 self._rpc_log_retries = config.getboolean('core', 'rpc-log-retries', True)
158
159 if HAS_REQUESTS:
160 self._fetcher = RequestsFetcher()
161 else:
162 self._fetcher = URLLibFetcher()
163
164 def close(self):
165 self._fetcher.close()
166
167 def _get_retryer(self):
168 def retry_logging(retry_state):
169 if self._rpc_log_retries:
170 logger.warning("Failed connecting to remote scheduler %r", self._url, exc_info=True)
171 logger.info("Retrying attempt %r of %r (max)" % (retry_state.attempt_number + 1, self._rpc_retry_attempts))
172 logger.info("Wait for %d seconds" % self._rpc_retry_wait)
173
174 return Retrying(wait=wait_fixed(self._rpc_retry_wait),
175 stop=stop_after_attempt(self._rpc_retry_attempts),
176 reraise=True,
177 after=retry_logging)
178
179 def _fetch(self, url_suffix, body):
180 full_url = _urljoin(self._url, url_suffix)
181 scheduler_retry = self._get_retryer()
182
183 try:
184 response = scheduler_retry(self._fetcher.fetch, full_url, body, self._connect_timeout)
185 except self._fetcher.raises as e:
186 raise RPCError(
187 "Errors (%d attempts) when connecting to remote scheduler %r" %
188 (self._rpc_retry_attempts, self._url),
189 e
190 )
191 return response
192
193 def _request(self, url, data, attempts=3, allow_null=True):
194 body = {'data': json.dumps(data)}
195
196 for _ in range(attempts):
197 page = self._fetch(url, body)
198 response = json.loads(page)["response"]
199 if allow_null or response is not None:
200 return response
201 raise RPCError("Received null response from remote scheduler %r" % self._url)
202
203
204 for method_name, method in RPC_METHODS.items():
205 setattr(RemoteScheduler, method_name, method)
206
[end of luigi/rpc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/luigi/rpc.py b/luigi/rpc.py
--- a/luigi/rpc.py
+++ b/luigi/rpc.py
@@ -55,11 +55,12 @@
"""
Join relative URLs to base URLs like urllib.parse.urljoin but support
arbitrary URIs (esp. 'http+unix://').
+ base part is fixed or mounted point, every url contains full base part.
"""
parsed = urlparse(base)
scheme = parsed.scheme
return urlparse(
- urljoin(parsed._replace(scheme='http').geturl(), url)
+ urljoin(parsed._replace(scheme='http').geturl(), parsed.path + (url if url[0] == '/' else '/' + url))
)._replace(scheme=scheme).geturl()
| {"golden_diff": "diff --git a/luigi/rpc.py b/luigi/rpc.py\n--- a/luigi/rpc.py\n+++ b/luigi/rpc.py\n@@ -55,11 +55,12 @@\n \"\"\"\n Join relative URLs to base URLs like urllib.parse.urljoin but support\n arbitrary URIs (esp. 'http+unix://').\n+ base part is fixed or mounted point, every url contains full base part.\n \"\"\"\n parsed = urlparse(base)\n scheme = parsed.scheme\n return urlparse(\n- urljoin(parsed._replace(scheme='http').geturl(), url)\n+ urljoin(parsed._replace(scheme='http').geturl(), parsed.path + (url if url[0] == '/' else '/' + url))\n )._replace(scheme=scheme).geturl()\n", "issue": "using default_scheduler_url as a mounting point (not root `http://address/mount`) behind proxy not working\n<!---\r\nWe use GitHub issues mainly for tracking bugs and feature requests.\r\nQuestions for how to use luigi can be sent to the mailing list.\r\n\r\nCurrently, there are no strict procedures or guidelines for submitting issues.\r\nIn short, please just use common sense.\r\n\r\nCommon sense includes this at bare-minimum:\r\n\r\n * search for similar issues posted before creating a new issue.\r\n * Use markdown to format all code/logs. Issues which are hard to read\r\n when rendered on GitHub might be closed with a friendly reminder of this.\r\n * If applicable, reading relevant parts of the documentation.\r\n\r\nAlso, add steps to reproduce the bug, if applicable. Sample code would be nice too :)\r\n\r\nFor more information on how to submit valuable contributions,\r\nsee https://opensource.guide/how-to-contribute/#how-to-submit-a-contribution\r\n-->\r\nI need to mount `luigi` behind a nginx, for example luigi at `http://address/mount`.\r\nFor that I configure:\r\n```ini\r\n[core]\r\ndefault_scheduler_url=http://address/mount\r\n....\r\n```\r\n\r\nGUI is ok and works but, CLI not due to url resolution. 
it happens there https://github.com/spotify/luigi/blob/c13566418c92de3e4d8d33ead4e7c936511afae1/luigi/rpc.py#L54\r\n\r\nTo understand what happened:\r\n```python\r\nparsed=urlparse('http://address/mount')\r\nurl='/api/add_task'\r\nurljoin(parsed.geturl(), url)\r\n# ==> give 'http://address/api/add_task'\r\n# expected http://address/mount/api/add_task\r\n```\r\nWhat I must do for working - slash at the end of mount point, no slash for url -:\r\n```python\r\nparsed=urlparse('http://address/mount/')\r\nurl='api/add_task'\r\nurljoin(parsed.geturl(), url)\r\n# ==> http://address/mount/api/add_task\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nImplementation of the REST interface between the workers and the server.\nrpc.py implements the client side of it, server.py implements the server side.\nSee :doc:`/central_scheduler` for more info.\n\"\"\"\nimport abc\nimport os\nimport json\nimport logging\nimport socket\nimport base64\n\nfrom urllib.parse import urljoin, urlencode, urlparse\nfrom urllib.request import urlopen, Request\nfrom urllib.error import URLError\n\nfrom tenacity import Retrying, wait_fixed, stop_after_attempt\nfrom luigi import configuration\nfrom luigi.scheduler import RPC_METHODS\n\nHAS_UNIX_SOCKET = True\nHAS_REQUESTS = True\n\n\ntry:\n import requests_unixsocket as requests\nexcept ImportError:\n HAS_UNIX_SOCKET = False\n try:\n import requests\n except ImportError:\n HAS_REQUESTS = False\n\n\nlogger = logging.getLogger('luigi-interface') # TODO: 'interface'?\n\n\ndef _urljoin(base, url):\n \"\"\"\n Join relative URLs to base URLs like urllib.parse.urljoin but support\n arbitrary URIs (esp. 
'http+unix://').\n \"\"\"\n parsed = urlparse(base)\n scheme = parsed.scheme\n return urlparse(\n urljoin(parsed._replace(scheme='http').geturl(), url)\n )._replace(scheme=scheme).geturl()\n\n\nclass RPCError(Exception):\n\n def __init__(self, message, sub_exception=None):\n super(RPCError, self).__init__(message)\n self.sub_exception = sub_exception\n\n\nclass _FetcherInterface(metaclass=abc.ABCMeta):\n @abc.abstractmethod\n def fetch(self, full_url, body, timeout):\n pass\n\n @abc.abstractmethod\n def close(self):\n pass\n\n\nclass URLLibFetcher(_FetcherInterface):\n raises = (URLError, socket.timeout)\n\n def _create_request(self, full_url, body=None):\n # when full_url contains basic auth info, extract it and set the Authorization header\n url = urlparse(full_url)\n if url.username:\n # base64 encoding of username:password\n auth = base64.b64encode('{}:{}'.format(url.username, url.password or '').encode('utf-8'))\n auth = auth.decode('utf-8')\n # update full_url and create a request object with the auth header set\n full_url = url._replace(netloc=url.netloc.split('@', 1)[-1]).geturl()\n req = Request(full_url)\n req.add_header('Authorization', 'Basic {}'.format(auth))\n else:\n req = Request(full_url)\n\n # add the request body\n if body:\n req.data = urlencode(body).encode('utf-8')\n\n return req\n\n def fetch(self, full_url, body, timeout):\n req = self._create_request(full_url, body=body)\n return urlopen(req, timeout=timeout).read().decode('utf-8')\n\n def close(self):\n pass\n\n\nclass RequestsFetcher(_FetcherInterface):\n def __init__(self):\n from requests import exceptions as requests_exceptions\n self.raises = requests_exceptions.RequestException\n self.session = requests.Session()\n self.process_id = os.getpid()\n\n def check_pid(self):\n # if the process id change changed from when the session was created\n # a new session needs to be setup since requests isn't multiprocessing safe.\n if os.getpid() != self.process_id:\n self.session = requests.Session()\n self.process_id = os.getpid()\n\n def fetch(self, full_url, body, timeout):\n self.check_pid()\n resp = self.session.post(full_url, data=body, timeout=timeout)\n resp.raise_for_status()\n return resp.text\n\n def close(self):\n self.session.close()\n\n\nclass RemoteScheduler:\n \"\"\"\n Scheduler proxy object. 
Talks to a RemoteSchedulerResponder.\n \"\"\"\n\n def __init__(self, url='http://localhost:8082/', connect_timeout=None):\n assert not url.startswith('http+unix://') or HAS_UNIX_SOCKET, (\n 'You need to install requests-unixsocket for Unix socket support.'\n )\n\n self._url = url.rstrip('/')\n config = configuration.get_config()\n\n if connect_timeout is None:\n connect_timeout = config.getfloat('core', 'rpc-connect-timeout', 10.0)\n self._connect_timeout = connect_timeout\n\n self._rpc_retry_attempts = config.getint('core', 'rpc-retry-attempts', 3)\n self._rpc_retry_wait = config.getint('core', 'rpc-retry-wait', 30)\n self._rpc_log_retries = config.getboolean('core', 'rpc-log-retries', True)\n\n if HAS_REQUESTS:\n self._fetcher = RequestsFetcher()\n else:\n self._fetcher = URLLibFetcher()\n\n def close(self):\n self._fetcher.close()\n\n def _get_retryer(self):\n def retry_logging(retry_state):\n if self._rpc_log_retries:\n logger.warning(\"Failed connecting to remote scheduler %r\", self._url, exc_info=True)\n logger.info(\"Retrying attempt %r of %r (max)\" % (retry_state.attempt_number + 1, self._rpc_retry_attempts))\n logger.info(\"Wait for %d seconds\" % self._rpc_retry_wait)\n\n return Retrying(wait=wait_fixed(self._rpc_retry_wait),\n stop=stop_after_attempt(self._rpc_retry_attempts),\n reraise=True,\n after=retry_logging)\n\n def _fetch(self, url_suffix, body):\n full_url = _urljoin(self._url, url_suffix)\n scheduler_retry = self._get_retryer()\n\n try:\n response = scheduler_retry(self._fetcher.fetch, full_url, body, self._connect_timeout)\n except self._fetcher.raises as e:\n raise RPCError(\n \"Errors (%d attempts) when connecting to remote scheduler %r\" %\n (self._rpc_retry_attempts, self._url),\n e\n )\n return response\n\n def _request(self, url, data, attempts=3, allow_null=True):\n body = {'data': json.dumps(data)}\n\n for _ in range(attempts):\n page = self._fetch(url, body)\n response = json.loads(page)[\"response\"]\n if allow_null or response is not None:\n return response\n raise RPCError(\"Received null response from remote scheduler %r\" % self._url)\n\n\nfor method_name, method in RPC_METHODS.items():\n setattr(RemoteScheduler, method_name, method)\n", "path": "luigi/rpc.py"}]} | 3,033 | 176 |
gh_patches_debug_24682 | rasdani/github-patches | git_diff | python-poetry__poetry-7975 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
poetry install does not honor `build-system.requires`
<!--
Hi there! Thank you for discovering and submitting an issue.
Before you submit this; let's make sure of a few things.
Please make sure the following boxes are ticked if they are correct.
If not, please try and fulfill these first.
-->
<!-- Checked checkbox should look like this: [x] -->
- [ ] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
<!--
Once those are done, if you're able to fill in the following list with your information,
it'd be very helpful to whoever handles the issue.
-->
- **OS version and name**: Ubuntu 20.04.4
- **Poetry version**: 1.2.0b2
- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**:
<details>
<summary>pyproject.toml</summary>
```
[tool.poetry]
name = "meow"
version = "0.1.0"
description = ""
authors = [""]
build = "build.py"
[tool.poetry.dependencies]
python = "^3.9"
SoundFile = "0.10.*"
crcmod = "^1.7"
librosa = ">=0.8.0"
numba = "^0.56.0"
protobuf = "3.14.0"
tensorboard = "^2.10.0"
torchaudio = {version = "=0.12.1+cu113", source="torch"}
torch = {version = "=1.12.1+cu113", source="torch"}
[[tool.poetry.source]]
name = "torch"
url = "https://download.pytorch.org/whl/cu113"
secondary = true
[tool.poetry.dev-dependencies]
[build-system]
requires = ["poetry-core>=1.0.0", "setuptools>=40.6.0", "wheel", "numpy", "pybind11"]
build-backend = "poetry.core.masonry.api"
```
</details>
## Issue
<!-- Now feel free to write your issue, but please be descriptive! Thanks again 🙌 ❤️ -->
Running `poetry build` with the above `pyproject.toml` does the right thing:
```
Preparing build environment with build-system requirements poetry-core>=1.0.0, setuptools>=40.6.0, wheel, numpy, pybind11
[...snip...]
```
Running `poetry install` fails because it attempts to build the project without installing `build-system.requires` packages.
I would have expected the install command to create an isolated environment, install the build requirements and perform the build in that isolated environment, and then install the produced artifact in my current environment.
</issue>
<code>
[start of src/poetry/masonry/builders/editable.py]
1 from __future__ import annotations
2
3 import csv
4 import hashlib
5 import json
6 import os
7
8 from base64 import urlsafe_b64encode
9 from pathlib import Path
10 from typing import TYPE_CHECKING
11
12 from poetry.core.constraints.version import Version
13 from poetry.core.masonry.builders.builder import Builder
14 from poetry.core.masonry.builders.sdist import SdistBuilder
15 from poetry.core.masonry.utils.package_include import PackageInclude
16
17 from poetry.utils._compat import WINDOWS
18 from poetry.utils._compat import decode
19 from poetry.utils.env import build_environment
20 from poetry.utils.helpers import is_dir_writable
21 from poetry.utils.pip import pip_install
22
23
24 if TYPE_CHECKING:
25 from cleo.io.io import IO
26
27 from poetry.poetry import Poetry
28 from poetry.utils.env import Env
29
30 SCRIPT_TEMPLATE = """\
31 #!{python}
32 import sys
33 from {module} import {callable_holder}
34
35 if __name__ == '__main__':
36 sys.exit({callable_}())
37 """
38
39 WINDOWS_CMD_TEMPLATE = """\
40 @echo off\r\n"{python}" "%~dp0\\{script}" %*\r\n
41 """
42
43
44 class EditableBuilder(Builder):
45 def __init__(self, poetry: Poetry, env: Env, io: IO) -> None:
46 self._poetry: Poetry
47 super().__init__(poetry)
48
49 self._env = env
50 self._io = io
51
52 def build(self, target_dir: Path | None = None) -> Path:
53 self._debug(
54 f" - Building package <c1>{self._package.name}</c1> in"
55 " <info>editable</info> mode"
56 )
57
58 if self._package.build_script:
59 if self._package.build_should_generate_setup():
60 self._debug(
61 " - <warning>Falling back on using a <b>setup.py</b></warning>"
62 )
63 self._setup_build()
64 path: Path = self._path
65 return path
66
67 self._run_build_script(self._package.build_script)
68
69 for removed in self._env.site_packages.remove_distribution_files(
70 distribution_name=self._package.name
71 ):
72 self._debug(
73 f" - Removed <c2>{removed.name}</c2> directory from"
74 f" <b>{removed.parent}</b>"
75 )
76
77 added_files = []
78 added_files += self._add_pth()
79 added_files += self._add_scripts()
80 self._add_dist_info(added_files)
81
82 path = self._path
83 return path
84
85 def _run_build_script(self, build_script: str) -> None:
86 with build_environment(poetry=self._poetry, env=self._env, io=self._io) as env:
87 self._debug(f" - Executing build script: <b>{build_script}</b>")
88 env.run("python", str(self._path.joinpath(build_script)), call=True)
89
90 def _setup_build(self) -> None:
91 builder = SdistBuilder(self._poetry)
92 setup = self._path / "setup.py"
93 has_setup = setup.exists()
94
95 if has_setup:
96 self._io.write_error_line(
97 "<warning>A setup.py file already exists. Using it.</warning>"
98 )
99 else:
100 with setup.open("w", encoding="utf-8") as f:
101 f.write(decode(builder.build_setup()))
102
103 try:
104 if self._env.pip_version < Version.from_parts(19, 0):
105 pip_install(self._path, self._env, upgrade=True, editable=True)
106 else:
107 # Temporarily rename pyproject.toml
108 renamed_pyproject = self._poetry.file.path.with_suffix(".tmp")
109 self._poetry.file.path.rename(renamed_pyproject)
110 try:
111 pip_install(self._path, self._env, upgrade=True, editable=True)
112 finally:
113 renamed_pyproject.rename(self._poetry.file.path)
114 finally:
115 if not has_setup:
116 os.remove(setup)
117
118 def _add_pth(self) -> list[Path]:
119 paths = {
120 include.base.resolve().as_posix()
121 for include in self._module.includes
122 if isinstance(include, PackageInclude)
123 and (include.is_module() or include.is_package())
124 }
125
126 content = "".join(decode(path + os.linesep) for path in paths)
127 pth_file = Path(self._module.name).with_suffix(".pth")
128
129 # remove any pre-existing pth files for this package
130 for file in self._env.site_packages.find(path=pth_file, writable_only=True):
131 self._debug(
132 f" - Removing existing <c2>{file.name}</c2> from <b>{file.parent}</b>"
133 f" for {self._poetry.file.path.parent}"
134 )
135 file.unlink(missing_ok=True)
136
137 try:
138 pth_file = self._env.site_packages.write_text(
139 pth_file, content, encoding="utf-8"
140 )
141 self._debug(
142 f" - Adding <c2>{pth_file.name}</c2> to <b>{pth_file.parent}</b> for"
143 f" {self._poetry.file.path.parent}"
144 )
145 return [pth_file]
146 except OSError:
147 # TODO: Replace with PermissionError
148 self._io.write_error_line(
149 f" - Failed to create <c2>{pth_file.name}</c2> for"
150 f" {self._poetry.file.path.parent}"
151 )
152 return []
153
154 def _add_scripts(self) -> list[Path]:
155 added = []
156 entry_points = self.convert_entry_points()
157
158 for scripts_path in self._env.script_dirs:
159 if is_dir_writable(path=scripts_path, create=True):
160 break
161 else:
162 self._io.write_error_line(
163 " - Failed to find a suitable script installation directory for"
164 f" {self._poetry.file.path.parent}"
165 )
166 return []
167
168 scripts = entry_points.get("console_scripts", [])
169 for script in scripts:
170 name, script = script.split(" = ")
171 module, callable_ = script.split(":")
172 callable_holder = callable_.split(".", 1)[0]
173
174 script_file = scripts_path.joinpath(name)
175 self._debug(
176 f" - Adding the <c2>{name}</c2> script to <b>{scripts_path}</b>"
177 )
178 with script_file.open("w", encoding="utf-8") as f:
179 f.write(
180 decode(
181 SCRIPT_TEMPLATE.format(
182 python=self._env.python,
183 module=module,
184 callable_holder=callable_holder,
185 callable_=callable_,
186 )
187 )
188 )
189
190 script_file.chmod(0o755)
191
192 added.append(script_file)
193
194 if WINDOWS:
195 cmd_script = script_file.with_suffix(".cmd")
196 cmd = WINDOWS_CMD_TEMPLATE.format(python=self._env.python, script=name)
197 self._debug(
198 f" - Adding the <c2>{cmd_script.name}</c2> script wrapper to"
199 f" <b>{scripts_path}</b>"
200 )
201
202 with cmd_script.open("w", encoding="utf-8") as f:
203 f.write(decode(cmd))
204
205 added.append(cmd_script)
206
207 return added
208
209 def _add_dist_info(self, added_files: list[Path]) -> None:
210 from poetry.core.masonry.builders.wheel import WheelBuilder
211
212 added_files = added_files[:]
213
214 builder = WheelBuilder(self._poetry)
215 dist_info = self._env.site_packages.mkdir(Path(builder.dist_info))
216
217 self._debug(
218 f" - Adding the <c2>{dist_info.name}</c2> directory to"
219 f" <b>{dist_info.parent}</b>"
220 )
221
222 with dist_info.joinpath("METADATA").open("w", encoding="utf-8") as f:
223 builder._write_metadata_file(f)
224
225 added_files.append(dist_info.joinpath("METADATA"))
226
227 with dist_info.joinpath("INSTALLER").open("w", encoding="utf-8") as f:
228 f.write("poetry")
229
230 added_files.append(dist_info.joinpath("INSTALLER"))
231
232 if self.convert_entry_points():
233 with dist_info.joinpath("entry_points.txt").open(
234 "w", encoding="utf-8"
235 ) as f:
236 builder._write_entry_points(f)
237
238 added_files.append(dist_info.joinpath("entry_points.txt"))
239
240 # write PEP 610 metadata
241 direct_url_json = dist_info.joinpath("direct_url.json")
242 direct_url_json.write_text(
243 json.dumps(
244 {
245 "dir_info": {"editable": True},
246 "url": self._poetry.file.path.parent.absolute().as_uri(),
247 }
248 )
249 )
250 added_files.append(direct_url_json)
251
252 record = dist_info.joinpath("RECORD")
253 with record.open("w", encoding="utf-8", newline="") as f:
254 csv_writer = csv.writer(f)
255 for path in added_files:
256 hash = self._get_file_hash(path)
257 size = path.stat().st_size
258 csv_writer.writerow((path, f"sha256={hash}", size))
259
260 # RECORD itself is recorded with no hash or size
261 csv_writer.writerow((record, "", ""))
262
263 def _get_file_hash(self, filepath: Path) -> str:
264 hashsum = hashlib.sha256()
265 with filepath.open("rb") as src:
266 while True:
267 buf = src.read(1024 * 8)
268 if not buf:
269 break
270 hashsum.update(buf)
271
272 src.seek(0)
273
274 return urlsafe_b64encode(hashsum.digest()).decode("ascii").rstrip("=")
275
276 def _debug(self, msg: str) -> None:
277 if self._io.is_debug():
278 self._io.write_line(msg)
279
[end of src/poetry/masonry/builders/editable.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/poetry/masonry/builders/editable.py b/src/poetry/masonry/builders/editable.py
--- a/src/poetry/masonry/builders/editable.py
+++ b/src/poetry/masonry/builders/editable.py
@@ -9,7 +9,6 @@
from pathlib import Path
from typing import TYPE_CHECKING
-from poetry.core.constraints.version import Version
from poetry.core.masonry.builders.builder import Builder
from poetry.core.masonry.builders.sdist import SdistBuilder
from poetry.core.masonry.utils.package_include import PackageInclude
@@ -101,16 +100,7 @@
f.write(decode(builder.build_setup()))
try:
- if self._env.pip_version < Version.from_parts(19, 0):
- pip_install(self._path, self._env, upgrade=True, editable=True)
- else:
- # Temporarily rename pyproject.toml
- renamed_pyproject = self._poetry.file.path.with_suffix(".tmp")
- self._poetry.file.path.rename(renamed_pyproject)
- try:
- pip_install(self._path, self._env, upgrade=True, editable=True)
- finally:
- renamed_pyproject.rename(self._poetry.file.path)
+ pip_install(self._path, self._env, upgrade=True, editable=True)
finally:
if not has_setup:
os.remove(setup)
| {"golden_diff": "diff --git a/src/poetry/masonry/builders/editable.py b/src/poetry/masonry/builders/editable.py\n--- a/src/poetry/masonry/builders/editable.py\n+++ b/src/poetry/masonry/builders/editable.py\n@@ -9,7 +9,6 @@\n from pathlib import Path\n from typing import TYPE_CHECKING\n \n-from poetry.core.constraints.version import Version\n from poetry.core.masonry.builders.builder import Builder\n from poetry.core.masonry.builders.sdist import SdistBuilder\n from poetry.core.masonry.utils.package_include import PackageInclude\n@@ -101,16 +100,7 @@\n f.write(decode(builder.build_setup()))\n \n try:\n- if self._env.pip_version < Version.from_parts(19, 0):\n- pip_install(self._path, self._env, upgrade=True, editable=True)\n- else:\n- # Temporarily rename pyproject.toml\n- renamed_pyproject = self._poetry.file.path.with_suffix(\".tmp\")\n- self._poetry.file.path.rename(renamed_pyproject)\n- try:\n- pip_install(self._path, self._env, upgrade=True, editable=True)\n- finally:\n- renamed_pyproject.rename(self._poetry.file.path)\n+ pip_install(self._path, self._env, upgrade=True, editable=True)\n finally:\n if not has_setup:\n os.remove(setup)\n", "issue": "poetry install does not honor `build-system.requires`\n<!--\r\n Hi there! Thank you for discovering and submitting an issue.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [ ] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n<!--\r\n Once those are done, if you're able to fill in the following list with your information,\r\n it'd be very helpful to whoever handles the issue.\r\n-->\r\n\r\n- **OS version and name**: Ubuntu 20.04.4\r\n- **Poetry version**: 1.2.0b2\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**:\r\n<details>\r\n<summary>pyproject.toml</summary>\r\n\r\n```\r\n[tool.poetry]\r\nname = \"meow\"\r\nversion = \"0.1.0\"\r\ndescription = \"\"\r\nauthors = [\"\"]\r\nbuild = \"build.py\"\r\n\r\n[tool.poetry.dependencies]\r\npython = \"^3.9\"\r\nSoundFile = \"0.10.*\"\r\ncrcmod = \"^1.7\"\r\nlibrosa = \">=0.8.0\"\r\nnumba = \"^0.56.0\"\r\nprotobuf = \"3.14.0\"\r\ntensorboard = \"^2.10.0\"\r\ntorchaudio = {version = \"=0.12.1+cu113\", source=\"torch\"}\r\ntorch = {version = \"=1.12.1+cu113\", source=\"torch\"}\r\n\r\n[[tool.poetry.source]]\r\nname = \"torch\"\r\nurl = \"https://download.pytorch.org/whl/cu113\"\r\nsecondary = true\r\n\r\n[tool.poetry.dev-dependencies]\r\n\r\n[build-system]\r\nrequires = [\"poetry-core>=1.0.0\", \"setuptools>=40.6.0\", \"wheel\", \"numpy\", \"pybind11\"]\r\nbuild-backend = \"poetry.core.masonry.api\"\r\n```\r\n\r\n</details>\r\n\r\n## Issue\r\n<!-- Now feel free to write your issue, but please be descriptive! 
Thanks again \ud83d\ude4c \u2764\ufe0f -->\r\nRunning `poetry build` with the above `pyproject.toml` does the right thing:\r\n```\r\nPreparing build environment with build-system requirements poetry-core>=1.0.0, setuptools>=40.6.0, wheel, numpy, pybind11\r\n[...snip...]\r\n```\r\n\r\nRunning `poetry install` fails because it attempts to build the project without installing `build-system.requires` packages.\r\n\r\nI would have expected the install command to create an isolated environment, install the build requirements and perform the build in that isolated environment, and then install the produced artifact in my current environment.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport csv\nimport hashlib\nimport json\nimport os\n\nfrom base64 import urlsafe_b64encode\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\n\nfrom poetry.core.constraints.version import Version\nfrom poetry.core.masonry.builders.builder import Builder\nfrom poetry.core.masonry.builders.sdist import SdistBuilder\nfrom poetry.core.masonry.utils.package_include import PackageInclude\n\nfrom poetry.utils._compat import WINDOWS\nfrom poetry.utils._compat import decode\nfrom poetry.utils.env import build_environment\nfrom poetry.utils.helpers import is_dir_writable\nfrom poetry.utils.pip import pip_install\n\n\nif TYPE_CHECKING:\n from cleo.io.io import IO\n\n from poetry.poetry import Poetry\n from poetry.utils.env import Env\n\nSCRIPT_TEMPLATE = \"\"\"\\\n#!{python}\nimport sys\nfrom {module} import {callable_holder}\n\nif __name__ == '__main__':\n sys.exit({callable_}())\n\"\"\"\n\nWINDOWS_CMD_TEMPLATE = \"\"\"\\\n@echo off\\r\\n\"{python}\" \"%~dp0\\\\{script}\" %*\\r\\n\n\"\"\"\n\n\nclass EditableBuilder(Builder):\n def __init__(self, poetry: Poetry, env: Env, io: IO) -> None:\n self._poetry: Poetry\n super().__init__(poetry)\n\n self._env = env\n self._io = io\n\n def build(self, target_dir: Path | None = None) -> Path:\n self._debug(\n f\" - Building package <c1>{self._package.name}</c1> in\"\n \" <info>editable</info> mode\"\n )\n\n if self._package.build_script:\n if self._package.build_should_generate_setup():\n self._debug(\n \" - <warning>Falling back on using a <b>setup.py</b></warning>\"\n )\n self._setup_build()\n path: Path = self._path\n return path\n\n self._run_build_script(self._package.build_script)\n\n for removed in self._env.site_packages.remove_distribution_files(\n distribution_name=self._package.name\n ):\n self._debug(\n f\" - Removed <c2>{removed.name}</c2> directory from\"\n f\" <b>{removed.parent}</b>\"\n )\n\n added_files = []\n added_files += self._add_pth()\n added_files += self._add_scripts()\n self._add_dist_info(added_files)\n\n path = self._path\n return path\n\n def _run_build_script(self, build_script: str) -> None:\n with build_environment(poetry=self._poetry, env=self._env, io=self._io) as env:\n self._debug(f\" - Executing build script: <b>{build_script}</b>\")\n env.run(\"python\", str(self._path.joinpath(build_script)), call=True)\n\n def _setup_build(self) -> None:\n builder = SdistBuilder(self._poetry)\n setup = self._path / \"setup.py\"\n has_setup = setup.exists()\n\n if has_setup:\n self._io.write_error_line(\n \"<warning>A setup.py file already exists. 
Using it.</warning>\"\n )\n else:\n with setup.open(\"w\", encoding=\"utf-8\") as f:\n f.write(decode(builder.build_setup()))\n\n try:\n if self._env.pip_version < Version.from_parts(19, 0):\n pip_install(self._path, self._env, upgrade=True, editable=True)\n else:\n # Temporarily rename pyproject.toml\n renamed_pyproject = self._poetry.file.path.with_suffix(\".tmp\")\n self._poetry.file.path.rename(renamed_pyproject)\n try:\n pip_install(self._path, self._env, upgrade=True, editable=True)\n finally:\n renamed_pyproject.rename(self._poetry.file.path)\n finally:\n if not has_setup:\n os.remove(setup)\n\n def _add_pth(self) -> list[Path]:\n paths = {\n include.base.resolve().as_posix()\n for include in self._module.includes\n if isinstance(include, PackageInclude)\n and (include.is_module() or include.is_package())\n }\n\n content = \"\".join(decode(path + os.linesep) for path in paths)\n pth_file = Path(self._module.name).with_suffix(\".pth\")\n\n # remove any pre-existing pth files for this package\n for file in self._env.site_packages.find(path=pth_file, writable_only=True):\n self._debug(\n f\" - Removing existing <c2>{file.name}</c2> from <b>{file.parent}</b>\"\n f\" for {self._poetry.file.path.parent}\"\n )\n file.unlink(missing_ok=True)\n\n try:\n pth_file = self._env.site_packages.write_text(\n pth_file, content, encoding=\"utf-8\"\n )\n self._debug(\n f\" - Adding <c2>{pth_file.name}</c2> to <b>{pth_file.parent}</b> for\"\n f\" {self._poetry.file.path.parent}\"\n )\n return [pth_file]\n except OSError:\n # TODO: Replace with PermissionError\n self._io.write_error_line(\n f\" - Failed to create <c2>{pth_file.name}</c2> for\"\n f\" {self._poetry.file.path.parent}\"\n )\n return []\n\n def _add_scripts(self) -> list[Path]:\n added = []\n entry_points = self.convert_entry_points()\n\n for scripts_path in self._env.script_dirs:\n if is_dir_writable(path=scripts_path, create=True):\n break\n else:\n self._io.write_error_line(\n \" - Failed to find a suitable script installation directory for\"\n f\" {self._poetry.file.path.parent}\"\n )\n return []\n\n scripts = entry_points.get(\"console_scripts\", [])\n for script in scripts:\n name, script = script.split(\" = \")\n module, callable_ = script.split(\":\")\n callable_holder = callable_.split(\".\", 1)[0]\n\n script_file = scripts_path.joinpath(name)\n self._debug(\n f\" - Adding the <c2>{name}</c2> script to <b>{scripts_path}</b>\"\n )\n with script_file.open(\"w\", encoding=\"utf-8\") as f:\n f.write(\n decode(\n SCRIPT_TEMPLATE.format(\n python=self._env.python,\n module=module,\n callable_holder=callable_holder,\n callable_=callable_,\n )\n )\n )\n\n script_file.chmod(0o755)\n\n added.append(script_file)\n\n if WINDOWS:\n cmd_script = script_file.with_suffix(\".cmd\")\n cmd = WINDOWS_CMD_TEMPLATE.format(python=self._env.python, script=name)\n self._debug(\n f\" - Adding the <c2>{cmd_script.name}</c2> script wrapper to\"\n f\" <b>{scripts_path}</b>\"\n )\n\n with cmd_script.open(\"w\", encoding=\"utf-8\") as f:\n f.write(decode(cmd))\n\n added.append(cmd_script)\n\n return added\n\n def _add_dist_info(self, added_files: list[Path]) -> None:\n from poetry.core.masonry.builders.wheel import WheelBuilder\n\n added_files = added_files[:]\n\n builder = WheelBuilder(self._poetry)\n dist_info = self._env.site_packages.mkdir(Path(builder.dist_info))\n\n self._debug(\n f\" - Adding the <c2>{dist_info.name}</c2> directory to\"\n f\" <b>{dist_info.parent}</b>\"\n )\n\n with dist_info.joinpath(\"METADATA\").open(\"w\", encoding=\"utf-8\") as f:\n 
builder._write_metadata_file(f)\n\n added_files.append(dist_info.joinpath(\"METADATA\"))\n\n with dist_info.joinpath(\"INSTALLER\").open(\"w\", encoding=\"utf-8\") as f:\n f.write(\"poetry\")\n\n added_files.append(dist_info.joinpath(\"INSTALLER\"))\n\n if self.convert_entry_points():\n with dist_info.joinpath(\"entry_points.txt\").open(\n \"w\", encoding=\"utf-8\"\n ) as f:\n builder._write_entry_points(f)\n\n added_files.append(dist_info.joinpath(\"entry_points.txt\"))\n\n # write PEP 610 metadata\n direct_url_json = dist_info.joinpath(\"direct_url.json\")\n direct_url_json.write_text(\n json.dumps(\n {\n \"dir_info\": {\"editable\": True},\n \"url\": self._poetry.file.path.parent.absolute().as_uri(),\n }\n )\n )\n added_files.append(direct_url_json)\n\n record = dist_info.joinpath(\"RECORD\")\n with record.open(\"w\", encoding=\"utf-8\", newline=\"\") as f:\n csv_writer = csv.writer(f)\n for path in added_files:\n hash = self._get_file_hash(path)\n size = path.stat().st_size\n csv_writer.writerow((path, f\"sha256={hash}\", size))\n\n # RECORD itself is recorded with no hash or size\n csv_writer.writerow((record, \"\", \"\"))\n\n def _get_file_hash(self, filepath: Path) -> str:\n hashsum = hashlib.sha256()\n with filepath.open(\"rb\") as src:\n while True:\n buf = src.read(1024 * 8)\n if not buf:\n break\n hashsum.update(buf)\n\n src.seek(0)\n\n return urlsafe_b64encode(hashsum.digest()).decode(\"ascii\").rstrip(\"=\")\n\n def _debug(self, msg: str) -> None:\n if self._io.is_debug():\n self._io.write_line(msg)\n", "path": "src/poetry/masonry/builders/editable.py"}]} | 4,083 | 306 |
gh_patches_debug_27261 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-27396 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Eporner stuck at invoked download
<!--
######################################################################
WARNING!
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
######################################################################
-->
## Checklist
<!--
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.07.28. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
- Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
- Read bugs section in FAQ: http://yt-dl.org/reporting
- Finally, put x into all relevant boxes (like this [x])
-->
- [x] I'm reporting a broken site support issue
- [x] I've verified that I'm running youtube-dl version **2020.07.28**
- [x] I've checked that all provided URLs are alive and playable in a browser
- [x] I've checked that all URLs and arguments with special characters are properly quoted or escaped
- [x] I've searched the bugtracker for similar bug reports including closed ones
- [x] I've read bugs section in FAQ
## Verbose log
<!--
Provide the complete verbose output of youtube-dl that clearly demonstrates the problem.
Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <your command line>`), copy the WHOLE output and insert it below. It should look similar to this:
[debug] System config: []
[debug] User config: []
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
[debug] youtube-dl version 2020.07.28
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
[debug] Proxy map: {}
<more lines>
-->
```
PASTE VERBOSE LOG HERE
```
ubuntu@ip-172-26-9-219:~$ youtube-dl --verbose https://www.eporner.com/hd-porn/HBgBgaViIbY/Busty-Tiny-Teen-With-Angel-Body-Gets-Fucked/
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: [u'--verbose', u'https://www.eporner.com/hd-porn/HBgBgaViIbY/Busty-Tiny-Teen-With-Angel-Body-Gets-Fucked/']
[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2020.07.28
[debug] Python version 2.7.17 (CPython) - Linux-5.3.0-1032-aws-x86_64-with-Ubuntu-18.04-bionic
[debug] exe versions: none
[debug] Proxy map: {}
[Eporner] Busty-Tiny-Teen-With-Angel-Body-Gets-Fucked: Downloading webpage
[Eporner] Busty-Tiny-Teen-With-Angel-Body-Gets-Fucked: Downloading video JSON
WARNING: unable to extract view count; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
[debug] Default format spec: best/bestvideo+bestaudio
[debug] Invoking downloader on u'https://cache1-ca-cdn.eporner.com/bb6353b496c613f4904fe9760409625d/5f23b8c504f800/2361178-1080p.mp4'
## Description
<!--
Provide an explanation of your issue in an arbitrary form. Please make sure the description is worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient. Provide any additional information, suggested solution and as much context and examples as possible.
If work on your issue requires account credentials please provide them or explain how one can obtain them.
-->
WRITE DESCRIPTION HERE
Can't download any video from eporner it use YouTube extractor not using eporner.py
</issue>
<code>
[start of youtube_dl/extractor/eporner.py]
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5
6 from .common import InfoExtractor
7 from ..utils import (
8 encode_base_n,
9 ExtractorError,
10 int_or_none,
11 merge_dicts,
12 parse_duration,
13 str_to_int,
14 url_or_none,
15 )
16
17
18 class EpornerIE(InfoExtractor):
19 _VALID_URL = r'https?://(?:www\.)?eporner\.com/(?:hd-porn|embed)/(?P<id>\w+)(?:/(?P<display_id>[\w-]+))?'
20 _TESTS = [{
21 'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/',
22 'md5': '39d486f046212d8e1b911c52ab4691f8',
23 'info_dict': {
24 'id': 'qlDUmNsj6VS',
25 'display_id': 'Infamous-Tiffany-Teen-Strip-Tease-Video',
26 'ext': 'mp4',
27 'title': 'Infamous Tiffany Teen Strip Tease Video',
28 'description': 'md5:764f39abf932daafa37485eb46efa152',
29 'timestamp': 1232520922,
30 'upload_date': '20090121',
31 'duration': 1838,
32 'view_count': int,
33 'age_limit': 18,
34 },
35 'params': {
36 'proxy': '127.0.0.1:8118'
37 }
38 }, {
39 # New (May 2016) URL layout
40 'url': 'http://www.eporner.com/hd-porn/3YRUtzMcWn0/Star-Wars-XXX-Parody/',
41 'only_matching': True,
42 }, {
43 'url': 'http://www.eporner.com/hd-porn/3YRUtzMcWn0',
44 'only_matching': True,
45 }, {
46 'url': 'http://www.eporner.com/hd-porn/3YRUtzMcWn0',
47 'only_matching': True,
48 }]
49
50 def _real_extract(self, url):
51 mobj = re.match(self._VALID_URL, url)
52 video_id = mobj.group('id')
53 display_id = mobj.group('display_id') or video_id
54
55 webpage, urlh = self._download_webpage_handle(url, display_id)
56
57 video_id = self._match_id(urlh.geturl())
58
59 hash = self._search_regex(
60 r'hash\s*:\s*["\']([\da-f]{32})', webpage, 'hash')
61
62 title = self._og_search_title(webpage, default=None) or self._html_search_regex(
63 r'<title>(.+?) - EPORNER', webpage, 'title')
64
65 # Reverse engineered from vjs.js
66 def calc_hash(s):
67 return ''.join((encode_base_n(int(s[lb:lb + 8], 16), 36) for lb in range(0, 32, 8)))
68
69 video = self._download_json(
70 'http://www.eporner.com/xhr/video/%s' % video_id,
71 display_id, note='Downloading video JSON',
72 query={
73 'hash': calc_hash(hash),
74 'device': 'generic',
75 'domain': 'www.eporner.com',
76 'fallback': 'false',
77 })
78
79 if video.get('available') is False:
80 raise ExtractorError(
81 '%s said: %s' % (self.IE_NAME, video['message']), expected=True)
82
83 sources = video['sources']
84
85 formats = []
86 for kind, formats_dict in sources.items():
87 if not isinstance(formats_dict, dict):
88 continue
89 for format_id, format_dict in formats_dict.items():
90 if not isinstance(format_dict, dict):
91 continue
92 src = url_or_none(format_dict.get('src'))
93 if not src or not src.startswith('http'):
94 continue
95 if kind == 'hls':
96 formats.extend(self._extract_m3u8_formats(
97 src, display_id, 'mp4', entry_protocol='m3u8_native',
98 m3u8_id=kind, fatal=False))
99 else:
100 height = int_or_none(self._search_regex(
101 r'(\d+)[pP]', format_id, 'height', default=None))
102 fps = int_or_none(self._search_regex(
103 r'(\d+)fps', format_id, 'fps', default=None))
104
105 formats.append({
106 'url': src,
107 'format_id': format_id,
108 'height': height,
109 'fps': fps,
110 })
111 self._sort_formats(formats)
112
113 json_ld = self._search_json_ld(webpage, display_id, default={})
114
115 duration = parse_duration(self._html_search_meta(
116 'duration', webpage, default=None))
117 view_count = str_to_int(self._search_regex(
118 r'id="cinemaviews">\s*([0-9,]+)\s*<small>views',
119 webpage, 'view count', fatal=False))
120
121 return merge_dicts(json_ld, {
122 'id': video_id,
123 'display_id': display_id,
124 'title': title,
125 'duration': duration,
126 'view_count': view_count,
127 'formats': formats,
128 'age_limit': 18,
129 })
130
[end of youtube_dl/extractor/eporner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/youtube_dl/extractor/eporner.py b/youtube_dl/extractor/eporner.py
--- a/youtube_dl/extractor/eporner.py
+++ b/youtube_dl/extractor/eporner.py
@@ -16,7 +16,7 @@
class EpornerIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?eporner\.com/(?:hd-porn|embed)/(?P<id>\w+)(?:/(?P<display_id>[\w-]+))?'
+ _VALID_URL = r'https?://(?:www\.)?eporner\.com/(?:(?:hd-porn|embed)/|video-)(?P<id>\w+)(?:/(?P<display_id>[\w-]+))?'
_TESTS = [{
'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/',
'md5': '39d486f046212d8e1b911c52ab4691f8',
@@ -45,6 +45,9 @@
}, {
'url': 'http://www.eporner.com/hd-porn/3YRUtzMcWn0',
'only_matching': True,
+ }, {
+ 'url': 'https://www.eporner.com/video-FJsA19J3Y3H/one-of-the-greats/',
+ 'only_matching': True,
}]
def _real_extract(self, url):
@@ -57,7 +60,7 @@
video_id = self._match_id(urlh.geturl())
hash = self._search_regex(
- r'hash\s*:\s*["\']([\da-f]{32})', webpage, 'hash')
+ r'hash\s*[:=]\s*["\']([\da-f]{32})', webpage, 'hash')
title = self._og_search_title(webpage, default=None) or self._html_search_regex(
r'<title>(.+?) - EPORNER', webpage, 'title')
| {"golden_diff": "diff --git a/youtube_dl/extractor/eporner.py b/youtube_dl/extractor/eporner.py\n--- a/youtube_dl/extractor/eporner.py\n+++ b/youtube_dl/extractor/eporner.py\n@@ -16,7 +16,7 @@\n \n \n class EpornerIE(InfoExtractor):\n- _VALID_URL = r'https?://(?:www\\.)?eporner\\.com/(?:hd-porn|embed)/(?P<id>\\w+)(?:/(?P<display_id>[\\w-]+))?'\n+ _VALID_URL = r'https?://(?:www\\.)?eporner\\.com/(?:(?:hd-porn|embed)/|video-)(?P<id>\\w+)(?:/(?P<display_id>[\\w-]+))?'\n _TESTS = [{\n 'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/',\n 'md5': '39d486f046212d8e1b911c52ab4691f8',\n@@ -45,6 +45,9 @@\n }, {\n 'url': 'http://www.eporner.com/hd-porn/3YRUtzMcWn0',\n 'only_matching': True,\n+ }, {\n+ 'url': 'https://www.eporner.com/video-FJsA19J3Y3H/one-of-the-greats/',\n+ 'only_matching': True,\n }]\n \n def _real_extract(self, url):\n@@ -57,7 +60,7 @@\n video_id = self._match_id(urlh.geturl())\n \n hash = self._search_regex(\n- r'hash\\s*:\\s*[\"\\']([\\da-f]{32})', webpage, 'hash')\n+ r'hash\\s*[:=]\\s*[\"\\']([\\da-f]{32})', webpage, 'hash')\n \n title = self._og_search_title(webpage, default=None) or self._html_search_regex(\n r'<title>(.+?) - EPORNER', webpage, 'title')\n", "issue": "Eporner stuck at invoked download\n<!--\r\n\r\n######################################################################\r\n WARNING!\r\n IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE\r\n######################################################################\r\n\r\n-->\r\n\r\n\r\n## Checklist\r\n\r\n<!--\r\nCarefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:\r\n- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.07.28. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.\r\n- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.\r\n- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.\r\n- Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.\r\n- Read bugs section in FAQ: http://yt-dl.org/reporting\r\n- Finally, put x into all relevant boxes (like this [x])\r\n-->\r\n\r\n- [x] I'm reporting a broken site support issue\r\n- [x] I've verified that I'm running youtube-dl version **2020.07.28**\r\n- [x] I've checked that all provided URLs are alive and playable in a browser\r\n- [x] I've checked that all URLs and arguments with special characters are properly quoted or escaped\r\n- [x] I've searched the bugtracker for similar bug reports including closed ones\r\n- [x] I've read bugs section in FAQ\r\n\r\n\r\n## Verbose log\r\n\r\n\r\n<!--\r\nProvide the complete verbose output of youtube-dl that clearly demonstrates the problem.\r\nAdd the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <your command line>`), copy the WHOLE output and insert it below. 
It should look similar to this:\r\n [debug] System config: []\r\n [debug] User config: []\r\n [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']\r\n [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251\r\n [debug] youtube-dl version 2020.07.28\r\n [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2\r\n [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4\r\n [debug] Proxy map: {}\r\n <more lines>\r\n-->\r\n\r\n```\r\nPASTE VERBOSE LOG HERE\r\n```\r\nubuntu@ip-172-26-9-219:~$ youtube-dl --verbose https://www.eporner.com/hd-porn/HBgBgaViIbY/Busty-Tiny-Teen-With-Angel-Body-Gets-Fucked/\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: [u'--verbose', u'https://www.eporner.com/hd-porn/HBgBgaViIbY/Busty-Tiny-Teen-With-Angel-Body-Gets-Fucked/']\r\n[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8\r\n[debug] youtube-dl version 2020.07.28\r\n[debug] Python version 2.7.17 (CPython) - Linux-5.3.0-1032-aws-x86_64-with-Ubuntu-18.04-bionic\r\n[debug] exe versions: none\r\n[debug] Proxy map: {}\r\n[Eporner] Busty-Tiny-Teen-With-Angel-Body-Gets-Fucked: Downloading webpage\r\n[Eporner] Busty-Tiny-Teen-With-Angel-Body-Gets-Fucked: Downloading video JSON\r\nWARNING: unable to extract view count; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.\r\n[debug] Default format spec: best/bestvideo+bestaudio\r\n[debug] Invoking downloader on u'https://cache1-ca-cdn.eporner.com/bb6353b496c613f4904fe9760409625d/5f23b8c504f800/2361178-1080p.mp4'\r\n\r\n## Description\r\n\r\n<!--\r\nProvide an explanation of your issue in an arbitrary form. Please make sure the description is worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient. 
Provide any additional information, suggested solution and as much context and examples as possible.\r\nIf work on your issue requires account credentials please provide them or explain how one can obtain them.\r\n-->\r\n\r\nWRITE DESCRIPTION HERE\r\nCan't download any video from eporner it use YouTube extractor not using eporner.py\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n encode_base_n,\n ExtractorError,\n int_or_none,\n merge_dicts,\n parse_duration,\n str_to_int,\n url_or_none,\n)\n\n\nclass EpornerIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?eporner\\.com/(?:hd-porn|embed)/(?P<id>\\w+)(?:/(?P<display_id>[\\w-]+))?'\n _TESTS = [{\n 'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/',\n 'md5': '39d486f046212d8e1b911c52ab4691f8',\n 'info_dict': {\n 'id': 'qlDUmNsj6VS',\n 'display_id': 'Infamous-Tiffany-Teen-Strip-Tease-Video',\n 'ext': 'mp4',\n 'title': 'Infamous Tiffany Teen Strip Tease Video',\n 'description': 'md5:764f39abf932daafa37485eb46efa152',\n 'timestamp': 1232520922,\n 'upload_date': '20090121',\n 'duration': 1838,\n 'view_count': int,\n 'age_limit': 18,\n },\n 'params': {\n 'proxy': '127.0.0.1:8118'\n }\n }, {\n # New (May 2016) URL layout\n 'url': 'http://www.eporner.com/hd-porn/3YRUtzMcWn0/Star-Wars-XXX-Parody/',\n 'only_matching': True,\n }, {\n 'url': 'http://www.eporner.com/hd-porn/3YRUtzMcWn0',\n 'only_matching': True,\n }, {\n 'url': 'http://www.eporner.com/hd-porn/3YRUtzMcWn0',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n video_id = mobj.group('id')\n display_id = mobj.group('display_id') or video_id\n\n webpage, urlh = self._download_webpage_handle(url, display_id)\n\n video_id = self._match_id(urlh.geturl())\n\n hash = self._search_regex(\n r'hash\\s*:\\s*[\"\\']([\\da-f]{32})', webpage, 'hash')\n\n title = self._og_search_title(webpage, default=None) or self._html_search_regex(\n r'<title>(.+?) 
- EPORNER', webpage, 'title')\n\n # Reverse engineered from vjs.js\n def calc_hash(s):\n return ''.join((encode_base_n(int(s[lb:lb + 8], 16), 36) for lb in range(0, 32, 8)))\n\n video = self._download_json(\n 'http://www.eporner.com/xhr/video/%s' % video_id,\n display_id, note='Downloading video JSON',\n query={\n 'hash': calc_hash(hash),\n 'device': 'generic',\n 'domain': 'www.eporner.com',\n 'fallback': 'false',\n })\n\n if video.get('available') is False:\n raise ExtractorError(\n '%s said: %s' % (self.IE_NAME, video['message']), expected=True)\n\n sources = video['sources']\n\n formats = []\n for kind, formats_dict in sources.items():\n if not isinstance(formats_dict, dict):\n continue\n for format_id, format_dict in formats_dict.items():\n if not isinstance(format_dict, dict):\n continue\n src = url_or_none(format_dict.get('src'))\n if not src or not src.startswith('http'):\n continue\n if kind == 'hls':\n formats.extend(self._extract_m3u8_formats(\n src, display_id, 'mp4', entry_protocol='m3u8_native',\n m3u8_id=kind, fatal=False))\n else:\n height = int_or_none(self._search_regex(\n r'(\\d+)[pP]', format_id, 'height', default=None))\n fps = int_or_none(self._search_regex(\n r'(\\d+)fps', format_id, 'fps', default=None))\n\n formats.append({\n 'url': src,\n 'format_id': format_id,\n 'height': height,\n 'fps': fps,\n })\n self._sort_formats(formats)\n\n json_ld = self._search_json_ld(webpage, display_id, default={})\n\n duration = parse_duration(self._html_search_meta(\n 'duration', webpage, default=None))\n view_count = str_to_int(self._search_regex(\n r'id=\"cinemaviews\">\\s*([0-9,]+)\\s*<small>views',\n webpage, 'view count', fatal=False))\n\n return merge_dicts(json_ld, {\n 'id': video_id,\n 'display_id': display_id,\n 'title': title,\n 'duration': duration,\n 'view_count': view_count,\n 'formats': formats,\n 'age_limit': 18,\n })\n", "path": "youtube_dl/extractor/eporner.py"}]} | 3,241 | 493 |
gh_patches_debug_16755 | rasdani/github-patches | git_diff | scrapy__scrapy-5299 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tests failing with new Sybil
Sybil 3.0.0 was just released, and at least the following change breaks our tests: `CodeBlockParser has been renamed to PythonCodeBlockParser`
```python-traceback
docs/conftest.py:24: in <module>
CodeBlockParser(future_imports=['print_function']),
E TypeError: __init__() got an unexpected keyword argument 'future_imports'
```
</issue>
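For reference, the rename can be absorbed with a version-tolerant import; the sketch below assumes (as the traceback suggests) that only the class name changed between Sybil releases, which is also the approach the accepted patch further down takes:

```python
# Compatibility sketch: use the new parser name when available (Sybil >= 3.0),
# fall back to the old name on older releases.
try:
    from sybil.parsers.codeblock import PythonCodeBlockParser
except ImportError:
    from sybil.parsers.codeblock import CodeBlockParser as PythonCodeBlockParser
```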
<code>
[start of docs/conftest.py]
1 import os
2 from doctest import ELLIPSIS, NORMALIZE_WHITESPACE
3
4 from scrapy.http.response.html import HtmlResponse
5 from sybil import Sybil
6 from sybil.parsers.codeblock import CodeBlockParser
7 from sybil.parsers.doctest import DocTestParser
8 from sybil.parsers.skip import skip
9
10
11 def load_response(url, filename):
12 input_path = os.path.join(os.path.dirname(__file__), '_tests', filename)
13 with open(input_path, 'rb') as input_file:
14 return HtmlResponse(url, body=input_file.read())
15
16
17 def setup(namespace):
18 namespace['load_response'] = load_response
19
20
21 pytest_collect_file = Sybil(
22 parsers=[
23 DocTestParser(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE),
24 CodeBlockParser(future_imports=['print_function']),
25 skip,
26 ],
27 pattern='*.rst',
28 setup=setup,
29 ).pytest()
30
[end of docs/conftest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conftest.py b/docs/conftest.py
--- a/docs/conftest.py
+++ b/docs/conftest.py
@@ -3,7 +3,11 @@
from scrapy.http.response.html import HtmlResponse
from sybil import Sybil
-from sybil.parsers.codeblock import CodeBlockParser
+try:
+ # >2.0.1
+ from sybil.parsers.codeblock import PythonCodeBlockParser
+except ImportError:
+ from sybil.parsers.codeblock import CodeBlockParser as PythonCodeBlockParser
from sybil.parsers.doctest import DocTestParser
from sybil.parsers.skip import skip
@@ -21,7 +25,7 @@
pytest_collect_file = Sybil(
parsers=[
DocTestParser(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE),
- CodeBlockParser(future_imports=['print_function']),
+ PythonCodeBlockParser(future_imports=['print_function']),
skip,
],
pattern='*.rst',
| {"golden_diff": "diff --git a/docs/conftest.py b/docs/conftest.py\n--- a/docs/conftest.py\n+++ b/docs/conftest.py\n@@ -3,7 +3,11 @@\n \n from scrapy.http.response.html import HtmlResponse\n from sybil import Sybil\n-from sybil.parsers.codeblock import CodeBlockParser\n+try:\n+ # >2.0.1\n+ from sybil.parsers.codeblock import PythonCodeBlockParser\n+except ImportError:\n+ from sybil.parsers.codeblock import CodeBlockParser as PythonCodeBlockParser\n from sybil.parsers.doctest import DocTestParser\n from sybil.parsers.skip import skip\n \n@@ -21,7 +25,7 @@\n pytest_collect_file = Sybil(\n parsers=[\n DocTestParser(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE),\n- CodeBlockParser(future_imports=['print_function']),\n+ PythonCodeBlockParser(future_imports=['print_function']),\n skip,\n ],\n pattern='*.rst',\n", "issue": "Tests failing with new Sybil\nSybil 3.0.0 was just released, and at least the following change breaks our tests: `CodeBlockParser has been renamed to PythonCodeBlockParser`\r\n\r\n```python-traceback\r\ndocs/conftest.py:24: in <module>\r\n CodeBlockParser(future_imports=['print_function']),\r\nE TypeError: __init__() got an unexpected keyword argument 'future_imports'\r\n```\n", "before_files": [{"content": "import os\nfrom doctest import ELLIPSIS, NORMALIZE_WHITESPACE\n\nfrom scrapy.http.response.html import HtmlResponse\nfrom sybil import Sybil\nfrom sybil.parsers.codeblock import CodeBlockParser\nfrom sybil.parsers.doctest import DocTestParser\nfrom sybil.parsers.skip import skip\n\n\ndef load_response(url, filename):\n input_path = os.path.join(os.path.dirname(__file__), '_tests', filename)\n with open(input_path, 'rb') as input_file:\n return HtmlResponse(url, body=input_file.read())\n\n\ndef setup(namespace):\n namespace['load_response'] = load_response\n\n\npytest_collect_file = Sybil(\n parsers=[\n DocTestParser(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE),\n CodeBlockParser(future_imports=['print_function']),\n skip,\n ],\n pattern='*.rst',\n setup=setup,\n).pytest()\n", "path": "docs/conftest.py"}]} | 872 | 216 |
gh_patches_debug_40430 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1586 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: portenf_sa_gov_au reporting incorrect dates
### I Have A Problem With:
A specific source
### What's Your Problem
The portenf_sa_gov_au sensor has been reporting incorrectly since it updated itself on 24 December 2023 (I can see this from the HA logs). It appears that when there is 1 week or less left in the month, "main-month" switches to the coming month and "other-month" becomes the current month.
Because of this, the integration reports the current collection next month and the next collections as in the past (and hides them).
The fix in #1110 by @5ila5 partly addresses the problem but it was not foreseeable to him that EOM would be treated this way. @5ila5 also noted that this might be an issue in that closed issue.
### Source (if relevant)
portenf_sa_gov_au
### Logs
```Shell
Output of test_sources.py:
Testing source portenf_sa_gov_au ...
found 8 entries for Broadview, Regency Road, 565
2024-01-26 : general-waste bin [mdi:trash-can]
2024-01-26 : recycling bin [mdi:recycle]
2023-12-02 : general-waste bin [mdi:trash-can]
2023-12-02 : organics bin [mdi:leaf]
2023-12-09 : general-waste bin [mdi:trash-can]
2023-12-09 : recycling bin [mdi:recycle]
2023-12-16 : general-waste bin [mdi:trash-can]
2023-12-16 : organics bin [mdi:leaf]
found 8 entries for 48 Floriedale Rd
2024-01-26 : general-waste bin [mdi:trash-can]
2024-01-26 : recycling bin [mdi:recycle]
2023-12-02 : general-waste bin [mdi:trash-can]
2023-12-02 : organics bin [mdi:leaf]
2023-12-09 : general-waste bin [mdi:trash-can]
2023-12-09 : recycling bin [mdi:recycle]
2023-12-16 : general-waste bin [mdi:trash-can]
2023-12-16 : organics bin [mdi:leaf]
found 8 entries for 24 Margaret Terrace
2024-01-28 : general-waste bin [mdi:trash-can]
2024-01-28 : organics bin [mdi:leaf]
2023-12-04 : general-waste bin [mdi:trash-can]
2023-12-04 : recycling bin [mdi:recycle]
2023-12-11 : general-waste bin [mdi:trash-can]
2023-12-11 : organics bin [mdi:leaf]
2023-12-18 : general-waste bin [mdi:trash-can]
2023-12-18 : recycling bin [mdi:recycle]
found 8 entries for Addison Road 91 with unit
2024-01-28 : general-waste bin [mdi:trash-can]
2024-01-28 : organics bin [mdi:leaf]
2023-12-04 : general-waste bin [mdi:trash-can]
2023-12-04 : recycling bin [mdi:recycle]
2023-12-11 : general-waste bin [mdi:trash-can]
2023-12-11 : organics bin [mdi:leaf]
2023-12-18 : general-waste bin [mdi:trash-can]
2023-12-18 : recycling bin [mdi:recycle]
```
### Relevant Configuration
_No response_
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
</issue>
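For context, a minimal sketch of the month-disambiguation idea (assuming a calendar header of the form "December - January 2024"; the actual fix further down additionally inspects the calendar's "today" cell before choosing):

```python
from datetime import datetime

header = "December - January 2024"  # assumed header format from the issue
from_month = header.split("-")[0].strip()
to_month, year = header.split("-")[1].strip().split(" ")

# Near the end of a month the site can flip which side of the header is
# "main-month", so pick the side that matches today's month name.
if datetime.now().strftime("%B") == to_month:
    main_month, other_month = to_month, from_month
else:
    main_month, other_month = from_month, to_month
```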
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/portenf_sa_gov_au.py]
1 import datetime
2 import logging
3 import re
4
5 import requests
6 import urllib3
7 from bs4 import BeautifulSoup
8 from waste_collection_schedule import Collection # type: ignore[attr-defined]
9
10 # With verify=True the POST fails due to a SSLCertVerificationError.
11 # Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:
12 # https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings
13 # https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl
14 # These two lines are used to suppress the InsecureRequestWarning when using verify=False
15 urllib3.disable_warnings()
16
17 TITLE = "Port Adelaide Enfield, South Australia"
18 DESCRIPTION = "Source for City of Port Adelaide Enfield, South Australia."
19 URL = "https://ecouncil.portenf.sa.gov.au/"
20 TEST_CASES = {
21 "Broadview, Regency Road, 565 ": {
22 "suburb": "Broadview",
23 "street": "Regency Road",
24 "house_number": 565,
25 "unit_number": "",
26 },
27 "48 Floriedale Rd ": {
28 "suburb": "Greenacres",
29 "street": "Floriedale Rd",
30 "house_number": "48",
31 },
32 "24 Margaret Terrace": {
33 "suburb": "Rosewater",
34 "street": "Margaret Terrace",
35 "house_number": "24",
36 },
37 "Addison Road 91 with unit": {
38 "suburb": "Rosewater",
39 "street": "Addison Road",
40 "house_number": 91,
41 "unit_number": 2,
42 },
43 }
44
45 ICON_MAP = {
46 "general-waste bin": "mdi:trash-can",
47 "organics bin": "mdi:leaf",
48 "recycling bin": "mdi:recycle",
49 }
50
51 LOGGER = logging.getLogger(__name__)
52
53 API_URL = "https://ecouncil.portenf.sa.gov.au/public/propertywastedates/public.aspx"
54
55
56 class Source:
57 def __init__(
58 self,
59 suburb: str,
60 street: str,
61 house_number: str | int,
62 unit_number: str | int = "",
63 ):
64 self._suburb: str = suburb
65 self._street: str = street
66 self._house_number: str = str(house_number)
67 self._unit_number: str = str(unit_number)
68
69 def __set_args(
70 self, soup: BeautifulSoup, event_taget=None, additional: dict = {}
71 ) -> dict:
72 args = {
73 "ctl00$MainContent$txtSuburb": self._suburb,
74 "ctl00$MainContent$txtStreetName": self._street,
75 "ctl00$MainContent$txtHouseNumber": self._house_number,
76 "ctl00$MainContent$txtUnitNumber": self._unit_number,
77 }
78 if event_taget is not None:
79 args["__EVENTTARGET"] = event_taget
80
81 for hidden_val in soup.find_all("input", {"type": "hidden"}):
82 args[hidden_val["name"]] = hidden_val["value"]
83
84 for key, value in additional.items():
85 args[key] = value
86 return args
87
88 def fetch(self):
89 session = requests.Session()
90
91 # get First page
92 r = session.get(API_URL, verify=False)
93 r.raise_for_status()
94
95         # extract arguments
96 args = self.__set_args(
97 BeautifulSoup(r.text, "html.parser"),
98 event_taget="ctl00$MainContent$btnSearch",
99 )
100
101 r = session.post(API_URL, data=args)
102 r.raise_for_status()
103
104 # get page to select an address
105 soup = BeautifulSoup(r.text, "html.parser")
106
107 selectable = soup.find_all("a", {"class": "anchor-button small"}, text="Select")
108
109 if len(selectable) == 0:
110 raise ValueError("No address found")
111 selected = selectable[0]
112
113         # If multiple addresses are found, try to find the one that matches the input and warn if there are multiple or no matches
114 if len(selectable) > 1:
115 found = [
116 " ".join(
117 [y.text for y in x.parent.parent.find_all("td")[1].find_all("span")]
118 )
119 for x in selectable
120 ]
121 using_index = 0
122
123 match = False
124
125 for index, entry in enumerate(found):
126 entry = entry.lower().strip().replace(" ", "")
127 if (
128 self._house_number.lower().strip().replace(" ", "") in entry
129 and self._street.lower().strip().replace(" ", "") in entry
130 and self._suburb.lower().strip().replace(" ", "") in entry
131 and self._unit_number.lower().strip().replace(" ", "") in entry
132 ):
133 if match:
134 LOGGER.warning(
135 f"Multiple addresses found, using first one \nfound:{', '.join(found[:10])}{'...' if len(found) >= 10 else ''} \nusing:{found[using_index]}"
136 )
137 break
138 using_index = index
139 match = True
140 if not match:
141 LOGGER.warning(
142 f"no perfect address match found, using:{found[using_index]}"
143 )
144
145 # request first address
146 args = self.__set_args(
147 soup,
148 event_taget="ctl00$MainContent$gvPropertyResults$ctl02$btnSelect",
149 additional={selected["href"].split("'")[1]: ""},
150 )
151 r = session.post(API_URL, data=args)
152 r.raise_for_status()
153
154 soup = BeautifulSoup(r.text, "html.parser")
155 cal_header = soup.find("th", {"class": "header-month"}).find("span").text
156 main_month = cal_header.split("-")[0].strip()
157
158 secondary_month = cal_header.split("-")[1].strip().split(" ")[0]
159 secondary_year = main_year = cal_header.split("-")[1].strip().split(" ")[1]
160
161 # if main month contains a year, set it (maybe happens in december???)
162 if len(main_month.split(" ")) > 1:
163 main_year = main_month.split(" ")[1]
164 main_month = main_month.split(" ")[0]
165
166 entries = []
167
168 calendar = soup.find("table", {"class": "collection-day-calendar"})
169 # Iterate over all days with pickups
170 for pickup in calendar.find_all(
171 "div", {"class": re.compile(r"pickup|next-pickup")}
172 ):
173 parent_td = pickup.parent
174 month = (
175 main_month
176 if "main-month" in parent_td.attrs["class"]
177 else secondary_month
178 )
179 year = (
180 main_year
181 if "main-month" in parent_td.attrs["class"]
182 else secondary_year
183 )
184 day = parent_td.find("div", {"class": "daynumber"}).text
185
186 # Iterate over all pickup container types for this day
187 for container in pickup.find_all("div", {"class": "bin-container"}):
188 container_type = " ".join(container.find("div").attrs["class"])
189 container_icon = ICON_MAP.get(container_type)
190
191 date = datetime.datetime.strptime(
192 f"{year}-{month}-{day}", "%Y-%B-%d"
193 ).date()
194 entries.append(
195 Collection(date=date, t=container_type, icon=container_icon)
196 )
197
198 return entries
199
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/portenf_sa_gov_au.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/portenf_sa_gov_au.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/portenf_sa_gov_au.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/portenf_sa_gov_au.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/portenf_sa_gov_au.py
@@ -1,6 +1,6 @@
-import datetime
import logging
import re
+from datetime import datetime
import requests
import urllib3
@@ -153,15 +153,28 @@
soup = BeautifulSoup(r.text, "html.parser")
cal_header = soup.find("th", {"class": "header-month"}).find("span").text
- main_month = cal_header.split("-")[0].strip()
-
- secondary_month = cal_header.split("-")[1].strip().split(" ")[0]
- secondary_year = main_year = cal_header.split("-")[1].strip().split(" ")[1]
+ from_month = cal_header.split("-")[0].strip()
+ to_month = cal_header.split("-")[1].strip().split(" ")[0]
+ to_year = from_year = cal_header.split("-")[1].strip().split(" ")[1]
# if main month contains a year, set it (maybe happens in december???)
- if len(main_month.split(" ")) > 1:
- main_year = main_month.split(" ")[1]
- main_month = main_month.split(" ")[0]
+ if len(from_month.split(" ")) > 1:
+ from_year = from_month.split(" ")[1]
+ from_month = from_month.split(" ")[0]
+
+ today_div = soup.find("table", id="cal").find("td", class_="today")
+ print(today_div)
+
+ # if other-month is to_month
+ if (
+ "other-month" in today_div.attrs
+ and datetime.now().strftime("%B") != to_month
+ ):
+ main_month, other_month = from_month, to_month
+ main_year, other_year = from_year, to_year
+ else: # if other-month is from_month
+ main_month, other_month = to_month, from_month
+ main_year, other_year = to_year, from_year
entries = []
@@ -172,15 +185,9 @@
):
parent_td = pickup.parent
month = (
- main_month
- if "main-month" in parent_td.attrs["class"]
- else secondary_month
- )
- year = (
- main_year
- if "main-month" in parent_td.attrs["class"]
- else secondary_year
+ main_month if "main-month" in parent_td.attrs["class"] else other_month
)
+ year = main_year if "main-month" in parent_td.attrs["class"] else other_year
day = parent_td.find("div", {"class": "daynumber"}).text
# Iterate over all pickup container types for this day
@@ -188,9 +195,7 @@
container_type = " ".join(container.find("div").attrs["class"])
container_icon = ICON_MAP.get(container_type)
- date = datetime.datetime.strptime(
- f"{year}-{month}-{day}", "%Y-%B-%d"
- ).date()
+ date = datetime.strptime(f"{year}-{month}-{day}", "%Y-%B-%d").date()
entries.append(
Collection(date=date, t=container_type, icon=container_icon)
)
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/portenf_sa_gov_au.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/portenf_sa_gov_au.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/portenf_sa_gov_au.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/portenf_sa_gov_au.py\n@@ -1,6 +1,6 @@\n-import datetime\n import logging\n import re\n+from datetime import datetime\n \n import requests\n import urllib3\n@@ -153,15 +153,28 @@\n \n soup = BeautifulSoup(r.text, \"html.parser\")\n cal_header = soup.find(\"th\", {\"class\": \"header-month\"}).find(\"span\").text\n- main_month = cal_header.split(\"-\")[0].strip()\n-\n- secondary_month = cal_header.split(\"-\")[1].strip().split(\" \")[0]\n- secondary_year = main_year = cal_header.split(\"-\")[1].strip().split(\" \")[1]\n \n+ from_month = cal_header.split(\"-\")[0].strip()\n+ to_month = cal_header.split(\"-\")[1].strip().split(\" \")[0]\n+ to_year = from_year = cal_header.split(\"-\")[1].strip().split(\" \")[1]\n # if main month contains a year, set it (maybe happens in december???)\n- if len(main_month.split(\" \")) > 1:\n- main_year = main_month.split(\" \")[1]\n- main_month = main_month.split(\" \")[0]\n+ if len(from_month.split(\" \")) > 1:\n+ from_year = from_month.split(\" \")[1]\n+ from_month = from_month.split(\" \")[0]\n+\n+ today_div = soup.find(\"table\", id=\"cal\").find(\"td\", class_=\"today\")\n+ print(today_div)\n+\n+ # if other-month is to_month\n+ if (\n+ \"other-month\" in today_div.attrs\n+ and datetime.now().strftime(\"%B\") != to_month\n+ ):\n+ main_month, other_month = from_month, to_month\n+ main_year, other_year = from_year, to_year\n+ else: # if other-month is from_month\n+ main_month, other_month = to_month, from_month\n+ main_year, other_year = to_year, from_year\n \n entries = []\n \n@@ -172,15 +185,9 @@\n ):\n parent_td = pickup.parent\n month = (\n- main_month\n- if \"main-month\" in parent_td.attrs[\"class\"]\n- else secondary_month\n- )\n- year = (\n- main_year\n- if \"main-month\" in parent_td.attrs[\"class\"]\n- else secondary_year\n+ main_month if \"main-month\" in parent_td.attrs[\"class\"] else other_month\n )\n+ year = main_year if \"main-month\" in parent_td.attrs[\"class\"] else other_year\n day = parent_td.find(\"div\", {\"class\": \"daynumber\"}).text\n \n # Iterate over all pickup container types for this day\n@@ -188,9 +195,7 @@\n container_type = \" \".join(container.find(\"div\").attrs[\"class\"])\n container_icon = ICON_MAP.get(container_type)\n \n- date = datetime.datetime.strptime(\n- f\"{year}-{month}-{day}\", \"%Y-%B-%d\"\n- ).date()\n+ date = datetime.strptime(f\"{year}-{month}-{day}\", \"%Y-%B-%d\").date()\n entries.append(\n Collection(date=date, t=container_type, icon=container_icon)\n )\n", "issue": "[Bug]: portenf_sa_gov_au reporting incorrect dates\n### I Have A Problem With:\r\n\r\nA specific source\r\n\r\n### What's Your Problem\r\n\r\nThe portenf_sa_gov_au sensor has been reporting incorrectly since it updated itself on 24 December 2023 (I can see this from HA logs). 
It appears that when there is 1 week or less left in the month \"main-month\" switches to the coming month and \"other-month\" becomes the current month.\r\n\r\nBecause of this, the integration reports the current collection next month and the next collections as in the past (and hides them).\r\n\r\nThe fix in #1110 by @5ila5 partly addresses the problem but it was not foreseeable to him that EOM would be treated this way. @5ila5 also noted that this might be an issue in that closed issue.\r\n\r\n### Source (if relevant)\r\n\r\nportenf_sa_gov_au\r\n\r\n### Logs\r\n\r\n```Shell\r\nOutput of test_sources.py:\r\n\r\nTesting source portenf_sa_gov_au ...\r\n found 8 entries for Broadview, Regency Road, 565\r\n 2024-01-26 : general-waste bin [mdi:trash-can]\r\n 2024-01-26 : recycling bin [mdi:recycle]\r\n 2023-12-02 : general-waste bin [mdi:trash-can]\r\n 2023-12-02 : organics bin [mdi:leaf]\r\n 2023-12-09 : general-waste bin [mdi:trash-can]\r\n 2023-12-09 : recycling bin [mdi:recycle]\r\n 2023-12-16 : general-waste bin [mdi:trash-can]\r\n 2023-12-16 : organics bin [mdi:leaf]\r\n found 8 entries for 48 Floriedale Rd\r\n 2024-01-26 : general-waste bin [mdi:trash-can]\r\n 2024-01-26 : recycling bin [mdi:recycle]\r\n 2023-12-02 : general-waste bin [mdi:trash-can]\r\n 2023-12-02 : organics bin [mdi:leaf]\r\n 2023-12-09 : general-waste bin [mdi:trash-can]\r\n 2023-12-09 : recycling bin [mdi:recycle]\r\n 2023-12-16 : general-waste bin [mdi:trash-can]\r\n 2023-12-16 : organics bin [mdi:leaf]\r\n found 8 entries for 24 Margaret Terrace\r\n 2024-01-28 : general-waste bin [mdi:trash-can]\r\n 2024-01-28 : organics bin [mdi:leaf]\r\n 2023-12-04 : general-waste bin [mdi:trash-can]\r\n 2023-12-04 : recycling bin [mdi:recycle]\r\n 2023-12-11 : general-waste bin [mdi:trash-can]\r\n 2023-12-11 : organics bin [mdi:leaf]\r\n 2023-12-18 : general-waste bin [mdi:trash-can]\r\n 2023-12-18 : recycling bin [mdi:recycle]\r\n found 8 entries for Addison Road 91 with unit\r\n 2024-01-28 : general-waste bin [mdi:trash-can]\r\n 2024-01-28 : organics bin [mdi:leaf]\r\n 2023-12-04 : general-waste bin [mdi:trash-can]\r\n 2023-12-04 : recycling bin [mdi:recycle]\r\n 2023-12-11 : general-waste bin [mdi:trash-can]\r\n 2023-12-11 : organics bin [mdi:leaf]\r\n 2023-12-18 : general-waste bin [mdi:trash-can]\r\n 2023-12-18 : recycling bin [mdi:recycle]\r\n```\r\n\r\n\r\n### Relevant Configuration\r\n\r\n_No response_\r\n\r\n### Checklist Source Error\r\n\r\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\r\n- [X] Checked that the website of your service provider is still working\r\n- [X] Tested my attributes on the service provider website (if possible)\r\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\r\n\r\n### Checklist Sensor Error\r\n\r\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\r\n\r\n### Required\r\n\r\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\r\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "import datetime\nimport logging\nimport re\n\nimport requests\nimport urllib3\nfrom bs4 import 
BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\n# With verify=True the POST fails due to a SSLCertVerificationError.\n# Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:\n# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings\n# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl\n# These two lines areused to suppress the InsecureRequestWarning when using verify=False\nurllib3.disable_warnings()\n\nTITLE = \"Port Adelaide Enfield, South Australia\"\nDESCRIPTION = \"Source for City of Port Adelaide Enfield, South Australia.\"\nURL = \"https://ecouncil.portenf.sa.gov.au/\"\nTEST_CASES = {\n \"Broadview, Regency Road, 565 \": {\n \"suburb\": \"Broadview\",\n \"street\": \"Regency Road\",\n \"house_number\": 565,\n \"unit_number\": \"\",\n },\n \"48 Floriedale Rd \": {\n \"suburb\": \"Greenacres\",\n \"street\": \"Floriedale Rd\",\n \"house_number\": \"48\",\n },\n \"24 Margaret Terrace\": {\n \"suburb\": \"Rosewater\",\n \"street\": \"Margaret Terrace\",\n \"house_number\": \"24\",\n },\n \"Addison Road 91 with unit\": {\n \"suburb\": \"Rosewater\",\n \"street\": \"Addison Road\",\n \"house_number\": 91,\n \"unit_number\": 2,\n },\n}\n\nICON_MAP = {\n \"general-waste bin\": \"mdi:trash-can\",\n \"organics bin\": \"mdi:leaf\",\n \"recycling bin\": \"mdi:recycle\",\n}\n\nLOGGER = logging.getLogger(__name__)\n\nAPI_URL = \"https://ecouncil.portenf.sa.gov.au/public/propertywastedates/public.aspx\"\n\n\nclass Source:\n def __init__(\n self,\n suburb: str,\n street: str,\n house_number: str | int,\n unit_number: str | int = \"\",\n ):\n self._suburb: str = suburb\n self._street: str = street\n self._house_number: str = str(house_number)\n self._unit_number: str = str(unit_number)\n\n def __set_args(\n self, soup: BeautifulSoup, event_taget=None, additional: dict = {}\n ) -> dict:\n args = {\n \"ctl00$MainContent$txtSuburb\": self._suburb,\n \"ctl00$MainContent$txtStreetName\": self._street,\n \"ctl00$MainContent$txtHouseNumber\": self._house_number,\n \"ctl00$MainContent$txtUnitNumber\": self._unit_number,\n }\n if event_taget is not None:\n args[\"__EVENTTARGET\"] = event_taget\n\n for hidden_val in soup.find_all(\"input\", {\"type\": \"hidden\"}):\n args[hidden_val[\"name\"]] = hidden_val[\"value\"]\n\n for key, value in additional.items():\n args[key] = value\n return args\n\n def fetch(self):\n session = requests.Session()\n\n # get First page\n r = session.get(API_URL, verify=False)\n r.raise_for_status()\n\n # extractt arguments\n args = self.__set_args(\n BeautifulSoup(r.text, \"html.parser\"),\n event_taget=\"ctl00$MainContent$btnSearch\",\n )\n\n r = session.post(API_URL, data=args)\n r.raise_for_status()\n\n # get page to select an address\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n selectable = soup.find_all(\"a\", {\"class\": \"anchor-button small\"}, text=\"Select\")\n\n if len(selectable) == 0:\n raise ValueError(\"No address found\")\n selected = selectable[0]\n\n # If multiple addresses are found, try to find the one that matches the input and warn if there are multiple or none matches\n if len(selectable) > 1:\n found = [\n \" \".join(\n [y.text for y in x.parent.parent.find_all(\"td\")[1].find_all(\"span\")]\n )\n for x in selectable\n ]\n using_index = 0\n\n match = False\n\n for index, entry in enumerate(found):\n entry = entry.lower().strip().replace(\" \", \"\")\n if (\n self._house_number.lower().strip().replace(\" \", \"\") in 
entry\n and self._street.lower().strip().replace(\" \", \"\") in entry\n and self._suburb.lower().strip().replace(\" \", \"\") in entry\n and self._unit_number.lower().strip().replace(\" \", \"\") in entry\n ):\n if match:\n LOGGER.warning(\n f\"Multiple addresses found, using first one \\nfound:{', '.join(found[:10])}{'...' if len(found) >= 10 else ''} \\nusing:{found[using_index]}\"\n )\n break\n using_index = index\n match = True\n if not match:\n LOGGER.warning(\n f\"no perfect address match found, using:{found[using_index]}\"\n )\n\n # request first address\n args = self.__set_args(\n soup,\n event_taget=\"ctl00$MainContent$gvPropertyResults$ctl02$btnSelect\",\n additional={selected[\"href\"].split(\"'\")[1]: \"\"},\n )\n r = session.post(API_URL, data=args)\n r.raise_for_status()\n\n soup = BeautifulSoup(r.text, \"html.parser\")\n cal_header = soup.find(\"th\", {\"class\": \"header-month\"}).find(\"span\").text\n main_month = cal_header.split(\"-\")[0].strip()\n\n secondary_month = cal_header.split(\"-\")[1].strip().split(\" \")[0]\n secondary_year = main_year = cal_header.split(\"-\")[1].strip().split(\" \")[1]\n\n # if main month contains a year, set it (maybe happens in december???)\n if len(main_month.split(\" \")) > 1:\n main_year = main_month.split(\" \")[1]\n main_month = main_month.split(\" \")[0]\n\n entries = []\n\n calendar = soup.find(\"table\", {\"class\": \"collection-day-calendar\"})\n # Iterate over all days with pickups\n for pickup in calendar.find_all(\n \"div\", {\"class\": re.compile(r\"pickup|next-pickup\")}\n ):\n parent_td = pickup.parent\n month = (\n main_month\n if \"main-month\" in parent_td.attrs[\"class\"]\n else secondary_month\n )\n year = (\n main_year\n if \"main-month\" in parent_td.attrs[\"class\"]\n else secondary_year\n )\n day = parent_td.find(\"div\", {\"class\": \"daynumber\"}).text\n\n # Iterate over all pickup container types for this day\n for container in pickup.find_all(\"div\", {\"class\": \"bin-container\"}):\n container_type = \" \".join(container.find(\"div\").attrs[\"class\"])\n container_icon = ICON_MAP.get(container_type)\n\n date = datetime.datetime.strptime(\n f\"{year}-{month}-{day}\", \"%Y-%B-%d\"\n ).date()\n entries.append(\n Collection(date=date, t=container_type, icon=container_icon)\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/portenf_sa_gov_au.py"}]} | 3,879 | 802 |
gh_patches_debug_5396 | rasdani/github-patches | git_diff | tensorflow__tfx-1082 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
just a few typos fixed
That Python version check can be removed in a few days when Colab drops Python 2 support.
</issue>
<code>
[start of tfx/examples/custom_components/slack/example/taxi_pipeline_slack_kubeflow.py]
1 # Lint as: python2, python3
2 # Copyright 2019 Google LLC. All Rights Reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Example pipeline to demonstrate custom TFX component.
16
17 This example consists of standard TFX components as well as a custom TFX
18 component requesting for manual review through Slack.
19
20 This example along with the custom `SlackComponent` will only serve as an
21 example and will not be supported by TFX team.
22
23 This example runs in Kubeflow with Google Cloud services.
24 """
25
26 from __future__ import absolute_import
27 from __future__ import division
28 from __future__ import print_function
29
30 import datetime
31 import os
32
33 from slack_component.component import SlackComponent
34
35 from tfx.components import CsvExampleGen
36 from tfx.components import Evaluator
37 from tfx.components import ExampleValidator
38 from tfx.components import ModelValidator
39 from tfx.components import Pusher
40 from tfx.components import SchemaGen
41 from tfx.components import StatisticsGen
42 from tfx.components import Trainer
43 from tfx.components import Transform
44 from tfx.orchestration import pipeline
45 from tfx.orchestration.kubeflow import kubeflow_dag_runner
46 from tfx.proto import evaluator_pb2
47 from tfx.proto import pusher_pb2
48 from tfx.proto import trainer_pb2
49 from tfx.utils.dsl_utils import csv_input
50
51 # This example assumes that the taxi data is stored in _input_bucket/data/simple
52 # and the taxi utility function is in example/taxi_utils_slack.py.
53 # Feel free to customize this as needed.
54 _input_bucket = 'gs://my-bucket'
55 _output_bucket = 'gs://my-bucket'
56 _taxi_root = __file__
57 _data_root = os.path.join(_input_bucket, 'data', 'simple')
58
59
60 # Python module file to inject customized logic into the TFX components. The
61 # Transform and Trainer both require user-defined functions to run successfully.
62 _taxi_trainer_func = 'example.taxi_utils_slack.trainer_fn'
63 _taxi_transformer_func = 'example.taxi_utils_slack.preprocessing_fn'
64 # Path which can be listened to by the model server. Pusher will output the
65 # trained model here.
66 _serving_model_dir = os.path.join(_taxi_root, 'serving_model/taxi_slack')
67 # Slack channel to push the model notifications to.
68 _slack_channel_id = 'my-channel-id'
69 # Slack token to set up connection.
70 _slack_token = os.environ['SLACK_BOT_TOKEN']
71
72 # Directory and data locations. This example assumes all of the chicago taxi
73 # example code and metadata library is relative to $HOME, but you can store
74 # these files anywhere on your local filesystem.
75 _tfx_root = os.path.join(os.environ['HOME'], '/tfx')
76 _pipeline_name = 'chicago_taxi_slack_kubeflow'
77 _pipeline_root = os.path.join(_input_bucket, _pipeline_name)
78 _log_root = os.path.join(_tfx_root, 'logs')
79
80 # Airflow-specific configs; these will be passed directly to airflow
81 _airflow_config = {
82 'schedule_interval': None,
83 'start_date': datetime.datetime(2019, 1, 1),
84 }
85
86
87 def _create_pipeline():
88 """Implements the chicago taxi pipeline with TFX."""
89 examples = csv_input(_data_root)
90
91 # Brings data into the pipeline or otherwise joins/converts training data.
92 example_gen = CsvExampleGen(input=examples)
93
94 # Computes statistics over data for visualization and example validation.
95 statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
96
97 # Generates schema based on statistics files.
98 infer_schema = SchemaGen(statistics=statistics_gen.outputs['statistics'])
99
100 # Performs anomaly detection based on statistics and data schema.
101 validate_stats = ExampleValidator(
102 statistics=statistics_gen.outputs['statistics'],
103 schema=infer_schema.outputs['schema'])
104
105 # Performs transformations and feature engineering in training and serving.
106 transform = Transform(
107 examples=example_gen.outputs['examples'],
108 schema=infer_schema.outputs['schema'],
109 preprocessing_fn=_taxi_transformer_func)
110
111 # Uses user-provided Python function that implements a model using TF-Learn.
112 trainer = Trainer(
113 trainer_fn=_taxi_trainer_func,
114 examples=transform.outputs['transformed_examples'],
115 schema=infer_schema.outputs['schema'],
116 transform_graph=transform.outputs['transform_graph'],
117 train_args=trainer_pb2.TrainArgs(num_steps=10000),
118 eval_args=trainer_pb2.EvalArgs(num_steps=5000))
119
120   # Uses TFMA to compute evaluation statistics over features of a model.
121 model_analyzer = Evaluator(
122 examples=example_gen.outputs['examples'],
123 model_exports=trainer.outputs['model'],
124 feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
125 evaluator_pb2.SingleSlicingSpec(
126 column_for_slicing=['trip_start_hour'])
127 ]))
128
129 # Performs quality validation of a candidate model (compared to a baseline).
130 model_validator = ModelValidator(
131 examples=example_gen.outputs['examples'], model=trainer.outputs['model'])
132
133 # This custom component serves as a bridge between pipeline and human model
134 # reviewers to enable review-and-push workflow in model development cycle. It
135 # utilizes Slack API to send message to user-defined Slack channel with model
136 # URI info and wait for go / no-go decision from the same Slack channel:
137 # * To approve the model, users need to reply the thread sent out by the bot
138 # started by SlackComponent with 'lgtm' or 'approve'.
139 # * To reject the model, users need to reply the thread sent out by the bot
140 # started by SlackComponent with 'decline' or 'reject'.
141 slack_validator = SlackComponent(
142 model=trainer.outputs['model'],
143 model_blessing=model_validator.outputs['blessing'],
144 slack_token=_slack_token,
145 slack_channel_id=_slack_channel_id,
146 timeout_sec=3600,
147 )
148 # Checks whether the model passed the validation steps and pushes the model
149 # to a file destination if check passed.
150 pusher = Pusher(
151 model=trainer.outputs['model'],
152 model_blessing=slack_validator.outputs['slack_blessing'],
153 push_destination=pusher_pb2.PushDestination(
154 filesystem=pusher_pb2.PushDestination.Filesystem(
155 base_directory=_serving_model_dir)))
156
157 return pipeline.Pipeline(
158 pipeline_name=_pipeline_name,
159 pipeline_root=_pipeline_root,
160 components=[
161 example_gen, statistics_gen, infer_schema, validate_stats, transform,
162 trainer, model_analyzer, model_validator, slack_validator, pusher
163 ],
164 enable_cache=True,
165 )
166
167
168 if __name__ == '__main__':
169   # Metadata config. The defaults work with the installation of
170 # KF Pipelines using Kubeflow. If installing KF Pipelines using the
171 # lightweight deployment option, you may need to override the defaults.
172 metadata_config = kubeflow_dag_runner.get_default_kubeflow_metadata_config()
173
174 # This pipeline automatically injects the Kubeflow TFX image if the
175 # environment variable 'KUBEFLOW_TFX_IMAGE' is defined. Currently, the tfx
176 # cli tool exports the environment variable to pass to the pipelines.
177 tfx_image = os.environ.get('KUBEFLOW_TFX_IMAGE', None)
178
179 runner_config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
180 kubeflow_metadata_config=metadata_config,
181 # Specify custom docker image to use.
182 tfx_image=tfx_image
183 )
184
185 kubeflow_dag_runner.KubeflowDagRunner(config=runner_config).run(
186 _create_pipeline())
187
[end of tfx/examples/custom_components/slack/example/taxi_pipeline_slack_kubeflow.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tfx/examples/custom_components/slack/example/taxi_pipeline_slack_kubeflow.py b/tfx/examples/custom_components/slack/example/taxi_pipeline_slack_kubeflow.py
--- a/tfx/examples/custom_components/slack/example/taxi_pipeline_slack_kubeflow.py
+++ b/tfx/examples/custom_components/slack/example/taxi_pipeline_slack_kubeflow.py
@@ -145,7 +145,7 @@
slack_channel_id=_slack_channel_id,
timeout_sec=3600,
)
- # Checks whether the model passed the validation steps and pushes the model
+ # Checks whether the model passed the validation steps and pushes the model
# to a file destination if check passed.
pusher = Pusher(
model=trainer.outputs['model'],
| {"golden_diff": "diff --git a/tfx/examples/custom_components/slack/example/taxi_pipeline_slack_kubeflow.py b/tfx/examples/custom_components/slack/example/taxi_pipeline_slack_kubeflow.py\n--- a/tfx/examples/custom_components/slack/example/taxi_pipeline_slack_kubeflow.py\n+++ b/tfx/examples/custom_components/slack/example/taxi_pipeline_slack_kubeflow.py\n@@ -145,7 +145,7 @@\n slack_channel_id=_slack_channel_id,\n timeout_sec=3600,\n )\n- # Checks whether the model passed the validation steps and pushes the model\n+ # Checks whether the model passed the validation steps and pushes the model\n # to a file destination if check passed.\n pusher = Pusher(\n model=trainer.outputs['model'],\n", "issue": "just a few typos fixed\nThat Python version check can be removed in a few days when Colab drops Python 2 support.\n", "before_files": [{"content": "# Lint as: python2, python3\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Example pipeline to demonstrate custom TFX component.\n\nThis example consists of standard TFX components as well as a custom TFX\ncomponent requesting for manual review through Slack.\n\nThis example along with the custom `SlackComponent` will only serve as an\nexample and will not be supported by TFX team.\n\nThis example runs in Kubeflow with Google Cloud services..\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\nimport os\n\nfrom slack_component.component import SlackComponent\n\nfrom tfx.components import CsvExampleGen\nfrom tfx.components import Evaluator\nfrom tfx.components import ExampleValidator\nfrom tfx.components import ModelValidator\nfrom tfx.components import Pusher\nfrom tfx.components import SchemaGen\nfrom tfx.components import StatisticsGen\nfrom tfx.components import Trainer\nfrom tfx.components import Transform\nfrom tfx.orchestration import pipeline\nfrom tfx.orchestration.kubeflow import kubeflow_dag_runner\nfrom tfx.proto import evaluator_pb2\nfrom tfx.proto import pusher_pb2\nfrom tfx.proto import trainer_pb2\nfrom tfx.utils.dsl_utils import csv_input\n\n# This example assumes that the taxi data is stored in _input_bucket/data/simple\n# and the taxi utility function is in example/taxi_utils_slack.py.\n# Feel free to customize this as needed.\n_input_bucket = 'gs://my-bucket'\n_output_bucket = 'gs://my-bucket'\n_taxi_root = __file__\n_data_root = os.path.join(_input_bucket, 'data', 'simple')\n\n\n# Python module file to inject customized logic into the TFX components. The\n# Transform and Trainer both require user-defined functions to run successfully.\n_taxi_trainer_func = 'example.taxi_utils_slack.trainer_fn'\n_taxi_transformer_func = 'example.taxi_utils_slack.preprocessing_fn'\n# Path which can be listened to by the model server. 
Pusher will output the\n# trained model here.\n_serving_model_dir = os.path.join(_taxi_root, 'serving_model/taxi_slack')\n# Slack channel to push the model notifications to.\n_slack_channel_id = 'my-channel-id'\n# Slack token to set up connection.\n_slack_token = os.environ['SLACK_BOT_TOKEN']\n\n# Directory and data locations. This example assumes all of the chicago taxi\n# example code and metadata library is relative to $HOME, but you can store\n# these files anywhere on your local filesystem.\n_tfx_root = os.path.join(os.environ['HOME'], '/tfx')\n_pipeline_name = 'chicago_taxi_slack_kubeflow'\n_pipeline_root = os.path.join(_input_bucket, _pipeline_name)\n_log_root = os.path.join(_tfx_root, 'logs')\n\n# Airflow-specific configs; these will be passed directly to airflow\n_airflow_config = {\n 'schedule_interval': None,\n 'start_date': datetime.datetime(2019, 1, 1),\n}\n\n\ndef _create_pipeline():\n \"\"\"Implements the chicago taxi pipeline with TFX.\"\"\"\n examples = csv_input(_data_root)\n\n # Brings data into the pipeline or otherwise joins/converts training data.\n example_gen = CsvExampleGen(input=examples)\n\n # Computes statistics over data for visualization and example validation.\n statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])\n\n # Generates schema based on statistics files.\n infer_schema = SchemaGen(statistics=statistics_gen.outputs['statistics'])\n\n # Performs anomaly detection based on statistics and data schema.\n validate_stats = ExampleValidator(\n statistics=statistics_gen.outputs['statistics'],\n schema=infer_schema.outputs['schema'])\n\n # Performs transformations and feature engineering in training and serving.\n transform = Transform(\n examples=example_gen.outputs['examples'],\n schema=infer_schema.outputs['schema'],\n preprocessing_fn=_taxi_transformer_func)\n\n # Uses user-provided Python function that implements a model using TF-Learn.\n trainer = Trainer(\n trainer_fn=_taxi_trainer_func,\n examples=transform.outputs['transformed_examples'],\n schema=infer_schema.outputs['schema'],\n transform_graph=transform.outputs['transform_graph'],\n train_args=trainer_pb2.TrainArgs(num_steps=10000),\n eval_args=trainer_pb2.EvalArgs(num_steps=5000))\n\n # Uses TFMA to compute a evaluation statistics over features of a model.\n model_analyzer = Evaluator(\n examples=example_gen.outputs['examples'],\n model_exports=trainer.outputs['model'],\n feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[\n evaluator_pb2.SingleSlicingSpec(\n column_for_slicing=['trip_start_hour'])\n ]))\n\n # Performs quality validation of a candidate model (compared to a baseline).\n model_validator = ModelValidator(\n examples=example_gen.outputs['examples'], model=trainer.outputs['model'])\n\n # This custom component serves as a bridge between pipeline and human model\n # reviewers to enable review-and-push workflow in model development cycle. 
It\n # utilizes Slack API to send message to user-defined Slack channel with model\n # URI info and wait for go / no-go decision from the same Slack channel:\n # * To approve the model, users need to reply the thread sent out by the bot\n # started by SlackComponent with 'lgtm' or 'approve'.\n # * To reject the model, users need to reply the thread sent out by the bot\n # started by SlackComponent with 'decline' or 'reject'.\n slack_validator = SlackComponent(\n model=trainer.outputs['model'],\n model_blessing=model_validator.outputs['blessing'],\n slack_token=_slack_token,\n slack_channel_id=_slack_channel_id,\n timeout_sec=3600,\n )\n # Checks whether the model passed the validation steps and pushes the model\n # to a file destination if check passed.\n pusher = Pusher(\n model=trainer.outputs['model'],\n model_blessing=slack_validator.outputs['slack_blessing'],\n push_destination=pusher_pb2.PushDestination(\n filesystem=pusher_pb2.PushDestination.Filesystem(\n base_directory=_serving_model_dir)))\n\n return pipeline.Pipeline(\n pipeline_name=_pipeline_name,\n pipeline_root=_pipeline_root,\n components=[\n example_gen, statistics_gen, infer_schema, validate_stats, transform,\n trainer, model_analyzer, model_validator, slack_validator, pusher\n ],\n enable_cache=True,\n )\n\n\nif __name__ == '__main__':\n # Metadata config. The defaults works work with the installation of\n # KF Pipelines using Kubeflow. If installing KF Pipelines using the\n # lightweight deployment option, you may need to override the defaults.\n metadata_config = kubeflow_dag_runner.get_default_kubeflow_metadata_config()\n\n # This pipeline automatically injects the Kubeflow TFX image if the\n # environment variable 'KUBEFLOW_TFX_IMAGE' is defined. Currently, the tfx\n # cli tool exports the environment variable to pass to the pipelines.\n tfx_image = os.environ.get('KUBEFLOW_TFX_IMAGE', None)\n\n runner_config = kubeflow_dag_runner.KubeflowDagRunnerConfig(\n kubeflow_metadata_config=metadata_config,\n # Specify custom docker image to use.\n tfx_image=tfx_image\n )\n\n kubeflow_dag_runner.KubeflowDagRunner(config=runner_config).run(\n _create_pipeline())\n", "path": "tfx/examples/custom_components/slack/example/taxi_pipeline_slack_kubeflow.py"}]} | 2,838 | 178 |
gh_patches_debug_4388 | rasdani/github-patches | git_diff | freqtrade__freqtrade-2197 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Build number in -dev versioning
Since we do not have a build number in the -dev version, it can be quite hard to tell which particular version of freqtrade a user is running (especially with the conda package). So if we change the user interface, I guess it's better for a while to make those changes together with bumping the version on both develop and master...
Is it possible to introduce a build number into the -dev versioning that changes with every merge, as ccxt does, for example?
It's not necessary for master (2019-8 is completely enough), but for develop, which changes every day, this would be useful, since `freqtrade -V` would then give the exact version info (even when a user has the current codebase fetched from GitHub in a directory but runs an outdated conda installation that is used instead of the latest develop from that directory; that's the confusing use case where it's hard to find out which version of freqtrade actually runs at the user's site...).
It doesn't have to be an increasing number; it can be a short commit id (of the commit merged into develop), for example...
</issue>
<code>
[start of freqtrade/__init__.py]
1 """ FreqTrade bot """
2 __version__ = '2019.7-dev'
3
4
5 class DependencyException(Exception):
6 """
7 Indicates that an assumed dependency is not met.
8 This could happen when there is currently not enough money on the account.
9 """
10
11
12 class OperationalException(Exception):
13 """
14 Requires manual intervention and will usually stop the bot.
15 This happens when an exchange returns an unexpected error during runtime
16 or given configuration is invalid.
17 """
18
19
20 class InvalidOrderException(Exception):
21 """
22 This is returned when the order is not valid. Example:
23 If stoploss on exchange order is hit, then trying to cancel the order
24 should return this exception.
25 """
26
27
28 class TemporaryError(Exception):
29 """
30 Temporary network or exchange related error.
31 This could happen when an exchange is congested, unavailable, or the user
32 has networking problems. Usually resolves itself after a time.
33 """
34
[end of freqtrade/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/freqtrade/__init__.py b/freqtrade/__init__.py
--- a/freqtrade/__init__.py
+++ b/freqtrade/__init__.py
@@ -1,5 +1,16 @@
""" FreqTrade bot """
-__version__ = '2019.7-dev'
+__version__ = 'develop'
+
+if __version__ == 'develop':
+
+ try:
+ import subprocess
+ __version__ = 'develop-' + subprocess.check_output(
+ ['git', 'log', '--format="%h"', '-n 1'],
+ stderr=subprocess.DEVNULL).decode("utf-8").rstrip().strip('"')
+ except Exception:
+ # git not available, ignore
+ pass
class DependencyException(Exception):
| {"golden_diff": "diff --git a/freqtrade/__init__.py b/freqtrade/__init__.py\n--- a/freqtrade/__init__.py\n+++ b/freqtrade/__init__.py\n@@ -1,5 +1,16 @@\n \"\"\" FreqTrade bot \"\"\"\n-__version__ = '2019.7-dev'\n+__version__ = 'develop'\n+\n+if __version__ == 'develop':\n+\n+ try:\n+ import subprocess\n+ __version__ = 'develop-' + subprocess.check_output(\n+ ['git', 'log', '--format=\"%h\"', '-n 1'],\n+ stderr=subprocess.DEVNULL).decode(\"utf-8\").rstrip().strip('\"')\n+ except Exception:\n+ # git not available, ignore\n+ pass\n \n \n class DependencyException(Exception):\n", "issue": "Build number in -dev versioning\nSince we do not have a build number in the -dev version, it may be quite complex to understand which particular version of freqtrade the user runs (especially with that your conda). So if we change user interface I guess it's better for a while to make those changes along with changing the version of both develop and master...\r\n\r\nIs it possible to introduce the build number into the -dev versioning, which will change with every merge, as ccxt employs, for example?\r\n\r\nIt's not necessary for master (2019-8 is completely enough), but for develop changing every day this may be useful since `freqtrade -V` will give particular version info (even when a user has current codebase from github fetched in a dir, but installed an outdated version with that your conda, which is used instead of latest develop from the dir; that's the confusing usecase when it's complicated to find out the actual version of freqtrade that runs at the user site...)\r\n\r\nIt's not necessary to be an increasing number, it can be a short commit id (merged into develop), for example...\r\n\n", "before_files": [{"content": "\"\"\" FreqTrade bot \"\"\"\n__version__ = '2019.7-dev'\n\n\nclass DependencyException(Exception):\n \"\"\"\n Indicates that an assumed dependency is not met.\n This could happen when there is currently not enough money on the account.\n \"\"\"\n\n\nclass OperationalException(Exception):\n \"\"\"\n Requires manual intervention and will usually stop the bot.\n This happens when an exchange returns an unexpected error during runtime\n or given configuration is invalid.\n \"\"\"\n\n\nclass InvalidOrderException(Exception):\n \"\"\"\n This is returned when the order is not valid. Example:\n If stoploss on exchange order is hit, then trying to cancel the order\n should return this exception.\n \"\"\"\n\n\nclass TemporaryError(Exception):\n \"\"\"\n Temporary network or exchange related error.\n This could happen when an exchange is congested, unavailable, or the user\n has networking problems. Usually resolves itself after a time.\n \"\"\"\n", "path": "freqtrade/__init__.py"}]} | 1,027 | 174 |
gh_patches_debug_1680 | rasdani/github-patches | git_diff | vispy__vispy-2592 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
What's the status of GLFW?
I see there's a `glfw` backend, but in `setup.py` it is neither listed as a dependency nor defined as an extra. Is that an oversight, or is it deliberate?
I'm packaging `vispy` for Fedora, and with [glfw](https://pypi.org/project/glfw/) added as a dependency I see `glfw` listed in the output of `vispy.sys_info()`. Tests using `glfw` as a backend also appear to work fine.
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (c) Vispy Development Team. All Rights Reserved.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4 """Vispy setup script.
5
6 Steps to do a new release:
7
8 Preparations:
9 * Test on Windows, Linux, Mac
10 * Make release notes
11 * Update API documentation and other docs that need updating.
12
13 Define the version and release:
14 * tag the tip changeset as version x.x.x; `git tag -a 'vX.Y.Z' -m "Version X.Y.Z"`
15 * push tag to github
16 * verify that azure pipelines complete
17 * verify that `.tar.gz` sdist and binary wheels are available on PyPI
18
19 Announcing:
20 * It can be worth waiting a day for eager users to report critical bugs
21 * Announce in scipy-user, vispy mailing list, twitter (@vispyproject)
22
23 """
24
25 import os
26 import sys
27 from os import path as op
28 from setuptools import setup, find_packages
29
30 import numpy as np
31 from Cython.Build import cythonize
32 from Cython.Distutils import Extension
33
34 name = 'vispy'
35 description = 'Interactive visualization in Python'
36
37 # Special commands for building jupyter notebook extension
38 here = os.path.dirname(os.path.abspath(__file__))
39 node_root = os.path.join(here, 'js')
40 is_repo = os.path.exists(os.path.join(here, '.git'))
41
42 npm_path = os.pathsep.join([
43 os.path.join(node_root, 'node_modules', '.bin'),
44 os.environ.get('PATH', os.defpath),
45 ])
46
47
48 def set_builtin(name, value):
49 if isinstance(__builtins__, dict):
50 __builtins__[name] = value
51 else:
52 setattr(__builtins__, name, value)
53
54
55 extensions = [Extension('vispy.visuals.text._sdf_cpu',
56 sources=[op.join('vispy', 'visuals', 'text', '_sdf_cpu.pyx')],
57 include_dirs=[np.get_include()],
58 cython_directives={"language_level": "3"},
59 define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")],
60 ),
61 ]
62
63 install_requires = ['numpy', 'freetype-py', 'hsluv', 'kiwisolver', 'packaging']
64 if sys.version_info < (3, 9):
65 install_requires.append("importlib-resources")
66
67 readme = open('README.rst', 'r').read()
68 setup(
69 name=name,
70 use_scm_version={
71 'write_to': 'vispy/version.py',
72 # uses setuptools_scm.version.get_local_dirty_tag (+dirty or empty string)
73 'local_scheme': 'dirty-tag',
74 },
75 author='Vispy contributors',
76 author_email='[email protected]',
77 license='BSD-3-Clause',
78 url='http://vispy.org',
79 download_url='https://pypi.python.org/pypi/vispy',
80 keywords=[
81 'visualization',
82 'OpenGl',
83 'ES',
84 'medical',
85 'imaging',
86 '3D',
87 'plotting',
88 'numpy',
89 'bigdata',
90 'ipython',
91 'jupyter',
92 'widgets',
93 ],
94 description=description,
95 long_description=readme,
96 long_description_content_type='text/x-rst',
97 platforms='any',
98 provides=['vispy'],
99 python_requires='>=3.8',
100 install_requires=install_requires,
101 extras_require={
102 'ipython-static': ['ipython'],
103 'pyglet': ['pyglet>=1.2'],
104 'pyqt5': ['pyqt5'],
105 'pyqt6': ['pyqt6'],
106 'pyside': ['PySide'],
107 'pyside2': ['PySide2'],
108 'pyside6': ['PySide6'],
109 'sdl2': ['PySDL2'],
110 'wx': ['wxPython'],
111 'tk': ['pyopengltk'],
112 'doc': ['pydata-sphinx-theme', 'numpydoc', 'sphinxcontrib-apidoc',
113 'sphinx-gallery', 'myst-parser', 'pillow', 'pytest',
114 'pyopengl'],
115 'io': ['meshio', 'Pillow'],
116 },
117 packages=find_packages(exclude=['make']),
118 ext_modules=cythonize(extensions, language_level=3),
119 package_dir={'vispy': 'vispy'},
120 data_files=[],
121 include_package_data=True,
122 package_data={
123 'vispy': [op.join('io', '_data', '*'),
124 op.join('app', 'tests', 'qt-designer.ui'),
125 op.join('util', 'fonts', 'data', '*.ttf'),
126 ],
127
128 'vispy.glsl': ['*.vert', '*.frag', "*.glsl"],
129 'vispy.glsl.antialias': ['*.vert', '*.frag', "*.glsl"],
130 'vispy.glsl.arrowheads': ['*.vert', '*.frag', "*.glsl"],
131 'vispy.glsl.arrows': ['*.vert', '*.frag', "*.glsl"],
132 'vispy.glsl.collections': ['*.vert', '*.frag', "*.glsl"],
133 'vispy.glsl.colormaps': ['*.vert', '*.frag', "*.glsl"],
134 'vispy.glsl.lines': ['*.vert', '*.frag', "*.glsl"],
135 'vispy.glsl.markers': ['*.vert', '*.frag', "*.glsl"],
136 'vispy.glsl.math': ['*.vert', '*.frag', "*.glsl"],
137 'vispy.glsl.misc': ['*.vert', '*.frag', "*.glsl"],
138 'vispy.glsl.transforms': ['*.vert', '*.frag', "*.glsl"],
139
140 },
141 zip_safe=False,
142 classifiers=[
143 'Development Status :: 3 - Alpha',
144 'Intended Audience :: Science/Research',
145 'Intended Audience :: Education',
146 'Intended Audience :: Developers',
147 'Topic :: Scientific/Engineering :: Visualization',
148 'License :: OSI Approved :: BSD License',
149 'Operating System :: MacOS :: MacOS X',
150 'Operating System :: Microsoft :: Windows',
151 'Operating System :: POSIX',
152 'Programming Language :: Python',
153 'Programming Language :: Python :: 3.8',
154 'Programming Language :: Python :: 3.9',
155 'Programming Language :: Python :: 3.10',
156 'Programming Language :: Python :: 3.11',
157 'Framework :: IPython'
158 ],
159 )
160
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -106,6 +106,7 @@
'pyside': ['PySide'],
'pyside2': ['PySide2'],
'pyside6': ['PySide6'],
+ 'glfw': ['glfw'],
'sdl2': ['PySDL2'],
'wx': ['wxPython'],
'tk': ['pyopengltk'],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -106,6 +106,7 @@\n 'pyside': ['PySide'],\n 'pyside2': ['PySide2'],\n 'pyside6': ['PySide6'],\n+ 'glfw': ['glfw'],\n 'sdl2': ['PySDL2'],\n 'wx': ['wxPython'],\n 'tk': ['pyopengltk'],\n", "issue": "What's the status of GLFW?\nI see there's a `glfw` backend, but in `setup.py` it is neither listed as a dependency nor defined as an extra. Is that an oversight or deliberately?\r\n\r\nI'm packaging `vispy` for Fedora and with [glfw](https://pypi.org/project/glfw/) added as a dependency, I'm seeing `glfw` listed in the output of `vispy.sys_info()`. Tests using `glsw` as a backend also appear to work fine.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\"\"\"Vispy setup script.\n\nSteps to do a new release:\n\nPreparations:\n * Test on Windows, Linux, Mac\n * Make release notes\n * Update API documentation and other docs that need updating.\n\nDefine the version and release:\n * tag the tip changeset as version x.x.x; `git tag -a 'vX.Y.Z' -m \"Version X.Y.Z\"`\n * push tag to github\n * verify that azure pipelines complete\n * verify that `.tar.gz` sdist and binary wheels are available on PyPI\n\nAnnouncing:\n * It can be worth waiting a day for eager users to report critical bugs\n * Announce in scipy-user, vispy mailing list, twitter (@vispyproject)\n\n\"\"\"\n\nimport os\nimport sys\nfrom os import path as op\nfrom setuptools import setup, find_packages\n\nimport numpy as np\nfrom Cython.Build import cythonize\nfrom Cython.Distutils import Extension\n\nname = 'vispy'\ndescription = 'Interactive visualization in Python'\n\n# Special commands for building jupyter notebook extension\nhere = os.path.dirname(os.path.abspath(__file__))\nnode_root = os.path.join(here, 'js')\nis_repo = os.path.exists(os.path.join(here, '.git'))\n\nnpm_path = os.pathsep.join([\n os.path.join(node_root, 'node_modules', '.bin'),\n os.environ.get('PATH', os.defpath),\n])\n\n\ndef set_builtin(name, value):\n if isinstance(__builtins__, dict):\n __builtins__[name] = value\n else:\n setattr(__builtins__, name, value)\n\n\nextensions = [Extension('vispy.visuals.text._sdf_cpu',\n sources=[op.join('vispy', 'visuals', 'text', '_sdf_cpu.pyx')],\n include_dirs=[np.get_include()],\n cython_directives={\"language_level\": \"3\"},\n define_macros=[(\"NPY_NO_DEPRECATED_API\", \"NPY_1_7_API_VERSION\")],\n ),\n ]\n\ninstall_requires = ['numpy', 'freetype-py', 'hsluv', 'kiwisolver', 'packaging']\nif sys.version_info < (3, 9):\n install_requires.append(\"importlib-resources\")\n\nreadme = open('README.rst', 'r').read()\nsetup(\n name=name,\n use_scm_version={\n 'write_to': 'vispy/version.py',\n # uses setuptools_scm.version.get_local_dirty_tag (+dirty or empty string)\n 'local_scheme': 'dirty-tag',\n },\n author='Vispy contributors',\n author_email='[email protected]',\n license='BSD-3-Clause',\n url='http://vispy.org',\n download_url='https://pypi.python.org/pypi/vispy',\n keywords=[\n 'visualization',\n 'OpenGl',\n 'ES',\n 'medical',\n 'imaging',\n '3D',\n 'plotting',\n 'numpy',\n 'bigdata',\n 'ipython',\n 'jupyter',\n 'widgets',\n ],\n description=description,\n long_description=readme,\n long_description_content_type='text/x-rst',\n platforms='any',\n provides=['vispy'],\n python_requires='>=3.8',\n install_requires=install_requires,\n extras_require={\n 'ipython-static': ['ipython'],\n 'pyglet': 
['pyglet>=1.2'],\n 'pyqt5': ['pyqt5'],\n 'pyqt6': ['pyqt6'],\n 'pyside': ['PySide'],\n 'pyside2': ['PySide2'],\n 'pyside6': ['PySide6'],\n 'sdl2': ['PySDL2'],\n 'wx': ['wxPython'],\n 'tk': ['pyopengltk'],\n 'doc': ['pydata-sphinx-theme', 'numpydoc', 'sphinxcontrib-apidoc',\n 'sphinx-gallery', 'myst-parser', 'pillow', 'pytest',\n 'pyopengl'],\n 'io': ['meshio', 'Pillow'],\n },\n packages=find_packages(exclude=['make']),\n ext_modules=cythonize(extensions, language_level=3),\n package_dir={'vispy': 'vispy'},\n data_files=[],\n include_package_data=True,\n package_data={\n 'vispy': [op.join('io', '_data', '*'),\n op.join('app', 'tests', 'qt-designer.ui'),\n op.join('util', 'fonts', 'data', '*.ttf'),\n ],\n\n 'vispy.glsl': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.antialias': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.arrowheads': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.arrows': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.collections': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.colormaps': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.lines': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.markers': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.math': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.misc': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.transforms': ['*.vert', '*.frag', \"*.glsl\"],\n\n },\n zip_safe=False,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Education',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3.11',\n 'Framework :: IPython'\n ],\n)\n", "path": "setup.py"}]} | 2,402 | 105 |
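For context on how the new extra would be consumed once released: with `glfw` defined in `extras_require` as in the diff above, a packager or user could pull in the binding via `pip install "vispy[glfw]"` and then select the backend explicitly. The snippet below is a hypothetical sanity check, assuming the `glfw` package and a working OpenGL context are available; `vispy.use()` and `vispy.sys_info()` are the existing public helpers already referenced in the issue.

```python
# Hypothetical check after installing the new extra:
#     pip install "vispy[glfw]"
import vispy

vispy.use(app="glfw")     # select the GLFW application backend explicitly
print(vispy.sys_info())   # 'glfw' should now be reported among the available backends
```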
gh_patches_debug_23613 | rasdani/github-patches | git_diff | python-discord__bot-552 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Prepend emojis indicative of success for !eval
Comments on [org-issue-141](https://github.com/python-discord/organisation/issues/141) suggest something quite useful. 
Add emojis indicative of success when you use !eval.
- ✅ - if no error.
- ❌ - if exception.
- ❗ - for other cases, as it runs but there's no output
</issue>
<code>
[start of bot/cogs/snekbox.py]
1 import datetime
2 import logging
3 import re
4 import textwrap
5 from signal import Signals
6 from typing import Optional, Tuple
7
8 from discord.ext.commands import Bot, Cog, Context, command, guild_only
9
10 from bot.constants import Channels, Roles, URLs
11 from bot.decorators import in_channel
12 from bot.utils.messages import wait_for_deletion
13
14 log = logging.getLogger(__name__)
15
16 ESCAPE_REGEX = re.compile("[`\u202E\u200B]{3,}")
17 FORMATTED_CODE_REGEX = re.compile(
18 r"^\s*" # any leading whitespace from the beginning of the string
19 r"(?P<delim>(?P<block>```)|``?)" # code delimiter: 1-3 backticks; (?P=block) only matches if it's a block
20 r"(?(block)(?:(?P<lang>[a-z]+)\n)?)" # if we're in a block, match optional language (only letters plus newline)
21 r"(?:[ \t]*\n)*" # any blank (empty or tabs/spaces only) lines before the code
22 r"(?P<code>.*?)" # extract all code inside the markup
23 r"\s*" # any more whitespace before the end of the code markup
24 r"(?P=delim)" # match the exact same delimiter from the start again
25 r"\s*$", # any trailing whitespace until the end of the string
26 re.DOTALL | re.IGNORECASE # "." also matches newlines, case insensitive
27 )
28 RAW_CODE_REGEX = re.compile(
29 r"^(?:[ \t]*\n)*" # any blank (empty or tabs/spaces only) lines before the code
30 r"(?P<code>.*?)" # extract all the rest as code
31 r"\s*$", # any trailing whitespace until the end of the string
32 re.DOTALL # "." also matches newlines
33 )
34
35 MAX_PASTE_LEN = 1000
36 EVAL_ROLES = (Roles.helpers, Roles.moderator, Roles.admin, Roles.owner, Roles.rockstars, Roles.partners)
37
38
39 class Snekbox(Cog):
40 """Safe evaluation of Python code using Snekbox."""
41
42 def __init__(self, bot: Bot):
43 self.bot = bot
44 self.jobs = {}
45
46 async def post_eval(self, code: str) -> dict:
47 """Send a POST request to the Snekbox API to evaluate code and return the results."""
48 url = URLs.snekbox_eval_api
49 data = {"input": code}
50 async with self.bot.http_session.post(url, json=data, raise_for_status=True) as resp:
51 return await resp.json()
52
53 async def upload_output(self, output: str) -> Optional[str]:
54 """Upload the eval output to a paste service and return a URL to it if successful."""
55 log.trace("Uploading full output to paste service...")
56
57 if len(output) > MAX_PASTE_LEN:
58 log.info("Full output is too long to upload")
59 return "too long to upload"
60
61 url = URLs.paste_service.format(key="documents")
62 try:
63 async with self.bot.http_session.post(url, data=output, raise_for_status=True) as resp:
64 data = await resp.json()
65
66 if "key" in data:
67 return URLs.paste_service.format(key=data["key"])
68 except Exception:
69 # 400 (Bad Request) means there are too many characters
70 log.exception("Failed to upload full output to paste service!")
71
72 @staticmethod
73 def prepare_input(code: str) -> str:
74 """Extract code from the Markdown, format it, and insert it into the code template."""
75 match = FORMATTED_CODE_REGEX.fullmatch(code)
76 if match:
77 code, block, lang, delim = match.group("code", "block", "lang", "delim")
78 code = textwrap.dedent(code)
79 if block:
80 info = (f"'{lang}' highlighted" if lang else "plain") + " code block"
81 else:
82 info = f"{delim}-enclosed inline code"
83 log.trace(f"Extracted {info} for evaluation:\n{code}")
84 else:
85 code = textwrap.dedent(RAW_CODE_REGEX.fullmatch(code).group("code"))
86 log.trace(
87 f"Eval message contains unformatted or badly formatted code, "
88 f"stripping whitespace only:\n{code}"
89 )
90
91 return code
92
93 @staticmethod
94 def get_results_message(results: dict) -> Tuple[str, str]:
95 """Return a user-friendly message and error corresponding to the process's return code."""
96 stdout, returncode = results["stdout"], results["returncode"]
97 msg = f"Your eval job has completed with return code {returncode}"
98 error = ""
99
100 if returncode is None:
101 msg = "Your eval job has failed"
102 error = stdout.strip()
103 elif returncode == 128 + Signals.SIGKILL:
104 msg = "Your eval job timed out or ran out of memory"
105 elif returncode == 255:
106 msg = "Your eval job has failed"
107 error = "A fatal NsJail error occurred"
108 else:
109 # Try to append signal's name if one exists
110 try:
111 name = Signals(returncode - 128).name
112 msg = f"{msg} ({name})"
113 except ValueError:
114 pass
115
116 return msg, error
117
118 async def format_output(self, output: str) -> Tuple[str, Optional[str]]:
119 """
120 Format the output and return a tuple of the formatted output and a URL to the full output.
121
122 Prepend each line with a line number. Truncate if there are over 10 lines or 1000 characters
123 and upload the full output to a paste service.
124 """
125 log.trace("Formatting output...")
126
127 output = output.strip(" \n")
128 original_output = output # To be uploaded to a pasting service if needed
129 paste_link = None
130
131 if "<@" in output:
132 output = output.replace("<@", "<@\u200B") # Zero-width space
133
134 if "<!@" in output:
135 output = output.replace("<!@", "<!@\u200B") # Zero-width space
136
137 if ESCAPE_REGEX.findall(output):
138 return "Code block escape attempt detected; will not output result", paste_link
139
140 truncated = False
141 lines = output.count("\n")
142
143 if lines > 0:
144 output = output.split("\n")[:10] # Only first 10 cause the rest is truncated anyway
145 output = (f"{i:03d} | {line}" for i, line in enumerate(output, 1))
146 output = "\n".join(output)
147
148 if lines > 10:
149 truncated = True
150 if len(output) >= 1000:
151 output = f"{output[:1000]}\n... (truncated - too long, too many lines)"
152 else:
153 output = f"{output}\n... (truncated - too many lines)"
154 elif len(output) >= 1000:
155 truncated = True
156 output = f"{output[:1000]}\n... (truncated - too long)"
157
158 if truncated:
159 paste_link = await self.upload_output(original_output)
160
161 output = output.strip()
162 if not output:
163 output = "[No output]"
164
165 return output, paste_link
166
167 @command(name="eval", aliases=("e",))
168 @guild_only()
169 @in_channel(Channels.bot, bypass_roles=EVAL_ROLES)
170 async def eval_command(self, ctx: Context, *, code: str = None) -> None:
171 """
172 Run Python code and get the results.
173
174 This command supports multiple lines of code, including code wrapped inside a formatted code
175 block. We've done our best to make this safe, but do let us know if you manage to find an
176 issue with it!
177 """
178 if ctx.author.id in self.jobs:
179 await ctx.send(
180 f"{ctx.author.mention} You've already got a job running - "
181 "please wait for it to finish!"
182 )
183 return
184
185 if not code: # None or empty string
186 await ctx.invoke(self.bot.get_command("help"), "eval")
187 return
188
189 log.info(f"Received code from {ctx.author} for evaluation:\n{code}")
190
191 self.jobs[ctx.author.id] = datetime.datetime.now()
192 code = self.prepare_input(code)
193
194 try:
195 async with ctx.typing():
196 results = await self.post_eval(code)
197 msg, error = self.get_results_message(results)
198
199 if error:
200 output, paste_link = error, None
201 else:
202 output, paste_link = await self.format_output(results["stdout"])
203
204 msg = f"{ctx.author.mention} {msg}.\n\n```py\n{output}\n```"
205 if paste_link:
206 msg = f"{msg}\nFull output: {paste_link}"
207
208 response = await ctx.send(msg)
209 self.bot.loop.create_task(
210 wait_for_deletion(response, user_ids=(ctx.author.id,), client=ctx.bot)
211 )
212
213 log.info(f"{ctx.author}'s job had a return code of {results['returncode']}")
214 finally:
215 del self.jobs[ctx.author.id]
216
217
218 def setup(bot: Bot) -> None:
219 """Snekbox cog load."""
220 bot.add_cog(Snekbox(bot))
221 log.info("Cog loaded: Snekbox")
222
[end of bot/cogs/snekbox.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bot/cogs/snekbox.py b/bot/cogs/snekbox.py
--- a/bot/cogs/snekbox.py
+++ b/bot/cogs/snekbox.py
@@ -115,6 +115,16 @@
return msg, error
+ @staticmethod
+ def get_status_emoji(results: dict) -> str:
+ """Return an emoji corresponding to the status code or lack of output in result."""
+ if not results["stdout"].strip(): # No output
+ return ":warning:"
+ elif results["returncode"] == 0: # No error
+ return ":white_check_mark:"
+ else: # Exception
+ return ":x:"
+
async def format_output(self, output: str) -> Tuple[str, Optional[str]]:
"""
Format the output and return a tuple of the formatted output and a URL to the full output.
@@ -201,7 +211,8 @@
else:
output, paste_link = await self.format_output(results["stdout"])
- msg = f"{ctx.author.mention} {msg}.\n\n```py\n{output}\n```"
+ icon = self.get_status_emoji(results)
+ msg = f"{ctx.author.mention} {icon} {msg}.\n\n```py\n{output}\n```"
if paste_link:
msg = f"{msg}\nFull output: {paste_link}"
| {"golden_diff": "diff --git a/bot/cogs/snekbox.py b/bot/cogs/snekbox.py\n--- a/bot/cogs/snekbox.py\n+++ b/bot/cogs/snekbox.py\n@@ -115,6 +115,16 @@\n \n return msg, error\n \n+ @staticmethod\n+ def get_status_emoji(results: dict) -> str:\n+ \"\"\"Return an emoji corresponding to the status code or lack of output in result.\"\"\"\n+ if not results[\"stdout\"].strip(): # No output\n+ return \":warning:\"\n+ elif results[\"returncode\"] == 0: # No error\n+ return \":white_check_mark:\"\n+ else: # Exception\n+ return \":x:\"\n+\n async def format_output(self, output: str) -> Tuple[str, Optional[str]]:\n \"\"\"\n Format the output and return a tuple of the formatted output and a URL to the full output.\n@@ -201,7 +211,8 @@\n else:\n output, paste_link = await self.format_output(results[\"stdout\"])\n \n- msg = f\"{ctx.author.mention} {msg}.\\n\\n```py\\n{output}\\n```\"\n+ icon = self.get_status_emoji(results)\n+ msg = f\"{ctx.author.mention} {icon} {msg}.\\n\\n```py\\n{output}\\n```\"\n if paste_link:\n msg = f\"{msg}\\nFull output: {paste_link}\"\n", "issue": "Prepend emojis indicative of success for !eval\nComments on [org-issue-141](https://github.com/python-discord/organisation/issues/141) suggests something quite useful. \r\n\r\nAdd emojis indicative of success when you use !eval. \r\n\r\n- \u2705 - if no error.\r\n- \u274c - if exception.\r\n- \u2757 - for other cases, as it runs but there's no output\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import datetime\nimport logging\nimport re\nimport textwrap\nfrom signal import Signals\nfrom typing import Optional, Tuple\n\nfrom discord.ext.commands import Bot, Cog, Context, command, guild_only\n\nfrom bot.constants import Channels, Roles, URLs\nfrom bot.decorators import in_channel\nfrom bot.utils.messages import wait_for_deletion\n\nlog = logging.getLogger(__name__)\n\nESCAPE_REGEX = re.compile(\"[`\\u202E\\u200B]{3,}\")\nFORMATTED_CODE_REGEX = re.compile(\n r\"^\\s*\" # any leading whitespace from the beginning of the string\n r\"(?P<delim>(?P<block>```)|``?)\" # code delimiter: 1-3 backticks; (?P=block) only matches if it's a block\n r\"(?(block)(?:(?P<lang>[a-z]+)\\n)?)\" # if we're in a block, match optional language (only letters plus newline)\n r\"(?:[ \\t]*\\n)*\" # any blank (empty or tabs/spaces only) lines before the code\n r\"(?P<code>.*?)\" # extract all code inside the markup\n r\"\\s*\" # any more whitespace before the end of the code markup\n r\"(?P=delim)\" # match the exact same delimiter from the start again\n r\"\\s*$\", # any trailing whitespace until the end of the string\n re.DOTALL | re.IGNORECASE # \".\" also matches newlines, case insensitive\n)\nRAW_CODE_REGEX = re.compile(\n r\"^(?:[ \\t]*\\n)*\" # any blank (empty or tabs/spaces only) lines before the code\n r\"(?P<code>.*?)\" # extract all the rest as code\n r\"\\s*$\", # any trailing whitespace until the end of the string\n re.DOTALL # \".\" also matches newlines\n)\n\nMAX_PASTE_LEN = 1000\nEVAL_ROLES = (Roles.helpers, Roles.moderator, Roles.admin, Roles.owner, Roles.rockstars, Roles.partners)\n\n\nclass Snekbox(Cog):\n \"\"\"Safe evaluation of Python code using Snekbox.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.jobs = {}\n\n async def post_eval(self, code: str) -> dict:\n \"\"\"Send a POST request to the Snekbox API to evaluate code and return the results.\"\"\"\n url = URLs.snekbox_eval_api\n data = {\"input\": code}\n async with self.bot.http_session.post(url, json=data, raise_for_status=True) as resp:\n return await 
resp.json()\n\n async def upload_output(self, output: str) -> Optional[str]:\n \"\"\"Upload the eval output to a paste service and return a URL to it if successful.\"\"\"\n log.trace(\"Uploading full output to paste service...\")\n\n if len(output) > MAX_PASTE_LEN:\n log.info(\"Full output is too long to upload\")\n return \"too long to upload\"\n\n url = URLs.paste_service.format(key=\"documents\")\n try:\n async with self.bot.http_session.post(url, data=output, raise_for_status=True) as resp:\n data = await resp.json()\n\n if \"key\" in data:\n return URLs.paste_service.format(key=data[\"key\"])\n except Exception:\n # 400 (Bad Request) means there are too many characters\n log.exception(\"Failed to upload full output to paste service!\")\n\n @staticmethod\n def prepare_input(code: str) -> str:\n \"\"\"Extract code from the Markdown, format it, and insert it into the code template.\"\"\"\n match = FORMATTED_CODE_REGEX.fullmatch(code)\n if match:\n code, block, lang, delim = match.group(\"code\", \"block\", \"lang\", \"delim\")\n code = textwrap.dedent(code)\n if block:\n info = (f\"'{lang}' highlighted\" if lang else \"plain\") + \" code block\"\n else:\n info = f\"{delim}-enclosed inline code\"\n log.trace(f\"Extracted {info} for evaluation:\\n{code}\")\n else:\n code = textwrap.dedent(RAW_CODE_REGEX.fullmatch(code).group(\"code\"))\n log.trace(\n f\"Eval message contains unformatted or badly formatted code, \"\n f\"stripping whitespace only:\\n{code}\"\n )\n\n return code\n\n @staticmethod\n def get_results_message(results: dict) -> Tuple[str, str]:\n \"\"\"Return a user-friendly message and error corresponding to the process's return code.\"\"\"\n stdout, returncode = results[\"stdout\"], results[\"returncode\"]\n msg = f\"Your eval job has completed with return code {returncode}\"\n error = \"\"\n\n if returncode is None:\n msg = \"Your eval job has failed\"\n error = stdout.strip()\n elif returncode == 128 + Signals.SIGKILL:\n msg = \"Your eval job timed out or ran out of memory\"\n elif returncode == 255:\n msg = \"Your eval job has failed\"\n error = \"A fatal NsJail error occurred\"\n else:\n # Try to append signal's name if one exists\n try:\n name = Signals(returncode - 128).name\n msg = f\"{msg} ({name})\"\n except ValueError:\n pass\n\n return msg, error\n\n async def format_output(self, output: str) -> Tuple[str, Optional[str]]:\n \"\"\"\n Format the output and return a tuple of the formatted output and a URL to the full output.\n\n Prepend each line with a line number. Truncate if there are over 10 lines or 1000 characters\n and upload the full output to a paste service.\n \"\"\"\n log.trace(\"Formatting output...\")\n\n output = output.strip(\" \\n\")\n original_output = output # To be uploaded to a pasting service if needed\n paste_link = None\n\n if \"<@\" in output:\n output = output.replace(\"<@\", \"<@\\u200B\") # Zero-width space\n\n if \"<!@\" in output:\n output = output.replace(\"<!@\", \"<!@\\u200B\") # Zero-width space\n\n if ESCAPE_REGEX.findall(output):\n return \"Code block escape attempt detected; will not output result\", paste_link\n\n truncated = False\n lines = output.count(\"\\n\")\n\n if lines > 0:\n output = output.split(\"\\n\")[:10] # Only first 10 cause the rest is truncated anyway\n output = (f\"{i:03d} | {line}\" for i, line in enumerate(output, 1))\n output = \"\\n\".join(output)\n\n if lines > 10:\n truncated = True\n if len(output) >= 1000:\n output = f\"{output[:1000]}\\n... 
(truncated - too long, too many lines)\"\n else:\n output = f\"{output}\\n... (truncated - too many lines)\"\n elif len(output) >= 1000:\n truncated = True\n output = f\"{output[:1000]}\\n... (truncated - too long)\"\n\n if truncated:\n paste_link = await self.upload_output(original_output)\n\n output = output.strip()\n if not output:\n output = \"[No output]\"\n\n return output, paste_link\n\n @command(name=\"eval\", aliases=(\"e\",))\n @guild_only()\n @in_channel(Channels.bot, bypass_roles=EVAL_ROLES)\n async def eval_command(self, ctx: Context, *, code: str = None) -> None:\n \"\"\"\n Run Python code and get the results.\n\n This command supports multiple lines of code, including code wrapped inside a formatted code\n block. We've done our best to make this safe, but do let us know if you manage to find an\n issue with it!\n \"\"\"\n if ctx.author.id in self.jobs:\n await ctx.send(\n f\"{ctx.author.mention} You've already got a job running - \"\n \"please wait for it to finish!\"\n )\n return\n\n if not code: # None or empty string\n await ctx.invoke(self.bot.get_command(\"help\"), \"eval\")\n return\n\n log.info(f\"Received code from {ctx.author} for evaluation:\\n{code}\")\n\n self.jobs[ctx.author.id] = datetime.datetime.now()\n code = self.prepare_input(code)\n\n try:\n async with ctx.typing():\n results = await self.post_eval(code)\n msg, error = self.get_results_message(results)\n\n if error:\n output, paste_link = error, None\n else:\n output, paste_link = await self.format_output(results[\"stdout\"])\n\n msg = f\"{ctx.author.mention} {msg}.\\n\\n```py\\n{output}\\n```\"\n if paste_link:\n msg = f\"{msg}\\nFull output: {paste_link}\"\n\n response = await ctx.send(msg)\n self.bot.loop.create_task(\n wait_for_deletion(response, user_ids=(ctx.author.id,), client=ctx.bot)\n )\n\n log.info(f\"{ctx.author}'s job had a return code of {results['returncode']}\")\n finally:\n del self.jobs[ctx.author.id]\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Snekbox cog load.\"\"\"\n bot.add_cog(Snekbox(bot))\n log.info(\"Cog loaded: Snekbox\")\n", "path": "bot/cogs/snekbox.py"}]} | 3,415 | 322 |
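Because `get_status_emoji` in the diff above is a plain `@staticmethod`, its precedence rules can be illustrated without a running bot. The sketch below assumes the cog module imports cleanly (i.e. the bot's dependencies such as discord.py are installed); the dictionaries mimic Snekbox API results.

```python
from bot.cogs.snekbox import Snekbox

# Non-empty output and return code 0 -> success
assert Snekbox.get_status_emoji({"stdout": "4\n", "returncode": 0}) == ":white_check_mark:"
# Non-zero return code -> exception
assert Snekbox.get_status_emoji({"stdout": "Traceback ...", "returncode": 1}) == ":x:"
# Empty output takes precedence over the return code -> warning
assert Snekbox.get_status_emoji({"stdout": "", "returncode": 0}) == ":warning:"
```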